├── .dockerignore
├── .github
│   └── workflows
│       ├── ci-test.yml
│       ├── codeql-analysis.yml
│       ├── config
│       │   └── whispers-config.yml
│       ├── docker-extras.yml
│       ├── docker.yml
│       ├── pypi.yaml
│       ├── secrets-scan.yml
│       ├── stale.yml
│       ├── test-yaml-lint.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AUTHORS
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── MAINTAINERS
├── Makefile
├── README.md
├── bin
│   ├── gen_pcap_manifest
│   └── poseidon
├── config
│   ├── poseidon.config
│   ├── rules.yaml
│   └── volos_config.yaml
├── docker-compose.yaml
├── docs
│   ├── img
│   │   ├── Rookies16Badge_1.png
│   │   ├── faucet.png
│   │   └── poseidon-logo.png
│   └── poseidon_stats.json
├── helpers
│   ├── api
│   │   └── Dockerfile
│   ├── faucet
│   │   ├── dashboards.yaml
│   │   ├── docker-compose-faucet.yaml
│   │   ├── docker-compose-monitoring.yaml
│   │   ├── docker-compose.yaml
│   │   ├── faucet.rules.yml
│   │   ├── gauge.yaml
│   │   └── prometheus-docker-compose.yml
│   └── rabbitmq
│       └── Dockerfile
├── lib
│   ├── poseidon_api
│   │   ├── poetry.lock
│   │   ├── poseidon_api
│   │   │   ├── __init__.py
│   │   │   ├── api.py
│   │   │   ├── data.py
│   │   │   └── routes.py
│   │   ├── pyproject.toml
│   │   └── tests
│   │       └── test_api.py
│   ├── poseidon_cli
│   │   ├── poetry.lock
│   │   ├── poseidon_cli
│   │   │   ├── __init__.py
│   │   │   ├── __main__.py
│   │   │   ├── cli.py
│   │   │   └── commands.py
│   │   ├── pyproject.toml
│   │   └── tests
│   │       ├── faucetconfgetsetter.py
│   │       ├── test_cli.py
│   │       └── test_commands.py
│   └── poseidon_core
│       ├── poetry.lock
│       ├── poseidon_core
│       │   ├── __init__.py
│       │   ├── __main__.py
│       │   ├── constants.py
│       │   ├── controllers
│       │   │   ├── __init__.py
│       │   │   ├── faucet
│       │   │   │   ├── __init__.py
│       │   │   │   ├── config.py
│       │   │   │   └── faucet.py
│       │   │   ├── sdnconnect.py
│       │   │   └── sdnevents.py
│       │   ├── helpers
│       │   │   ├── __init__.py
│       │   │   ├── actions.py
│       │   │   ├── collector.py
│       │   │   ├── config.py
│       │   │   ├── endpoint.py
│       │   │   ├── exception_decor.py
│       │   │   ├── log.py
│       │   │   ├── metadata.py
│       │   │   ├── prometheus.py
│       │   │   └── rabbit.py
│       │   ├── main.py
│       │   ├── metadata
│       │   │   └── nmap-mac-prefixes.txt
│       │   └── operations
│       │       ├── __init__.py
│       │       ├── monitor.py
│       │       ├── primitives
│       │       │   ├── __init__.py
│       │       │   ├── acl.py
│       │       │   ├── coprocess.py
│       │       │   ├── meter.py
│       │       │   └── mirror.py
│       │       └── volos
│       │           ├── __init__.py
│       │           ├── acls.py
│       │           └── volos.py
│       ├── pyproject.toml
│       └── tests
│           ├── faucetconfgetsetter.py
│           ├── sample_acls.yaml
│           ├── sample_content.txt
│           ├── sample_faucet_config.yaml
│           ├── test_actions.py
│           ├── test_collector.py
│           ├── test_endpoint.py
│           ├── test_faucet.py
│           ├── test_log.py
│           ├── test_main.py
│           ├── test_prometheus.py
│           └── test_volos.py
├── release
│   ├── update_docker_compose.py
│   └── update_workers_json.py
├── renovate.json
├── tests
│   ├── test-e2e-ovs.yml
│   ├── test-ipv4.pcap
│   ├── test_e2e.sh
│   ├── test_gen_pcap_manifest.py
│   └── test_worker.py
└── workers
    ├── Dockerfile
    ├── __init__.py
    ├── requirements.txt
    ├── worker.py
    └── workers.json
/.dockerignore: -------------------------------------------------------------------------------- 1 | .gitignore 2 | .pre-commit-config.yaml 3 | CHANGELOG.md 4 | CODE_OF_CONDUCT.md 5 | CONTRIBUTING.md 6 | Dockerfile 7 | LICENSE 8 | MAINTAINERS 9 | Makefile 10 | helpers/* 11 | -------------------------------------------------------------------------------- /.github/workflows/ci-test.yml: -------------------------------------------------------------------------------- 1 | name: CI Test 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | 7 | ci-test: 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | python-version: [ '3.8', '3.9', '3.10' ] 12 | 13 | steps: 14 | - uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 1 17 | 18 | - name: Set up Python 19 | uses: 
actions/setup-python@v4 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | 23 | - name: Install Poetry 24 | uses: snok/install-poetry@v1 25 | with: 26 | virtualenvs-create: true 27 | virtualenvs-in-project: true 28 | version: 1.4.2 29 | - name: Load cached venv 30 | id: cached-poetry-dependencies 31 | uses: actions/cache@v3 32 | with: 33 | path: .venv 34 | key: venv-${{ runner.os }}-${{ matrix.python-version }}-${{ hashFiles('**/poetry.lock') }} 35 | - name: Install dependencies 36 | if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' 37 | run: | 38 | sudo apt-get update && sudo apt-get install -y --no-install-recommends gcc git g++ libev-dev libyaml-dev 39 | cd lib/poseidon_api && poetry install --no-interaction && cd ../../ 40 | cd lib/poseidon_cli && poetry install --no-interaction && cd ../../ 41 | cd lib/poseidon_core && poetry install --no-interaction && cd ../../ 42 | 43 | - name: Code Quality - Black 44 | run: | 45 | cd lib/poseidon_api && poetry run black . --check && cd ../../ 46 | cd lib/poseidon_cli && poetry run black . --check && cd ../../ 47 | cd lib/poseidon_core && poetry run black . --check && cd ../../ 48 | 49 | - name: Code Quality - Pytype 50 | run: | 51 | cd lib/poseidon_api && poetry run pytype poseidon_api/ && cd ../../ 52 | cd lib/poseidon_cli && poetry run pytype poseidon_cli/ && cd ../../ 53 | cd lib/poseidon_core && poetry run pytype poseidon_core/ && cd ../../ 54 | 55 | - name: Code Quality - Pylint 56 | run: | 57 | cd lib/poseidon_api && poetry run pylint --fail-under=4 poseidon_api/ && cd ../../ 58 | cd lib/poseidon_cli && poetry run pylint --fail-under=4 poseidon_cli/ && cd ../../ 59 | cd lib/poseidon_core && poetry run pylint --fail-under=4 poseidon_core/ && cd ../../ 60 | 61 | - name: Test with pytest 62 | # https://github.com/python-poetry/poetry/issues/4511 63 | # TODO: setuptools needed by c65faucet, but poetry < 1.2.0 does not allow adding setuptools. 64 | run: | 65 | export POSEIDON_CONFIG=$PWD/config/poseidon.config 66 | cd lib/poseidon_api && poetry run pip install 'setuptools==64.0.3' && poetry run pytest --cov-report term-missing --cov=. --cov-report=xml && cd ../../ 67 | cd lib/poseidon_cli && poetry run pip install 'setuptools==64.0.3' && poetry run pytest --cov-report term-missing --cov=. --cov-report=xml && cd ../../ 68 | cd lib/poseidon_core && poetry run pip install 'setuptools==64.0.3' && poetry run pytest --cov-report term-missing --cov=. --cov-report=xml && cd ../../ 69 | - name: Upload coverage 70 | uses: codecov/codecov-action@v4 71 | if: github.repository == 'faucetsdn/poseidon' && github.ref_name == 'main' 72 | with: 73 | token: ${{ secrets.CODECOV_TOKEN }} 74 | files: /home/runner/work/poseidon/poseidon/lib/poseidon_api/coverage.xml,/home/runner/work/poseidon/poseidon/lib/poseidon_cli/coverage.xml,/home/runner/work/poseidon/poseidon/lib/poseidon_core/coverage.xml 75 | fail_ci_if_error: true 76 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. 
Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '15 9 * * 3' 22 | 23 | jobs: 24 | analyze: 25 | permissions: 26 | actions: read 27 | contents: write 28 | security-events: write 29 | name: Analyze 30 | runs-on: ubuntu-latest 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v4 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v3 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v3 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 
60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v3 72 | -------------------------------------------------------------------------------- /.github/workflows/config/whispers-config.yml: -------------------------------------------------------------------------------- 1 | include: 2 | files: 3 | - "**/*" 4 | 5 | exclude: 6 | files: 7 | - __pycache__|\.eggs|build|dev|\.vscode|\.git|\.github 8 | - .*/(locale|spec|test|mock)s?/ 9 | - integration|node_modules 10 | - (package(-lock)?|npm-shrinkwrap)\.json 11 | 12 | keys: 13 | - .*(public|project).* 14 | 15 | values: 16 | - ^(true|false|yes|no|1|0)$ 17 | - .*_(user|password|token|key|placeholder|name)$ 18 | - ^aws_(access_key_id|secret_access_key|session_token)$ 19 | - ^arn:aws:.* 20 | - ^((cn?trl|alt|shift|del|ins|esc|tab|f[\d]+) ?[\+_\-\\/] ?)+[\w]+$ -------------------------------------------------------------------------------- /.github/workflows/docker-extras.yml: -------------------------------------------------------------------------------- 1 | name: buildx-extras 2 | 3 | on: 4 | push: 5 | branches: main 6 | tags: 'v*' 7 | 8 | jobs: 9 | buildx-extras: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | - name: Get the version 18 | id: get_version 19 | run: echo ::set-output name=VERSION::$(echo $GITHUB_REF | cut -d / -f 3) 20 | - name: Change for main 21 | id: change_version 22 | run: if [ "${{ steps.get_version.outputs.VERSION }}" == "main" ]; then echo ::set-output name=VERSION::latest; else echo ::set-output name=VERSION::${{ steps.get_version.outputs.VERSION }}; fi 23 | - name: Set up qemu 24 | uses: docker/setup-qemu-action@v2 25 | with: 26 | platforms: all 27 | - name: Set up Docker Buildx 28 | id: buildx 29 | uses: docker/setup-buildx-action@v2 30 | with: 31 | version: latest 32 | - name: Docker Login 33 | env: 34 | DOCKER_PASSWORD: ${{ secrets.DOCKER_TOKEN }} 35 | run: | 36 | echo "${DOCKER_PASSWORD}" | docker login --username "${{ secrets.DOCKER_USERNAME }}" --password-stdin 37 | if: github.repository == 'iqtlabs/poseidon' && github.event_name == 'push' 38 | 39 | - name: Build and push platforms 40 | env: 41 | DOCKER_CLI_EXPERIMENTAL: enabled 42 | run: | 43 | docker buildx build \ 44 | --platform linux/amd64,linux/arm64 \ 45 | --push \ 46 | -t iqtlabs/rabbitmq:${{ steps.change_version.outputs.VERSION }} helpers/rabbitmq && \ 47 | docker buildx build \ 48 | --platform linux/amd64,linux/arm64 \ 49 | --push \ 50 | -t iqtlabs/poseidon-api:${{ steps.change_version.outputs.VERSION }} helpers/api 51 | if: github.repository == 'iqtlabs/poseidon' && github.event_name == 'push' 52 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: buildx 2 | 3 | on: 4 | push: 5 | branches: main 6 | tags: 'v*' 7 | 8 | jobs: 9 | buildx: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | - name: Get the version 18 | id: get_version 19 | run: echo ::set-output name=VERSION::$(echo $GITHUB_REF | cut -d / -f 3) 20 | - name: Change for main 21 | id: change_version 
22 | run: if [ "${{ steps.get_version.outputs.VERSION }}" == "main" ]; then echo ::set-output name=VERSION::latest; else echo ::set-output name=VERSION::${{ steps.get_version.outputs.VERSION }}; fi 23 | - name: Set up qemu 24 | uses: docker/setup-qemu-action@v2 25 | with: 26 | platforms: all 27 | - name: Set up Docker Buildx 28 | id: buildx 29 | uses: docker/setup-buildx-action@v2 30 | with: 31 | version: latest 32 | - name: Docker Login 33 | env: 34 | DOCKER_PASSWORD: ${{ secrets.DOCKER_TOKEN }} 35 | run: | 36 | echo "${DOCKER_PASSWORD}" | docker login --username "${{ secrets.DOCKER_USERNAME }}" --password-stdin 37 | if: github.repository == 'iqtlabs/poseidon' && github.event_name == 'push' 38 | 39 | - name: Build and push platforms 40 | env: 41 | DOCKER_CLI_EXPERIMENTAL: enabled 42 | run: | 43 | docker buildx build \ 44 | --platform linux/amd64,linux/arm64 \ 45 | --push \ 46 | -t iqtlabs/poseidon:${{ steps.change_version.outputs.VERSION }} . && \ 47 | docker buildx build \ 48 | --platform linux/amd64,linux/arm64 \ 49 | --push \ 50 | -t iqtlabs/poseidon-workers:${{ steps.change_version.outputs.VERSION }} workers 51 | if: github.repository == 'iqtlabs/poseidon' && github.event_name == 'push' 52 | -------------------------------------------------------------------------------- /.github/workflows/pypi.yaml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | branches: main 6 | tags: 'v*.*.*' 7 | 8 | jobs: 9 | release: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v3 15 | with: 16 | fetch-depth: 0 17 | - name: Install Poetry 18 | uses: snok/install-poetry@v1 19 | with: 20 | virtualenvs-create: true 21 | virtualenvs-in-project: true 22 | - name: Build and publish packages 23 | id: build_and_publish_packages 24 | run: | 25 | sudo apt-get update && \ 26 | sudo apt-get install -yq --no-install-recommends curl gcc g++ libev-dev libyaml-dev python3-pip python3.8 python3.8-dev && \ 27 | cd lib/poseidon_api && poetry build && poetry publish -u ${{ secrets.PYPI_USERNAME }} -p ${{ secrets.PYPI_TOKEN }} && cd ../../ && \ 28 | cd lib/poseidon_cli && poetry build && poetry publish -u ${{ secrets.PYPI_USERNAME }} -p ${{ secrets.PYPI_TOKEN }} && cd ../../ && \ 29 | cd lib/poseidon_core && poetry build && poetry publish -u ${{ secrets.PYPI_USERNAME }} -p ${{ secrets.PYPI_TOKEN }} && cd ../../ 30 | if: github.repository == 'iqtlabs/poseidon' && github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') 31 | -------------------------------------------------------------------------------- /.github/workflows/secrets-scan.yml: -------------------------------------------------------------------------------- 1 | name: secrets 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | scan: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v3 10 | - name: scan 11 | run: | 12 | export DEBIAN_FRONTEND=noninteractive && \ 13 | echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections && \ 14 | sudo apt-get update && \ 15 | python3 -m pip install --upgrade pip && \ 16 | pip3 install whispers && \ 17 | mkdir /home/runner/reports/ && \ 18 | whispers --severity BLOCKER,CRITICAL -o /home/runner/reports/whispers.json -c ${GITHUB_WORKSPACE}/.github/workflows/config/whispers-config.yml ${GITHUB_WORKSPACE} && \ 19 | echo "::set-output name=found-count::$(wc -l /home/runner/reports/whispers.json | cut -d' ' -f1)" 20 | - name: Fail if found 21 | if: 
steps.scan.outputs.found-count != 0 22 | uses: actions/github-script@v6 23 | with: 24 | script: | 25 | echo {{steps.scan.outputs.found-count}} && \ 26 | core.setFailed('Secrets found. Please check the uploaded report') 27 | - name: Upload scan reports 28 | uses: actions/upload-artifact@v4.6.0 29 | if: failure() 30 | with: 31 | name: whispers-report 32 | path: /home/runner/reports/whispers.json 33 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: 'Close stale issues and PRs' 2 | on: 3 | schedule: 4 | - cron: '30 1 * * *' 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v8 11 | with: 12 | stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 13 | stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.' 14 | close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.' 15 | close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.' 16 | days-before-issue-stale: 30 17 | days-before-pr-stale: 45 18 | days-before-issue-close: 5 19 | days-before-pr-close: 10 20 | exempt-issue-labels: 'dependencies' 21 | -------------------------------------------------------------------------------- /.github/workflows/test-yaml-lint.yml: -------------------------------------------------------------------------------- 1 | name: YAML Lint 2 | on: [push, pull_request] 3 | jobs: 4 | yamllint: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v3 8 | - name: yaml-lint 9 | uses: ibiqlik/action-yamllint@v3 10 | with: 11 | config_data: "{extends: default, ignore: .github}" 12 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | test: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - uses: actions/checkout@v3 12 | with: 13 | fetch-depth: 0 14 | - name: shell_test 15 | run: | 16 | wget "https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz" && \ 17 | tar --xz -xvf "shellcheck-stable.linux.x86_64.tar.xz" && \ 18 | shellcheck() { "shellcheck-stable/shellcheck" "$@"; } && \ 19 | shellcheck --version && \ 20 | /bin/bash --version && /bin/bash -n bin/poseidon && shellcheck -x -e SC2016 -e SC2119 -e SC2129 -e SC2001 -e SC2038 -e SC2044 bin/poseidon 21 | - name: e2e_test 22 | run: | 23 | export DEBIAN_FRONTEND=noninteractive && \ 24 | echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections 25 | sudo apt-get update && sudo apt-get install -y libev-dev tshark wget jq tcpreplay tcpdump 26 | docker system prune -a -f --volumes && ./tests/test_e2e.sh 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | pytype_output/ 4 | .pytype 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Editor files 12 | *~ 13 | 14 | # Distribution / packaging 15 | .Python 16 | env/ 17 | 
build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # dotenv 88 | .env 89 | 90 | # virtualenv 91 | .venv 92 | venv/ 93 | ENV/ 94 | 95 | # Spyder project settings 96 | .spyderproject 97 | 98 | # Rope project settings 99 | .ropeproject 100 | 101 | # vim temp files 102 | *.swp 103 | *.swo 104 | 105 | .mypy_cache 106 | 107 | # IntelliJ IDE files 108 | .idea/ 109 | faucet.iml 110 | *.bak 111 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | default_language_version: 3 | python: python3 4 | repos: 5 | - repo: https://github.com/pre-commit/pre-commit-hooks 6 | rev: v4.0.1 7 | hooks: 8 | - id: trailing-whitespace 9 | - id: end-of-file-fixer 10 | - id: check-case-conflict 11 | - id: check-json 12 | - id: pretty-format-json 13 | args: ['--autofix'] 14 | - id: double-quote-string-fixer 15 | - id: check-yaml 16 | - repo: https://github.com/asottile/reorder_python_imports 17 | rev: v2.6.0 18 | hooks: 19 | - id: reorder-python-imports 20 | - repo: https://github.com/pre-commit/mirrors-autopep8 21 | rev: v1.5.7 22 | hooks: 23 | - id: autopep8 24 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | # This file lists all individuals having contributed content to the repository. 2 | # 3 | # For a list of active project maintainers, see the MAINTAINERS file. 
4 | # 5 | Abhinav Ganesh 6 | bradh 7 | Charlie Lewis 8 | Cory Stephenson 9 | csessine 10 | Daniel Popescu 11 | David Grossman 12 | Eugen Wybitul 13 | Greg Shipley 14 | James Day 15 | Jeff Wang 16 | JJ Ben-Joseph 17 | Joe Adams 18 | Joel V Zachariah 19 | Joris 20 | Josh Bailey 21 | kylemvz 22 | lanhamt 23 | Lee Skillen 24 | lilchurro 25 | Luka Atanasovski 26 | MikhailShel 27 | Misha 28 | Mohammed Alshaboti 29 | Nguyen Duy Hai 30 | ns61817 31 | Robert Caudill 32 | Rounaq Jhunjhunu wala 33 | Ryan Ashley 34 | sanchezg 35 | scottkelso 36 | sourav sarkar 37 | T K Sourabh 38 | Vivek Singh 39 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at clewis@iqt.org. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Poseidon 2 | 3 | Want to hack on Poseidon? Awesome! Here are instructions to get you started. 4 | They are probably not perfect, please let us know if anything feels 5 | wrong or incomplete. 6 | 7 | ## Contribution guidelines 8 | 9 | ### Pull requests are always welcome 10 | 11 | We are always thrilled to receive pull requests, and do our best to 12 | process them as fast as possible. Not sure if that typo is worth a pull 13 | request? Do it! We will appreciate it. 14 | 15 | If your pull request is not accepted on the first try, don't be 16 | discouraged! If there's a problem with the implementation, hopefully you 17 | received feedback on what to improve. 18 | 19 | We're trying very hard to keep Poseidon lean and focused. We don't want it 20 | to do everything for everybody. This means that we might decide against 21 | incorporating a new feature. However, there might be a way to implement 22 | that feature *on top of* poseidon. 23 | 24 | ### Create issues... 25 | 26 | Any significant improvement should be documented as [a github 27 | issue](https://github.com/IQTLabs/poseidon/issues) before anybody 28 | starts working on it. 29 | 30 | ### ...but check for existing issues first! 31 | 32 | Please take a moment to check that an issue doesn't already exist 33 | documenting your bug report or improvement proposal. If it does, it 34 | never hurts to add a quick "+1" or "I have this problem too". This will 35 | help prioritize the most common problems and requests. 36 | 37 | ### Conventions 38 | 39 | Fork the repo and make changes on your fork in a feature branch. 40 | 41 | Make sure you include relevant updates or additions to documentation and 42 | tests when creating or modifying features. 43 | 44 | Pull requests descriptions should be as clear as possible and include a 45 | reference to all the issues that they address. 46 | 47 | Code review comments may be added to your pull request. Discuss, then make the 48 | suggested modifications and push additional commits to your feature branch. Be 49 | sure to post a comment after pushing. The new commits will show up in the pull 50 | request automatically, but the reviewers will not be notified unless you 51 | comment. 52 | 53 | Before the pull request is merged, make sure that you squash your commits into 54 | logical units of work using `git rebase -i` and `git push -f`. After every 55 | commit the test suite should be passing. Include documentation changes in the 56 | same commit so that a revert would remove all traces of the feature or fix. 
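A minimal sketch of that squash workflow, assuming a hypothetical feature branch named `my-feature` based on `main`:

```bash
git checkout my-feature          # the branch backing the pull request (name is illustrative)
git rebase -i main               # mark follow-up commits as "squash" or "fixup" in the editor
git push -f origin my-feature    # force-push the rewritten history to update the PR
```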
57 | 58 | Commits that fix or close an issue should include a reference like `Closes #XXX` 59 | or `Fixes #XXX`, which will automatically close the issue when merged. 60 | 61 | Add your name to the AUTHORS file, but make sure the list is sorted and your 62 | name and email address match your git configuration. The AUTHORS file is 63 | regenerated occasionally from the git commit history, so a mismatch may result 64 | in your changes being overwritten. 65 | 66 | ## Decision process 67 | 68 | ### How are decisions made? 69 | 70 | Short answer: with pull requests to the poseidon repository. 71 | 72 | All decisions affecting poseidon, big and small, follow the same 3 steps: 73 | 74 | * Step 1: Open a pull request. Anyone can do this. 75 | 76 | * Step 2: Discuss the pull request. Anyone can do this. 77 | 78 | * Step 3: Accept or refuse a pull request. A maintainer does this. 79 | 80 | 81 | ### How can I become a maintainer? 82 | 83 | * Step 1: learn the code inside out 84 | * Step 2: make yourself useful by contributing code, bugfixes, support etc. 85 | 86 | Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. 87 | You don't have to be a maintainer to make a difference on the project! 88 | 89 | ### What are a maintainer's responsibility? 90 | 91 | It is every maintainer's responsibility to: 92 | 93 | * 1) Deliver prompt feedback and decisions on pull requests. 94 | * 2) Be available to anyone with questions, bug reports, criticism etc. on poseidon. 95 | 96 | ### How is this process changed? 97 | 98 | Just like everything else: by making a pull request :) 99 | 100 | *Derivative work from [Docker](https://github.com/moby/moby/blob/master/CONTRIBUTING.md).* 101 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim 2 | LABEL maintainer="Charlie Lewis " 3 | LABEL poseidon.namespace="primary" 4 | 5 | ENV PYTHONUNBUFFERED 1 6 | 7 | COPY . /poseidon 8 | WORKDIR /poseidon 9 | 10 | ENV PATH="${PATH}:/root/.local/bin" 11 | RUN apt-get update && apt-get install -y --no-install-recommends curl gcc git g++ libev-dev libyaml-dev tini && \ 12 | curl -sSL https://install.python-poetry.org | python3 - --version 1.4.2 && \ 13 | poetry config virtualenvs.create false && \ 14 | cd lib/poseidon_api && poetry install --no-interaction --no-ansi && poetry build && cd ../../ && \ 15 | cd lib/poseidon_cli && poetry install --no-interaction --no-ansi && poetry build && cd ../../ && \ 16 | cd lib/poseidon_core && poetry install --no-interaction --no-ansi && poetry build && cd ../../ && \ 17 | apt-get purge -y gcc g++ && apt -y autoremove --purge && rm -rf /var/cache/* /root/.cache/* 18 | 19 | HEALTHCHECK --interval=15s --timeout=15s \ 20 | CMD curl --silent --fail http://localhost:9304/ || exit 1 21 | 22 | RUN mkdir -p /opt/poseidon 23 | RUN mv /poseidon/config/poseidon.config /opt/poseidon/poseidon.config 24 | ENV POSEIDON_CONFIG /opt/poseidon/poseidon.config 25 | 26 | CMD tini -s -- poseidon-core 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 
180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright (c) 2016-2022 IQT Labs LLC, All Rights Reserved. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | Charlie Lewis 2 | Josh Bailey 3 | rashley-iqt 4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL:=/bin/bash -O extglob -c 2 | TAG=poseidon 3 | VERSION=$(shell cat VERSION) 4 | 5 | build_poseidon: 6 | docker build -t $(TAG) . 7 | 8 | .PHONY: build_poseidon 9 | -------------------------------------------------------------------------------- /bin/gen_pcap_manifest: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import csv 4 | import glob 5 | import gzip 6 | import io 7 | import ipaddress 8 | import os 9 | import subprocess 10 | import sys 11 | try: 12 | import netaddr 13 | except ImportError: 14 | print('Please install netaddr with "pip3 install netaddr"') 15 | sys.exit(1) 16 | 17 | 18 | BCAST_EUI = netaddr.EUI('ff:ff:ff:ff:ff:ff', dialect=netaddr.mac_unix_expanded) 19 | 20 | 21 | def get_pcap_mac_ips(pcap_dirs): 22 | pcaps = [] 23 | for pcap_dir in pcap_dirs: 24 | if os.path.isdir(pcap_dir): 25 | pcaps.extend([ 26 | pcap for pcap in glob.glob(os.path.join(pcap_dir, '**/*cap'), recursive=True) 27 | if os.path.isfile(pcap)]) 28 | pcap_pairs = {} 29 | for pcap in pcaps: 30 | print(f'Processing {pcap}...') 31 | tshark_args = ['tshark', '-T', 'fields', '-r', pcap, '-s', '256'] 32 | fields = ('eth.src', 'eth.dst', 'ipv6.src_host', 33 | 'ipv6.dst_host', 'ip.src', 'ip.dst') 34 | for field in fields: 35 | tshark_args.extend(['-e', field]) 36 | try: 37 | tshark_proc = subprocess.Popen(tshark_args, stdout=subprocess.PIPE) 38 | except FileNotFoundError: 39 | sys.stderr.write('Please install tshark.\n') 40 | sys.exit(-1) 41 | pairs = set() 42 | for tshark_line in tshark_proc.stdout.readlines(): 43 | tshark_line_list = tshark_line.decode( 44 | 'utf-8').rstrip('\n').split('\t') 45 | eth_src_str, eth_dst_str, ipv6_src, ipv6_dst, ipv4_src, ipv4_dst = tshark_line_list 46 | eth_src = netaddr.EUI( 47 | eth_src_str, dialect=netaddr.mac_unix_expanded) 48 | eth_dst = netaddr.EUI( 49 | eth_dst_str, dialect=netaddr.mac_unix_expanded) 50 | for 
src_ip_str in (ipv4_src, ipv6_src): 51 | try: 52 | ip_src = ipaddress.ip_address(src_ip_str) 53 | except ValueError: 54 | continue 55 | pairs.add((eth_src, ip_src)) 56 | if eth_dst != BCAST_EUI: 57 | for dst_ip_str in (ipv4_dst, ipv6_dst): 58 | try: 59 | ip_dst = ipaddress.ip_address(dst_ip_str) 60 | except ValueError: 61 | continue 62 | if ip_dst.is_multicast or ip_dst.is_unspecified: 63 | continue 64 | pairs.add((eth_dst, ip_dst)) 65 | pcap_pairs[pcap] = pairs 66 | return pcap_pairs 67 | 68 | 69 | def gen_manifest(pcap_pairs, csv_output): 70 | print(f'Generating manifest {csv_output}...') 71 | with gzip.open(csv_output, 'wb') as csv_out: 72 | writer = csv.DictWriter(io.TextIOWrapper( 73 | csv_out, newline='', write_through=True), fieldnames=('eth', 'ip', 'pcap')) 74 | writer.writeheader() 75 | for pcap, pairs in sorted(pcap_pairs.items()): 76 | for eth, ipa in pairs: 77 | writer.writerow( 78 | {'eth': str(eth), 'ip': str(ipa), 'pcap': pcap}) 79 | 80 | 81 | def main(): 82 | arg_parser = argparse.ArgumentParser( 83 | prog=sys.argv[0], 84 | description='Generate a compressed CSV of MAC/IP/file mappings from pcaps', 85 | usage=""" 86 | 87 | Example: 88 | 89 | --pcapdirs=/some/dir,/some/other/dir --csv=/some/csvfile.csv.gz 90 | """) 91 | arg_parser.add_argument( 92 | '-p', '--pcapdirs', help='list of pcap dirs') 93 | arg_parser.add_argument( 94 | '-c', '--csv', help='compressed csv file to write') 95 | try: 96 | args = arg_parser.parse_args(sys.argv[1:]) 97 | except (KeyError, IndexError): 98 | arg_parser.print_usage() 99 | sys.exit(-1) 100 | 101 | if not (args.pcapdirs and args.csv): 102 | arg_parser.print_usage() 103 | sys.exit(-1) 104 | 105 | pcap_pairs = get_pcap_mac_ips(args.pcapdirs.split(',')) 106 | gen_manifest(pcap_pairs, args.csv) 107 | 108 | 109 | if __name__ == '__main__': 110 | main() 111 | -------------------------------------------------------------------------------- /config/poseidon.config: -------------------------------------------------------------------------------- 1 | [Poseidon] 2 | logger_level = INFO 3 | reinvestigation_frequency = 900 4 | max_concurrent_reinvestigations = 2 5 | scan_frequency = 5 6 | learn_public_addresses = True 7 | controller_type = faucet 8 | automated_acls = False 9 | rules_file = /opt/poseidon/config/rules.yaml 10 | # A single string, being the interface name Poseidon will receive mirrored traffic on. 11 | # A single Poseidon instance can support just one collector_nic (to mirror multiple 12 | # switches centrally, use FAUCET stacking - see below). 13 | collector_nic = lo 14 | network_tap_ip = network_tap 15 | network_tap_port = 8080 16 | prometheus_ip = prometheus 17 | prometheus_port = 9090 18 | 19 | [Faucet] 20 | faucetconfrpc_address = faucetconfrpc:59999 21 | faucetconfrpc_client = poseidon 22 | # Dict of one port per switch to use for mirroring ports on that switch (e.g. '{"switch1": 3}) 23 | controller_mirror_ports = '{"switch1": 3}' 24 | # If a switch doesn't have its own mirror port available (maybe an AP), we can use another directly 25 | # connected switch instead. For example, if we want to mirror anything on switchx, then 26 | # mirror switch1 port 99 instead. 27 | controller_proxy_mirror_ports = '{"switchx": ["switch1", 99]}' 28 | # VLAN ID to use for FAUCET remote mirroring via tunneling (must be an int). 29 | tunnel_vlan = 999 30 | # Tunnel name to use for FAUCET ACL when remote mirroring via tunneling. 31 | tunnel_name = poseidon_tunnel 32 | # List of integer VIDs to ignore (e.g. 
'[123,456]') 33 | ignore_vlans = '[]' 34 | # Dict of one port per switch to ignore (e.g. '{"switch1": 99}') 35 | ignore_ports = '{}' 36 | # Dict of one trunk port per switch to ignore (e.g. '{"switch1": 99}') 37 | trunk_ports = '{}' 38 | FA_RABBIT_HOST = RABBIT_SERVER 39 | FA_RABBIT_PORT = 5672 40 | FA_RABBIT_EXCHANGE = topic_recs 41 | FA_RABBIT_EXCHANGE_TYPE = topic 42 | FA_RABBIT_ROUTING_KEY = FAUCET.Event 43 | 44 | [VOLOS] 45 | enable_volos = False 46 | volos_cfg_file = /opt/poseidon/config/volos_config.yaml 47 | acl_dir = /opt/poseidon/volos/acls 48 | coprocessor_port = 23 49 | ignore_copro_ports = '{}' 50 | coprocessing_frequency = 900 51 | max_concurrent_coprocessing = 2 52 | 53 | [PIPETTE] 54 | pipette_repo = https://github.com/IQTLabs/pipette.git 55 | pipette_dir = /opt/poseidon/pipette 56 | coprocessor_nic = "enx0" 57 | fake_interface = fake0 58 | fake_mac = 0e:00:00:00:00:67 59 | fake_ips = [10.10.0.1/16] 60 | coprocessor_vlans = [2] 61 | bridge = opro0 62 | pipette_port = 6699 63 | pcap_location = /opt/poseidon/pcaps 64 | pcap_size = 50 65 | -------------------------------------------------------------------------------- /config/rules.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | include: 3 | - acls.yaml 4 | 5 | rules: 6 | rule-name-1: 7 | - rule: 8 | device_key: os 9 | value: Mac 10 | acls: [office-vlan-protect, foo] 11 | - rule: 12 | device_key: role 13 | value: Printer 14 | min_confidence: 50 15 | acls: [no-external] 16 | no-internal: 17 | - rule: 18 | acls: [no-internal] 19 | -------------------------------------------------------------------------------- /config/volos_config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | https://github.com/yeasy/simple-web: 3 | simple-web: 4 | branch: master 5 | ports: 6 | - port: 7 | protocol: tcp 8 | mapping: "80:80" 9 | - port: 10 | protocol: tcp 11 | mapping: "443:443" 12 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | services: 3 | rabbit: 4 | restart: always 5 | image: 'iqtlabs/rabbitmq:latest' 6 | ports: 7 | - '127.0.0.1:15672:15672' 8 | - '5672:5672' 9 | networks: 10 | poseidon: 11 | aliases: 12 | - RABBIT_SERVER 13 | - rabbitmq 14 | - messenger 15 | build: 16 | context: helpers/rabbitmq 17 | dockerfile: Dockerfile 18 | network_tap: 19 | restart: always 20 | image: 'iqtlabs/network_tap:v0.11.33' 21 | volumes: 22 | - '/var/run/docker.sock:/var/run/docker.sock' 23 | - '${POSEIDON_PREFIX}/opt/poseidon_files:/files' 24 | environment: 25 | PYTHONUNBUFFERED: '1' 26 | KEEPIMAGES: '0' 27 | networks: 28 | poseidon: 29 | poseidon_api: 30 | restart: always 31 | image: 'iqtlabs/poseidon-api:latest' 32 | ports: 33 | - '5000:8000' 34 | environment: 35 | PYTHONUNBUFFERED: '1' 36 | volumes: 37 | - '${POSEIDON_PREFIX}/opt/poseidon:/opt/poseidon' 38 | networks: 39 | poseidon: 40 | aliases: 41 | - poseidon-api 42 | depends_on: 43 | - poseidon 44 | build: 45 | context: helpers/api 46 | dockerfile: Dockerfile 47 | poseidon: 48 | restart: always 49 | image: 'iqtlabs/poseidon:latest' 50 | ports: 51 | - '9304:9304' 52 | environment: 53 | PYTHONUNBUFFERED: '1' 54 | volumes: 55 | - '${POSEIDON_PREFIX}/opt/poseidon:/opt/poseidon' 56 | - '${POSEIDON_PREFIX}/var/log/poseidon:/var/log/poseidon' 57 | - '${POSEIDON_PREFIX}/opt/faucetconfrpc:/certs' 58 | networks: 59 | - poseidon 60 | depends_on: 61 | 
faucetconfrpc: 62 | condition: service_healthy 63 | rabbitmq_adapter: 64 | condition: service_started 65 | build: 66 | context: . 67 | dockerfile: Dockerfile 68 | workers: 69 | restart: always 70 | image: 'iqtlabs/poseidon-workers:latest' 71 | environment: 72 | PYTHONUNBUFFERED: '1' 73 | KEEPIMAGES: '0' 74 | VOL_PREFIX: '${POSEIDON_PREFIX}' 75 | SWARM: '${POSEIDON_SWARM}' 76 | networks: 77 | - poseidon 78 | volumes: 79 | - '/var/run/docker.sock:/var/run/docker.sock' 80 | depends_on: 81 | - poseidon 82 | build: 83 | context: workers 84 | dockerfile: Dockerfile 85 | networks: 86 | poseidon: 87 | driver: overlay 88 | -------------------------------------------------------------------------------- /docs/img/Rookies16Badge_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/docs/img/Rookies16Badge_1.png -------------------------------------------------------------------------------- /docs/img/faucet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/docs/img/faucet.png -------------------------------------------------------------------------------- /docs/img/poseidon-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/docs/img/poseidon-logo.png -------------------------------------------------------------------------------- /helpers/api/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM iqtlabs/poseidon:latest 2 | LABEL maintainer="Charlie Lewis " 3 | LABEL poseidon.namespace="api" 4 | 5 | EXPOSE 8000 6 | 7 | CMD (flask run > /dev/null 2>&1) & (poseidon-api) 8 | -------------------------------------------------------------------------------- /helpers/faucet/dashboards.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 1 3 | 4 | providers: 5 | - name: 'dashboards' # an unique provider name 6 | # org id. will default to orgId 1 if not specified 7 | orgId: 1 8 | # name of the dashboard folder. Required 9 | folder: '' 10 | # folder UID. will be automatically generated if not specified 11 | folderUid: '' 12 | # provider type. Required 13 | type: file 14 | # disable dashboard deletion 15 | disableDeletion: false 16 | # enable dashboard editing 17 | editable: true 18 | # how often Grafana will scan for changed dashboards 19 | updateIntervalSeconds: 10 20 | options: 21 | # path to dashboard files on disk. 
Required 22 | path: /var/lib/grafana/dashboards 23 | -------------------------------------------------------------------------------- /helpers/faucet/docker-compose-faucet.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.7' 3 | services: 4 | gauge: 5 | restart: always 6 | image: 'c65sdn/gauge:1.0.56' 7 | environment: 8 | GAUGE_CONFIG: '/etc/faucet/gauge.yaml' 9 | GAUGE_CONFIG_STAT_RELOAD: '1' 10 | volumes: 11 | - '${POSEIDON_PREFIX}/var/log/faucet:/var/log/faucet' 12 | - '${POSEIDON_PREFIX}/etc/faucet:/etc/faucet' 13 | ports: 14 | - '6654:6653' 15 | networks: 16 | - poseidon 17 | faucet: 18 | restart: always 19 | image: 'c65sdn/faucet:1.0.56' 20 | volumes: 21 | - '${POSEIDON_PREFIX}/var/log/faucet:/var/log/faucet' 22 | - '${POSEIDON_PREFIX}/var/run/faucet:/var/run/faucet' 23 | - '${POSEIDON_PREFIX}/etc/faucet:/etc/faucet' 24 | ports: 25 | - '6653:6653' 26 | environment: 27 | FAUCET_CONFIG_STAT_RELOAD: '1' 28 | FAUCET_EVENT_SOCK: '1' 29 | FAUCET_CONFIG_AUTO_REVERT: '1' 30 | networks: 31 | - poseidon 32 | networks: 33 | poseidon: 34 | driver: overlay 35 | -------------------------------------------------------------------------------- /helpers/faucet/docker-compose-monitoring.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.7' 3 | services: 4 | prometheus: 5 | restart: always 6 | image: 'prom/prometheus:v2.44.0' 7 | user: 'root' 8 | ports: 9 | - '9090:9090' 10 | volumes: 11 | - '${POSEIDON_PREFIX}/opt/prometheus/:/prometheus' 12 | - './helpers/faucet/prometheus-docker-compose.yml:/etc/prometheus/prometheus.yml' 13 | - './helpers/faucet/faucet.rules.yml:/etc/prometheus/faucet.rules.yml' 14 | networks: 15 | - poseidon 16 | grafana: 17 | restart: always 18 | image: 'grafana/grafana:9.5.2' 19 | user: 'root' 20 | ports: 21 | - '3000:3000' 22 | volumes: 23 | - '${POSEIDON_PREFIX}/opt/grafana:/var/lib/grafana' 24 | - '${POSEIDON_PREFIX}/opt/grafana/provisioning:/etc/grafana/provisioning' 25 | networks: 26 | - poseidon 27 | networks: 28 | poseidon: 29 | driver: overlay 30 | -------------------------------------------------------------------------------- /helpers/faucet/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.7' 3 | services: 4 | rabbitmq_adapter: 5 | restart: always 6 | image: 'c65sdn/event-adapter-rabbitmq:1.0.56' 7 | volumes: 8 | - '${POSEIDON_PREFIX}/var/run/faucet:/var/run/faucet' 9 | environment: 10 | FAUCET_EVENT_SOCK: '1' 11 | FA_RABBIT_HOST: 'rabbit' 12 | FA_RABBIT_PORT: '${FA_RABBIT_PORT}' 13 | FA_RABBIT_EXCHANGE: '${FA_RABBIT_EXCHANGE}' 14 | FA_RABBIT_EXCHANGE_TYPE: '${FA_RABBIT_EXCHANGE_TYPE}' 15 | FA_RABBIT_ROUTING_KEY: '${FA_RABBIT_ROUTING_KEY}' 16 | networks: 17 | - poseidon 18 | faucet_certstrap: 19 | restart: on-failure 20 | image: 'iqtlabs/faucet-certstrap:v0.55.56' 21 | volumes: 22 | - '${POSEIDON_PREFIX}/opt/faucetconfrpc:/opt/faucetconfrpc' 23 | command: 24 | - /opt/faucetconfrpc 25 | - faucetconfrpc 26 | - poseidon 27 | networks: 28 | - poseidon 29 | faucetconfrpc: 30 | restart: always 31 | image: 'iqtlabs/faucetconfrpc:v0.55.56' 32 | environment: 33 | PYTHONUNBUFFERED: '1' 34 | volumes: 35 | - '${POSEIDON_PREFIX}/opt/faucetconfrpc:/certs' 36 | - /etc/faucet:/etc/faucet 37 | ports: 38 | - 59999:59999 39 | networks: 40 | - poseidon 41 | depends_on: 42 | faucet_certstrap: 43 | condition: service_completed_successfully 44 | command: 45 | - --key=/certs/faucetconfrpc.key 46 | 
- --cert=/certs/faucetconfrpc.crt 47 | - --cacert=/certs/faucetconfrpc-ca.crt 48 | - --host=faucetconfrpc 49 | - --config_dir=/etc/faucet 50 | networks: 51 | poseidon: 52 | driver: overlay 53 | -------------------------------------------------------------------------------- /helpers/faucet/faucet.rules.yml: -------------------------------------------------------------------------------- 1 | --- 2 | groups: 3 | - name: faucet.rules 4 | rules: 5 | 6 | # Convert OF stats to rates 7 | - record: instance_dpid:of_packet_ins:rate1m 8 | expr: rate(of_packet_ins_total[1m]) 9 | - record: instance_dpid:ignored_packet_ins:rate1m 10 | expr: rate(of_ignored_packet_ins_total[1m]) 11 | - record: instance_dpid:of_unexpected_packet_ins:rate1m 12 | expr: rate(of_unexpected_packet_ins_total[1m]) 13 | - record: instance_dpid:of_flowmsgs_sent:rate1m 14 | expr: rate(of_flowmsgs_sent_total[1m]) 15 | 16 | # Sum hosts learned on VLANs 17 | - record: instance_vlan:vlan_hosts_learned:sum 18 | expr: sum(vlan_hosts_learned) BY (instance, vlan) 19 | - record: instance_vlan_dpid:vlan_hosts_learned:sum 20 | expr: sum(vlan_hosts_learned) BY (instance, vlan, dp_id, dp_name) 21 | 22 | # Sum hosts learned on ports 23 | - record: port_dpid:port_vlan_hosts_learned:sum 24 | expr: sum(port_vlan_hosts_learned) BY (instance, port, dp_id, dp_name) 25 | - record: port_vlan_dpid:port_vlan_hosts_learned:sum 26 | expr: >- 27 | sum(port_vlan_hosts_learned) 28 | BY (instance, port, vlan, dp_id, dp_name) 29 | 30 | # Convert Port stats to rates 31 | - record: instance_port:of_port_rx_packets:rate1m 32 | expr: rate(of_port_rx_packets[1m]) 33 | - record: instance_port:of_port_tx_packets:rate1m 34 | expr: rate(of_port_tx_packets[1m]) 35 | - record: instance_port:of_port_rx_bits:rate1m 36 | expr: rate(of_port_rx_bytes[1m]) * 8 37 | - record: instance_port:of_port_tx_bits:rate1m 38 | expr: rate(of_port_tx_bytes[1m]) * 8 39 | - record: instance_port:of_port_rx_dropped:rate1m 40 | expr: rate(of_port_rx_dropped[1m]) 41 | - record: instance_port:of_port_tx_dropped:rate1m 42 | expr: rate(of_port_tx_dropped[1m]) 43 | - record: instance_port:of_port_rx_errors:rate1m 44 | expr: rate(of_port_rx_errors[1m]) 45 | -------------------------------------------------------------------------------- /helpers/faucet/gauge.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | faucet_configs: 3 | - '/etc/faucet/faucet.yaml' 4 | watchers: 5 | port_status_poller: 6 | type: 'port_state' 7 | all_dps: true 8 | db: 'prometheus' 9 | port_stats_poller: 10 | type: 'port_stats' 11 | all_dps: true 12 | interval: 10 13 | db: 'prometheus' 14 | flow_table_poller: 15 | type: 'flow_table' 16 | all_dps: true 17 | interval: 60 18 | db: 'prometheus' 19 | dbs: 20 | prometheus: 21 | type: 'prometheus' 22 | prometheus_addr: '0.0.0.0' 23 | prometheus_port: 9303 24 | ft_file: 25 | type: 'text' 26 | compress: true 27 | file: 'flow_table.yaml.gz' 28 | -------------------------------------------------------------------------------- /helpers/faucet/prometheus-docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | global: 3 | scrape_interval: 15s 4 | evaluation_interval: 15s 5 | rule_files: 6 | - "faucet.rules.yml" 7 | scrape_configs: 8 | - job_name: 'faucet' 9 | static_configs: 10 | - targets: ['faucet:9302'] 11 | - job_name: 'faucetconfrpc' 12 | static_configs: 13 | - targets: ['faucetconfrpc:59998'] 14 | - job_name: 'gauge' 15 | static_configs: 16 | - targets: ['gauge:9303'] 17 | - 
job_name: 'poseidon' 18 | static_configs: 19 | - targets: ['poseidon:9304'] 20 | - job_name: 'poseidon workers' 21 | static_configs: 22 | - targets: ['workers:9305'] 23 | -------------------------------------------------------------------------------- /helpers/rabbitmq/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rabbitmq:3-management-alpine 2 | LABEL maintainer="Charlie Lewis " 3 | 4 | RUN apk add --update curl && rm -rf /var/cache/apk/* 5 | 6 | HEALTHCHECK --interval=15s --timeout=15s \ 7 | CMD curl --silent --fail http://localhost:15672/ || exit 1 8 | -------------------------------------------------------------------------------- /lib/poseidon_api/poseidon_api/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib import metadata 2 | 3 | 4 | __version__ = metadata.version("poseidon_api") 5 | -------------------------------------------------------------------------------- /lib/poseidon_api/poseidon_api/api.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | import bjoern 5 | import falcon 6 | from falcon_cors import CORS 7 | 8 | from .routes import routes 9 | from .routes import version 10 | 11 | 12 | cors = CORS(allow_all_origins=True) 13 | api = application = falcon.App(middleware=[cors.middleware]) 14 | 15 | r = routes() 16 | for route in r: 17 | api.add_route(version() + route, r[route]) 18 | 19 | 20 | def main(): 21 | parser = argparse.ArgumentParser() 22 | parser.add_argument( 23 | "--port", "-p", help="Port to run the API webserver on", type=int, default=8000 24 | ) 25 | parser.add_argument( 26 | "--prom_addr", 27 | "-a", 28 | help='Prometheus address connected to Poseidon, i.e. 
"prometheus:9090"', 29 | default="prometheus:9090", 30 | ) 31 | args = parser.parse_args() 32 | 33 | os.environ["PROM_ADDR"] = args.prom_addr 34 | bjoern.run(api, "0.0.0.0", args.port) 35 | -------------------------------------------------------------------------------- /lib/poseidon_api/poseidon_api/routes.py: -------------------------------------------------------------------------------- 1 | def routes(): 2 | from .data import Endpoints, Info, Network, NetworkByIp, NetworkFull 3 | 4 | endpoints = Endpoints() 5 | p = paths() 6 | info = Info() 7 | network = Network() 8 | network_by_ip = NetworkByIp() 9 | network_full = NetworkFull() 10 | funcs = [endpoints, info, network, network_by_ip, network_full] 11 | return dict(zip(p, funcs)) 12 | 13 | 14 | def paths(): 15 | return ["", "/info", "/network", "/network/{ip}", "/network_full"] 16 | 17 | 18 | def version(): 19 | return "/v1" 20 | -------------------------------------------------------------------------------- /lib/poseidon_api/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "poseidon-api" 3 | version = "0.18.3.dev" 4 | description = "RESTful API for querying Poseidon" 5 | authors = ["cglewis "] 6 | license = "Apache-2.0" 7 | packages = [ 8 | { include = "poseidon_api" }, 9 | ] 10 | 11 | [tool.poetry.dependencies] 12 | python = ">=3.8 <3.11" 13 | bjoern = "3.2.2" 14 | falcon = "3.1.1" 15 | falcon-cors = "1.1.7" 16 | httpx = "0.24.1" 17 | natural = "0.2.0" 18 | requests = "<2.32.3" 19 | urllib3 = "<2.2.3" 20 | poseidon-core = { path="../poseidon_core", develop=true } 21 | 22 | [tool.poetry.dev-dependencies] 23 | black = "24.3.0" 24 | docker = "6.1.3" 25 | httmock = "1.4.0" 26 | mock = "5.0.2" 27 | netifaces = "0.11.0" 28 | pylint = "2.17.4" 29 | pytest-cov = "4.1.0" 30 | pytest = "7.3.1" 31 | pytype = "2023.5.24" 32 | "ruamel.yaml" = "0.17.28" 33 | 34 | [tool.poetry.scripts] 35 | poseidon-api = 'poseidon_api.api:main' 36 | 37 | [tool.poetry.urls] 38 | homepage = "https://github.com/IQTLabs/poseidon" 39 | 40 | [build-system] 41 | requires = ["poetry-core>=1.0.0"] 42 | build-backend = "poetry.core.masonry.api" 43 | -------------------------------------------------------------------------------- /lib/poseidon_api/tests/test_api.py: -------------------------------------------------------------------------------- 1 | import falcon 2 | import pytest 3 | from falcon import testing 4 | from poseidon_api.api import api 5 | 6 | 7 | @pytest.fixture 8 | def client(): 9 | return testing.TestClient(api) 10 | 11 | 12 | def test_v1(client): 13 | response = client.simulate_get("/v1") 14 | assert response.status == falcon.HTTP_OK 15 | 16 | 17 | def test_network(client): 18 | response = client.simulate_get("/v1/network") 19 | assert len(response.json) == 2 20 | assert response.status == falcon.HTTP_OK 21 | 22 | 23 | def test_network_by_ip(client): 24 | response = client.simulate_get("/v1/network/10.0.0.1") 25 | assert len(response.json["dataset"]) == 0 26 | assert response.status == falcon.HTTP_OK 27 | 28 | 29 | def test_network_full(client): 30 | response = client.simulate_get("/v1/network_full") 31 | assert len(response.json) == 1 32 | assert response.status == falcon.HTTP_OK 33 | 34 | 35 | def test_info(client): 36 | response = client.simulate_get("/v1/info") 37 | assert response.status == falcon.HTTP_OK 38 | -------------------------------------------------------------------------------- /lib/poseidon_cli/poseidon_cli/__init__.py: 
-------------------------------------------------------------------------------- 1 | from importlib import metadata 2 | 3 | 4 | __version__ = metadata.version("poseidon_cli") 5 | -------------------------------------------------------------------------------- /lib/poseidon_cli/poseidon_cli/__main__.py: -------------------------------------------------------------------------------- 1 | def main(): 2 | import sys 3 | from poseidon_cli.cli import PoseidonShell 4 | 5 | p_shell = PoseidonShell() 6 | if "-c" in sys.argv: 7 | while sys.argv.pop(0) != "-c": 8 | pass 9 | p_shell.onecmd(" ".join(sys.argv)) 10 | else: 11 | p_shell.cmdloop() 12 | -------------------------------------------------------------------------------- /lib/poseidon_cli/poseidon_cli/commands.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | The commands that can be executed in the Poseidon shell. 5 | 6 | Created on 18 January 2019 7 | @author: Charlie Lewis 8 | """ 9 | import json 10 | import logging 11 | 12 | from poseidon_core.controllers.faucet.config import FaucetRemoteConfGetSetter 13 | from poseidon_core.controllers.sdnconnect import SDNConnect 14 | from poseidon_core.helpers.config import Config 15 | from poseidon_core.helpers.prometheus import Prometheus 16 | 17 | logger = logging.getLogger("commands") 18 | 19 | 20 | class Commands: 21 | def __init__(self, config=None, faucetconfgetsetter_cl=FaucetRemoteConfGetSetter): 22 | self.states = ["known", "unknown", "operating", "queued"] 23 | if config: 24 | self.config = config 25 | else: 26 | self.config = Config().get_config() 27 | prom = Prometheus() 28 | self.sdnc = SDNConnect( 29 | self.config, logger, prom, faucetconfgetsetter_cl=faucetconfgetsetter_cl 30 | ) 31 | 32 | def _publish_action(self, address, payload): 33 | if payload: 34 | self.sdnc.publish_action(address, json.dumps(payload)) 35 | 36 | def _get_endpoints(self, args, idx, match_all=False): 37 | """get endpoints that match""" 38 | self.sdnc.get_stored_endpoints() 39 | device = args.rsplit(" ", 1)[idx] 40 | endpoints = {} 41 | for match_func in ( 42 | self.sdnc.endpoint_by_name, 43 | self.sdnc.endpoint_by_hash, 44 | self.sdnc.endpoints_by_ip, 45 | self.sdnc.endpoints_by_mac, 46 | ): 47 | match = match_func(device) 48 | if match: 49 | if isinstance(match, list): 50 | endpoints.update({endpoint.name: endpoint for endpoint in match}) 51 | else: 52 | endpoints[match.name] = match 53 | if not match_all: 54 | break 55 | return endpoints.values() 56 | 57 | def _ignored_endpoints(self): 58 | return [ 59 | endpoint for endpoint in self.sdnc.endpoints.values() if endpoint.ignore 60 | ] 61 | 62 | def what_is(self, args): 63 | """what is a specific thing""" 64 | return self._get_endpoints(args, -1) 65 | 66 | def history_of(self, args): 67 | """history of a specific thing""" 68 | return self._get_endpoints(args, -1) 69 | 70 | def acls_of(self, args): 71 | """ACL history of a specific thing""" 72 | return self._get_endpoints(args, -1) 73 | 74 | def where_is(self, args): 75 | """where topologically is a specific thing""" 76 | return self._get_endpoints(args, -1) 77 | 78 | def remove_ignored(self, args): 79 | """remove all ignored devices""" 80 | endpoints = self._ignored_endpoints() 81 | endpoint_names = [endpoint.name for endpoint in endpoints] 82 | self._publish_action("poseidon.action.remove.ignored", endpoint_names) 83 | return endpoints 84 | 85 | def ignore(self, args): 86 | """ignore a specific thing""" 87 | endpoints 
= self._get_endpoints(args, 0, match_all=True) 88 | endpoint_names = [endpoint.name for endpoint in endpoints] 89 | self._publish_action("poseidon.action.ignore", endpoint_names) 90 | return endpoints 91 | 92 | def clear_ignored(self, args): 93 | """stop ignoring a specific thing""" 94 | device = args.rsplit(" ", 1)[0] 95 | if device == "ignored": 96 | endpoints = self._ignored_endpoints() 97 | else: 98 | endpoints = self._get_endpoints(args, 0, match_all=True) 99 | endpoint_names = [endpoint.name for endpoint in endpoints] 100 | self._publish_action("poseidon.action.clear.ignored", endpoint_names) 101 | return endpoints 102 | 103 | def remove(self, args): 104 | """remove and forget about a specific thing until it's seen again""" 105 | endpoints = self._get_endpoints(args, 0) 106 | endpoint_names = [endpoint.name for endpoint in endpoints] 107 | self._publish_action("poseidon.action.remove", endpoint_names) 108 | return endpoints 109 | 110 | def show_devices(self, arg): 111 | """ 112 | show all devices that are of a specific filter. i.e. windows, 113 | developer workstation, mirroring, etc. 114 | """ 115 | return self.sdnc.show_endpoints(arg) 116 | 117 | def change_devices(self, args): 118 | """change state of a specific thing""" 119 | state = args.rsplit(" ", 1)[-1] 120 | endpoints = self._get_endpoints(args, 0) 121 | endpoint_names = [(endpoint.name, state) for endpoint in endpoints] 122 | self._publish_action("poseidon.action.change", endpoint_names) 123 | return endpoints 124 | -------------------------------------------------------------------------------- /lib/poseidon_cli/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "poseidon-cli" 3 | version = "0.18.3.dev" 4 | description = "Commandline tool for querying Poseidon via Prometheus" 5 | authors = ["cglewis "] 6 | license = "Apache-2.0" 7 | packages = [ 8 | { include = "poseidon_cli" }, 9 | ] 10 | 11 | [tool.poetry.dependencies] 12 | python = ">=3.8 <3.11" 13 | cmd2 = "2.4.3" 14 | natural = "0.2.0" 15 | poseidon-core = { path="../poseidon_core", develop=true } 16 | texttable = "1.6.7" 17 | requests = "<2.32.3" 18 | urllib3 = "<2.2.3" 19 | 20 | [tool.poetry.dev-dependencies] 21 | black = "24.3.0" 22 | docker = "6.1.3" 23 | httmock = "1.4.0" 24 | mock = "5.0.2" 25 | netifaces = "0.11.0" 26 | pylint = "2.17.4" 27 | pytest-cov = "4.1.0" 28 | pytest = "7.3.1" 29 | pytype = "2023.5.24" 30 | "ruamel.yaml" = "0.17.28" 31 | 32 | [tool.poetry.scripts] 33 | poseidon-cli = 'poseidon_cli.__main__:main' 34 | 35 | [tool.poetry.urls] 36 | homepage = "https://github.com/IQTLabs/poseidon" 37 | 38 | [build-system] 39 | requires = ["poetry-core>=1.0.0"] 40 | build-backend = "poetry.core.masonry.api" 41 | -------------------------------------------------------------------------------- /lib/poseidon_cli/tests/faucetconfgetsetter.py: -------------------------------------------------------------------------------- 1 | from poseidon_core.controllers.faucet.config import FaucetRemoteConfGetSetter 2 | from poseidon_core.controllers.sdnconnect import SDNConnect 3 | from poseidon_core.helpers.config import Config 4 | from poseidon_core.helpers.config import yaml_in 5 | from poseidon_core.helpers.config import yaml_out 6 | from poseidon_core.helpers.prometheus import Prometheus 7 | 8 | 9 | class FaucetLocalConfGetSetter(FaucetRemoteConfGetSetter): 10 | def __init__(self, **_kwargs): 11 | self.faucet_conf = {} 12 | 13 | @staticmethod 14 | def config_file_path(config_file): 15 | return 
config_file 16 | 17 | def read_faucet_conf(self, config_file): 18 | if not config_file: 19 | config_file = self.DEFAULT_CONFIG_FILE 20 | faucet_conf = yaml_in(config_file) 21 | if isinstance(faucet_conf, dict): 22 | self.faucet_conf = faucet_conf 23 | return self.faucet_conf 24 | 25 | def write_faucet_conf(self, config_file=None, faucet_conf=None): 26 | if not config_file: 27 | config_file = self.DEFAULT_CONFIG_FILE 28 | if faucet_conf is None: 29 | faucet_conf = self.faucet_conf 30 | self.faucet_conf = faucet_conf 31 | return yaml_out(config_file, self.faucet_conf) 32 | 33 | def set_port_conf(self, dp, port, port_conf): 34 | switch_conf = self.get_switch_conf(dp) 35 | switch_conf["interfaces"][port] = port_conf 36 | self.write_faucet_conf() 37 | 38 | def update_switch_conf(self, dp, switch_conf): 39 | self.faucet_conf["dps"][dp].update(switch_conf) 40 | self.write_faucet_conf() 41 | 42 | def _get_mirrored_ports(self, dp, mirror_port): 43 | mirror_interface_conf = self.get_port_conf(dp, mirror_port) 44 | mirrored_ports = None 45 | if mirror_interface_conf: 46 | mirrored_ports = mirror_interface_conf.get("mirror", None) 47 | return mirror_interface_conf, mirrored_ports 48 | 49 | def _set_mirror_config(self, dp, mirror_port, mirror_interface_conf, ports=None): 50 | if ports: 51 | if isinstance(ports, set): 52 | ports = list(ports) 53 | mirror_interface_conf["mirror"] = ports 54 | # Don't delete DP level config when setting mirror list to empty, 55 | # as that could cause an unnecessary cold start. 56 | elif "mirror" in mirror_interface_conf: 57 | del mirror_interface_conf["mirror"] 58 | self.set_port_conf(dp, mirror_port, mirror_interface_conf) 59 | 60 | def mirror_port(self, dp, mirror_port, port): 61 | mirror_interface_conf, ports = self._get_mirrored_ports(dp, mirror_port) 62 | ports = set(ports) 63 | ports.add(port) 64 | self._set_mirror_config(dp, mirror_port, mirror_interface_conf, ports) 65 | 66 | def unmirror_port(self, dp, mirror_port, port): 67 | mirror_interface_conf, ports = self._get_mirrored_ports(dp, mirror_port) 68 | ports = set(ports) 69 | if port in ports: 70 | ports.remove(port) 71 | self._set_mirror_config(dp, mirror_port, mirror_interface_conf, ports) 72 | 73 | def clear_mirror_port(self, dp, mirror_port): 74 | mirror_interface_conf, _ = self._get_mirrored_ports(dp, mirror_port) 75 | self._set_mirror_config(dp, mirror_port, mirror_interface_conf) 76 | 77 | 78 | def get_test_config(): 79 | config = Config().get_config() 80 | config["faucetconfrpc_address"] = None 81 | return config 82 | 83 | 84 | def get_sdn_connect(logger): 85 | config = get_test_config() 86 | prom = Prometheus() 87 | return SDNConnect( 88 | config, logger, prom, faucetconfgetsetter_cl=FaucetLocalConfGetSetter 89 | ) 90 | -------------------------------------------------------------------------------- /lib/poseidon_cli/tests/test_commands.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 18 Jan 2019 4 | @author: Charlie Lewis 5 | """ 6 | from faucetconfgetsetter import FaucetLocalConfGetSetter 7 | from faucetconfgetsetter import get_test_config 8 | from poseidon_cli.commands import Commands 9 | from poseidon_core.helpers.config import Config 10 | from poseidon_core.helpers.endpoint import endpoint_factory 11 | 12 | 13 | def test_commands(): 14 | commands = Commands( 15 | config=get_test_config(), faucetconfgetsetter_cl=FaucetLocalConfGetSetter 16 | ) 17 | endpoint = endpoint_factory("foo") 18 | endpoint.endpoint_data = 
{ 19 | "tenant": "foo", 20 | "mac": "00:00:00:00:00:00", 21 | "segment": "foo", 22 | "port": "1", 23 | } 24 | commands.sdnc.endpoints = {} 25 | commands.sdnc.endpoints[endpoint.name] = endpoint 26 | 27 | commands.what_is("foo") 28 | commands.history_of("foo") 29 | commands.acls_of("foo") 30 | commands.where_is("foo") 31 | commands.show_devices("foo bar") 32 | commands.show_devices("all") 33 | commands.change_devices("foo") 34 | commands.remove("foo") 35 | commands.clear_ignored("foo") 36 | commands.clear_ignored("ignored") 37 | commands.ignore("foo") 38 | commands.remove_ignored("foo") 39 | 40 | endpoint2 = endpoint_factory("foo2") 41 | endpoint2.endpoint_data = { 42 | "tenant": "foo", 43 | "mac": "00:00:00:00:00:00", 44 | "segment": "foo", 45 | "port": "1", 46 | } 47 | commands.sdnc.endpoints[endpoint2.name] = endpoint2 48 | commands.what_is("00:00:00:00:00:00") 49 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib import metadata 2 | 3 | 4 | __version__ = metadata.version("poseidon_core") 5 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/__main__.py: -------------------------------------------------------------------------------- 1 | def main(): 2 | from poseidon_core.main import main 3 | 4 | main() 5 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/constants.py: -------------------------------------------------------------------------------- 1 | NO_DATA = "NO DATA" 2 | 3 | PROTOCOL_MAP = {"tcp": 6, "udp": 17} 4 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/controllers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/lib/poseidon_core/poseidon_core/controllers/__init__.py -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/controllers/faucet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/lib/poseidon_core/poseidon_core/controllers/faucet/__init__.py -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/controllers/faucet/config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | from faucetconfrpc.faucetconfrpc_client_lib import FaucetConfRpcClient 5 | from poseidon_core.helpers.config import yaml_dump 6 | 7 | 8 | class EmptyFaucetConf(Exception): 9 | pass 10 | 11 | 12 | class FaucetRemoteConfGetSetter: 13 | DEFAULT_CONFIG_FILE = "" 14 | 15 | def __init__( 16 | self, client_key=None, client_cert=None, ca_cert=None, server_addr=None 17 | ): 18 | self.client = FaucetConfRpcClient( 19 | client_key=client_key, 20 | client_cert=client_cert, 21 | ca_cert=ca_cert, 22 | server_addr=server_addr, 23 | ) 24 | 25 | @staticmethod 26 | def config_file_path(config_file): 27 | if config_file: 28 | return os.path.basename(config_file) 29 | return config_file 30 | 31 | def read_faucet_conf(self, config_file): 32 | self.faucet_conf = self.client.get_config_file( 
33 | config_filename=self.config_file_path(config_file) 34 | ) 35 | if self.faucet_conf is None: 36 | logging.error("Faucet config is empty, exiting.") 37 | raise EmptyFaucetConf 38 | return self.faucet_conf 39 | 40 | def write_faucet_conf(self, config_file=None, faucet_conf=None, merge=False): 41 | if not config_file: 42 | config_file = self.DEFAULT_CONFIG_FILE 43 | if faucet_conf is None: 44 | faucet_conf = self.faucet_conf 45 | return self.client.set_config_file( 46 | self.faucet_conf, 47 | config_filename=self.config_file_path(config_file), 48 | merge=merge, 49 | ) 50 | 51 | def get_dps(self): 52 | self.read_faucet_conf(config_file=None) 53 | return self.faucet_conf.get("dps", {}) 54 | 55 | def set_acls(self, acls): 56 | self.read_faucet_conf(config_file=None) 57 | self.faucet_conf["acls"] = acls 58 | self.write_faucet_conf(config_file=None) 59 | 60 | def get_port_conf(self, dp, port): 61 | switch_conf = self.get_switch_conf(dp) 62 | if not switch_conf: 63 | return None 64 | return switch_conf["interfaces"].get(port, None) 65 | 66 | def get_switch_conf(self, dp): 67 | return self.get_dps().get(dp, None) 68 | 69 | def get_stack_root_switch(self): 70 | root_stack_switch = [ 71 | switch 72 | for switch, switch_conf in self.get_dps().items() 73 | if switch_conf.get("stack", {}).get("priority", None) 74 | ] 75 | if root_stack_switch: 76 | return root_stack_switch[0] 77 | return None 78 | 79 | def set_port_conf(self, dp, port, port_conf): 80 | return self.client.set_dp_interfaces([(dp, {port: yaml_dump(port_conf)})]) 81 | 82 | def update_switch_conf(self, dp, switch_conf): 83 | return self.write_faucet_conf( 84 | faucet_conf={"dps": {dp: switch_conf}}, merge=True 85 | ) 86 | 87 | def mirror_port(self, dp, mirror_port, port): # pragma: no cover 88 | self.client.add_port_mirror(dp, port, mirror_port) 89 | 90 | def unmirror_port(self, dp, mirror_port, port): # pragma: no cover 91 | self.client.remove_port_mirror(dp, port, mirror_port) 92 | 93 | def clear_mirror_port(self, dp, mirror_port): # pragma: no cover 94 | self.client.clear_port_mirror(dp, mirror_port) 95 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/controllers/sdnevents.py: -------------------------------------------------------------------------------- 1 | import json 2 | import queue 3 | import time 4 | from collections import defaultdict 5 | from functools import partial 6 | 7 | from poseidon_core.helpers.actions import Actions 8 | from poseidon_core.helpers.config import Config 9 | from poseidon_core.helpers.rabbit import Rabbit 10 | 11 | 12 | class SDNEvents: 13 | def __init__(self, logger, prom, sdnc): 14 | self.logger = logger 15 | self.prom = prom 16 | self.m_queue = queue.Queue() 17 | self.job_queue = queue.Queue() 18 | self.rabbits = [] 19 | self.config = Config().get_config() 20 | self.sdnc = sdnc 21 | self.sdnc.default_endpoints() 22 | self.prom.update_endpoint_metadata(self.sdnc.endpoints) 23 | 24 | def create_message_queue(self, host, port, exchange, binding_key): 25 | waiting = True 26 | while waiting: 27 | rabbit = Rabbit() 28 | rabbit.make_rabbit_connection(host, port, exchange, binding_key) 29 | rabbit.start_channel(self.rabbit_callback, self.m_queue) 30 | waiting = False 31 | self.rabbits.append(rabbit) 32 | 33 | def start_message_queues(self): 34 | host = self.config["FA_RABBIT_HOST"] 35 | port = int(self.config["FA_RABBIT_PORT"]) 36 | exchange = "topic-poseidon-internal" 37 | binding_key = ["poseidon.algos.#", "poseidon.action.#"] 38 | 
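        # Two queues are consumed here: this internal exchange carries tool results
        # ("poseidon.algos.#") and operator actions ("poseidon.action.#") published by
        # the CLI/API; the second one below is the Faucet event-adapter exchange, whose
        # FA_RABBIT_* settings match helpers/faucet/docker-compose.yaml.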
self.create_message_queue(host, port, exchange, binding_key) 39 | exchange = self.config["FA_RABBIT_EXCHANGE"] 40 | binding_key = [self.config["FA_RABBIT_ROUTING_KEY"] + ".#"] 41 | self.create_message_queue(host, port, exchange, binding_key) 42 | 43 | def merge_metadata(self, new_metadata): 44 | updated = set() 45 | metadata_types = { 46 | "mac_addresses": self.sdnc.endpoints_by_mac, 47 | "ipv4_addresses": self.sdnc.endpoints_by_ip, 48 | "ipv6_addresses": self.sdnc.endpoints_by_ip, 49 | } 50 | for metadata_type, metadata_lookup in metadata_types.items(): 51 | type_new_metadata = new_metadata.get(metadata_type, {}) 52 | for key, data in type_new_metadata.items(): 53 | endpoints = metadata_lookup(key) 54 | if endpoints: 55 | endpoint = endpoints[0] 56 | if metadata_type not in endpoint.metadata: 57 | endpoint.metadata[metadata_type] = defaultdict(dict) 58 | if key in endpoint.metadata[metadata_type]: 59 | endpoint.metadata[metadata_type][key].update(data) 60 | else: 61 | endpoint.metadata[metadata_type][key] = data 62 | updated.add(endpoint) 63 | return updated 64 | 65 | def format_rabbit_message(self, item, faucet_event, remove_list): 66 | """ 67 | read a message off the rabbit_q 68 | the message should be item = (routing_key,msg) 69 | """ 70 | routing_key, my_obj = item 71 | self.logger.debug( 72 | "routing_key: {0} rabbit_message: {1}".format(routing_key, my_obj) 73 | ) 74 | 75 | def handler_algos_decider(my_obj): 76 | self.logger.debug("decider value:{0}".format(my_obj)) 77 | tool = my_obj.get("tool", "unknown") 78 | self.update_prom_var_time("last_tool_result_time", "tool", tool) 79 | data = my_obj.get("data", None) 80 | if isinstance(data, dict) and data: 81 | updated = self.merge_metadata(data) 82 | if updated: 83 | for endpoint in updated: 84 | if endpoint.operation_active(): 85 | self.sdnc.unmirror_endpoint(endpoint) 86 | return data 87 | return {} 88 | 89 | def handler_action_ignore(my_obj): 90 | for name in my_obj: 91 | endpoint = self.sdnc.endpoints.get(name, None) 92 | if endpoint: 93 | endpoint.ignore = True 94 | return {} 95 | 96 | def handler_action_clear_ignored(my_obj): 97 | for name in my_obj: 98 | endpoint = self.sdnc.endpoints.get(name, None) 99 | if endpoint: 100 | endpoint.ignore = False 101 | return {} 102 | 103 | def handler_action_change(my_obj): 104 | for name, state in my_obj: 105 | endpoint = self.sdnc.endpoints.get(name, None) 106 | if endpoint: 107 | try: 108 | if endpoint.operation_active(): 109 | self.sdnc.unmirror_endpoint(endpoint) 110 | # pytype: disable=attribute-error 111 | endpoint.machine_trigger(state) 112 | # pytype: enable=attribute-error 113 | endpoint.p_next_state = None 114 | if endpoint.operation_active(): 115 | self.sdnc.mirror_endpoint(endpoint) 116 | self.prom.prom_metrics["ncapture_count"].inc() 117 | except Exception as e: # pragma: no cover 118 | self.logger.error( 119 | "Unable to change endpoint {0} because: {1}".format( 120 | endpoint.name, str(e) 121 | ) 122 | ) 123 | return {} 124 | 125 | def handler_action_update_acls(my_obj): 126 | for ip in my_obj: 127 | rules = my_obj[ip] 128 | endpoints = self.sdnc.endpoints_by_ip(ip) 129 | if endpoints: 130 | endpoint = endpoints[0] 131 | try: 132 | status = Actions(endpoint, self.sdnc.sdnc).update_acls( 133 | rules_file=self.config["RULES_FILE"], 134 | endpoints=endpoints, 135 | force_apply_rules=rules, 136 | ) 137 | if not status: 138 | self.logger.warning( 139 | "Unable to apply rules: {0} to endpoint: {1}".format( 140 | rules, endpoint.name 141 | ) 142 | ) 143 | except Exception as e: 144 | 
self.logger.error( 145 | "Unable to apply rules: {0} to endpoint: {1} because {2}".format( 146 | rules, endpoint.name, str(e) 147 | ) 148 | ) 149 | return {} 150 | 151 | def handler_action_remove(my_obj): 152 | remove_list.extend([name for name in my_obj]) 153 | return {} 154 | 155 | def handler_action_remove_ignored(_my_obj): 156 | remove_list.extend( 157 | [ 158 | endpoint.name 159 | for endpoint in self.sdnc.endpoints.values() 160 | if endpoint.ignore 161 | ] 162 | ) 163 | return {} 164 | 165 | def handler_faucet_event(my_obj): 166 | if self.sdnc and self.sdnc.sdnc: 167 | faucet_event.append(my_obj) 168 | return my_obj 169 | return {} 170 | 171 | handlers = { 172 | "poseidon.algos.decider": handler_algos_decider, 173 | "poseidon.action.ignore": handler_action_ignore, 174 | "poseidon.action.clear.ignored": handler_action_clear_ignored, 175 | "poseidon.action.change": handler_action_change, 176 | "poseidon.action.update_acls": handler_action_update_acls, 177 | "poseidon.action.remove": handler_action_remove, 178 | "poseidon.action.remove.ignored": handler_action_remove_ignored, 179 | self.config["FA_RABBIT_ROUTING_KEY"]: handler_faucet_event, 180 | } 181 | 182 | handler = handlers.get(routing_key, None) 183 | if handler is not None: 184 | ret_val = handler(my_obj) 185 | return ret_val, True 186 | 187 | self.logger.error("no handler for routing_key {0}".format(routing_key)) 188 | return {}, False 189 | 190 | def update_prom_var_time(self, var, label_name, label_value): 191 | if self.prom: 192 | self.prom.prom_metrics[var].labels(**{label_name: label_value}).set( 193 | time.time() 194 | ) 195 | 196 | def handle_rabbit(self): 197 | events = 0 198 | faucet_event = [] 199 | remove_list = [] 200 | while True: 201 | found_work, rabbit_msg = self.prom.runtime_callable( 202 | partial(self.get_q_item, self.m_queue) 203 | ) 204 | if not found_work: 205 | break 206 | events += 1 207 | # faucet_event and remove_list get updated as references because partial() 208 | self.prom.runtime_callable( 209 | partial( 210 | self.format_rabbit_message, rabbit_msg, faucet_event, remove_list 211 | ) 212 | ) 213 | return (events, faucet_event, remove_list) 214 | 215 | def ignore_rabbit(self, routing_key, body): 216 | """drop ignored messages.""" 217 | if routing_key == self.config["FA_RABBIT_ROUTING_KEY"]: 218 | if self.sdnc and self.sdnc.sdnc: 219 | if self.sdnc.sdnc.ignore_event(body): 220 | return True 221 | return False 222 | 223 | def rabbit_callback(self, ch, method, _properties, body, q=None): 224 | """callback, places rabbit data into internal queue""" 225 | body = json.loads(body) 226 | self.logger.debug( 227 | "got a message: {0}:{1} (qsize {2})".format( 228 | method.routing_key, body, q.qsize() 229 | ) 230 | ) 231 | if q is not None: 232 | self.update_prom_var_time( 233 | "last_rabbitmq_routing_key_time", "routing_key", method.routing_key 234 | ) 235 | if not self.ignore_rabbit(method.routing_key, body): 236 | q.put((method.routing_key, body)) 237 | ch.basic_ack(delivery_tag=method.delivery_tag) 238 | 239 | def process(self, monitor): 240 | while True: 241 | events, faucet_event, remove_list = self.prom.runtime_callable( 242 | self.handle_rabbit 243 | ) 244 | if remove_list: 245 | for endpoint_name in remove_list: 246 | if endpoint_name in self.sdnc.endpoints: 247 | del self.sdnc.endpoints[endpoint_name] 248 | if faucet_event: 249 | self.prom.runtime_callable( 250 | partial(self.sdnc.check_endpoints, faucet_event) 251 | ) 252 | # schedule_mirroring should be abstracted out 253 | events += 
self.prom.runtime_callable(monitor.schedule_mirroring) 254 | found_work, schedule_func = self.prom.runtime_callable( 255 | partial(self.get_q_item, self.job_queue) 256 | ) 257 | if found_work and callable(schedule_func): 258 | events += self.prom.runtime_callable(schedule_func) 259 | if events: 260 | self.prom.update_endpoint_metadata(self.sdnc.endpoints) 261 | time.sleep(1) 262 | 263 | @staticmethod 264 | def get_q_item(q): 265 | """ 266 | attempt to get a work item from the queue 267 | m_queue -> (routing_key, body) 268 | a read from get_q_item should be of the form 269 | (boolean,(routing_key, body)) 270 | """ 271 | try: 272 | item = q.get_nowait() 273 | q.task_done() 274 | return (True, item) 275 | except queue.Empty: # pragma: no cover 276 | pass 277 | 278 | return (False, None) 279 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/lib/poseidon_core/poseidon_core/helpers/__init__.py -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/actions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 9 December 2018 4 | @author: Charlie Lewis 5 | """ 6 | from poseidon_core.helpers.collector import Collector 7 | from poseidon_core.operations.volos.acls import VolosAcl 8 | 9 | 10 | class Actions: 11 | def __init__(self, endpoint, sdnc): 12 | self.endpoint = endpoint 13 | self.sdnc = sdnc 14 | 15 | def mirror_endpoint(self): 16 | """ 17 | tell network_tap to start a collector and the controller to begin 18 | mirroring traffic 19 | """ 20 | status = False 21 | if self.sdnc: 22 | endpoint_data = self.endpoint.endpoint_data 23 | if self.sdnc.mirror_mac( 24 | endpoint_data["mac"], endpoint_data["segment"], endpoint_data["port"] 25 | ): 26 | collector = Collector(self.endpoint, endpoint_data["segment"]) 27 | if collector.nic: 28 | status = collector.start_collector() 29 | else: 30 | status = True 31 | return status 32 | 33 | def unmirror_endpoint(self): 34 | """tell the controller to unmirror traffic""" 35 | status = False 36 | if self.sdnc: 37 | endpoint_data = self.endpoint.endpoint_data 38 | if self.sdnc.unmirror_mac( 39 | endpoint_data["mac"], endpoint_data["segment"], endpoint_data["port"] 40 | ): 41 | collector = Collector(self.endpoint, endpoint_data["segment"]) 42 | if collector.nic: 43 | status = collector.stop_collector() 44 | else: 45 | status = True 46 | return status 47 | 48 | def coprocess_endpoint(self): 49 | """ 50 | Build up and apply acls for coprocessing 51 | """ 52 | status = False 53 | if self.sdnc: 54 | endpoint_data = self.endpoint.endpoint_data 55 | if self.sdnc.volos and self.sdnc.volos.enabled: 56 | acl = VolosAcl( 57 | self.endpoint, 58 | acl_dir=self.sdnc.volos.acl_dir, 59 | copro_vlans=[self.sdnc.volos.copro_vlan], 60 | copro_port=self.sdnc.volos.copro_port, 61 | ) 62 | endpoints = [self.endpoint] 63 | force_apply_rules = [acl.acl_key] 64 | coprocess_rules_files = [acl.acl_file] 65 | port_list = self.sdnc.volos.get_port_list( 66 | endpoint_data["mac"], 67 | ipv4=endpoint_data.get("ipv4", None), 68 | ipv6=endpoint_data.get("ipv6", None), 69 | ) 70 | if acl.ensure_acls_dir() and acl.write_acl_file(port_list): 71 | status = self.sdnc.update_acls( 72 | 
rules_file=None, 73 | endpoints=endpoints, 74 | force_apply_rules=force_apply_rules, 75 | coprocess_rules_files=coprocess_rules_files, 76 | ) 77 | else: 78 | status = True 79 | return status 80 | 81 | def uncoprocess_endpoint(self): 82 | """tell the controller to remove coprocessing acls""" 83 | status = False 84 | if self.sdnc: 85 | if self.sdnc.volos and self.sdnc.volos.enabled: 86 | acl = VolosAcl( 87 | self.endpoint, 88 | acl_dir=self.sdnc.volos.acl_dir, 89 | copro_vlans=[self.sdnc.volos.copro_vlan], 90 | copro_port=self.sdnc.volos.copro_port, 91 | ) 92 | endpoints = [self.endpoint] 93 | force_remove_rules = [acl.acl_key] 94 | if self.sdnc.update_acls( 95 | rules_file=None, 96 | endpoints=endpoints, 97 | force_remove_rules=force_remove_rules, 98 | coprocess_rules_files=None, 99 | ): 100 | status = acl.delete_acl_file() 101 | else: 102 | status = True 103 | return status 104 | 105 | def update_acls( 106 | self, 107 | rules_file=None, 108 | endpoints=None, 109 | force_apply_rules=None, 110 | force_remove_rules=None, 111 | ): 112 | """tell the controller what ACLs to dynamically change""" 113 | status = False 114 | if self.sdnc: 115 | status = self.sdnc.update_acls( 116 | rules_file=rules_file, 117 | endpoints=endpoints, 118 | force_apply_rules=force_apply_rules, 119 | force_remove_rules=force_remove_rules, 120 | ) 121 | return status 122 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/collector.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 9 December 2018 4 | @author: Charlie Lewis 5 | """ 6 | import ast 7 | import json 8 | import logging 9 | 10 | import httpx 11 | from poseidon_core.helpers.config import Config 12 | 13 | 14 | class Collector(object): 15 | def __init__(self, endpoint, switch, iterations=1): 16 | self.logger = logging.getLogger("collector") 17 | self.config = Config().get_config() 18 | self.endpoint = endpoint 19 | self.id = endpoint.name 20 | self.mac = endpoint.endpoint_data["mac"] 21 | self.nic = None 22 | nic = self.config["collector_nic"] 23 | try: 24 | eval_nic = ast.literal_eval(nic) 25 | if switch in eval_nic: 26 | self.nic = eval_nic[switch] 27 | else: 28 | self.logger.error( 29 | "Failed to get collector nic for the switch: {0}".format(switch) 30 | ) 31 | except ValueError: 32 | self.nic = nic 33 | self.interval = str(self.config["reinvestigation_frequency"]) 34 | self.iterations = str(iterations) 35 | 36 | def start_collector(self): 37 | """ 38 | Starts collector for a given endpoint with the 39 | options passed in at the creation of the class instance. 
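        A rough sketch of the request this ends up issuing (added for illustration;
        the address comes from network_tap_ip/network_tap_port in the Poseidon
        config, and the endpoint values here are made up):

            POST http://<network_tap_ip>:<network_tap_port>/create
            {"nic": "eth0", "id": "<endpoint hash>", "interval": "900",
             "filter": "'ether host 00:00:00:00:00:01'", "iters": "1",
             "metadata": "{'endpoint_data': ...}"}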
40 | """ 41 | status = False 42 | payload = { 43 | "nic": self.nic, 44 | "id": self.id, 45 | "interval": self.interval, 46 | "filter": "'ether host {0}'".format(self.mac), 47 | "iters": self.iterations, 48 | "metadata": "{'endpoint_data': " + str(self.endpoint.endpoint_data) + "}", 49 | } 50 | 51 | self.logger.debug("Payload: {0}".format(str(payload))) 52 | 53 | network_tap_addr = ( 54 | self.config["network_tap_ip"] + ":" + self.config["network_tap_port"] 55 | ) 56 | uri = "http://" + network_tap_addr + "/create" 57 | 58 | try: 59 | resp = httpx.post(uri, json=payload) 60 | # TODO improve logged output 61 | self.logger.debug("Collector response: {0}".format(resp.text)) 62 | response = ast.literal_eval(resp.text) 63 | if response[0]: 64 | self.logger.info( 65 | "Successfully started the collector for: {0}".format(self.id) 66 | ) 67 | self.endpoint.endpoint_data["container_id"] = ( 68 | response[1].rsplit(":", 1)[-1].strip() 69 | ) 70 | status = True 71 | else: 72 | self.logger.error( 73 | "Failed to start collector because: {0}".format(response[1]) 74 | ) 75 | except Exception as e: # pragma: no cover 76 | self.logger.error("Failed to start collector because: {0}".format(str(e))) 77 | return status 78 | 79 | def stop_collector(self): 80 | """ 81 | Stops collector for a given endpoint. 82 | """ 83 | status = False 84 | if "container_id" not in self.endpoint.endpoint_data: 85 | self.logger.warning( 86 | "No collector to stop because no container_id for endpoint" 87 | ) 88 | return True 89 | 90 | payload = {"id": [self.endpoint.endpoint_data["container_id"]]} 91 | self.logger.debug("Payload: {0}".format(str(payload))) 92 | 93 | network_tap_addr = ( 94 | self.config["network_tap_ip"] + ":" + self.config["network_tap_port"] 95 | ) 96 | uri = "http://" + network_tap_addr + "/stop" 97 | 98 | try: 99 | resp = httpx.post(uri, json=payload) 100 | self.logger.debug("Collector response: {0}".format(resp.text)) 101 | response = ast.literal_eval(resp.text) 102 | if response[0]: 103 | self.logger.info( 104 | "Successfully stopped the collector for: {0}".format(self.id) 105 | ) 106 | status = True 107 | else: 108 | self.logger.error( 109 | "Failed to stop collector because response failed with: {0}".format( 110 | response[1] 111 | ) 112 | ) 113 | except Exception as e: # pragma: no cover 114 | self.logger.error("Failed to stop collector because: {0}".format(str(e))) 115 | return status 116 | 117 | # returns a dictionary of existing collectors keyed on dev_hash 118 | def get_collectors(self): 119 | network_tap_addr = ( 120 | self.config["network_tap_ip"] + ":" + self.config["network_tap_port"] 121 | ) 122 | uri = "http://" + network_tap_addr + "/list" 123 | collectors = {} 124 | try: 125 | resp = httpx.get(uri) 126 | text = resp.text 127 | # TODO need to parse out text 128 | self.logger.debug("collector list response: " + text) 129 | except Exception as e: # pragma: no cover 130 | self.logger.debug("failed to get collector statuses" + str(e)) 131 | 132 | return collectors 133 | 134 | def host_has_active_collectors(self, dev_hash): 135 | active_collectors_exist = False 136 | 137 | collectors = self.get_collectors() 138 | 139 | if dev_hash in collectors: 140 | hash_coll = collectors[dev_hash] 141 | else: 142 | self.logger.warning( 143 | "Key: {0} not found in collector dictionary. 
" 144 | "Treating this as the existence of multiple active" 145 | "collectors".format(dev_hash) 146 | ) 147 | return True 148 | 149 | for c in collectors: 150 | self.logger.debug(c) 151 | if ( 152 | collectors[c].hash != dev_hash 153 | and collectors[c].host == hash_coll.host 154 | and collectors[c].status != "exited" 155 | ): 156 | active_collectors_exist = True 157 | break 158 | 159 | return active_collectors_exist 160 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 5 December 2018 4 | @author: Charlie Lewis 5 | """ 6 | import configparser 7 | import json 8 | import logging 9 | import os 10 | import tempfile 11 | from distutils import util 12 | 13 | import yaml 14 | from poseidon_core.helpers.exception_decor import exception 15 | 16 | 17 | class Config: 18 | def __init__(self): 19 | self.logger = logging.getLogger("config") 20 | self.config = configparser.RawConfigParser() 21 | self.config.optionxform = str 22 | if os.environ.get("POSEIDON_CONFIG") is not None: 23 | self.config_path = os.environ.get("POSEIDON_CONFIG") 24 | else: # pragma: no cover 25 | raise Exception( 26 | "Could not find poseidon config. Make sure to set the POSEIDON_CONFIG environment variable" 27 | ) 28 | self.config.read_file(open(self.config_path, "r")) 29 | 30 | def get_config(self): 31 | # set some defaults 32 | controller = { 33 | "TYPE": "faucet", 34 | "RULES_FILE": None, 35 | "MIRROR_PORTS": None, 36 | "AUTOMATED_ACLS": False, 37 | "LEARN_PUBLIC_ADDRESSES": False, 38 | "reinvestigation_frequency": 900, 39 | "max_concurrent_reinvestigations": 2, 40 | "max_concurrent_coprocessing": 2, 41 | "logger_level": "INFO", 42 | "faucetconfrpc_address": "faucetconfrpc:59999", 43 | "faucetconfrpc_client": "faucetconfrpc", 44 | "prometheus_ip": "prometheus", 45 | "prometheus_port": 9090, 46 | } 47 | 48 | config_map = { 49 | "controller_type": ("TYPE", []), 50 | "learn_public_addresses": ("LEARN_PUBLIC_ADDRESSES", [util.strtobool]), 51 | "rules_file": ("RULES_FILE", []), 52 | "collector_nic": ("collector_nic", []), 53 | "controller_mirror_ports": ("MIRROR_PORTS", [json.loads]), 54 | "controller_proxy_mirror_ports": ( 55 | "controller_proxy_mirror_ports", 56 | [json.loads], 57 | ), 58 | "tunnel_vlan": ("tunnel_vlan", [int]), 59 | "tunnel_name": ("tunnel_name", []), 60 | "automated_acls": ("AUTOMATED_ACLS", [util.strtobool]), 61 | "FA_RABBIT_PORT": ("FA_RABBIT_PORT", [int]), 62 | "scan_frequency": ("scan_frequency", [int]), 63 | "reinvestigation_frequency": ("reinvestigation_frequency", [int]), 64 | "max_concurrent_reinvestigations": ( 65 | "max_concurrent_reinvestigations", 66 | [int], 67 | ), 68 | "max_concurrent_coprocessing": ("max_concurrent_coprocessing", [int]), 69 | "ignore_vlans": ("ignore_vlans", [json.loads]), 70 | "ignore_ports": ("ignore_ports", [json.loads]), 71 | "trunk_ports": ("trunk_ports", [json.loads]), 72 | "logger_level": ("logger_level", []), 73 | } 74 | 75 | for section in self.config.sections(): 76 | for key, val in self.config[section].items(): 77 | if isinstance(val, str): 78 | val = val.strip("'") 79 | controller_key, val_funcs = config_map.get(key, (key, [])) 80 | for val_func in val_funcs: 81 | try: 82 | val = val_func(val) 83 | break 84 | except Exception as e: # pragma: no cover 85 | self.logger.error( 86 | "Unable to set configuration option {0} {1} because {2}".format( 87 | key, 
val, str(e) 88 | ) 89 | ) 90 | controller[controller_key] = val 91 | return controller 92 | 93 | 94 | def represent_none(dumper, _): 95 | return dumper.represent_scalar("tag:yaml.org,2002:null", "") 96 | 97 | 98 | def parse_rules(config_file): 99 | obj_doc = yaml_in(config_file) 100 | return obj_doc 101 | 102 | 103 | def yaml_load(yaml_str): 104 | return yaml.safe_load(yaml_str) 105 | 106 | 107 | def yaml_dump(yaml_str): 108 | return yaml.dump(yaml_str) 109 | 110 | 111 | @exception 112 | def yaml_in(config_file): 113 | try: 114 | with open(config_file, "r") as stream: 115 | return yaml_load(stream) 116 | except Exception as e: # pragma: no cover 117 | return False 118 | 119 | 120 | @exception 121 | def yaml_out(config_file, obj_doc): 122 | with tempfile.NamedTemporaryFile( 123 | prefix=os.path.basename(config_file), 124 | dir=os.path.dirname(config_file), 125 | mode="w", 126 | delete=False, 127 | ) as stream: 128 | yaml.add_representer(type(None), represent_none) 129 | yaml.dump(obj_doc, stream, default_flow_style=False) 130 | os.replace(stream.name, config_file) 131 | return True 132 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/endpoint.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 3 December 2018 4 | @author: Charlie Lewis 5 | """ 6 | import hashlib 7 | import json 8 | import time 9 | 10 | from poseidon_core.constants import NO_DATA 11 | from transitions import Machine 12 | 13 | MACHINE_IP_FIELDS = { 14 | "ipv4": ("ipv4_rdns", "ipv4_subnet"), 15 | "ipv6": ("ipv6_rdns", "ipv6_subnet"), 16 | } 17 | MACHINE_IP_PREFIXES = {"ipv4": 24, "ipv6": 64} 18 | 19 | 20 | def transit_wrap(trigger, source, dest, before=None, after=None): 21 | transit_dict = {"trigger": trigger, "source": source, "dest": dest} 22 | if before is not None: 23 | transit_dict["before"] = before 24 | if after is not None: 25 | transit_dict["after"] = after 26 | return transit_dict 27 | 28 | 29 | def endpoint_transit_wrap(trigger, source, dest): 30 | return transit_wrap(trigger, source, dest, after="_update_state_time") 31 | 32 | 33 | def endpoint_copro_transit_wrap(trigger, source, dest): 34 | return transit_wrap(trigger, source, dest, after="_update_copro_state_time") 35 | 36 | 37 | class Endpoint: 38 | states = ["known", "unknown", "operating", "queued"] 39 | 40 | transitions = [ 41 | endpoint_transit_wrap("operate", "unknown", "operating"), 42 | endpoint_transit_wrap("queue", "unknown", "queued"), 43 | endpoint_transit_wrap("operate", "known", "operating"), 44 | endpoint_transit_wrap("queue", "known", "queued"), 45 | endpoint_transit_wrap("operate", "queued", "operating"), 46 | endpoint_transit_wrap("known", "known", "known"), 47 | endpoint_transit_wrap("unknown", "known", "unknown"), 48 | endpoint_transit_wrap("known", "unknown", "known"), 49 | endpoint_transit_wrap("unknown", "unknown", "unknown"), 50 | endpoint_transit_wrap("known", "operating", "known"), 51 | endpoint_transit_wrap("unknown", "operating", "unknown"), 52 | endpoint_transit_wrap("known", "queued", "known"), 53 | endpoint_transit_wrap("unknown", "queued", "unknown"), 54 | ] 55 | 56 | copro_states = [ 57 | "copro_unknown", 58 | "copro_coprocessing", 59 | "copro_nominal", 60 | "copro_suspicious", 61 | "copro_queued", 62 | ] 63 | 64 | copro_transitions = [ 65 | endpoint_copro_transit_wrap( 66 | "copro_coprocess", "copro_unknown", "copro_coprocessing" 67 | ), 68 | 
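        # Summary of the copro lifecycle these transitions define: endpoints start in
        # copro_unknown, are queued or sent straight to copro_coprocessing, and move to
        # copro_nominal or copro_suspicious, from which they can be re-queued or
        # coprocessed again.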
endpoint_copro_transit_wrap("copro_queue", "copro_unknown", "copro_queued"), 69 | endpoint_copro_transit_wrap( 70 | "copro_coprocess", "copro_queued", "copro_coprocessing" 71 | ), 72 | endpoint_copro_transit_wrap( 73 | "copro_nominal", "copro_coprocessing", "copro_nominal" 74 | ), 75 | endpoint_copro_transit_wrap("copro_nominal", "copro_queued", "copro_nominal"), 76 | endpoint_copro_transit_wrap( 77 | "copro_suspicious", "copro_coprocessing", "copro_suspicious" 78 | ), 79 | endpoint_copro_transit_wrap("copro_queue", "copro_nominal", "copro_queued"), 80 | endpoint_copro_transit_wrap( 81 | "copro_coprocess", "copro_nominal", "copro_coprocessing" 82 | ), 83 | endpoint_copro_transit_wrap("copro_queue", "copro_suspicious", "copro_queued"), 84 | endpoint_copro_transit_wrap( 85 | "copro_coprocess", "copro_suspicious", "copro_coprocessing" 86 | ), 87 | ] 88 | 89 | def __init__(self, hashed_val): 90 | self.name = hashed_val.strip() 91 | self.ignore = False 92 | self.copro_ignore = False 93 | self.endpoint_data = None 94 | self.p_next_state = None 95 | self.p_prev_state = None 96 | self.p_next_copro_state = None 97 | self.p_prev_copro_state = None 98 | self.acl_data = [] 99 | self.metadata = {} 100 | self.state = None 101 | self.copro_state = None 102 | self.state_time = 0 103 | self.copro_state_time = 0 104 | self.observed_time = 0 105 | 106 | def _update_state_time(self, *args, **kwargs): 107 | self.state_time = time.time() 108 | 109 | def _update_copro_state_time(self, *args, **kwargs): 110 | self.copro_state_time = time.time() 111 | 112 | def encode(self): 113 | endpoint_d = { 114 | "name": self.name, 115 | "state": self.state, 116 | "copro_state": self.copro_state, 117 | "ignore": self.ignore, 118 | "endpoint_data": self.endpoint_data, 119 | "p_next_state": self.p_next_state, 120 | "p_prev_state": self.p_prev_state, 121 | "acl_data": self.acl_data, 122 | "metadata": self.metadata, 123 | "observed_time": self.observed_time, 124 | } 125 | return str(json.dumps(endpoint_d)) 126 | 127 | def mac_addresses(self): 128 | return self.metadata.get("mac_addresses", {}) 129 | 130 | def get_roles_confidences_pcap_labels(self): 131 | top_role = NO_DATA 132 | second_role = NO_DATA 133 | third_role = NO_DATA 134 | top_conf = "0" 135 | second_conf = "0" 136 | third_conf = "0" 137 | pcap_labels = NO_DATA 138 | for metadata in self.mac_addresses().values(): 139 | classification = metadata.get("classification", {}) 140 | if "labels" in classification: 141 | top_role, second_role, third_role = classification["labels"][:3] 142 | if "confidences" in classification: 143 | top_conf, second_conf, third_conf = classification["confidences"][:3] 144 | metadata_pcap_labels = metadata.get("pcap_labels", None) 145 | if metadata_pcap_labels: 146 | pcap_labels = metadata_pcap_labels 147 | return ( 148 | (top_role, second_role, third_role), 149 | (top_conf, second_conf, third_conf), 150 | pcap_labels, 151 | ) 152 | 153 | def get_ipv4_os(self): 154 | if "ipv4_addresses" in self.metadata: 155 | ipv4 = self.endpoint_data["ipv4"] 156 | for ip, ip_metadata in self.metadata["ipv4_addresses"].items(): 157 | if ip == ipv4: 158 | if "short_os" in ip_metadata: 159 | return ip_metadata["short_os"] 160 | return NO_DATA 161 | 162 | def touch(self): 163 | self.observed_time = time.time() 164 | 165 | def observed_timeout(self, timeout): 166 | return time.time() - self.observed_time > timeout 167 | 168 | def state_age(self): 169 | return int(time.time()) - self.state_time 170 | 171 | def state_timeout(self, timeout): 172 | return 
self.state_age() > timeout 173 | 174 | def copro_state_age(self): 175 | return int(time.time()) - self.copro_state_time 176 | 177 | def copro_state_timeout(self, timeout): 178 | return self.copro_state_age() > timeout 179 | 180 | def queue_next(self, next_state): 181 | self.p_next_state = next_state 182 | self.queue() # pytype: disable=attribute-error 183 | 184 | def machine_trigger(self, state): 185 | # pytype: disable=attribute-error 186 | self.machine.events[state].trigger(self) 187 | # pytype: enable=attribute-error 188 | 189 | def trigger_next(self): 190 | self.p_prev_state = self.state 191 | if self.p_next_state: 192 | self.machine_trigger(self.p_next_state) 193 | self.p_next_state = None 194 | 195 | def copro_queue_next(self, next_state): 196 | self.p_next_copro_state = next_state 197 | self.copro_queue() # pytype: disable=attribute-error 198 | 199 | def copro_machine_trigger(self, state): 200 | # pytype: disable=attribute-error 201 | self.copro_machine.events[state].trigger(self) 202 | # pytype: enable=attribute-error 203 | 204 | def copro_trigger_next(self): 205 | if self.p_next_copro_state: 206 | self.copro_machine_trigger(self.p_next_copro_state) 207 | self.p_next_copro_state = None 208 | 209 | def operation_active(self): 210 | return self.state == "operating" 211 | 212 | def operation_requested(self, next_state=None): 213 | if next_state is None: 214 | next_state = self.p_next_state 215 | return next_state == "operate" 216 | 217 | def force_unknown(self): 218 | self.unknown() # pytype: disable=attribute-error 219 | self.p_next_state = None 220 | 221 | def default(self): 222 | if not self.ignore: 223 | if self.state != "unknown": 224 | if self.state == "operating": 225 | self.p_next_state = "operate" 226 | elif self.state == "queued": 227 | self.p_next_state = "queue" 228 | elif self.state == "known": 229 | self.p_next_state = self.state 230 | self.unknown() # pytype: disable=attribute-error 231 | 232 | @staticmethod 233 | def make_hash(machine, trunk=False): 234 | """hash the unique metadata parts of an endpoint""" 235 | h = hashlib.sha256() 236 | words = ["tenant", "mac", "segment"] 237 | if trunk: 238 | words.append("ipv4") 239 | words.append("ipv6") 240 | pre_h = "".join([str(machine.get(word, "missing")) for word in words]) 241 | h.update(pre_h.encode("utf-8")) 242 | post_h = h.hexdigest() 243 | return post_h 244 | 245 | 246 | def endpoint_factory(hashed_val): 247 | endpoint = Endpoint(hashed_val) 248 | machine = Machine( 249 | model=endpoint, 250 | model_attribute="state", 251 | states=Endpoint.states, 252 | transitions=Endpoint.transitions, 253 | initial="unknown", 254 | send_event=True, 255 | ) 256 | machine.name = endpoint.name[:8] + " " 257 | endpoint.machine = machine 258 | copro_machine = Machine( 259 | model=endpoint, 260 | model_attribute="copro_state", 261 | states=Endpoint.copro_states, 262 | transitions=Endpoint.copro_transitions, 263 | initial="copro_unknown", 264 | send_event=True, 265 | ) 266 | copro_machine.name = endpoint.name[:8] + "_copro" 267 | endpoint.copro_machine = copro_machine 268 | return endpoint 269 | 270 | 271 | class EndpointDecoder: 272 | def __init__(self, endpoint): 273 | if isinstance(endpoint, dict): 274 | e = endpoint 275 | else: 276 | e = json.loads(endpoint) 277 | self.endpoint = endpoint_factory(e["name"]) 278 | self.endpoint.state = e["state"] 279 | self.endpoint.copro_state = e.get("copro_state", None) 280 | self.endpoint.ignore = bool(e.get("ignore", False)) 281 | self.endpoint.metadata = e.get("metadata", {}) 282 | 
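        # The fields restored here mirror what Endpoint.encode() writes; keys missing
        # from older records fall back to the .get() defaults. Round-trip sketch:
        # EndpointDecoder(endpoint.encode()).get_endpoint() rebuilds the endpoint with
        # fresh state machines supplied by endpoint_factory().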
self.endpoint.acl_data = e.get("acl_data", []) 283 | self.endpoint.endpoint_data = e["endpoint_data"] 284 | self.endpoint.p_next_state = e["p_next_state"] 285 | self.endpoint.p_prev_state = e.get("p_prev_state", None) 286 | self.endpoint.observed_time = e.get("observed_time", 0) 287 | 288 | def get_endpoint(self): 289 | return self.endpoint 290 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/exception_decor.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 27 December 2018 4 | @author: Charlie Lewis 5 | """ 6 | import functools 7 | import logging 8 | 9 | 10 | def exception(function): 11 | """ 12 | A decorator that wraps the passed in function and logs 13 | exceptions should one occur 14 | """ 15 | 16 | @functools.wraps(function) 17 | def wrapper(*args, **kwargs): 18 | logger = logging.getLogger("exception") 19 | try: 20 | return function(*args, **kwargs) 21 | except Exception as e: 22 | # log the exception 23 | logger.exception("Exception in {0}: {1}".format(function.__name__, str(e))) 24 | return False 25 | 26 | return wrapper 27 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/log.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 18 September 2017 4 | @author: Jeff Wang, Charlie Lewis 5 | """ 6 | import logging.handlers 7 | import os 8 | import socket 9 | 10 | from poseidon_core.helpers.config import Config 11 | 12 | 13 | class Logger: 14 | """ 15 | Base logger class that handles logging. Outputs to both console, a poseidon 16 | specific log file and a user specified syslog. 
To log, create a logger: 17 | logger1 = logging.getLogger('mpapp.area1') 18 | """ 19 | 20 | host = os.getenv("SYSLOG_HOST", "NOT_CONFIGURED") 21 | port = int(os.getenv("SYSLOG_PORT", 514)) 22 | 23 | level_int = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30, "INFO": 20, "DEBUG": 10} 24 | 25 | config = Config().get_config() 26 | 27 | # setup existing loggers 28 | logging.getLogger("schedule").setLevel(logging.ERROR) 29 | 30 | use_file_logger = True 31 | # ensure log file exists 32 | try: 33 | if not os.path.exists("/var/log/poseidon"): 34 | os.makedirs("/var/log/poseidon") 35 | if not os.path.exists("/var/log/poseidon/poseidon.log"): 36 | with open("/var/log/poseidon/poseidon.log", "w"): 37 | pass 38 | # set up logging to file 39 | level_str = config.get("logger_level", None) 40 | level = 0 41 | if isinstance(level_str, str): 42 | level = level_int.get(level_str.upper(), 0) 43 | logging.basicConfig( 44 | level=level, 45 | format="%(asctime)s [%(levelname)s] %(name)s - %(message)s", 46 | filename="/var/log/poseidon/poseidon.log", 47 | filemode="a", 48 | ) 49 | except Exception as e: # pragma: no cover 50 | use_file_logger = False 51 | 52 | # define a Handler which writes INFO messages or higher to the sys.stderr 53 | console = logging.StreamHandler() 54 | console.setLevel(logging.INFO) 55 | # set a format which is simpler for console use 56 | formatter = logging.Formatter("[%(levelname)s] %(message)s") 57 | # tell the handler to use this format 58 | console.setFormatter(formatter) 59 | # add the handler to the root logger 60 | logging.getLogger("").addHandler(console) 61 | 62 | # don't try to connect to a syslog address if one was not supplied 63 | if host != "NOT_CONFIGURED": # pragma: no cover 64 | # if a syslog address was supplied, log to it 65 | syslog = logging.handlers.SysLogHandler( # pytype: disable=wrong-arg-types 66 | address=(host, port), facility=1, socktype=socket.SOCK_STREAM 67 | ) 68 | f_format = "%(asctime)s [%(levelname)s] %(name)s - %(message)s" 69 | f_formatter = logging.Formatter(f_format) 70 | syslog.setFormatter(f_formatter) 71 | logging.getLogger("").addHandler(syslog) 72 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/metadata.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 19 February 2019 4 | @author: Charlie Lewis 5 | """ 6 | import functools 7 | import socket 8 | from concurrent.futures import ThreadPoolExecutor 9 | 10 | from poseidon_core.constants import NO_DATA 11 | 12 | 13 | @functools.lru_cache() 14 | def get_ether_vendor(mac, lookup_path): 15 | """ 16 | Takes a MAC address and looks up and returns the vendor for it. 
17 | """ 18 | mac = "".join(mac.split(":"))[:6].upper() 19 | try: 20 | with open(lookup_path, "r") as f: 21 | for line in f: 22 | if line.startswith(mac): 23 | return line.split()[1].strip() 24 | except Exception: # pragma: no cover 25 | return NO_DATA 26 | 27 | 28 | class DNSResolver: 29 | TIMEOUT = 5 30 | 31 | @staticmethod 32 | def _resolve_ip(ip): 33 | try: 34 | result = socket.getnameinfo((ip, 0), 0)[0] 35 | if result == ip: 36 | return NO_DATA 37 | return result 38 | except socket.gaierror: 39 | return NO_DATA 40 | 41 | def resolve_ips(self, ips): 42 | with ThreadPoolExecutor() as executor: 43 | return { 44 | ip: result 45 | for ip, result in zip( 46 | ips, executor.map(DNSResolver()._resolve_ip, list(ips)) 47 | ) 48 | } 49 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/helpers/rabbit.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 21 August 2017 4 | @author: dgrossman 5 | """ 6 | import logging 7 | import threading 8 | import time 9 | from functools import partial 10 | 11 | import pika 12 | 13 | 14 | class Rabbit: 15 | """ 16 | Base Class for RabbitMQ 17 | """ 18 | 19 | def __init__(self): 20 | self.logger = logging.getLogger("rabbit") 21 | self.connection = None 22 | self.channel = None 23 | self.mq_recv_thread = None 24 | self.queue_name = "poseidon_main" 25 | 26 | def close(self): 27 | if self.connection: 28 | self.connection.close() 29 | 30 | def make_rabbit_connection( 31 | self, host, port, exchange, keys, total_sleep=float("inf") 32 | ): # pragma: no cover 33 | """ 34 | Connects to rabbitmq using the given hostname, 35 | exchange, and queue. Retries on failure until success. 36 | Binds routing keys appropriate for module, and returns 37 | the channel and connection. 
38 | """ 39 | wait = True 40 | do_rabbit = True 41 | 42 | while wait and total_sleep > 0: 43 | try: 44 | # Starting rabbit connection 45 | self.connection = pika.BlockingConnection( 46 | pika.ConnectionParameters(host=host, port=port) 47 | ) 48 | self.channel = self.connection.channel() 49 | self.channel.exchange_declare(exchange=exchange, exchange_type="topic") 50 | self.channel.queue_declare( 51 | queue=self.queue_name, exclusive=False, durable=True 52 | ) 53 | self.logger.info(f"Connected to {host} rabbitmq...") 54 | wait = False 55 | except Exception as e: 56 | self.logger.debug(f"Waiting for connection to {host} rabbitmq...") 57 | time.sleep(2) 58 | total_sleep -= 2 59 | wait = True 60 | 61 | if wait: 62 | do_rabbit = False 63 | 64 | if self.channel is not None and isinstance(keys, list) and not wait: 65 | for key in keys: 66 | self.logger.debug(f"Array adding key:{key} to rabbitmq channel") 67 | self.channel.queue_bind( 68 | exchange=exchange, queue=self.queue_name, routing_key=key 69 | ) 70 | 71 | if isinstance(keys, str) and not wait: 72 | self.logger.debug(f"String adding key:{keys} to rabbitmq channel") 73 | self.channel.queue_bind( 74 | exchange=exchange, queue=self.queue_name, routing_key=keys 75 | ) 76 | 77 | return do_rabbit 78 | 79 | def start_channel(self, mycallback, m_queue): 80 | """Handle threading for messagetype""" 81 | self.logger.debug(f"About to start channel {self.channel}") 82 | self.channel.basic_consume(self.queue_name, partial(mycallback, q=m_queue)) 83 | self.mq_recv_thread = threading.Thread(target=self.channel.start_consuming) 84 | self.mq_recv_thread.start() 85 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | The main entrypoint for Poseidon 5 | 6 | Created on 3 December 2018 7 | @author: Charlie Lewis 8 | """ 9 | import logging 10 | import sys 11 | import threading 12 | import time 13 | from functools import partial 14 | 15 | import schedule 16 | from poseidon_core.controllers.faucet.config import FaucetRemoteConfGetSetter 17 | from poseidon_core.controllers.sdnconnect import SDNConnect 18 | from poseidon_core.controllers.sdnevents import SDNEvents 19 | from poseidon_core.helpers.config import Config 20 | from poseidon_core.helpers.log import Logger 21 | from poseidon_core.helpers.prometheus import Prometheus 22 | from poseidon_core.operations.monitor import Monitor 23 | 24 | 25 | def start_prometheus(logger): 26 | prom = Prometheus() 27 | try: 28 | prom.initialize_metrics() 29 | except Exception as e: # pragma: no cover 30 | logger.debug(f"Prometheus metrics are already initialized: {e}") 31 | Prometheus.start() 32 | return prom 33 | 34 | 35 | def schedule_thread_worker(logger, scheduler=schedule): 36 | """schedule thread, takes care of running processes in the future""" 37 | logger.debug("Starting thread_worker") 38 | while True: 39 | sys.stdout.flush() 40 | scheduler.run_pending() 41 | time.sleep(1) 42 | 43 | 44 | def main(): # pragma: no cover 45 | logging.getLogger("pika").setLevel(logging.CRITICAL) 46 | Logger() 47 | logger = logging.getLogger("main") 48 | config = Config().get_config() 49 | prom = start_prometheus(logger) 50 | 51 | # TODO option that doesn't require an sdn connection? 
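# Wiring note (inferred from this file and operations/monitor.py): SDNConnect wraps the
# Faucet config connection (FaucetRemoteConfGetSetter) and tracks endpoint state, SDNEvents
# consumes the RabbitMQ/FAUCET event queues and exposes job_queue, and Monitor registers the
# periodic metrics/reinvestigation jobs that the schedule thread started below runs.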
52 | sdnc = SDNConnect( 53 | config=config, 54 | logger=logger, 55 | prom=prom, 56 | faucetconfgetsetter_cl=FaucetRemoteConfGetSetter, 57 | ) 58 | 59 | sdne = SDNEvents(logger, prom, sdnc) 60 | sdne.start_message_queues() 61 | 62 | # TODO this should be the default operation, but can be overridden with config to do other operations instead or additionally 63 | monitor = Monitor(logger, config, schedule, sdne.job_queue, sdnc, prom) 64 | 65 | # schedule all threads 66 | schedule_thread = threading.Thread( 67 | target=partial(schedule_thread_worker, logger, scheduler=schedule), 68 | name="st_worker", 69 | ) 70 | schedule_thread.start() 71 | 72 | try: 73 | # TODO each operation should have its own thread running its own "process" and this is just a main infinite loop 74 | sdne.process(monitor) 75 | except Exception as e: 76 | logger.exception(e) 77 | logger.error("restarting because of exception") 78 | sys.exit(1) 79 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/lib/poseidon_core/poseidon_core/operations/__init__.py -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/monitor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import json 4 | import queue 5 | import random 6 | import sys 7 | import time 8 | 9 | 10 | class Monitor: 11 | def __init__(self, logger, config, schedule, job_queue, sdnc, prom): 12 | self.logger = logger 13 | self.rabbits = [] 14 | self.config = config 15 | self.job_queue = job_queue 16 | self.sdnc = sdnc 17 | self.prom = prom 18 | 19 | # timer class to call things periodically in own thread 20 | schedule.every(self.config["scan_frequency"]).seconds.do( 21 | self.schedule_job_update_metrics 22 | ) 23 | schedule.every(self.config["reinvestigation_frequency"]).seconds.do( 24 | self.schedule_job_reinvestigation_timeout 25 | ) 26 | 27 | def get_hosts(self): 28 | # TODO consolidate with update_endpoint_metadata 29 | hosts = [] 30 | for hash_id, endpoint in self.sdnc.endpoints.items(): 31 | roles, _, _ = endpoint.get_roles_confidences_pcap_labels() 32 | role = roles[0] 33 | ipv4_os = endpoint.get_ipv4_os() 34 | host = { 35 | "mac": endpoint.endpoint_data["mac"], 36 | "id": hash_id, 37 | "role": role, 38 | "ipv4_os": ipv4_os, 39 | "state": endpoint.state, 40 | "tenant": endpoint.endpoint_data["tenant"], 41 | "port": endpoint.endpoint_data["port"], 42 | "segment": endpoint.endpoint_data["segment"], 43 | "ipv4": endpoint.endpoint_data["ipv4"], 44 | } 45 | hosts.append(host) 46 | return hosts 47 | 48 | def job_update_metrics(self): 49 | self.logger.debug("updating metrics") 50 | try: 51 | hosts = self.get_hosts() 52 | self.prom.update_metrics(hosts) 53 | except Exception as e: # pragma: no cover 54 | self.logger.error( 55 | "Unable to get current state and send it to Prometheus because: {0}".format( 56 | str(e) 57 | ) 58 | ) 59 | return 0 60 | 61 | def job_recoprocess(self): 62 | if not self.sdnc.sdnc: 63 | for endpoint in self.sdnc.not_copro_ignored_endpoints(): 64 | if endpoint.copro_state != "copro_nominal": 65 | endpoint.copro_nominal() # pytype: disable=attribute-error 66 | return 0 67 | events = 0 68 | for endpoint in 
self.sdnc.not_copro_ignored_endpoints("copro_coprocessing"): 69 | if endpoint.copro_state_timeout(2 * self.config["coprocessing_frequency"]): 70 | self.logger.debug( 71 | "timing out: {0} and setting to unknown".format(endpoint.name) 72 | ) 73 | self.sdnc.uncoprocess_endpoint(endpoint) 74 | endpoint.copro_unknown() # pytype: disable=attribute-error 75 | events += 1 76 | return events 77 | 78 | def job_reinvestigation_timeout(self): 79 | """put endpoints into the reinvestigation state if possible, and timeout investigations""" 80 | if not self.sdnc.sdnc: 81 | for endpoint in self.sdnc.not_ignored_endpoints(): 82 | if endpoint.state != "known": 83 | endpoint.known() # pytype: disable=attribute-error 84 | return 0 85 | events = 0 86 | timeout = 2 * self.config["reinvestigation_frequency"] 87 | for endpoint in self.sdnc.not_ignored_endpoints(): 88 | if endpoint.observed_timeout(timeout): 89 | self.logger.info("observation timing out: {0}".format(endpoint.name)) 90 | endpoint.force_unknown() 91 | events += 1 92 | elif endpoint.operation_active() and endpoint.state_timeout(timeout): 93 | self.logger.info("mirror timing out: {0}".format(endpoint.name)) 94 | self.sdnc.unmirror_endpoint(endpoint) 95 | events += 1 96 | budget = self.sdnc.investigation_budget() 97 | candidates = self.sdnc.not_ignored_endpoints("queued") 98 | if not candidates: 99 | candidates = self.sdnc.not_ignored_endpoints("known") 100 | return events + self._schedule_queued_work( 101 | candidates, budget, "operate", self.sdnc.mirror_endpoint, shuffle=True 102 | ) 103 | 104 | def schedule_job_update_metrics(self): 105 | self.job_queue.put(self.job_update_metrics) 106 | 107 | def schedule_job_reinvestigation_timeout(self): 108 | self.job_queue.put(self.job_reinvestigation_timeout) 109 | 110 | def _schedule_queued_work( 111 | self, queued_endpoints, budget, endpoint_state, endpoint_work, shuffle=False 112 | ): 113 | events = 0 114 | if self.sdnc.sdnc: 115 | if shuffle: 116 | random.shuffle(queued_endpoints) 117 | for endpoint in queued_endpoints[:budget]: 118 | getattr(endpoint, endpoint_state)() 119 | endpoint_work(endpoint) 120 | if endpoint_state in ["trigger_next", "operate"]: 121 | # TODO this may not be necessarily true going forward 122 | self.prom.prom_metrics["ncapture_count"].inc() 123 | events += 1 124 | return events 125 | 126 | # TODO make generic 127 | def schedule_mirroring(self): 128 | for endpoint in self.sdnc.not_ignored_endpoints("unknown"): 129 | endpoint.queue_next("operate") 130 | budget = self.sdnc.investigation_budget() 131 | queued_endpoints = [ 132 | endpoint 133 | for endpoint in self.sdnc.not_ignored_endpoints("queued") 134 | if endpoint.operation_requested() 135 | ] 136 | queued_endpoints = sorted(queued_endpoints, key=lambda x: x.state_time) 137 | self.logger.debug( 138 | "operations {0}, budget {1}, queued {2}".format( 139 | str(self.sdnc.investigations), str(budget), str(len(queued_endpoints)) 140 | ) 141 | ) 142 | return self._schedule_queued_work( 143 | queued_endpoints, budget, "trigger_next", self.sdnc.mirror_endpoint 144 | ) 145 | 146 | # TODO make generic 147 | def schedule_coprocessing(self): 148 | for endpoint in self.sdnc.not_copro_ignored_endpoints("copro_unknown"): 149 | endpoint.copro_queue_next("copro_coprocess") 150 | budget = self.sdnc.coprocessing_budget() 151 | queued_endpoints = self.sdnc.not_copro_ignored_endpoints("copro_queued") 152 | queued_endpoints = sorted(queued_endpoints, key=lambda x: x.copro_state_time) 153 | self.logger.debug( 154 | "coprocessing {0}, budget {1}, queued 
{2}".format( 155 | str(self.sdnc.coprocessing), str(budget), str(len(queued_endpoints)) 156 | ) 157 | ) 158 | return self._schedule_queued_work( 159 | queued_endpoints, budget, "copro_trigger_next", self.sdnc.coprocess_endpoint 160 | ) 161 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/primitives/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/lib/poseidon_core/poseidon_core/operations/primitives/__init__.py -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/primitives/acl.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 4 March 2020 4 | @author: Charlie Lewis 5 | """ 6 | import logging 7 | import os 8 | 9 | 10 | class ACL: 11 | def __init__(self, faucetconfgetsetter): 12 | self.logger = logging.getLogger("acl") 13 | self.frpc = faucetconfgetsetter 14 | 15 | def _config_file_paths(self, file_paths): 16 | return [self.frpc.config_file_path(f) for f in file_paths] 17 | 18 | def include_acl_files(self, rules_doc, rules_file, coprocess_rules_files, obj_doc): 19 | files = self._config_file_paths(rules_doc["include"]) 20 | rules_path = rules_file.rsplit("/", 1)[0] 21 | conf_files = obj_doc.get("include", []) 22 | 23 | acls_docs = {} 24 | for f in files: 25 | if f.startswith("/"): 26 | acls_doc = self.frpc.read_faucet_conf(f) 27 | else: 28 | acls_doc = self.frpc.read_faucet_conf(os.path.join(rules_path, f)) 29 | if isinstance(acls_doc, bool): 30 | self.logger.warning( 31 | "Include file {0} was not found, ACLs may not be working as expected".format( 32 | f 33 | ) 34 | ) 35 | continue 36 | acls_docs[f] = acls_doc 37 | 38 | if conf_files: 39 | acls_filenames = [] 40 | if coprocess_rules_files: 41 | acls_filenames += self._config_file_paths(coprocess_rules_files) 42 | for f in files: 43 | if "/" in f: 44 | acls_filenames.append(f.rsplit("/", 1)[1]) 45 | else: 46 | acls_filenames.append(f) 47 | for conf_file in conf_files: 48 | if conf_file.startswith("poseidon") and conf_file not in acls_filenames: 49 | obj_doc["include"].remove(conf_file) 50 | self.logger.info("Removing {0} from config".format(conf_file)) 51 | else: 52 | obj_doc["include"] = [] 53 | 54 | for f, acls_doc in acls_docs.items(): 55 | if "/" in f: 56 | _, acls_filename = f.rsplit("/", 1) 57 | else: 58 | acls_filename = f 59 | poseidon_acls_filename = "poseidon_" + acls_filename 60 | if poseidon_acls_filename not in conf_files: 61 | obj_doc["include"].append(poseidon_acls_filename) 62 | self.frpc.write_faucet_conf( 63 | os.path.join(rules_path, poseidon_acls_filename), acls_doc 64 | ) 65 | self.logger.info("Adding {0} to config".format(acls_filename)) 66 | 67 | # get defined ACL names from included files 68 | acl_names = [] 69 | for acls_doc in acls_docs.values(): 70 | acl_names.extend(list(acls_doc.get("acls", []))) 71 | return obj_doc, acl_names 72 | 73 | def match_rules( 74 | self, 75 | rule, 76 | rules, 77 | obj_doc, 78 | endpoint, 79 | switch, 80 | port, 81 | all_rule_acls, 82 | force_apply_rules, 83 | ): 84 | matches = 0 85 | for r in rules[rule]: 86 | if "rule" in r and "device_key" in r["rule"]: 87 | rule_data = r["rule"] 88 | if rule_data["device_key"] == "os": 89 | match = False 90 | for addresses in ("ipv4_addresses", "ipv6_addresses"): 91 | if 
"addresses" in endpoint.metadata: 92 | for ip, ip_metadata in endpoint.metadata["addresses"]: 93 | if ( 94 | "os" in ip_metadata 95 | and ip_metadata["os"] == rule_data["value"] 96 | ): 97 | self.logger.info( 98 | "{0} os match: {1} {2}, rule: {3}".format( 99 | addresses, ip, rule_data["value"], rule 100 | ) 101 | ) 102 | match = True 103 | if match: 104 | matches += 1 105 | elif rule_data["device_key"] == "role": 106 | match = False 107 | if "mac_addresses" in endpoint.metadata: 108 | for mac, mac_metadata in endpoint.metadata[ 109 | "mac_addresses" 110 | ].items(): 111 | most_recent = 0 112 | for record in mac_metadata: 113 | if float(record) > most_recent: 114 | most_recent = float(record) 115 | most_recent = str(most_recent) 116 | if ( 117 | most_recent != "0" 118 | and most_recent in mac_metadata 119 | and "labels" in mac_metadata[most_recent] 120 | and "confidences" in mac_metadata[most_recent] 121 | ): 122 | # check top three 123 | for i in range(3): 124 | if ( 125 | mac_metadata[most_recent]["labels"][i] 126 | == rule_data["value"] 127 | ): 128 | if "min_confidence" in rule_data["value"]: 129 | if ( 130 | float( 131 | mac_metadata[most_recent][ 132 | "confidences" 133 | ][i] 134 | ) 135 | * 100 136 | >= rule_data["min_confidence"] 137 | ): 138 | self.logger.info( 139 | "Confidence match: {0} {1}, rule: {2}".format( 140 | mac, 141 | float( 142 | mac_metadata[most_recent][ 143 | "confidences" 144 | ][i] 145 | ) 146 | * 100, 147 | rule, 148 | ) 149 | ) 150 | match = True 151 | else: 152 | self.logger.info( 153 | "Role match: {0} {1}, rule: {2}".format( 154 | mac, rule_data["value"], rule 155 | ) 156 | ) 157 | match = True 158 | if match: 159 | matches += 1 160 | if matches == len(rules[rule]) or ( 161 | force_apply_rules and rule in force_apply_rules 162 | ): 163 | rule_acls = [] 164 | for r in rules[rule]: 165 | rule_acls += r["rule"]["acls"] 166 | all_rule_acls += r["rule"]["acls"] 167 | rule_acls = list(set(rule_acls)) 168 | interfaces_conf = obj_doc["dps"][switch]["interfaces"] 169 | if rule_acls: 170 | if port not in interfaces_conf: 171 | interfaces_conf[port] = {} 172 | port_conf = interfaces_conf[port] 173 | if "acls_in" not in port_conf: 174 | self.logger.info( 175 | "All rules met for: {0} on switch: {1} and port: {2}; applying ACLs: {3}".format( 176 | endpoint.endpoint_data["mac"], switch, port, rule_acls 177 | ) 178 | ) 179 | port_conf["acls_in"] = rule_acls 180 | else: 181 | # add new ACLs 182 | orig_rule_acls = rule_acls 183 | rule_acls += port_conf["acls_in"] 184 | rule_acls = list(set(rule_acls)) 185 | if port_conf["acls_in"] != rule_acls: 186 | port_conf["acls_in"] = rule_acls 187 | self.logger.info( 188 | "All rules met for: {0} on switch: {1} and port: {2}; applying ACLs: {3}".format( 189 | endpoint.endpoint_data["mac"], switch, port, orig_rule_acls 190 | ) 191 | ) 192 | return obj_doc, all_rule_acls 193 | 194 | def apply_acls( 195 | self, 196 | rules_file, 197 | endpoints, 198 | force_apply_rules, 199 | force_remove_rules, 200 | coprocess_rules_files, 201 | obj_doc, 202 | rules_doc, 203 | ): 204 | if not endpoints: 205 | return obj_doc 206 | 207 | # get acls file and add to faucet.yaml if not already there 208 | if "include" not in rules_doc: 209 | self.logger.info( 210 | "No included ACLs files in the rules file, using ACLs that Faucet already knows about" 211 | ) 212 | else: 213 | obj_doc, acl_names = self.include_acl_files( 214 | rules_doc, rules_file, coprocess_rules_files, obj_doc 215 | ) 216 | 217 | if "rules" in rules_doc: 218 | acls = [] 219 | rules = 
rules_doc["rules"] 220 | 221 | for rule in rules: 222 | for r in rules[rule]: 223 | acls += r["rule"]["acls"] 224 | acls = list(set(acls)) 225 | 226 | # check that acls in rules exist in the included acls file 227 | if "include" in rules_doc: 228 | for acl in acls: 229 | if acl not in acl_names: 230 | self.logger.info( 231 | "Using named ACL: {0}, but it was not found in included ACL files, assuming ACL name exists in Faucet config".format( 232 | acl 233 | ) 234 | ) 235 | 236 | for endpoint in endpoints: 237 | port = int(endpoint.endpoint_data["port"]) 238 | switch = endpoint.endpoint_data["segment"] 239 | switch_conf = obj_doc["dps"].get(switch, None) 240 | if not switch_conf: 241 | continue 242 | port_conf = switch_conf["interfaces"].get(port, None) 243 | if not port_conf: 244 | continue 245 | existing_acls = port_conf.get("acls_in", None) 246 | if not existing_acls: 247 | continue 248 | all_rule_acls = [] 249 | for rule in rules: 250 | obj_doc, all_rule_acls = self.match_rules( 251 | rule, 252 | rules, 253 | obj_doc, 254 | endpoint, 255 | switch, 256 | port, 257 | all_rule_acls, 258 | force_apply_rules, 259 | ) 260 | # remove ACLs that were previously applied 261 | all_rule_acls = list(set(all_rule_acls)) 262 | for acl in existing_acls: 263 | if acl in acls and ( 264 | acl not in all_rule_acls or acl in force_remove_rules 265 | ): 266 | port_conf["acls_in"].remove(acl) 267 | self.logger.info( 268 | "Removing no longer needed ACL: {0} for: {1} on switch: {2} and port: {3}".format( 269 | acl, endpoint.endpoint_data["mac"], switch, port 270 | ) 271 | ) 272 | 273 | # TODO acl by port - potentially later update rules in acls to be mac/ip specific 274 | # TODO ignore trunk ports/stacking ports? 275 | 276 | return obj_doc 277 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/primitives/coprocess.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 30 January 2020 4 | @author: Ryan Ashley 5 | """ 6 | import logging 7 | 8 | 9 | class Coprocess: 10 | def __init__(self, controller): 11 | self.logger = logging.getLogger("coprocessor") 12 | self.pipette_repo = controller["pipette_repo"] 13 | self.pipette_dir = controller["pipette_dir"] 14 | self.coprocessor_nic = controller["coprocessor_nic"] 15 | self.coprocessor_port = controller["coprocessor_port"] 16 | self.coprocessor_vlans = controller["coprocessor_vlans"] 17 | self.fake_interface = controller["fake_interface"] 18 | self.fake_mac = controller["fake_mac"] 19 | self.fake_ips = controller["fake_ips"] 20 | self.bridge = controller["bridge"] 21 | self.pipette_port = controller["pipette_port"] 22 | self.pcap_location = controller["pcap_location"] 23 | self.pcap_size = controller["pcap_size"] 24 | self.pipette_running = False 25 | 26 | def start_coprocessor(self): 27 | """ 28 | Starts OVS and containers 29 | """ 30 | status = False 31 | 32 | return status 33 | 34 | def stop_coprocessor(self): 35 | """ 36 | Stops OVS and containers 37 | """ 38 | status = False 39 | 40 | return status 41 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/primitives/meter.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/lib/poseidon_core/poseidon_core/operations/primitives/meter.py 
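A note on the rules document consumed by ACL.match_rules and ACL.apply_acls above: the key layout in this sketch is inferred from that code; the rule name, role value, and threshold are illustrative placeholders, not values taken from the shipped config/rules.yaml.

    # Hypothetical rules_doc, as it would look after YAML loading (keys inferred from acl.py above).
    rules_doc = {
        "include": ["acls.yaml"],  # optional ACL files, copied into the Faucet config as poseidon_*.yaml
        "rules": {
            "example_admin_rule": [  # every entry under a rule name must match before its ACLs are applied
                {
                    "rule": {
                        "device_key": "role",  # "os" matches per-IP OS metadata, "role" matches per-MAC role labels
                        "value": "Administrator workstation",
                        "min_confidence": 80,  # optional percent threshold, only checked for role rules
                        "acls": ["acl_same_a"],  # Faucet ACL names added to the endpoint port's acls_in
                    }
                }
            ]
        },
    }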
-------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/primitives/mirror.py: -------------------------------------------------------------------------------- 1 | from poseidon_core.helpers.config import Config 2 | 3 | 4 | class Mirror: 5 | def __init__(self, logger): 6 | config = Config().get_config() 7 | self.logger = logger 8 | self.mirror_ports = config["MIRROR_PORTS"] 9 | self.proxy_mirror_ports = config["controller_proxy_mirror_ports"] 10 | self.tunnel_vlan = config["tunnel_vlan"] 11 | self.tunnel_name = config["tunnel_name"] 12 | self.ignore_vlans = config["ignore_vlans"] 13 | self.ignore_ports = config["ignore_ports"] 14 | self.trunk_ports = config["truck_ports"] 15 | 16 | def mirror_port(self, switch, port): 17 | self.logger.warning(f"Unable to mirror {switch}:{port}") 18 | return False 19 | 20 | def unmirror_port(self, switch, port): 21 | self.logger.warning(f"Unable to unmirror {switch}:{port}") 22 | return False 23 | 24 | def mirror_mac(self, switch, port, mac): 25 | self.logger.warning(f"Unable to mirror {switch}:{port}:{mac}") 26 | return False 27 | 28 | def unmirror_mac(self, switch, port, mac): 29 | self.logger.warning(f"Unable to unmirror {switch}:{port}:{mac}") 30 | return False 31 | 32 | def mirror_endpoint(self, endpoint): 33 | self.logger.warning(f"Unable to mirror {endpoint.name}") 34 | return False 35 | 36 | def unmirror_endpoint(self, endpoint): 37 | self.logger.warning(f"Unable to unmirror {endpoint.name}") 38 | return False 39 | 40 | def clear_mirrors(self): 41 | self.logger.warning("Unable to clear all mirrors") 42 | return False 43 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/volos/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/lib/poseidon_core/poseidon_core/operations/volos/__init__.py -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/volos/acls.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 31 January 2020 4 | @author: Ryan Ashley 5 | """ 6 | import logging 7 | import os 8 | from pathlib import Path 9 | 10 | # TODO merge with primitives/acl.py 11 | 12 | 13 | class Acl: 14 | def __init__(self, acl_file=None, faucetconfgetsetter=None): 15 | self.acls = {} 16 | self.acl_file = acl_file 17 | self.faucetconfgetsetter = faucetconfgetsetter 18 | 19 | def _read_existing(self): 20 | try: 21 | return self.faucetconfgetsetter.read_faucet_conf(self.acl_file) 22 | except (FileNotFoundError, PermissionError): 23 | return {} 24 | 25 | def read(self, config_yaml=None): 26 | if config_yaml is None: 27 | config_yaml = self._read_existing() 28 | self.acls = config_yaml.get("acls", {}) 29 | return config_yaml 30 | 31 | def write(self): 32 | config_yaml = self._read_existing() 33 | self._merge_acls(config_yaml) 34 | try: 35 | self.faucetconfgetsetter.write_faucet_conf(self.acl_file, config_yaml) 36 | return True 37 | except (FileNotFoundError, PermissionError): 38 | return False 39 | 40 | def add_rule(self, name, rule): 41 | if name not in self.acls: 42 | self.acls[name] = [] 43 | self.acls[name].append(rule) 44 | 45 | def _merge_acls(self, yaml_config): 46 | if "acls" not in yaml_config: 47 | yaml_config["acls"] = {} 48 | 
yaml_config["acls"].update(self.acls) 49 | 50 | 51 | class ExclusiveAcl(Acl): 52 | def _merge_acls(self, yaml_config): 53 | yaml_config["acls"] = self.acls 54 | 55 | 56 | class VolosAcl(ExclusiveAcl): 57 | def __init__(self, endpoint, acl_dir, copro_vlans=[2], copro_port=23): 58 | self.mac = endpoint.endpoint_data["mac"] 59 | self.acl_key = f"volos_copro_{self.mac}" 60 | self.acl_dir = acl_dir 61 | acl_file = os.path.join(self.acl_dir, f"/%s.yaml" % self.acl_key) 62 | super(VolosAcl, self).__init__(acl_file=acl_file) 63 | self.logger = logging.getLogger("coprocessor") 64 | self.endpoint = endpoint 65 | self.id = endpoint.name 66 | self.copro_vlans = copro_vlans 67 | self.copro_port = copro_port 68 | 69 | def write_acl_file(self, port_list=[]): 70 | self.acls = {} 71 | for port in port_list: 72 | for eth_type in (0x0800, 0x86DD): 73 | ip_str = port["proto"] 74 | addresses = self.endpoint.metadata.get("%s_addresses" % ip_str, None) 75 | if addresses: 76 | for ip in addresses: 77 | rule = { 78 | "rule": { 79 | "dl_type": eth_type, 80 | "nw_proto": port["proto_id"], 81 | "%s_src" % ip_str: ip, 82 | "actions": { 83 | "output": { 84 | "ports": [self.copro_port], 85 | "vlan_vid": self.copro_vlans, 86 | } 87 | }, 88 | } 89 | } 90 | rule["rule"]["%s_dst" % ip_str] = port["port"] 91 | self.add_rule(self.acl_key, rule) 92 | self.add_rule(self.acl_key, {"rule": {"actions": {"allow": 1}}}) 93 | status = self.write() 94 | if not status: 95 | self.logger.error( 96 | "Volos ACL file:{0} could not be written. Coprocessing may not work as expected".format( 97 | self.acl_file 98 | ) 99 | ) 100 | return status 101 | 102 | def delete_acl_file(self): 103 | try: 104 | if os.path.exists(self.acl_file): 105 | os.remove(self.acl_file) 106 | return True 107 | except Exception as e: # pragma: no cover 108 | self.logger.error( 109 | "Volos ACL file:{0} could not be deleted. Coprocessing may not work as expected".format( 110 | self.acl_file 111 | ) 112 | ) 113 | return False 114 | 115 | def ensure_acls_dir(self): 116 | try: 117 | if not os.path.exists(self.acl_dir): 118 | Path(self.acl_dir).mkdir(parents=True, exist_ok=True) 119 | return True 120 | except Exception as e: # pragma: no cover 121 | self.logger.error( 122 | "Volos ACL directory:{0} could not be created. 
Coprocessing may not work as expected".format( 123 | self.acl_file 124 | ) 125 | ) 126 | return False 127 | -------------------------------------------------------------------------------- /lib/poseidon_core/poseidon_core/operations/volos/volos.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 05 February 2020 4 | @author: Ryan Ashley 5 | """ 6 | import logging 7 | import os 8 | 9 | from poseidon_core.constants import PROTOCOL_MAP 10 | from poseidon_core.helpers.config import yaml_load 11 | 12 | 13 | class Volos(object): 14 | def __init__(self, controller): 15 | self.logger = logging.getLogger("volos") 16 | self.enabled = controller["enable_volos"] 17 | self.coprocessor_nic = controller["coprocessor_nic"] 18 | self.coprocessor_port = controller["coprocessor_port"] 19 | self.coprocessor_vlans = controller["coprocessor_vlans"] 20 | self.coprocessing_frequency = controller["coprocessing_frequency"] 21 | self.ignore_copro_ports = controller["ignore_copro_ports"] 22 | self.acl_dir = controller["acl_dir"] 23 | volos_cfg_file = controller["volos_cfg_file"] 24 | self.container_config = self.parse_volos_cfg(volos_cfg_file) 25 | 26 | def parse_volos_cfg(self, volos_cfg_file): 27 | cfg = None 28 | container_cfg = None 29 | if os.path.exists(volos_cfg_file): 30 | try: 31 | with open(volos_cfg_file, "r") as f: 32 | cfg = yaml_load(f) 33 | except Exception as e: # pragma: no cover 34 | self.logger.warning( 35 | "Volos configuration could not be loaded, disabling Volos" 36 | ) 37 | self.logger.error( 38 | "Failed to load Volos config with error: {0}".format(str(e)) 39 | ) 40 | self.enabled = False 41 | container_cfg = [] 42 | if cfg: 43 | for repo in cfg: 44 | item = {} 45 | for name in cfg[repo]: 46 | item["repo"] = repo 47 | item["name"] = name 48 | item["branch"] = cfg[repo][name]["branch"] 49 | item["ports"] = [] 50 | for port in cfg[repo][name]["ports"]: 51 | if port: 52 | cfg_port = port.get("port", None) 53 | if cfg_port: 54 | mapping = cfg_port["mapping"] 55 | protocol = cfg_port["protocol"] 56 | cfg_p = { 57 | "proto": protocol, 58 | "proto_id": PROTOCOL_MAP[protocol], 59 | "host": mapping[: mapping.index(":")], 60 | "dest": mapping[mapping.index(":") :], 61 | } 62 | item["ports"].append(cfg_p) 63 | container_cfg.append(item) 64 | 65 | else: 66 | self.enabled = False 67 | else: 68 | self.logger.debug("Volos configuration could not be found. 
disabling volos") 69 | self.enabled = False 70 | 71 | return container_cfg 72 | 73 | """ 74 | build structure of the form 75 | { 76 | 'mac1': { 77 | 'ip': { 78 | 'v4': "ipv4_1", 79 | 'v6': "ipv6_1", 80 | }, 81 | 'ports': [ 82 | { 83 | 'proto': 'tcp', 84 | 'proto_id': 6, 85 | 'port': 25, 86 | }, 87 | { 88 | 'proto': 'tcp', 89 | 'proto_id': 6, 90 | 'port': 26, 91 | }, 92 | { 93 | 'proto': 'udp', 94 | 'proto_id': 17, 95 | 'port': 27, 96 | } 97 | ] 98 | }, 99 | """ 100 | 101 | def get_port_list(self, mac, ipv4=None, ipv6=None): 102 | port_list = {} 103 | port_list[mac] = {"ip": {"v4": ipv4, "v6": ipv6}, "ports": []} 104 | for i in self.container_config: 105 | for port in i["ports"]: 106 | p = {} 107 | p["proto"] = port["proto"] 108 | p["proto_id"] = port["proto_id"] 109 | p["port"] = port["dest"] 110 | 111 | port_list[mac]["ports"].append(p) 112 | 113 | return port_list 114 | -------------------------------------------------------------------------------- /lib/poseidon_core/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "poseidon-core" 3 | version = "0.18.3.dev" 4 | description = "Poseidon core package, an application that leverages software defined networks (SDN) to acquire and then feed network traffic to a number of analytic tools." 5 | authors = ["cglewis "] 6 | license = "Apache-2.0" 7 | packages = [ 8 | { include = "poseidon_core" }, 9 | ] 10 | 11 | [tool.poetry.dependencies] 12 | python = ">=3.8 <3.11" 13 | faucetconfrpc = "0.55.57" 14 | httpx = "0.24.1" 15 | netaddr = "0.8.0" 16 | pika = "1.3.2" 17 | prometheus_client = "^0.17.0" 18 | pyyaml = "6.0" 19 | schedule = "1.2.0" 20 | transitions = "0.9.0" 21 | requests = "<2.32.3" 22 | urllib3 = "<2.2.3" 23 | 24 | [tool.poetry.dev-dependencies] 25 | black = "24.3.0" 26 | docker = "6.1.3" 27 | httmock = "1.4.0" 28 | mock = "5.0.2" 29 | netifaces = "0.11.0" 30 | pylint = "2.17.4" 31 | pytest-cov = "4.1.0" 32 | pytest = "7.3.1" 33 | pytype = "2023.5.24" 34 | "ruamel.yaml" = "0.17.28" 35 | 36 | [tool.poetry.scripts] 37 | poseidon-core = 'poseidon_core.__main__:main' 38 | 39 | [tool.poetry.urls] 40 | homepage = "https://github.com/IQTLabs/poseidon" 41 | 42 | [build-system] 43 | requires = ["poetry-core>=1.0.0"] 44 | build-backend = "poetry.core.masonry.api" 45 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/faucetconfgetsetter.py: -------------------------------------------------------------------------------- 1 | from poseidon_core.controllers.faucet.config import FaucetRemoteConfGetSetter 2 | from poseidon_core.controllers.sdnconnect import SDNConnect 3 | from poseidon_core.helpers.config import Config 4 | from poseidon_core.helpers.config import yaml_in 5 | from poseidon_core.helpers.config import yaml_out 6 | from poseidon_core.helpers.prometheus import Prometheus 7 | 8 | 9 | class FaucetLocalConfGetSetter(FaucetRemoteConfGetSetter): 10 | def __init__(self, **_kwargs): 11 | self.faucet_conf = {} 12 | 13 | @staticmethod 14 | def config_file_path(config_file): 15 | return config_file 16 | 17 | def read_faucet_conf(self, config_file): 18 | if not config_file: 19 | config_file = self.DEFAULT_CONFIG_FILE 20 | faucet_conf = yaml_in(config_file) 21 | if isinstance(faucet_conf, dict): 22 | self.faucet_conf = faucet_conf 23 | return self.faucet_conf 24 | 25 | def write_faucet_conf(self, config_file=None, faucet_conf=None): 26 | if not config_file: 27 | config_file = self.DEFAULT_CONFIG_FILE 28 | if faucet_conf is 
None: 29 | faucet_conf = self.faucet_conf 30 | self.faucet_conf = faucet_conf 31 | return yaml_out(config_file, self.faucet_conf) 32 | 33 | def set_port_conf(self, dp, port, port_conf): 34 | switch_conf = self.get_switch_conf(dp) 35 | switch_conf["interfaces"][port] = port_conf 36 | self.write_faucet_conf() 37 | 38 | def update_switch_conf(self, dp, switch_conf): 39 | self.faucet_conf["dps"][dp].update(switch_conf) 40 | self.write_faucet_conf() 41 | 42 | def _get_mirrored_ports(self, dp, mirror_port): 43 | mirror_interface_conf = self.get_port_conf(dp, mirror_port) 44 | mirrored_ports = None 45 | if mirror_interface_conf: 46 | mirrored_ports = mirror_interface_conf.get("mirror", None) 47 | return mirror_interface_conf, mirrored_ports 48 | 49 | def _set_mirror_config(self, dp, mirror_port, mirror_interface_conf, ports=None): 50 | if ports: 51 | if isinstance(ports, set): 52 | ports = list(ports) 53 | mirror_interface_conf["mirror"] = ports 54 | # Don't delete DP level config when setting mirror list to empty, 55 | # as that could cause an unnecessary cold start. 56 | elif "mirror" in mirror_interface_conf: 57 | del mirror_interface_conf["mirror"] 58 | self.set_port_conf(dp, mirror_port, mirror_interface_conf) 59 | 60 | def mirror_port(self, dp, mirror_port, port): 61 | mirror_interface_conf, ports = self._get_mirrored_ports(dp, mirror_port) 62 | ports = set(ports) 63 | ports.add(port) 64 | self._set_mirror_config(dp, mirror_port, mirror_interface_conf, ports) 65 | 66 | def unmirror_port(self, dp, mirror_port, port): 67 | mirror_interface_conf, ports = self._get_mirrored_ports(dp, mirror_port) 68 | ports = set(ports) 69 | if port in ports: 70 | ports.remove(port) 71 | self._set_mirror_config(dp, mirror_port, mirror_interface_conf, ports) 72 | 73 | def clear_mirror_port(self, dp, mirror_port): 74 | mirror_interface_conf, _ = self._get_mirrored_ports(dp, mirror_port) 75 | self._set_mirror_config(dp, mirror_port, mirror_interface_conf) 76 | 77 | 78 | def get_test_config(): 79 | config = Config().get_config() 80 | config["faucetconfrpc_address"] = None 81 | return config 82 | 83 | 84 | def get_sdn_connect(logger): 85 | config = get_test_config() 86 | prom = Prometheus() 87 | return SDNConnect( 88 | config, logger, prom, faucetconfgetsetter_cl=FaucetLocalConfGetSetter 89 | ) 90 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/sample_acls.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | acls: 3 | acl_same_a: 4 | - rule: 5 | actions: 6 | allow: 1 7 | acl_same_b: 8 | - rule: 9 | actions: 10 | allow: 1 11 | acl_diff_c: 12 | - rule: 13 | actions: 14 | allow: 0 15 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/sample_content.txt: -------------------------------------------------------------------------------- 1 | Cookie test successful. 
2 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/sample_faucet_config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | vlans: 3 | office: 4 | vid: 100 5 | 6 | include: 7 | - sample_acls.yaml 8 | 9 | dps: 10 | t1-1: 11 | arp_neighbor_timeout: 900 12 | timeout: 1801 13 | dp_id: 0x1 14 | hardware: "Open vSwitch" 15 | stack: 16 | priority: 1 17 | interfaces: 18 | 1: 19 | stack: 20 | dp: t2-1 21 | port: 1 22 | 2: 23 | output_only: true 24 | mirror: 25 | - 3 26 | 3: 27 | native_vlan: office 28 | loop_protect_external: true 29 | t1-2: 30 | dp_id: 0x2 31 | hardware: "Open vSwitch" 32 | stack: 33 | priority: 2 34 | interfaces: 35 | 1: 36 | stack: 37 | dp: t2-1 38 | port: 2 39 | 2: 40 | native_vlan: office 41 | 3: 42 | native_vlan: office 43 | loop_protect_external: true 44 | t2-1: 45 | dp_id: 0x3 46 | hardware: "Open vSwitch" 47 | interfaces: 48 | 1: 49 | stack: 50 | dp: t1-1 51 | port: 1 52 | 2: 53 | stack: 54 | dp: t1-2 55 | port: 1 56 | 3: 57 | native_vlan: office 58 | loop_protect_external: false 59 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/test_actions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Test module for actions 4 | @author: Charlie Lewis 5 | """ 6 | import logging 7 | 8 | from faucetconfgetsetter import get_sdn_connect 9 | from poseidon_core.helpers.actions import Actions 10 | from poseidon_core.helpers.config import Config 11 | from poseidon_core.helpers.endpoint import endpoint_factory 12 | 13 | logger = logging.getLogger("test") 14 | 15 | 16 | def test_actions(): 17 | """ 18 | Tests Actions 19 | """ 20 | endpoint = endpoint_factory("foo") 21 | endpoint.endpoint_data = {"mac": "00:00:00:00:00:00", "segment": "foo", "port": "1"} 22 | s = get_sdn_connect(logger) 23 | a = Actions(endpoint, s.sdnc) 24 | a.mirror_endpoint() 25 | a.unmirror_endpoint() 26 | a.coprocess_endpoint() 27 | a.uncoprocess_endpoint() 28 | 29 | 30 | def test_actions_nosdn(): 31 | """ 32 | Tests Actions with no SDN controller 33 | """ 34 | endpoint = endpoint_factory("foo") 35 | endpoint.endpoint_data = {"mac": "00:00:00:00:00:00", "segment": "foo", "port": "1"} 36 | s = get_sdn_connect(logger) 37 | s.sdnc = None 38 | a = Actions(endpoint, s.sdnc) 39 | a.mirror_endpoint() 40 | a.unmirror_endpoint() 41 | a.coprocess_endpoint() 42 | a.uncoprocess_endpoint() 43 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/test_collector.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Test module for collector. 
4 | @author: Charlie Lewis 5 | """ 6 | from poseidon_core.helpers.collector import Collector 7 | from poseidon_core.helpers.endpoint import endpoint_factory 8 | 9 | 10 | def test_Collector(): 11 | """ 12 | Tests Collector 13 | """ 14 | endpoint = endpoint_factory("foo") 15 | endpoint.endpoint_data = {"mac": "00:00:00:00:00:00"} 16 | a = Collector(endpoint, "foo") 17 | a.start_collector() 18 | a.stop_collector() 19 | a.get_collectors() 20 | a.host_has_active_collectors("foo") 21 | endpoint = endpoint_factory("foo") 22 | endpoint.endpoint_data = {"mac": "00:00:00:00:00:00", "container_id": "foo"} 23 | a = Collector(endpoint, "foo") 24 | a.stop_collector() 25 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/test_endpoint.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Test module for endpoints. 4 | @author: Charlie Lewis 5 | """ 6 | import time 7 | 8 | from poseidon_core.helpers.endpoint import Endpoint 9 | from poseidon_core.helpers.endpoint import endpoint_factory 10 | from poseidon_core.helpers.endpoint import EndpointDecoder 11 | 12 | 13 | def test_Endpoint(): 14 | """Tests Endpoint.""" 15 | endpoint = endpoint_factory("foo") 16 | b = endpoint.encode() 17 | c = EndpointDecoder(b).get_endpoint() 18 | a = {"tenant": "foo", "mac": "00:00:00:00:00:00"} 19 | assert Endpoint.make_hash(a) 20 | 21 | 22 | def test_times_next(): 23 | endpoint = endpoint_factory("foo") 24 | endpoint.queue_next("operate") 25 | time.sleep(1) 26 | endpoint.copro_queue_next("copro_coprocess") 27 | time.sleep(1) 28 | assert endpoint.state_timeout(0) 29 | assert endpoint.copro_state_timeout(0) 30 | endpoint.trigger_next() 31 | endpoint.copro_trigger_next() 32 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/test_log.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 25 Oct 2017 4 | @author: dgrossman 5 | """ 6 | from poseidon_core.helpers.log import Logger 7 | 8 | 9 | def test_logger_base(): 10 | class MockLogger(Logger): 11 | def __init__(self): 12 | pass 13 | 14 | logger = MockLogger() 15 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/test_prometheus.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Test module for prometheus 4 | @author: Charlie Lewis 5 | """ 6 | from poseidon_core.helpers.prometheus import Prometheus 7 | 8 | 9 | def test_Prometheus(): 10 | """ 11 | Tests Prometheus 12 | """ 13 | p = Prometheus.get_metrics() 14 | hosts = [ 15 | { 16 | "active": 0, 17 | "source": "poseidon", 18 | "role": "unknown", 19 | "state": "unknown", 20 | "ipv4_os": "unknown", 21 | "tenant": "vlan1", 22 | "port": 1, 23 | "segment": "switch1", 24 | "ipv4": "123.123.123.123", 25 | "mac": "00:00:00:00:00:00", 26 | "id": "foo1", 27 | }, 28 | { 29 | "active": 1, 30 | "source": "poseidon", 31 | "role": "unknown", 32 | "state": "unknown", 33 | "ipv4_os": "unknown", 34 | "tenant": "vlan1", 35 | "port": 1, 36 | "segment": "switch1", 37 | "ipv4": "123.123.123.123", 38 | "mac": "00:00:00:00:00:00", 39 | "id": "foo2", 40 | }, 41 | { 42 | "active": 0, 43 | "source": "poseidon", 44 | "role": "unknown", 45 | "state": "unknown", 46 | "ipv4_os": "unknown", 47 | "tenant": "vlan1", 48 | "port": 1, 49 | "segment": "switch1", 50 | "ipv4": 
"123.123.123.123", 51 | "mac": "00:00:00:00:00:00", 52 | "id": "foo3", 53 | }, 54 | { 55 | "active": 1, 56 | "source": "poseidon1", 57 | "role": "unknown", 58 | "state": "unknown", 59 | "ipv4_os": "unknown", 60 | "tenant": "vlan1", 61 | "port": 2, 62 | "segment": "switch1", 63 | "ipv4": "2106::1", 64 | "mac": "00:00:00:00:00:00", 65 | "id": "foo4", 66 | }, 67 | { 68 | "active": 1, 69 | "source": "poseidon", 70 | "role": "unknown", 71 | "state": "unknown", 72 | "ipv4_os": "unknown", 73 | "tenant": "vlan1", 74 | "port": 1, 75 | "segment": "switch1", 76 | "ipv4": "::", 77 | "mac": "00:00:00:00:00:00", 78 | "id": "foo5", 79 | }, 80 | ] 81 | p = Prometheus() 82 | p.update_metrics(hosts) 83 | 84 | 85 | def test_decode_endpoints(): 86 | p = Prometheus() 87 | hashes = { 88 | "6b33db53faf33c77d694ecab2e3fefadc7dacc70": { 89 | "__name__": "poseidon_endpoint_metadata", 90 | "acls": "[]", 91 | "controller_type": "faucet", 92 | "ether_vendor": "Micro-Star", 93 | "hash_id": "6b33db53faf33c77d694ecab2e3fefadc7dacc70", 94 | "ignore": "False", 95 | "instance": "poseidon:9304", 96 | "ipv4_address": "192.168.3.131", 97 | "ipv4_os": "Windows", 98 | "ipv4_rdns": "NO DATA", 99 | "ipv4_subnet": "192.168.3.0/24", 100 | "ipv6_subnet": "NO DATA", 101 | "job": "poseidon", 102 | "mac": "40:61:86:9a:f1:f5", 103 | "name": "None", 104 | "next_state": "None", 105 | "port": "1", 106 | "prev_state": "queued", 107 | "segment": "switch1", 108 | "state": "operating", 109 | "tenant": "VLAN100", 110 | "top_role": "Administrator workstation", 111 | } 112 | } 113 | role_hashes = { 114 | "6b33db53faf33c77d694ecab2e3fefadc7dacc70": { 115 | "mac": "40:61:86:9a:f1:f5", 116 | "pcap_labels": "foo", 117 | "top_confidence": 1.0, 118 | "state": "operating", 119 | "top_role": "Administrator workstation", 120 | "second_role": "GPU laptop", 121 | "second_confidence": 0.0006269307506632729, 122 | "third_role": "Developer workstation", 123 | "third_confidence": 0.000399485844886532, 124 | } 125 | } 126 | endpoints = p.prom_endpoints(hashes, role_hashes) 127 | endpoint = endpoints["6b33db53faf33c77d694ecab2e3fefadc7dacc70"] 128 | assert endpoint.state == "operating" 129 | assert endpoint.get_ipv4_os() == "Windows" 130 | roles, confidences, pcap_labels = endpoint.get_roles_confidences_pcap_labels() 131 | assert roles == ("Administrator workstation", "GPU laptop", "Developer workstation") 132 | assert confidences == (1.0, 0.0006269307506632729, 0.000399485844886532) 133 | assert pcap_labels == "foo" 134 | -------------------------------------------------------------------------------- /lib/poseidon_core/tests/test_volos.py: -------------------------------------------------------------------------------- 1 | from poseidon_core.helpers.config import Config 2 | from poseidon_core.helpers.endpoint import endpoint_factory 3 | from poseidon_core.operations.primitives.coprocess import Coprocess 4 | from poseidon_core.operations.volos.acls import VolosAcl 5 | from poseidon_core.operations.volos.volos import Volos 6 | 7 | 8 | def test_Volos(): 9 | controller = Config().get_config() 10 | v = Volos(controller) 11 | 12 | 13 | def test_Acl(): 14 | controller = Config().get_config() 15 | endpoint = endpoint_factory("foo") 16 | endpoint.endpoint_data = {"mac": "00:00:00:00:00:00"} 17 | a = VolosAcl(endpoint, controller["acl_dir"]) 18 | 19 | 20 | def test_Coprocess(): 21 | controller = Config().get_config() 22 | c = Coprocess(controller) 23 | -------------------------------------------------------------------------------- /release/update_docker_compose.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # update docker-compose.yaml with release version or dev version, depending on contents of VERSION. 3 | import json 4 | import sys 5 | import urllib.request 6 | from collections import OrderedDict 7 | 8 | import ruamel.yaml 9 | 10 | 11 | # TODO: only updates to Poseidon's main docker-compose.yaml are currently handled. 12 | DOCKER_COMPOSE = "../docker-compose.yaml" 13 | RELEASE_VER = sys.argv[1] 14 | DEV = RELEASE_VER.endswith(".dev") 15 | 16 | # These services have their own versions - update automatically. 17 | OWN_VERSIONED_SERVICES = { 18 | "network_tap": "iqtlabs/network-tools", 19 | } 20 | # For dev versions, add this config. 21 | DEV_SERVICE_OVERRIDE = { 22 | "poseidon": {"build": {"context": ".", "dockerfile": "Dockerfile"}}, 23 | "poseidon_api": {"build": {"context": "helpers/api", "dockerfile": "Dockerfile"}}, 24 | "rabbit": {"build": {"context": "helpers/rabbitmq", "dockerfile": "Dockerfile"}}, 25 | "workers": {"build": {"context": "workers", "dockerfile": "Dockerfile"}}, 26 | } 27 | # For non-dev versions, delete this config. 28 | NON_DEV_SERVICE_DELETE = { 29 | "poseidon": ["build"], 30 | "poseidon_api": ["build"], 31 | "rabbit": ["build"], 32 | "workers": ["build"], 33 | } 34 | 35 | # Broadly preserves formatting. 36 | yaml = ruamel.yaml.YAML() 37 | yaml.indent(mapping=4, sequence=2, offset=4) 38 | dc = ruamel.yaml.round_trip_load(open(DOCKER_COMPOSE).read(), preserve_quotes=True) 39 | for service, service_config in dc["services"].items(): 40 | image, version = service_config["image"].split(":") 41 | repo = OWN_VERSIONED_SERVICES.get(service, None) 42 | if repo: 43 | req = urllib.request.Request( 44 | url="https://api.github.com/repos/%s/releases/latest" % repo 45 | ) 46 | res = urllib.request.urlopen(req, timeout=15) # nosec 47 | latest_json = json.loads(res.read().decode("utf-8")) 48 | version = latest_json["name"] 49 | elif DEV: 50 | version = "latest" 51 | if service in DEV_SERVICE_OVERRIDE: 52 | service_config.update(DEV_SERVICE_OVERRIDE[service]) 53 | else: 54 | version = "v" + RELEASE_VER 55 | del_keys = NON_DEV_SERVICE_DELETE.get(service, None) 56 | if del_keys: 57 | for del_key in del_keys: 58 | if del_key in service_config: 59 | del service_config[del_key] 60 | service_config["image"] = ":".join((image, version)) 61 | 62 | 63 | yaml.dump(dc, open(DOCKER_COMPOSE, "w")) 64 | -------------------------------------------------------------------------------- /release/update_workers_json.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Generate diff for workers.json to update to latest releases. 
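# The workers.json updated below is expected to hold entries shaped roughly like
# {"workers": [{"image": "iqtlabs/networkml", "version": "<tag>"}]}; this shape is inferred
# from the loop in this script, and any extra per-worker keys are rewritten unchanged.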
3 | import json 4 | import sys 5 | import urllib.request 6 | 7 | 8 | WORKERS_JSON = "../workers/workers.json" 9 | RELEASE_MAP = { 10 | "iqtlabs/pcap_to_node_pcap": "iqtlabs/network-tools", 11 | "iqtlabs/tcprewrite_dot1q": "iqtlabs/network-tools", 12 | "iqtlabs/networkml": "iqtlabs/networkml", 13 | "iqtlabs/p0f": "iqtlabs/network-tools", 14 | "iqtlabs/faucetconfrpc": "iqtlabs/faucetconfrpc", 15 | "yeasy/simple-web": "yeasy/simple-web", 16 | } 17 | 18 | 19 | changes = set() 20 | workers = json.loads(open(WORKERS_JSON).read()) 21 | for worker in workers["workers"]: 22 | repo = RELEASE_MAP.get(worker["image"], None) 23 | if repo is None: 24 | print("Unknown repo for %s" % worker["image"]) 25 | sys.exit(-1) 26 | req = urllib.request.Request( 27 | url="https://api.github.com/repos/%s/releases/latest" % repo 28 | ) 29 | try: 30 | res = urllib.request.urlopen(req, timeout=15) # nosec 31 | except urllib.error.HTTPError: 32 | print("no release for %s, skipping update" % worker["image"]) 33 | continue 34 | latest_json = json.loads(res.read().decode("utf-8")) 35 | latest_version = latest_json["name"] 36 | if worker["version"] != latest_version: 37 | changes.add((repo, latest_version)) 38 | worker["version"] = latest_version 39 | 40 | with open(WORKERS_JSON, "w") as f: 41 | f.write(json.dumps(workers, indent=2, sort_keys=True)) 42 | 43 | 44 | if changes: 45 | print( 46 | "Upgrade workers: " 47 | + ", ".join("%s: %s" % (repo, version) for repo, version in changes) 48 | ) 49 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "constraints": { 3 | "poetry": "1.4.2" 4 | }, 5 | "extends": [ 6 | "config:base", 7 | "docker:enableMajor" 8 | ], 9 | "ignorePaths": [] 10 | } 11 | -------------------------------------------------------------------------------- /tests/test-e2e-ovs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3.7' 3 | services: 4 | ovs: 5 | image: iqtlabs/openvswitch:v3.1.1 6 | network_mode: host 7 | devices: 8 | - "/dev/net/tun:/dev/net/tun" 9 | cap_add: 10 | - NET_ADMIN 11 | -------------------------------------------------------------------------------- /tests/test-ipv4.pcap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/tests/test-ipv4.pcap -------------------------------------------------------------------------------- /tests/test_e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | TESTHOST="00:1e:68:51:4f:a9" 6 | 7 | POSEIDON_IMAGE=$(grep -Eo "image:.+poseidon:[^\']+" docker-compose.yaml |grep -Eo ':\S+') 8 | if [[ "$POSEIDON_IMAGE" == "" ]] ; then 9 | echo error: cannot detect poseidon docker image name. 
10 | exit 1 11 | fi 12 | if [[ "$POSEIDON_IMAGE" != ":latest" ]] ; then 13 | echo poseidon image is $POSEIDON_IMAGE, so not running e2e tests - assuming release 14 | exit 0 15 | fi 16 | 17 | TMPDIR=$(mktemp -d) 18 | 19 | FASTREPLAY="sudo tcpreplay -q -t -i sw1b $TMPDIR/test.pcap" 20 | SLOWREPLAY="sudo tcpreplay -q -M1 -i sw1b $TMPDIR/test.pcap" 21 | 22 | cli_cmd () { 23 | PID=$(docker ps -q --filter "label=com.docker.compose.service=poseidon") 24 | CLICMD="docker exec $PID poseidon-cli" 25 | } 26 | 27 | wait_show_all () { 28 | match=$1 29 | TRIES=0 30 | MATCHED="" 31 | cli_cmd 32 | PID=$(docker ps -q --filter "label=com.docker.compose.service=poseidon") 33 | CMD="docker exec $PID poseidon-cli" 34 | echo waiting for $match in show all 35 | while [[ "$MATCHED" == "" ]] ; do 36 | MATCHED=$($CLICMD 'show all' | grep -E "$match" | cat) 37 | TRIES=$((TRIES+1)) 38 | if [[ "$TRIES" == "60" ]] ; then 39 | echo FAIL: show all did not contain $match 40 | echo $($CMD 'show all') 41 | exit 1 42 | fi 43 | sleep 1 44 | done 45 | echo $MATCHED 46 | } 47 | 48 | wait_var_nonzero () { 49 | var=$1 50 | cmd=$2 51 | failvar=$3 52 | api="http://0.0.0.0:9090/api/v1/query?query=" 53 | query="$api$var>0" 54 | echo waiting for $query to be non-zero 55 | RC="[]" 56 | TRIES=0 57 | while [[ "$RC" == "[]" ]] || [[ "$RC" == "" ]] ; do 58 | RC=$(echo "$query" | wget -q -O- -i -|jq .data.result) 59 | TRIES=$((TRIES+1)) 60 | if [[ "$TRIES" == "180" ]] ; then 61 | echo FAIL: $query returned no results: $RC 62 | echo diagnostic logs follow 63 | if [[ "$failvar" != "" ]] ; then 64 | echo "$api$failvar" | wget -q -O- -i - 65 | fi 66 | grep -v -E "(main - operations|transitions.core)" /var/log/poseidon/poseidon.log |tail -500 67 | docker ps -a 68 | wget -q -O- 0.0.0.0:9304 69 | echo FAIL: $query returned no results: $RC 70 | exit 1 71 | fi 72 | if [[ "$cmd" != "" ]] ; then 73 | echo $($cmd) 74 | fi 75 | sleep 1 76 | done 77 | echo $RC 78 | } 79 | 80 | wait_job_up () { 81 | instance=$1 82 | wait_var_nonzero "up{instance=\"$instance\"}" "" up 83 | } 84 | 85 | sudo rm -rf /etc/faucet /opt/prometheus/ 86 | sudo mkdir -p /etc/faucet 87 | cat >$TMPDIR/faucet.yaml<&1 | grep yml || true 151 | docker logs poseidon-prometheus-1 2>&1 | grep -i error || true 152 | for i in sw1a sw1b ; do 153 | sudo ip link set $i up 154 | done 155 | wait_var_nonzero "port_status{port=\"1\"}" "" port_status 156 | echo waiting for FAUCET to recognize test port 157 | COUNT="0" 158 | while [[ "$COUNT" == 0 ]] ; do 159 | COUNT=$(docker exec -t $OVSID ovs-ofctl dump-flows -OOpenFlow13 switch1 table=0,in_port=1|grep -c in_port|cat) 160 | sleep 1 161 | done 162 | # Poseidon event client receiving from FAUCET 163 | wait_var_nonzero "poseidon_last_rabbitmq_routing_key_time{routing_key=\"FAUCET.Event\"}" "" poseidon_last_rabbitmq_routing_key_time 164 | echo waiting for ncapture 165 | COUNT="0" 166 | while [[ "$COUNT" == "0" ]] ; do 167 | COUNT=$(docker ps -a --filter=status=running|grep -c ncapture|cat) 168 | echo $($FASTREPLAY) 169 | sleep 1 170 | echo -n . 171 | done 172 | echo waiting for FAUCET mirror to be applied 173 | COUNT="0" 174 | while [[ "$COUNT" == 0 ]] ; do 175 | COUNT=$(docker exec -t $OVSID ovs-ofctl dump-flows -OOpenFlow13 switch1 table=0,in_port=1|grep -c output:|cat) 176 | sleep 1 177 | echo -n . 178 | done 179 | echo Sending test traffic to be mirrored 180 | # TODO: come up with a better way to stimulate p0f and/or ensure most test traffic is sent within the capture window. 
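# SLOWREPLAY (tcpreplay -M1, throttled to ~1Mbps) spreads the test pcap across the
# mirror capture window; FASTREPLAY (-t, topspeed) is reused below only to keep
# endpoints active while waiting on tool results.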
181 | # Send mirror traffic 182 | echo $($SLOWREPLAY) 183 | # Poseidon detected endpoints 184 | wait_var_nonzero "sum(poseidon_endpoint_current_states{current_state=\"operating\"})" "$FASTREPLAY" poseidon_endpoint_current_states 185 | # wait for networkml to return a result 186 | wait_var_nonzero "poseidon_last_rabbitmq_routing_key_time{routing_key=\"poseidon.algos.decider\"}" "" poseidon_last_rabbitmq_routing_key_time 187 | # keep endpoints active awaiting results 188 | wait_var_nonzero "sum(poseidon_last_tool_result_time{tool=\"networkml\"})" "$FASTREPLAY" poseidon_last_tool_result_time 189 | wait_var_nonzero "sum(poseidon_endpoint_roles{role!=\"NO DATA\"})" "$FASTREPLAY" poseidon_endpoint_roles 190 | wait_var_nonzero "sum(poseidon_last_tool_result_time{tool=\"p0f\"})" "$FASTREPLAY" poseidon_last_tool_result_time 191 | wait_var_nonzero "sum(poseidon_endpoint_metadata{role!=\"NO DATA\"})" "$FASTREPLAY" poseidon_endpoint_metadata 192 | # ensure CLI results reported. 193 | wait_show_all "orkstation.+${TESTHOST}" 194 | wait_var_nonzero "sum(poseidon_endpoint_oses{ipv4_os!=\"NO DATA\"})" "" poseidon_endpoint_oses 195 | # TODO: fix certstrap to allow creating multiple named client keys. 196 | wait_var_nonzero "sum(faucetconfrpc_ok_total{peer_id=\"poseidon\"})" "" faucetconfrpc_ok_total 197 | for rpc in GetConfigFile SetConfigFile ClearPortMirror AddPortMirror RemovePortMirror ; do 198 | wait_var_nonzero "faucetconfrpc_ok_total{peer_id=\"poseidon\",request=\"$rpc\"}" "" faucetconfrpc_ok_total 199 | done 200 | docker run -i iqtlabs/poseidon python3 -c "from poseidon_core import __version__ ; print(__version__) ;" 201 | cli_cmd 202 | $CLICMD "show version" 203 | poseidon -V 204 | poseidon -S 205 | poseidon -d 206 | COMPOSE_PROJECT_NAME=ovs docker compose -f tests/test-e2e-ovs.yml stop 207 | rm -rf $TMPDIR 208 | -------------------------------------------------------------------------------- /tests/test_gen_pcap_manifest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import gzip 3 | import os 4 | import tempfile 5 | 6 | 7 | def test_gen_pcap_manifest(): 8 | test_dir = os.path.dirname(os.path.abspath(__file__)) 9 | gen_pcap_manifest = os.path.sep.join((test_dir, "..", "bin", "gen_pcap_manifest")) 10 | pcap_file = os.path.join(test_dir, "test-ipv4.pcap") 11 | with tempfile.TemporaryDirectory() as tempdir: 12 | csv_file = os.path.join(tempdir, "out.csv.gz") 13 | os.system(" ".join((gen_pcap_manifest, "-p", test_dir, "-c", csv_file))) 14 | with gzip.open(csv_file, "r") as csv_out: 15 | all_csv_out = [line.decode("utf-8") for line in csv_out.readlines()] 16 | assert all_csv_out == [ 17 | "eth,ip,pcap\r\n", 18 | "00:00:00:00:00:00,127.0.0.1,%s\r\n" % pcap_file, 19 | ] 20 | -------------------------------------------------------------------------------- /tests/test_worker.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on 31 Dec 2019 4 | @author: cglewis 5 | """ 6 | import os 7 | 8 | from workers.worker import callback 9 | from workers.worker import load_workers 10 | from workers.worker import setup_docker 11 | 12 | WORKERS_JSON = "workers/workers.json" 13 | 14 | 15 | def test_setup_docker(): 16 | d = setup_docker() 17 | d.networks.create("poseidon-poseidon") 18 | 19 | 20 | def test_load_workers(): 21 | workers = load_workers(WORKERS_JSON) 22 | 23 | 24 | def test_callback(): 25 | class MockChannel: 26 | def basic_ack(self, delivery_tag=None): 27 | 
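            # Minimal stand-in for pika's BlockingChannel.basic_ack: the worker
            # callback only needs it to accept a delivery_tag keyword and return.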
return True 28 | 29 | class MockMethod: 30 | def __init__(self): 31 | self.delivery_tag = None 32 | self.routing_key = "" 33 | 34 | os.environ["VOL_PREFIX"] = "/tmp" 35 | ch = MockChannel() 36 | method = MockMethod() 37 | body = '{"id": "", "type": "metadata", "file_path": "/files/tcprewrite-dot1q-2019-12-31-17_33_32.961111-UTC/pcap-node-splitter-2019-12-31-17_34_17.314910-UTC/clients/trace_5c7820e51dbabbf0476097dda838c7eabfa8e160_2019-12-31_17_18_17-client-ip-38-103-36-98-192-168-0-46-38-103-36-98-eth-udpencap-ip-frame-wsshort-udp-esp-port-4500.pcap", "data": "", "results": {"tool": "p0f", "version": "0.1.7"}}' 38 | body = body.encode("utf-8") 39 | callback(ch, method, None, body, workers_json=WORKERS_JSON) 40 | 41 | body = '{"id": "", "type": "metadata", "file_path": "/files/tcprewrite-dot1q-2019-12-31-17_33_32.961111-UTC/pcap-node-splitter-2019-12-31-17_34_17.314910-UTC/clients/trace_5c7820e51dbabbf0476097dda838c7eabfa8e160_2019-12-31_17_18_17-client-ip-38-103-36-98-192-168-0-46-38-103-36-98-eth-udpencap-ip-frame-wsshort-udp-esp-port-4500.pcap", "data": "foo", "results": {"tool": "p0f", "version": "0.1.7"}}' 42 | body = body.encode("utf-8") 43 | callback(ch, method, None, body, workers_json=WORKERS_JSON) 44 | 45 | body = '{"id": "", "type": "metadata", "file_path": "/files/tcprewrite-dot1q-2019-12-31-17_33_32.961111-UTC/pcap-node-splitter-2019-12-31-17_34_17.314910-UTC/clients/trace_5c7820e51dbabbf0476097dda838c7eabfa8e160_2019-12-31_17_18_17-client-ip-38-103-36-98-192-168-0-46-38-103-36-98-eth-udpencap-ip-frame-wsshort-udp-esp-port-4500.pcap", "data": "", "results": {"tool": "ncapture", "version": "0.1.7"}}' 46 | body = body.encode("utf-8") 47 | callback(ch, method, None, body, workers_json=WORKERS_JSON) 48 | 49 | body = '{"id": "", "type": "data", "file_path": "/files/tcprewrite-dot1q-2019-12-31-17_33_32.961111-UTC/pcap-node-splitter-2019-12-31-17_34_17.314910-UTC/clients/trace_5c7820e51dbabbf0476097dda838c7eabfa8e160_2019-12-31_17_18_17-client-ip-38-103-36-98-192-168-0-46-38-103-36-98-eth-udpencap-ip-frame-wsshort-udp-esp-port-4500.pcap", "data": "", "results": {"tool": "p0f", "version": "0.1.7"}}' 50 | body = body.encode("utf-8") 51 | callback(ch, method, None, body, workers_json=WORKERS_JSON) 52 | 53 | body = '{"type": "data", "file_path": "/files/tcprewrite-dot1q-2019-12-31-17_33_32.961111-UTC/pcap-node-splitter-2019-12-31-17_34_17.314910-UTC/clients/trace_5c7820e51dbabbf0476097dda838c7eabfa8e160_2019-12-31_17_18_17-client-ip-38-103-36-98-192-168-0-46-38-103-36-98-eth-udpencap-ip-frame-wsshort-udp-esp-port-4500.pcap", "data": "", "results": {"tool": "p0f", "version": "0.1.7"}}' 54 | body = body.encode("utf-8") 55 | callback(ch, method, None, body, workers_json=WORKERS_JSON) 56 | 57 | body = '{"id": "", "type": "metadata", "file_path": "/files/tcprewrite-dot1q-2019-12-31-17_33_32.961111-UTC/pcap-node-splitter-2019-12-31-17_34_17.314910-UTC/clients/trace_5c7820e51dbabbf0476097dda838c7eabfa8e160_2019-12-31_17_18_17-client-ip-38-103-36-98-192-168-0-46-38-103-36-98-eth-udpencap-ip-frame-wsshort-udp-esp-port-4500.pcap", "data": "", "results": {"tool": "pcap-splitter", "version": "0.1.7"}}' 58 | body = body.encode("utf-8") 59 | callback(ch, method, None, body, workers_json=WORKERS_JSON) 60 | -------------------------------------------------------------------------------- /workers/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim 2 | LABEL maintainer="Charlie Lewis " 3 | 4 | COPY requirements.txt requirements.txt 5 | 6 | 
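# Install curl at the OS level, then clear the apt caches to keep the image small.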
RUN apt-get update && apt-get install -y --no-install-recommends curl \ 7 | && apt-get clean \ 8 | && rm -rf /var/lib/apt/lists/* 9 | RUN pip3 install --no-cache-dir -r requirements.txt 10 | 11 | COPY . /app 12 | WORKDIR /app 13 | ENV PYTHONUNBUFFERED 1 14 | 15 | CMD python3 worker.py 16 | -------------------------------------------------------------------------------- /workers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/faucetsdn/poseidon/a18d4eb7e848e4c6a87306d11568d86e5fa0e8b7/workers/__init__.py -------------------------------------------------------------------------------- /workers/requirements.txt: -------------------------------------------------------------------------------- 1 | docker==6.1.3 2 | pika==1.3.2 3 | prometheus_client==0.17.0 4 | six==1.16.0 5 | requests<2.31.1 6 | urllib3<2.0.3 7 | -------------------------------------------------------------------------------- /workers/worker.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import os 4 | import time 5 | import uuid 6 | 7 | import docker 8 | import pika 9 | from prometheus_client import Gauge 10 | from prometheus_client import start_http_server 11 | 12 | metrics = {} 13 | 14 | 15 | def set_status(status): 16 | global metrics 17 | for worker in status: 18 | if "workers_state" in metrics: 19 | metrics["workers_state"].labels( 20 | worker=worker, state=status[worker]["state"] 21 | ).inc() 22 | else: 23 | metrics["workers_state"] = Gauge( 24 | "workers_state", "State of workers", ["worker", "state"] 25 | ) 26 | 27 | 28 | def callback(ch, method, properties, body, workers_json="workers.json"): 29 | """Callback that has the message that was received""" 30 | vol_prefix = os.getenv("VOL_PREFIX", "") 31 | workers = load_workers(workers_json) 32 | d = setup_docker() 33 | pipeline = json.loads(body.decode("utf-8")) 34 | worker_found = False 35 | status = {} 36 | for worker in workers["workers"]: 37 | file_path = pipeline["file_path"] 38 | if file_path in ["-1", ""]: 39 | print( 40 | " [X file empty] %s UTC %r:%r" 41 | % (str(datetime.datetime.utcnow()), method.routing_key, pipeline) 42 | ) 43 | elif "id" in pipeline and ( 44 | ("results" in pipeline and pipeline["results"]["tool"] in worker["inputs"]) 45 | or ("file_type" in pipeline and pipeline["file_type"] in worker["inputs"]) 46 | ): 47 | uid = str(uuid.uuid4()).split("-")[-1] 48 | name = worker["name"] + "_" + uid 49 | image = worker["image"] 50 | ports = None 51 | 52 | if "version" in worker: 53 | image += ":" + worker["version"] 54 | command = [] 55 | if "command" in worker: 56 | command = worker["command"] 57 | 58 | command.append(file_path) 59 | 60 | environment = pipeline 61 | if "environment" in worker: 62 | environment.update(worker["environment"]) 63 | if "rabbit" not in pipeline: 64 | pipeline["rabbit"] = "true" 65 | if "ports" in worker: 66 | ports = worker["ports"] 67 | 68 | keep_images = os.getenv("KEEPIMAGES", "0") 69 | remove = True 70 | if keep_images == "1": 71 | remove = False 72 | 73 | use_swarm = os.getenv("SWARM", "0") 74 | try: 75 | if use_swarm == "1": 76 | # fix environment 77 | env = [] 78 | for key in environment: 79 | if key != "results": 80 | env.append(key + "=" + str(environment[key])) 81 | restart_policy = docker.types.RestartPolicy() 82 | d.services.create( 83 | image=image, 84 | name=name, 85 | networks=[worker["stage"]], 86 | constraints=["node.role==worker"], 87 | 
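                        # placement constraint: run worker tasks only on Swarm worker nodes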
restart_policy=restart_policy, 88 | labels={"project": "poseidon"}, 89 | mounts=[vol_prefix + "/opt/poseidon_files:/files:rw"], 90 | env=env, 91 | args=command, 92 | ) 93 | else: 94 | d.containers.run( 95 | image=image, 96 | name=name, 97 | network=worker["stage"], 98 | volumes={ 99 | vol_prefix 100 | + "/opt/poseidon_files": {"bind": "/files", "mode": "rw"} 101 | }, 102 | environment=environment, 103 | remove=remove, 104 | command=command, 105 | ports=ports, 106 | detach=True, 107 | ) 108 | print( 109 | " [Create container] %s UTC %r:%r:%r:%r" 110 | % ( 111 | str(datetime.datetime.utcnow()), 112 | method.routing_key, 113 | pipeline["id"], 114 | image, 115 | pipeline, 116 | ) 117 | ) 118 | status[worker["name"]] = {"state": "In progress"} 119 | worker_found = True 120 | except Exception as e: # pragma: no cover 121 | print("failed: {0}".format(str(e))) 122 | status[worker["name"]] = {"state": "Error"} 123 | else: 124 | if not worker["name"] in status: 125 | status[worker["name"]] = {"state": "Queued"} 126 | if "id" in pipeline and "results" in pipeline and pipeline["type"] == "data": 127 | print( 128 | " [Data] %s UTC %r:%r:%r" 129 | % ( 130 | str(datetime.datetime.utcnow()), 131 | method.routing_key, 132 | pipeline["id"], 133 | pipeline["results"], 134 | ) 135 | ) 136 | status[pipeline["results"]["tool"]] = {"state": "In progress"} 137 | elif "id" in pipeline and "results" in pipeline and pipeline["type"] == "metadata": 138 | if "data" in pipeline and pipeline["data"] != "": 139 | print( 140 | " [Metadata] %s UTC %r:%r:%r" 141 | % ( 142 | str(datetime.datetime.utcnow()), 143 | method.routing_key, 144 | pipeline["id"], 145 | pipeline["results"], 146 | ) 147 | ) 148 | status[pipeline["results"]["tool"]] = {"state": "In progress"} 149 | else: 150 | print( 151 | " [Finished] %s UTC %r:%r" 152 | % (str(datetime.datetime.utcnow()), method.routing_key, pipeline) 153 | ) 154 | status[pipeline["results"]["tool"]] = {"state": "Complete"} 155 | elif not worker_found: 156 | print( 157 | " [X no match] %s UTC %r:%r" 158 | % (str(datetime.datetime.utcnow()), method.routing_key, pipeline) 159 | ) 160 | 161 | ch.basic_ack(delivery_tag=method.delivery_tag) 162 | set_status(status) 163 | 164 | 165 | def main(queue_name, host): # pragma: no cover 166 | """Creates the connection to RabbitMQ as a consumer and binds to the queue 167 | waiting for messages 168 | """ 169 | start_prom() 170 | counter = 0 171 | while True: 172 | counter += 1 173 | try: 174 | params = pika.ConnectionParameters(host=host, port=5672) 175 | connection = pika.BlockingConnection(params) 176 | channel = connection.channel() 177 | print("Connected to rabbit") 178 | channel.queue_declare(queue=queue_name, durable=True) 179 | channel.basic_qos(prefetch_count=1) 180 | channel.basic_consume(queue=queue_name, on_message_callback=callback) 181 | channel.start_consuming() 182 | except Exception as e: # pragma: no cover 183 | print(str(e)) 184 | print("Waiting for connection to rabbit...attempt: {0}".format(counter)) 185 | time.sleep(1) 186 | 187 | return 188 | 189 | 190 | def setup_docker(): 191 | return docker.from_env() 192 | 193 | 194 | def start_prom(port=9305): 195 | start_http_server(port) 196 | 197 | 198 | def load_workers(workers_json="workers.json"): 199 | with open(workers_json) as json_file: 200 | workers = json.load(json_file) 201 | return workers 202 | 203 | 204 | if __name__ == "__main__": # pragma: no cover 205 | queue_name = os.getenv("RABBIT_QUEUE_NAME", "task_queue") 206 | host = os.getenv("RABBIT_HOST", "messenger") 207 | 
main(queue_name, host) 208 | -------------------------------------------------------------------------------- /workers/workers.json: -------------------------------------------------------------------------------- 1 | { 2 | "workers": [ 3 | { 4 | "image": "iqtlabs/pcap_to_node_pcap", 5 | "inputs": [ 6 | "pcap-dot1q" 7 | ], 8 | "labels": "", 9 | "name": "pcap-splitter", 10 | "outputs": [ 11 | "pcap" 12 | ], 13 | "stage": "poseidon_poseidon", 14 | "version": "v0.11.33", 15 | "viewableOutput": false 16 | }, 17 | { 18 | "image": "iqtlabs/tcprewrite_dot1q", 19 | "inputs": [ 20 | "ncapture" 21 | ], 22 | "labels": "", 23 | "name": "pcap-dot1q", 24 | "outputs": [ 25 | "pcap" 26 | ], 27 | "stage": "poseidon_poseidon", 28 | "version": "v0.11.33", 29 | "viewableOutput": false 30 | }, 31 | { 32 | "command": [ 33 | "--no-srcmacid", 34 | "-o/tmp" 35 | ], 36 | "contentType": "application/json", 37 | "environment": { 38 | "RABBIT_EXCHANGE": "topic-poseidon-internal", 39 | "RABBIT_HOST": "messenger", 40 | "RABBIT_QUEUE_NAME": "topic-poseidon-internal", 41 | "RABBIT_ROUTING_KEY": "poseidon.algos.decider", 42 | "RESULT_PATH": "/tmp/predict.json" 43 | }, 44 | "image": "iqtlabs/networkml", 45 | "inputs": [ 46 | "ncapture" 47 | ], 48 | "labels": "", 49 | "name": "networkml", 50 | "outputs": [ 51 | "rabbitmq" 52 | ], 53 | "stage": "poseidon_poseidon", 54 | "version": "v0.6.19", 55 | "viewableOutput": true 56 | }, 57 | { 58 | "contentType": "application/json", 59 | "environment": { 60 | "RABBIT_EXCHANGE": "topic-poseidon-internal", 61 | "RABBIT_HOST": "messenger", 62 | "RABBIT_QUEUE_NAME": "topic-poseidon-internal", 63 | "RABBIT_ROUTING_KEY": "poseidon.algos.decider", 64 | "RESULT_PATH": "/tmp/result.json", 65 | "rabbit": "true" 66 | }, 67 | "image": "iqtlabs/p0f", 68 | "inputs": [ 69 | "pcap-splitter" 70 | ], 71 | "labels": "", 72 | "name": "p0f", 73 | "outputs": [ 74 | "rabbitmq" 75 | ], 76 | "stage": "poseidon_poseidon", 77 | "version": "v0.11.33", 78 | "viewableOutput": true 79 | }, 80 | { 81 | "image": "yeasy/simple-web", 82 | "inputs": [], 83 | "labels": "", 84 | "name": "simple-web", 85 | "outputs": [], 86 | "ports": [ 87 | "80:80" 88 | ], 89 | "stage": "poseidon_volos", 90 | "version": "latest", 91 | "viewableOutput": false 92 | } 93 | ] 94 | } --------------------------------------------------------------------------------