├── .gitattributes ├── .github ├── actions │ └── e2e-test-setup-action │ │ └── action.yaml └── workflows │ ├── build-fork.yaml │ ├── build.yaml │ ├── cla.yaml │ ├── e2e-test.yaml │ ├── lint.yml │ ├── release-tag.yaml │ ├── release.Dockerfile │ └── tests-expected-results │ ├── kafka-tutorial-intents.json │ └── simple-tutorial-intents.json ├── .gitignore ├── .gitmodules ├── .golangci.yaml ├── .markdownlint.json ├── CODE_OF_CONDUCT.md ├── Justfile ├── LICENSE ├── README.md ├── build ├── kafka-watcher.Dockerfile ├── mapper.Dockerfile └── sniffer.Dockerfile ├── cloud-example.png ├── otterhelm.png ├── src ├── go.mod ├── go.sum ├── graphql.config.yml ├── istio-watcher │ └── pkg │ │ └── watcher │ │ ├── generate.go │ │ ├── helpers.go │ │ ├── istioconnectionmatcher.go │ │ ├── mocks │ │ └── mocks.go │ │ ├── watcher.go │ │ └── watcher_test.go ├── kafka-watcher │ ├── cmd │ │ └── main.go │ └── pkg │ │ ├── config │ │ └── config.go │ │ ├── logwatcher │ │ ├── filewatcher.go │ │ ├── kubeneteslogwatcher.go │ │ └── watcher.go │ │ └── prometheus │ │ └── metrics.go ├── mapper │ ├── cmd │ │ └── main.go │ ├── fix-errors-import.sh │ ├── generate.go │ ├── gqlgen.yml │ └── pkg │ │ ├── awsintentsholder │ │ └── holder.go │ │ ├── azureintentsholder │ │ └── holder.go │ │ ├── cloudclient │ │ ├── cloud_client.go │ │ ├── generate.go │ │ ├── generated.go │ │ ├── genqlient.graphql │ │ ├── genqlient.yaml │ │ ├── graphql.config.yml │ │ ├── mocks │ │ │ ├── dummy.go │ │ │ └── mocks.go │ │ └── schema.graphql │ │ ├── clouduploader │ │ ├── cloud_config.go │ │ ├── cloud_upload.go │ │ ├── cloud_uploader_test.go │ │ ├── intents_input_matcher.go │ │ └── models_to_api.go │ │ ├── collectors │ │ └── traffic │ │ │ └── collector.go │ │ ├── concurrentconnectioncounter │ │ ├── concurrent_connection_counter.go │ │ ├── concurrent_connection_counter_test.go │ │ ├── connection_count_differ.go │ │ ├── connection_count_differ_test.go │ │ ├── consts.go │ │ └── countable_intent.go │ │ ├── config │ │ └── config.go │ │ ├── 
dnscache │ │ ├── dns_cache.go │ │ ├── dns_cache_test.go │ │ └── ttl_cache │ │ │ ├── ttl_cache.go │ │ │ └── ttl_cache_test.go │ │ ├── dnsintentspublisher │ │ ├── dns_intents_publisher.go │ │ ├── dns_intents_publisher_test.go │ │ └── init.go │ │ ├── externaltrafficholder │ │ └── externaltrafficholder.go │ │ ├── gcpintentsholder │ │ └── holder.go │ │ ├── graph │ │ ├── generated │ │ │ └── generated.go │ │ └── model │ │ │ ├── gvk.go │ │ │ ├── kafkaop.go │ │ │ ├── models_gen.go │ │ │ ├── results_length.go │ │ │ └── serviceidentity.go │ │ ├── incomingtrafficholder │ │ ├── incoming_traffic_holder.go │ │ └── incoming_traffic_holder_test.go │ │ ├── intentsstore │ │ └── holder.go │ │ ├── kubefinder │ │ ├── kubefinder.go │ │ └── kubefinder_test.go │ │ ├── mapperwebhooks │ │ └── mapperwebhooks.go │ │ ├── metadatareporter │ │ ├── convert_to_cloud_api_types.go │ │ ├── endpoints_reconciler.go │ │ ├── metadata_reporter.go │ │ ├── metadata_reporter_once.go │ │ ├── metadata_reporter_test.go │ │ ├── namespace_reconciler.go │ │ ├── namespace_reconciler_test.go │ │ ├── pod_reconciler.go │ │ ├── setup.go │ │ └── workload_metadata_cache.go │ │ ├── metricexporter │ │ ├── edge_metric.go │ │ ├── metric_exporter.go │ │ ├── metric_exporter_test.go │ │ ├── mock_edge_metric.go │ │ └── otel_edge_metric.go │ │ ├── metrics_collection_traffic │ │ ├── endpoints_reconciler.go │ │ ├── metrics_collection_traffic_cache.go │ │ ├── metrics_collection_traffic_handler.go │ │ ├── pod_reconciler.go │ │ └── service_reconciler.go │ │ ├── mocks │ │ ├── mock_k8s_client.go │ │ └── mock_kubefinder.go │ │ ├── networkpolicyreport │ │ ├── cilium_clusterwide_policies_reconciler.go │ │ ├── cilium_clusterwide_policies_reconciler_test.go │ │ ├── helpers.go │ │ ├── network_policies_reconciler.go │ │ └── network_policy_reconciler_test.go │ │ ├── prometheus │ │ └── metrics.go │ │ ├── resolvers │ │ ├── helpers.go │ │ ├── resolver.go │ │ ├── resolver_test.go │ │ ├── schema.helpers.resolvers.go │ │ ├── schema.resolvers.go │ │ 
└── test_gql_client │ │ │ ├── generate.go │ │ │ ├── generated.go │ │ │ ├── genqlient.graphql │ │ │ └── genqlient.yaml │ │ └── resourcevisibility │ │ ├── ingress_reconciler.go │ │ ├── ingress_reconciler_test.go │ │ ├── model_convertion.go │ │ ├── svc_reconciler.go │ │ └── svc_reconciler_test.go ├── mapperclient │ ├── client.go │ ├── generate.go │ ├── generated.go │ ├── genqlient.yaml │ ├── mockclient │ │ └── mocks.go │ └── operations.graphql ├── mappergraphql │ └── schema.graphql ├── shared │ ├── config │ │ └── config.go │ ├── echologrus │ │ └── echologrus.go │ ├── isrunningonaws │ │ └── check.go │ ├── kubeutils │ │ └── kubeutils.go │ ├── testbase │ │ ├── README.md │ │ ├── patch_matcher.go │ │ └── testsuitebase.go │ └── version │ │ ├── version │ │ └── version.go ├── sniffer │ ├── cmd │ │ └── main.go │ └── pkg │ │ ├── collectors │ │ ├── collector.go │ │ ├── dnssniffer.go │ │ ├── dnssniffer_test.go │ │ ├── socketscanner.go │ │ ├── socketscanner_test.go │ │ ├── tcpsniffer.go │ │ └── tcpsniffer_test.go │ │ ├── config │ │ └── config.go │ │ ├── ipresolver │ │ ├── ipresolver.go │ │ ├── process_monitor.go │ │ ├── process_montior_test.go │ │ ├── procfs_resolver.go │ │ └── procfs_resolver_test.go │ │ ├── prometheus │ │ └── metrics.go │ │ ├── sniffer │ │ └── sniffer.go │ │ └── utils │ │ └── procfs.go └── tools.go └── visualize-example.png /.gitattributes: -------------------------------------------------------------------------------- 1 | *_gen.go linguist-generated=true 2 | generated.go linguist-generated=true 3 | go.mod linguist-generated=true 4 | go.sum linguist-generated=true -------------------------------------------------------------------------------- /.github/workflows/build-fork.yaml: -------------------------------------------------------------------------------- 1 | name: Build 2 | concurrency: 3 | group: "${{ github.repository }}${{ github.ref }}buildfork" 4 | cancel-in-progress: true 5 | on: 6 | pull_request: 7 | types: 8 | - opened 9 | - synchronize 10 | push: 
11 | branches: 12 | - main 13 | - develop 14 | permissions: 15 | contents: read 16 | packages: write 17 | env: 18 | REGISTRY: "dummy" 19 | 20 | jobs: 21 | 22 | build: 23 | name: Build 24 | if: github.repository != 'otterize/network-mapper' || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != 'otterize/network-mapper') 25 | runs-on: ubuntu-latest 26 | permissions: 27 | contents: read 28 | packages: write 29 | outputs: 30 | registry: ${{ steps.registry.outputs.registry }} # workaround since env is not available outside of steps, i.e. in calling external workflows like we later do in e2e-test 31 | strategy: 32 | matrix: 33 | service: 34 | - mapper 35 | - sniffer 36 | - kafka-watcher 37 | 38 | steps: 39 | - id: registry 40 | run: echo "registry=${{ env.REGISTRY }}" >> "$GITHUB_OUTPUT" 41 | - name: Checkout 42 | uses: actions/checkout@v2 43 | with: 44 | submodules: recursive 45 | 46 | - name: Set up Docker Buildx 47 | id: buildx 48 | uses: docker/setup-buildx-action@master 49 | with: 50 | driver-opts: network=host 51 | 52 | - name: Test & Build production image 53 | uses: docker/build-push-action@v2 54 | with: 55 | context: src/ 56 | file: build/${{ matrix.service }}.Dockerfile 57 | tags: ${{ env.REGISTRY }}/${{ github.actor }}/${{ matrix.service }}:${{ github.sha }} 58 | network: host 59 | platforms: linux/amd64 60 | outputs: type=docker,dest=${{ matrix.service }}.tar 61 | cache-from: type=gha 62 | cache-to: type=gha,mode=max 63 | build-args: | 64 | "VERSION=0.0.${{ github.run_id }}" 65 | 66 | - uses: actions/upload-artifact@v3 67 | with: 68 | path: ${{ matrix.service }}.tar 69 | name: ${{ env.REGISTRY }}_${{ github.actor }}_${{ matrix.service }}_${{ github.sha }}.tar 70 | 71 | e2e-test: 72 | uses: ./.github/workflows/e2e-test.yaml 73 | name: Trigger e2e tests 74 | # Must pass the secrets as the called workflow does not have access to the same context 75 | with: 76 | registry: ${{ needs.build.outputs.registry }} 77 | mapper-tag: 
${{ github.sha }} 78 | sniffer-tag: ${{ github.sha }} 79 | mapper-image: ${{ github.actor }}/mapper 80 | sniffer-image: ${{ github.actor }}/sniffer 81 | 82 | needs: 83 | - build -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: Build 2 | concurrency: 3 | group: "${{ github.repository }}${{ github.ref }}" 4 | cancel-in-progress: true 5 | on: 6 | pull_request: 7 | types: 8 | - opened 9 | - synchronize 10 | push: 11 | branches: 12 | - main 13 | - develop 14 | 15 | env: 16 | REGISTRY: us-central1-docker.pkg.dev/main-383408/otterize 17 | 18 | jobs: 19 | 20 | build: 21 | if: (github.event_name == 'push' && github.repository == 'otterize/network-mapper') || github.event.pull_request.head.repo.full_name == 'otterize/network-mapper' 22 | name: Build 23 | runs-on: ubuntu-latest 24 | outputs: 25 | registry: ${{ steps.registry.outputs.registry }} # workaround since env is not available outside of steps, i.e. 
in calling external workflows like we later do in e2e-test 26 | strategy: 27 | matrix: 28 | service: 29 | - mapper 30 | - sniffer 31 | - kafka-watcher 32 | 33 | steps: 34 | - id: registry 35 | run: echo "registry=${{ env.REGISTRY }}" >> "$GITHUB_OUTPUT" 36 | 37 | - name: Checkout 38 | uses: actions/checkout@v2 39 | with: 40 | submodules: recursive 41 | 42 | - name: Set up Docker Buildx 43 | id: buildx 44 | uses: docker/setup-buildx-action@master 45 | with: 46 | driver-opts: network=host 47 | 48 | - name: Login to GCR 49 | uses: docker/login-action@v2 50 | with: 51 | registry: ${{ env.REGISTRY }} 52 | username: _json_key_base64 53 | password: ${{ secrets.B64_GCLOUD_SERVICE_ACCOUNT_JSON }} 54 | 55 | - name: Login to DockerHub 56 | uses: docker/login-action@v1 57 | with: 58 | username: otterize 59 | password: ${{ secrets.DOCKER_PASSWORD }} 60 | 61 | - name: Test & Build production image 62 | uses: docker/build-push-action@v2 63 | with: 64 | context: src/ 65 | file: build/${{ matrix.service }}.Dockerfile 66 | tags: ${{ env.REGISTRY }}/${{ matrix.service }}:${{ github.sha }} 67 | push: true 68 | network: host 69 | platforms: linux/amd64,linux/arm64 70 | cache-from: type=gha 71 | cache-to: type=gha,mode=max 72 | build-args: | 73 | "VERSION=0.0.${{ github.run_id }}" 74 | 75 | e2e-test: 76 | uses: ./.github/workflows/e2e-test.yaml 77 | name: Trigger e2e tests 78 | # Must pass the secrets as the called workflow does not have access to the same context 79 | secrets: 80 | B64_GCLOUD_SERVICE_ACCOUNT_JSON: ${{ secrets.B64_GCLOUD_SERVICE_ACCOUNT_JSON }} 81 | with: 82 | registry: ${{ needs.build.outputs.registry }} 83 | mapper-tag: ${{ github.sha }} 84 | sniffer-tag: ${{ github.sha }} 85 | mapper-image: mapper 86 | sniffer-image: sniffer 87 | 88 | needs: 89 | - build 90 | 91 | tag-latest: 92 | name: Tag latest 93 | if: github.ref == 'refs/heads/main' 94 | needs: e2e-test 95 | runs-on: ubuntu-latest 96 | 97 | steps: 98 | - name: Checkout 99 | uses: actions/checkout@v2 100 | 101 | 
- name: GCP auth 102 | uses: 'google-github-actions/auth@v1' 103 | with: 104 | credentials_json: ${{ secrets.B64_GCLOUD_SERVICE_ACCOUNT_JSON }} 105 | 106 | - name: Set up Cloud SDK 107 | uses: 'google-github-actions/setup-gcloud@v1' 108 | 109 | # Push the Docker image to AWS ECR 110 | - name: Tag Images as latest 111 | run: |- 112 | retag_image_as_latest() { if [[ $(gcloud container images add-tag "${{ env.REGISTRY }}/$1:${{ github.sha }}" "${{ env.REGISTRY }}/$1:latest" --quiet) ]]; then echo "Failed tagging $1 as latest"; exit 1; fi } # using --quiet to avoid prompt 113 | retag_image_as_latest mapper 114 | retag_image_as_latest sniffer 115 | retag_image_as_latest kafka-watcher 116 | -------------------------------------------------------------------------------- /.github/workflows/cla.yaml: -------------------------------------------------------------------------------- 1 | name: "CLA Assistant" 2 | on: 3 | issue_comment: 4 | types: [created] 5 | pull_request_target: 6 | types: [opened,closed,synchronize] 7 | 8 | jobs: 9 | CLAssistant: 10 | uses: otterize/cla-bot/.github/workflows/cla.yaml@otterize_cla 11 | with: 12 | comment-body: github.event.comment.body 13 | event-name: github.event_name 14 | secrets: 15 | PERSONAL_ACCESS_TOKEN : ${{ secrets.OTTERIZEBOT_GITHUB_TOKEN }} 16 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | on: 3 | pull_request: 4 | types: 5 | - opened 6 | - synchronize 7 | push: 8 | branches: 9 | - main 10 | permissions: 11 | contents: read 12 | # Optional: allow read access to pull request. Use with `only-new-issues` option. 
13 | # pull-requests: read 14 | 15 | jobs: 16 | vet: 17 | # run vet in a separate job to avoid conflicts with golangci-lint pkg-cache 18 | name: vet 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/setup-go@v3 22 | with: 23 | go-version: '1.23.5' 24 | - uses: actions/checkout@v3 25 | - name: Install dependencies 26 | run: sudo apt update && sudo apt install libpcap-dev # required for the linter to be able to lint github.com/google/gopacket 27 | - name: go generate 28 | run: go generate ./... 29 | working-directory: src 30 | - name: go vet 31 | run: go vet ./... 32 | working-directory: src/ 33 | - name: check git diff 34 | run: git diff --exit-code 35 | 36 | golangci: 37 | name: golangci-lint 38 | runs-on: ubuntu-latest 39 | steps: 40 | - uses: actions/setup-go@v3 41 | with: 42 | go-version: '1.23.5' 43 | - uses: actions/checkout@v3 44 | - name: Install dependencies 45 | run: sudo apt update && sudo apt install libpcap-dev # required for the linter to be able to lint github.com/google/gopacket 46 | - name: golangci-lint 47 | uses: golangci/golangci-lint-action@v3 48 | with: 49 | # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version 50 | version: v1.64.8 51 | 52 | # Optional: working directory, useful for monorepos 53 | working-directory: src 54 | 55 | # Optional: golangci-lint command line arguments. 56 | args: --timeout 5m --out-format github-actions 57 | 58 | # Optional: show only new issues if it's a pull request. The default value is `false`. 59 | # only-new-issues: true 60 | 61 | # Optional: if set to true then the all caching functionality will be complete disabled, 62 | # takes precedence over all other caching options. 63 | # skip-cache: true 64 | 65 | # Optional: if set to true then the action don't cache or restore ~/go/pkg. 66 | # skip-pkg-cache: true 67 | 68 | # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. 
69 | # skip-build-cache: true 70 | 71 | markdownlint: 72 | name: Check for Markdown errors 73 | runs-on: ubuntu-latest 74 | 75 | steps: 76 | - uses: actions/checkout@v4 77 | 78 | # Tip: run the markdown lint action locally with '--fix' to automatically fix some of the issues: 79 | # docker run -v $PWD:/workdir ghcr.io/igorshubovych/markdownlint-cli:latest "**/*.md" --fix 80 | - uses: articulate/actions-markdownlint@v1 81 | with: 82 | config: .markdownlint.json 83 | files: '**/*.md' -------------------------------------------------------------------------------- /.github/workflows/release-tag.yaml: -------------------------------------------------------------------------------- 1 | name: Build 2 | concurrency: 3 | group: "${{ github.repository }}${{ github.ref }}-release" 4 | 5 | on: 6 | release: 7 | types: 8 | - published 9 | 10 | env: 11 | REGISTRY: us-central1-docker.pkg.dev/main-383408/otterize 12 | 13 | jobs: 14 | push-dockerhub: 15 | name: Push to Docker Hub 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v2 21 | 22 | - name: Set up Docker Buildx 23 | id: buildx 24 | uses: docker/setup-buildx-action@master 25 | with: 26 | driver-opts: network=host 27 | 28 | - name: Login to GCR 29 | uses: docker/login-action@v2 30 | with: 31 | registry: ${{ env.REGISTRY }} 32 | username: _json_key_base64 33 | password: ${{ secrets.B64_GCLOUD_SERVICE_ACCOUNT_JSON}} 34 | 35 | - name: Login to DockerHub 36 | uses: docker/login-action@v1 37 | with: 38 | username: otterize 39 | password: ${{ secrets.DOCKER_PASSWORD }} 40 | 41 | 42 | - name: Push to Docker Hub - Mapper 43 | uses: docker/build-push-action@v2 44 | with: 45 | context: .github/workflows 46 | file: .github/workflows/release.Dockerfile 47 | tags: otterize/network-mapper:latest,otterize/network-mapper:${{ github.ref_name }} 48 | push: true 49 | network: host 50 | platforms: linux/amd64,linux/arm64 51 | build-args: | 52 | "VERSION=${{ github.ref_name }}" 53 | "SOURCE_IMAGE=${{ 
env.REGISTRY }}/mapper:${{ github.sha }}" 54 | 55 | - name: Push to Docker Hub - Sniffer 56 | uses: docker/build-push-action@v2 57 | with: 58 | context: .github/workflows 59 | file: .github/workflows/release.Dockerfile 60 | tags: otterize/network-mapper-sniffer:latest,otterize/network-mapper-sniffer:${{ github.ref_name }} 61 | push: true 62 | network: host 63 | platforms: linux/amd64,linux/arm64 64 | build-args: | 65 | "VERSION=${{ github.ref_name }}" 66 | "SOURCE_IMAGE=${{ env.REGISTRY }}/sniffer:${{ github.sha }}" 67 | 68 | - name: Push to Docker Hub - Kafka Watcher 69 | uses: docker/build-push-action@v2 70 | with: 71 | context: .github/workflows 72 | file: .github/workflows/release.Dockerfile 73 | tags: otterize/network-mapper-kafka-watcher:latest,otterize/network-mapper-kafka-watcher:${{ github.ref_name }} 74 | push: true 75 | network: host 76 | platforms: linux/amd64,linux/arm64 77 | build-args: | 78 | "VERSION=${{ github.ref_name }}" 79 | "SOURCE_IMAGE=${{ env.REGISTRY }}/kafka-watcher:${{ github.sha }}" 80 | 81 | 82 | commit-latest-build-tag-to-helm-chart: 83 | name: Commit Latest Build Tag 84 | runs-on: ubuntu-latest 85 | needs: push-dockerhub 86 | steps: 87 | - name: Checkout 88 | uses: actions/checkout@v3 89 | with: 90 | repository: "otterize/helm-charts" 91 | token: ${{ secrets.OTTERIZEBOT_GITHUB_TOKEN }} 92 | ref: main 93 | 94 | - name: Update appVersion in chart.yaml 95 | uses: mikefarah/yq@master 96 | with: 97 | cmd: yq -i '.appVersion = "${{ github.ref_name }}"' 'network-mapper/Chart.yaml' 98 | 99 | - name: Setup git config 100 | run: | 101 | git config user.name "otterizebot" 102 | git config user.email "otterizebot@users.noreply.github.com" 103 | 104 | - name: Commit 105 | run: | 106 | git add --all 107 | git commit -m "Setting latest tag in network-mapper helm chart repo to ${{ github.ref_name }}" 108 | git push origin HEAD 109 | 110 | -------------------------------------------------------------------------------- 
/.github/workflows/release.Dockerfile: -------------------------------------------------------------------------------- 1 | # The only purpose this Dockerfile serves is to be able to run buildx to push a multi-platform image without rebuilding. 2 | 3 | ARG SOURCE_IMAGE 4 | 5 | FROM alpine as releaser 6 | ARG VERSION 7 | RUN echo -n $VERSION > ./version 8 | 9 | FROM $SOURCE_IMAGE 10 | COPY --from=releaser /version . -------------------------------------------------------------------------------- /.github/workflows/tests-expected-results/kafka-tutorial-intents.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "kind": "ClientIntents", 4 | "apiVersion": "k8s.otterize.com/v2beta1", 5 | "metadata": { 6 | "name": "client", 7 | "namespace": "otterize-tutorial-kafka-mapping", 8 | "creationTimestamp": null 9 | }, 10 | "spec": { 11 | "workload": { 12 | "name": "client" 13 | }, 14 | "targets": [ 15 | { 16 | "kafka": { 17 | "name": "kafka.kafka", 18 | "topics": [ 19 | { 20 | "name": "mytopic", 21 | "operations": [ 22 | "consume", 23 | "describe" 24 | ] 25 | } 26 | ] 27 | } 28 | }, 29 | { 30 | "kubernetes": { 31 | "name": "kafka.kafka", 32 | "kind": "StatefulSet" 33 | } 34 | } 35 | ] 36 | }, 37 | "status": { 38 | "upToDate": false, 39 | "observedGeneration": 0 40 | } 41 | }, 42 | { 43 | "kind": "ClientIntents", 44 | "apiVersion": "k8s.otterize.com/v2beta1", 45 | "metadata": { 46 | "name": "client-2", 47 | "namespace": "otterize-tutorial-kafka-mapping", 48 | "creationTimestamp": null 49 | }, 50 | "spec": { 51 | "workload": { 52 | "name": "client-2" 53 | }, 54 | "targets": [ 55 | { 56 | "kafka": { 57 | "name": "kafka.kafka", 58 | "topics": [ 59 | { 60 | "name": "mytopic", 61 | "operations": [ 62 | "describe", 63 | "produce" 64 | ] 65 | } 66 | ] 67 | } 68 | }, 69 | { 70 | "kubernetes": { 71 | "name": "kafka.kafka", 72 | "kind": "StatefulSet" 73 | } 74 | } 75 | ] 76 | }, 77 | "status": { 78 | "upToDate": false, 79 | 
"observedGeneration": 0 80 | } 81 | } 82 | ] 83 | -------------------------------------------------------------------------------- /.github/workflows/tests-expected-results/simple-tutorial-intents.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "kind": "ClientIntents", 4 | "apiVersion": "k8s.otterize.com/v2beta1", 5 | "metadata": { 6 | "name": "client", 7 | "namespace": "otterize-tutorial-mapper", 8 | "creationTimestamp": null 9 | }, 10 | "spec": { 11 | "workload": { 12 | "name": "client", 13 | "kind": "Deployment" 14 | }, 15 | "targets": [ 16 | { 17 | "kubernetes": { 18 | "name": "server", 19 | "kind": "Deployment" 20 | } 21 | } 22 | ] 23 | }, 24 | "status": { 25 | "upToDate": false, 26 | "observedGeneration": 0 27 | } 28 | }, 29 | { 30 | "kind": "ClientIntents", 31 | "apiVersion": "k8s.otterize.com/v2beta1", 32 | "metadata": { 33 | "name": "client2", 34 | "namespace": "otterize-tutorial-mapper", 35 | "creationTimestamp": null 36 | }, 37 | "spec": { 38 | "workload": { 39 | "name": "client2", 40 | "kind": "Deployment" 41 | }, 42 | "targets": [ 43 | { 44 | "kubernetes": { 45 | "name": "server", 46 | "kind": "Deployment" 47 | } 48 | } 49 | ] 50 | }, 51 | "status": { 52 | "upToDate": false, 53 | "observedGeneration": 0 54 | } 55 | } 56 | ] 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | bin/ 8 | 9 | # Test binary, built with `go test -c` 10 | *.test 11 | 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | 15 | # Dependency directories (remove the comment below to include it) 16 | # vendor/ 17 | 18 | # IDE 19 | .idea/ 20 | *.iml 21 | -------------------------------------------------------------------------------- /.gitmodules: 
-------------------------------------------------------------------------------- 1 | [submodule "helm-charts"] 2 | path = helm-charts 3 | url = https://github.com/otterize/helm-charts 4 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | # See https://golangci-lint.run/usage/linters/ for linters explanations 2 | linters: 3 | disable-all: true 4 | enable: 5 | # Enabled by default 6 | - errcheck 7 | - govet 8 | - ineffassign 9 | - staticcheck 10 | - typecheck 11 | - unused 12 | - nilnil 13 | - nilerr -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "default": true, 3 | "MD013": false, 4 | "MD033": false 5 | } -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at . All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | 77 | -------------------------------------------------------------------------------- /Justfile: -------------------------------------------------------------------------------- 1 | ## Justfiles are modernized Makefiles. 2 | ## To install just: 3 | ## - macOS: `brew install just` 4 | ## - Ubuntu: `sudo apt install just` 5 | ## - Fedora: `sudo dnf install just` 6 | ## To list tasks: `just --list` (or `just`) 7 | ## To run a task: `just ` 8 | ## To set a variable: `just =` 9 | ## e.g. `just build-images image-tag=latest` 10 | 11 | image-tag := "local" 12 | 13 | list-tasks: 14 | @just --list 15 | 16 | generate: 17 | #!/usr/bin/env bash 18 | set -euxo pipefail 19 | cd src/ 20 | go generate ./... 
21 | 22 | build-mapper: 23 | #!/usr/bin/env bash 24 | set -euxo pipefail 25 | cd src/ 26 | go build -o ../bin/mapper ./mapper/cmd 27 | 28 | build-kafka-watcher: 29 | #!/usr/bin/env bash 30 | set -euxo pipefail 31 | cd src/ 32 | go build -o ../bin/kafka-watcher ./kafka-watcher/cmd 33 | 34 | build-sniffer: 35 | #!/usr/bin/env bash 36 | set -euxo pipefail 37 | cd src/ 38 | go build -o ../bin/sniffer ./sniffer/cmd 39 | 40 | build: generate build-mapper build-kafka-watcher build-sniffer 41 | 42 | build-mapper-image: 43 | #!/usr/bin/env bash 44 | set -euxo pipefail 45 | cd src/ 46 | docker buildx build \ 47 | --platform linux/amd64,linux/arm64 \ 48 | -t otterize/network-mapper:{{image-tag}} \ 49 | -f ../build/mapper.Dockerfile \ 50 | . 51 | 52 | build-kafka-watcher-image: 53 | #!/usr/bin/env bash 54 | set -euxo pipefail 55 | cd src/ 56 | docker buildx build \ 57 | --platform linux/amd64,linux/arm64 \ 58 | -t otterize/kafka-watcher:{{image-tag}} \ 59 | -f ../build/kafka-watcher.Dockerfile \ 60 | . 61 | 62 | build-sniffer-image: 63 | #!/usr/bin/env bash 64 | set -euxo pipefail 65 | cd src/ 66 | docker buildx build \ 67 | --platform linux/amd64,linux/arm64 \ 68 | -t otterize/sniffer:{{image-tag}} \ 69 | -f ../build/sniffer.Dockerfile \ 70 | . 71 | 72 | build-images: generate build-mapper-image build-kafka-watcher-image build-sniffer-image 73 | -------------------------------------------------------------------------------- /build/kafka-watcher.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM golang:1.23.5-alpine AS buildenv 2 | RUN apk add --no-cache ca-certificates git protoc 3 | RUN apk add build-base libpcap-dev 4 | WORKDIR /src 5 | 6 | # restore dependencies 7 | COPY go.mod go.sum ./ 8 | RUN go mod download 9 | 10 | COPY . . 11 | 12 | FROM buildenv AS test 13 | RUN go test ./kafka-watcher/... 
14 | 15 | FROM test AS builder 16 | ARG TARGETOS 17 | ARG TARGETARCH 18 | RUN CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -trimpath -o /main ./kafka-watcher/cmd 19 | 20 | # add version file 21 | ARG VERSION 22 | RUN echo -n $VERSION > /version 23 | 24 | # Use distroless as minimal base image to package the manager binary 25 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 26 | FROM gcr.io/distroless/static:nonroot 27 | COPY --from=builder /main /main 28 | COPY --from=builder /version . 29 | USER 65532:65532 30 | 31 | ENTRYPOINT ["/main"] 32 | -------------------------------------------------------------------------------- /build/mapper.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM golang:1.23.5-alpine AS buildenv 2 | RUN apk add --no-cache ca-certificates git protoc 3 | RUN apk add build-base libpcap-dev 4 | WORKDIR /src 5 | 6 | # restore dependencies 7 | COPY go.mod go.sum ./ 8 | RUN go mod download 9 | 10 | COPY . . 11 | 12 | FROM buildenv AS test 13 | # install dependencies for "envtest" package 14 | RUN go install sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20230216140739-c98506dc3b8e && \ 15 | source <(setup-envtest use -p env) && \ 16 | mkdir -p /usr/local/kubebuilder && \ 17 | ln -s "$KUBEBUILDER_ASSETS" /usr/local/kubebuilder/bin 18 | RUN go test ./mapper/... 
19 | 20 | FROM test AS builder 21 | ARG TARGETOS 22 | ARG TARGETARCH 23 | 24 | RUN echo "Building for $TARGETOS/$TARGETARCH - Running on $(uname -m)" 25 | RUN CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -trimpath -o /main ./mapper/cmd 26 | 27 | # add version file 28 | ARG VERSION 29 | RUN echo -n $VERSION > /version 30 | 31 | # Use distroless as minimal base image to package the manager binary 32 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 33 | FROM gcr.io/distroless/static:nonroot 34 | COPY --from=builder /main /main 35 | COPY --from=builder /version . 36 | USER 65532:65532 37 | 38 | EXPOSE 9090 39 | ENTRYPOINT ["/main"] 40 | -------------------------------------------------------------------------------- /build/sniffer.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 golang:1.23.5-alpine AS buildenv 2 | RUN apk add --no-cache ca-certificates git protoc 3 | RUN apk add build-base libpcap-dev 4 | WORKDIR /src 5 | 6 | # restore dependencies 7 | COPY go.mod go.sum ./ 8 | RUN go mod download 9 | 10 | COPY . . 11 | 12 | FROM buildenv AS test 13 | RUN go test ./sniffer/... && echo dep > /dep 14 | 15 | # We start from the base image again, only this time it's using the target arch instead of always amd64. This is done to make the build faster. 16 | # Unlike the mapper, it can't be amd64 throughout and use Go's cross-compilation, since the sniffer depends on libpcap (C library). 17 | FROM golang:1.23.5-alpine AS builder 18 | COPY --from=test /dep /dep 19 | RUN apk add --no-cache ca-certificates git protoc 20 | RUN apk add build-base libpcap-dev 21 | WORKDIR /src 22 | 23 | # restore dependencies 24 | COPY . . 
25 | RUN go mod download 26 | RUN go build -trimpath -o /main ./sniffer/cmd 27 | 28 | # add version file 29 | ARG VERSION 30 | RUN echo -n $VERSION > /version 31 | 32 | FROM alpine AS release 33 | RUN apk add --no-cache ca-certificates libpcap 34 | WORKDIR / 35 | COPY --from=builder /main /main 36 | RUN chmod +x /main 37 | COPY --from=builder /version . 38 | 39 | ENTRYPOINT ["/main"] 40 | -------------------------------------------------------------------------------- /cloud-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/otterize/network-mapper/24ee0adf90a8036bf524419736c3f18a2b80dee4/cloud-example.png -------------------------------------------------------------------------------- /otterhelm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/otterize/network-mapper/24ee0adf90a8036bf524419736c3f18a2b80dee4/otterhelm.png -------------------------------------------------------------------------------- /src/graphql.config.yml: -------------------------------------------------------------------------------- 1 | schema: 2 | - ./mappergraphql/schema.graphql 3 | -------------------------------------------------------------------------------- /src/istio-watcher/pkg/watcher/generate.go: -------------------------------------------------------------------------------- 1 | package istiowatcher 2 | 3 | //go:generate go run go.uber.org/mock/mockgen@v0.2.0 -source=watcher.go -destination=mocks/mocks.go 4 | -------------------------------------------------------------------------------- /src/istio-watcher/pkg/watcher/helpers.go: -------------------------------------------------------------------------------- 1 | package istiowatcher 2 | 3 | import ( 4 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 5 | "github.com/samber/lo" 6 | "golang.org/x/exp/slices" 7 | "net/http" 8 | "time" 9 | ) 10 | 11 | var 
HTTPMethodsToGQLMethods = map[string]model.HTTPMethod{ 12 | http.MethodGet: model.HTTPMethodGet, 13 | http.MethodPost: model.HTTPMethodPost, 14 | http.MethodPut: model.HTTPMethodPut, 15 | http.MethodDelete: model.HTTPMethodDelete, 16 | http.MethodOptions: model.HTTPMethodOptions, 17 | http.MethodTrace: model.HTTPMethodTrace, 18 | http.MethodPatch: model.HTTPMethodPatch, 19 | http.MethodConnect: model.HTTPMethodConnect, 20 | } 21 | 22 | type ConnectionPairWithPath struct { 23 | SourceWorkload string `json:"sourceWorkload"` 24 | DestinationWorkload string `json:"destinationWorkload"` 25 | RequestPath string `json:"requestPath"` 26 | } 27 | 28 | func ToGraphQLIstioConnections(connections map[ConnectionWithPath]time.Time) []model.IstioConnection { 29 | connectionPairToGraphQLConnection := map[ConnectionPairWithPath]model.IstioConnection{} 30 | 31 | for connWithPath, timestamp := range connections { 32 | connectionPair := ConnectionPairWithPath{ 33 | SourceWorkload: connWithPath.SourceWorkload, 34 | DestinationWorkload: connWithPath.DestinationWorkload, 35 | RequestPath: connWithPath.RequestPath, 36 | } 37 | 38 | istioConnection, ok := connectionPairToGraphQLConnection[connectionPair] 39 | if !ok { 40 | istioConnection = model.IstioConnection{ 41 | SrcWorkload: connWithPath.SourceWorkload, 42 | SrcWorkloadNamespace: connWithPath.SourceNamespace, 43 | DstWorkload: connWithPath.DestinationWorkload, 44 | DstWorkloadNamespace: connWithPath.DestinationNamespace, 45 | DstServiceName: connWithPath.DestinationServiceName, 46 | Path: connWithPath.RequestPath, 47 | LastSeen: timestamp, 48 | } 49 | 50 | method, ok := HTTPMethodsToGQLMethods[connWithPath.RequestMethod] 51 | if ok { 52 | istioConnection.Methods = []model.HTTPMethod{method} 53 | } 54 | 55 | connectionPairToGraphQLConnection[connectionPair] = istioConnection 56 | continue 57 | } 58 | 59 | if timestamp.After(istioConnection.LastSeen) { 60 | istioConnection.LastSeen = timestamp 61 | } 62 | 63 | method, ok := 
HTTPMethodsToGQLMethods[connWithPath.RequestMethod] 64 | if ok && !slices.Contains(istioConnection.Methods, method) { 65 | istioConnection.Methods = append(istioConnection.Methods, method) 66 | } 67 | 68 | connectionPairToGraphQLConnection[connectionPair] = istioConnection 69 | } 70 | 71 | return lo.Values(connectionPairToGraphQLConnection) 72 | } 73 | -------------------------------------------------------------------------------- /src/istio-watcher/pkg/watcher/istioconnectionmatcher.go: -------------------------------------------------------------------------------- 1 | package istiowatcher 2 | 3 | import ( 4 | "fmt" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 6 | "github.com/samber/lo" 7 | "golang.org/x/exp/slices" 8 | ) 9 | 10 | // IstioConnectionResultMatcher Implement gomock.Matcher interface for []model.IstioConnectionResults 11 | type IstioConnectionResultMatcher struct { 12 | model.IstioConnectionResults 13 | } 14 | 15 | func (m *IstioConnectionResultMatcher) Matches(x interface{}) bool { 16 | actual, ok := x.(model.IstioConnectionResults) 17 | if !ok { 18 | return false 19 | } 20 | 21 | if len(actual.Results) != len(m.Results) { 22 | return false 23 | } 24 | 25 | for _, actualResult := range actual.Results { 26 | anyResultsEqual := lo.SomeBy(m.Results, func(expectedResult model.IstioConnection) bool { 27 | return compareConnections(actualResult, expectedResult) 28 | }) 29 | if !anyResultsEqual { 30 | return false 31 | } 32 | } 33 | 34 | return true 35 | } 36 | 37 | func compareConnections(actualResult model.IstioConnection, expectedResult model.IstioConnection) bool { 38 | if actualResult.SrcWorkload != expectedResult.SrcWorkload { 39 | return false 40 | } 41 | if actualResult.SrcWorkloadNamespace != expectedResult.SrcWorkloadNamespace { 42 | return false 43 | } 44 | if actualResult.DstWorkload != expectedResult.DstWorkload { 45 | return false 46 | } 47 | if actualResult.DstWorkloadNamespace != expectedResult.DstWorkloadNamespace { 
48 | return false 49 | } 50 | if actualResult.Path != expectedResult.Path { 51 | return false 52 | } 53 | if len(actualResult.Methods) != len(expectedResult.Methods) { 54 | return false 55 | } 56 | slices.Sort(actualResult.Methods) 57 | slices.Sort(expectedResult.Methods) 58 | for j, actualMethod := range actualResult.Methods { 59 | expectedMethod := expectedResult.Methods[j] 60 | if actualMethod != expectedMethod { 61 | return false 62 | } 63 | } 64 | 65 | // We ignore last seen during testing 66 | return true 67 | } 68 | 69 | func (m *IstioConnectionResultMatcher) String() string { 70 | return fmt.Sprintf("%v", m.Results) 71 | } 72 | 73 | func GetMatcher(results model.IstioConnectionResults) *IstioConnectionResultMatcher { 74 | return &IstioConnectionResultMatcher{results} 75 | } 76 | -------------------------------------------------------------------------------- /src/istio-watcher/pkg/watcher/mocks/mocks.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: watcher.go 3 | 4 | // Package mock_istiowatcher is a generated GoMock package. 5 | package mock_istiowatcher 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | 11 | model "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 12 | gomock "go.uber.org/mock/gomock" 13 | ) 14 | 15 | // MockIstioReporter is a mock of IstioReporter interface. 16 | type MockIstioReporter struct { 17 | ctrl *gomock.Controller 18 | recorder *MockIstioReporterMockRecorder 19 | } 20 | 21 | // MockIstioReporterMockRecorder is the mock recorder for MockIstioReporter. 22 | type MockIstioReporterMockRecorder struct { 23 | mock *MockIstioReporter 24 | } 25 | 26 | // NewMockIstioReporter creates a new mock instance. 
27 | func NewMockIstioReporter(ctrl *gomock.Controller) *MockIstioReporter { 28 | mock := &MockIstioReporter{ctrl: ctrl} 29 | mock.recorder = &MockIstioReporterMockRecorder{mock} 30 | return mock 31 | } 32 | 33 | // EXPECT returns an object that allows the caller to indicate expected use. 34 | func (m *MockIstioReporter) EXPECT() *MockIstioReporterMockRecorder { 35 | return m.recorder 36 | } 37 | 38 | // ReportIstioConnectionResults mocks base method. 39 | func (m *MockIstioReporter) ReportIstioConnectionResults(ctx context.Context, results model.IstioConnectionResults) (bool, error) { 40 | m.ctrl.T.Helper() 41 | ret := m.ctrl.Call(m, "ReportIstioConnectionResults", ctx, results) 42 | ret0, _ := ret[0].(bool) 43 | ret1, _ := ret[1].(error) 44 | return ret0, ret1 45 | } 46 | 47 | // ReportIstioConnectionResults indicates an expected call of ReportIstioConnectionResults. 48 | func (mr *MockIstioReporterMockRecorder) ReportIstioConnectionResults(ctx, results interface{}) *gomock.Call { 49 | mr.mock.ctrl.T.Helper() 50 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportIstioConnectionResults", reflect.TypeOf((*MockIstioReporter)(nil).ReportIstioConnectionResults), ctx, results) 51 | } 52 | -------------------------------------------------------------------------------- /src/kafka-watcher/pkg/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "github.com/spf13/viper" 5 | "time" 6 | ) 7 | 8 | const ( 9 | KubernetesLogReadMode string = "k8s-logs" 10 | FileReadMode string = "file-logs" 11 | ) 12 | 13 | const ( 14 | KafkaLogReadModeKey = "kafka-log-read-mode" 15 | KafkaLogReadModeDefault = KubernetesLogReadMode 16 | KafkaServersKey = "kafka-servers" 17 | KafkaReportIntervalKey = "kafka-report-interval" 18 | KafkaReportIntervalDefault = 10 * time.Second 19 | KafkaCooldownIntervalKey = "kafka-cooldown-interval" 20 | KafkaCooldownIntervalDefault = 10 * time.Second 21 | 
KafkaAuthZLogPathKey = "kafka-authz-log-path" 22 | KafkaAuthZLogPathDefault = "/opt/otterize/kafka-watcher/authz.log" 23 | ) 24 | 25 | func init() { 26 | viper.SetDefault(KafkaReportIntervalKey, KafkaReportIntervalDefault) 27 | viper.SetDefault(KafkaServersKey, []string{}) 28 | viper.SetDefault(KafkaCooldownIntervalKey, KafkaCooldownIntervalDefault) 29 | viper.SetDefault(KafkaAuthZLogPathKey, KafkaAuthZLogPathDefault) 30 | viper.SetDefault(KafkaLogReadModeKey, KafkaLogReadModeDefault) 31 | } 32 | -------------------------------------------------------------------------------- /src/kafka-watcher/pkg/logwatcher/filewatcher.go: -------------------------------------------------------------------------------- 1 | package logwatcher 2 | 3 | import ( 4 | "context" 5 | "github.com/nxadm/tail" 6 | "github.com/otterize/network-mapper/src/kafka-watcher/pkg/config" 7 | "github.com/otterize/network-mapper/src/mapperclient" 8 | "github.com/sirupsen/logrus" 9 | "github.com/spf13/viper" 10 | "io" 11 | "k8s.io/apimachinery/pkg/types" 12 | "sync" 13 | "time" 14 | ) 15 | 16 | type LogFileWatcher struct { 17 | baseWatcher 18 | 19 | authzFilePath string 20 | server types.NamespacedName 21 | } 22 | 23 | func NewLogFileWatcher(mapperClient *mapperclient.Client, authzFilePath string, server types.NamespacedName) (*LogFileWatcher, error) { 24 | w := &LogFileWatcher{ 25 | baseWatcher: baseWatcher{ 26 | mu: sync.Mutex{}, 27 | seen: SeenRecordsStore{}, 28 | mapperClient: mapperClient, 29 | }, 30 | authzFilePath: authzFilePath, 31 | server: server, 32 | } 33 | 34 | return w, nil 35 | } 36 | 37 | func (w *LogFileWatcher) RunForever(ctx context.Context) error { 38 | go w.watchForever(ctx) 39 | 40 | for { 41 | time.Sleep(viper.GetDuration(config.KafkaReportIntervalKey)) 42 | 43 | if err := w.reportResults(ctx); err != nil { 44 | logrus.WithError(err).Errorf("Failed reporting watcher results to mapper") 45 | } 46 | } 47 | } 48 | 49 | func (w *LogFileWatcher) watchForever(ctx context.Context) { 50 
| t, err := tail.TailFile(w.authzFilePath, tail.Config{Follow: true, ReOpen: true, MustExist: false, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}}) 51 | 52 | if err != nil { 53 | logrus.WithError(err).Panic() 54 | } 55 | 56 | for line := range t.Lines { 57 | w.processLogRecord(w.server, line.Text) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/kafka-watcher/pkg/logwatcher/kubeneteslogwatcher.go: -------------------------------------------------------------------------------- 1 | package logwatcher 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "github.com/otterize/intents-operator/src/shared/errors" 7 | "github.com/otterize/network-mapper/src/kafka-watcher/pkg/config" 8 | "github.com/otterize/network-mapper/src/mapperclient" 9 | "github.com/sirupsen/logrus" 10 | "github.com/spf13/viper" 11 | "golang.org/x/exp/slices" 12 | corev1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/types" 15 | "k8s.io/client-go/kubernetes" 16 | "k8s.io/client-go/rest" 17 | "k8s.io/client-go/tools/clientcmd" 18 | "k8s.io/client-go/util/homedir" 19 | "path/filepath" 20 | "reflect" 21 | "sync" 22 | "time" 23 | ) 24 | 25 | type KubernetesLogWatcher struct { 26 | baseWatcher 27 | clientset *kubernetes.Clientset 28 | kafkaServers []types.NamespacedName 29 | } 30 | 31 | func NewKubernetesLogWatcher(mapperClient *mapperclient.Client, kafkaServers []types.NamespacedName) (*KubernetesLogWatcher, error) { 32 | conf, err := rest.InClusterConfig() 33 | 34 | if err != nil && !errors.Is(err, rest.ErrNotInCluster) { 35 | return nil, errors.Wrap(err) 36 | } 37 | 38 | // We try building the REST Config from ./kube/config to support running the watcher locally 39 | if conf == nil { 40 | conf, err = clientcmd.BuildConfigFromFlags("", filepath.Join(homedir.HomeDir(), ".kube", "config")) 41 | if err != nil { 42 | return nil, errors.Wrap(err) 43 | } 44 | } 45 | 46 | cs, err := 
kubernetes.NewForConfig(conf) 47 | if err != nil { 48 | return nil, errors.Wrap(err) 49 | } 50 | 51 | w := &KubernetesLogWatcher{ 52 | baseWatcher: baseWatcher{ 53 | mu: sync.Mutex{}, 54 | seen: SeenRecordsStore{}, 55 | mapperClient: mapperClient, 56 | }, 57 | clientset: cs, 58 | kafkaServers: kafkaServers, 59 | } 60 | 61 | return w, nil 62 | } 63 | 64 | func (w *KubernetesLogWatcher) RunForever(ctx context.Context) error { 65 | err := w.validateKafkaServers(ctx) 66 | 67 | if err != nil { 68 | return errors.Wrap(err) 69 | } 70 | 71 | for _, kafkaServer := range w.kafkaServers { 72 | go w.watchForever(ctx, kafkaServer) 73 | } 74 | 75 | for { 76 | time.Sleep(viper.GetDuration(config.KafkaReportIntervalKey)) 77 | if err := w.reportResults(ctx); err != nil { 78 | logrus.WithError(err).Errorf("Failed reporting watcher results to mapper") 79 | } 80 | } 81 | } 82 | 83 | func (w *KubernetesLogWatcher) watchOnce(ctx context.Context, kafkaServer types.NamespacedName, startTime time.Time) error { 84 | pod, err := w.clientset.CoreV1().Pods(kafkaServer.Namespace).Get(ctx, kafkaServer.Name, metav1.GetOptions{}) 85 | if err != nil { 86 | return errors.Wrap(err) 87 | } 88 | if pod.Status.Phase != corev1.PodRunning { 89 | logrus.Debugf("Kafka server %s is not running, skipping logs for this iteration", kafkaServer.String()) 90 | return nil 91 | } 92 | podLogOpts := corev1.PodLogOptions{ 93 | SinceTime: &metav1.Time{Time: startTime}, 94 | } 95 | ctxTimeout, cancel := context.WithTimeout(ctx, 1*time.Minute) 96 | defer cancel() 97 | req := w.clientset.CoreV1().Pods(kafkaServer.Namespace).GetLogs(kafkaServer.Name, &podLogOpts) 98 | reader, err := req.Stream(ctxTimeout) 99 | if err != nil { 100 | return errors.Wrap(err) 101 | } 102 | 103 | defer reader.Close() 104 | 105 | s := bufio.NewScanner(reader) 106 | s.Split(bufio.ScanLines) 107 | for s.Scan() { 108 | w.processLogRecord(kafkaServer, s.Text()) 109 | } 110 | 111 | return nil 112 | } 113 | 114 | func (w *KubernetesLogWatcher) 
watchForever(ctx context.Context, kafkaServer types.NamespacedName) { 115 | log := logrus.WithField("pod", kafkaServer) 116 | cooldownPeriod := viper.GetDuration(config.KafkaCooldownIntervalKey) 117 | readFromTime := time.Now().Add(-(viper.GetDuration(config.KafkaCooldownIntervalKey))) 118 | 119 | for { 120 | log.Info("Watching logs") 121 | err := w.watchOnce(ctx, kafkaServer, readFromTime) 122 | 123 | if err != nil { 124 | if errors.Is(err, context.DeadlineExceeded) { 125 | continue 126 | } 127 | log.WithError(err).Error("Error watching logs") 128 | } 129 | 130 | readFromTime = time.Now() 131 | log.Infof("Waiting %s before watching logs again...", cooldownPeriod) 132 | 133 | time.Sleep(cooldownPeriod) 134 | } 135 | } 136 | 137 | func (w *KubernetesLogWatcher) validateKafkaServers(ctx context.Context) error { 138 | invalidServers := make([]string, 0) 139 | for _, kafkaServer := range w.kafkaServers { 140 | _, err := w.clientset.CoreV1().Pods(kafkaServer.Namespace).Get(ctx, kafkaServer.Name, metav1.GetOptions{}) 141 | if err != nil { 142 | invalidServers = append(invalidServers, kafkaServer.String()) 143 | } 144 | } 145 | if len(invalidServers) == 0 { 146 | return nil 147 | } 148 | logrus.Errorf("The following Kafka servers were not found or unreachable: %s", invalidServers) 149 | 150 | if reflect.DeepEqual(invalidServers, w.kafkaServers) { 151 | return errors.New("failed validating all Kafka servers") 152 | } 153 | validServers := make([]string, 0) 154 | for _, server := range w.kafkaServers { 155 | if !slices.Contains(invalidServers, server.String()) { 156 | validServers = append(validServers, server.String()) 157 | } 158 | } 159 | 160 | logrus.Infof("Kafka watcher will run for the following servers: %s", validServers) 161 | return nil 162 | } 163 | -------------------------------------------------------------------------------- /src/kafka-watcher/pkg/logwatcher/watcher.go: -------------------------------------------------------------------------------- 1 | 
package logwatcher 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "github.com/oriser/regroup" 7 | "github.com/otterize/network-mapper/src/kafka-watcher/pkg/prometheus" 8 | "github.com/otterize/network-mapper/src/mapperclient" 9 | "github.com/samber/lo" 10 | "github.com/sirupsen/logrus" 11 | "k8s.io/apimachinery/pkg/types" 12 | "sync" 13 | "time" 14 | ) 15 | 16 | // AclAuthorizerRegex matches & decodes AclAuthorizer log records. 17 | // Sample log record for reference: 18 | // [2023-03-12 13:51:55,904] INFO Principal = User:2.5.4.45=#13206331373734376636373865323137613636346130653335393130326638303662,CN=myclient.otterize-tutorial-kafka-mtls,O=SPIRE,C=US is Denied Operation = Describe from host = 10.244.0.27 on resource = Topic:LITERAL:mytopic for request = Metadata with resourceRefCount = 1 (kafka.authorizer.logger) 19 | var AclAuthorizerRegex = regroup.MustCompile( 20 | `^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d+\] [A-Z]+ Principal = \S+ is (?P<access>\S+) Operation = (?P<operation>\S+) from host = (?P<host>\S+) on resource = Topic:LITERAL:(?P<topic>.+) for request = \S+ with resourceRefCount = \d+ \(kafka\.authorizer\.logger\)$`, 21 | ) 22 | 23 | type AuthorizerRecord struct { 24 | Server types.NamespacedName 25 | Access string `regroup:"access"` 26 | Operation string `regroup:"operation"` 27 | Host string `regroup:"host"` 28 | Topic string `regroup:"topic"` 29 | } 30 | 31 | type SeenRecordsStore map[AuthorizerRecord]time.Time 32 | 33 | type Watcher interface { 34 | RunForever(ctx context.Context) error 35 | } 36 | 37 | type baseWatcher struct { 38 | mu sync.Mutex 39 | seen SeenRecordsStore 40 | mapperClient *mapperclient.Client 41 | } 42 | 43 | func (b *baseWatcher) flush() SeenRecordsStore { 44 | b.mu.Lock() 45 | defer b.mu.Unlock() 46 | r := b.seen 47 | b.seen = SeenRecordsStore{} 48 | return r 49 | } 50 | 51 | func (b *baseWatcher) reportResults(ctx context.Context) error { 52 | records := b.flush() 53 | 54 | cRecords := len(records) 55 | 56 | if cRecords == 0 { 57 | logrus.Infof("Zero 
records, not reporting") 58 | return nil 59 | } 60 | 61 | logrus.Infof("Reporting %d records", cRecords) 62 | prometheus.IncrementKafkaTopicReports(cRecords) 63 | 64 | results := lo.MapToSlice(records, func(r AuthorizerRecord, t time.Time) mapperclient.KafkaMapperResult { 65 | return mapperclient.KafkaMapperResult{ 66 | SrcIp: r.Host, 67 | ServerPodName: r.Server.Name, 68 | ServerNamespace: r.Server.Namespace, 69 | Topic: r.Topic, 70 | Operation: r.Operation, 71 | LastSeen: t, 72 | } 73 | }) 74 | 75 | return b.mapperClient.ReportKafkaMapperResults(ctx, mapperclient.KafkaMapperResults{Results: results}) 76 | } 77 | 78 | func (b *baseWatcher) processLogRecord(kafkaServer types.NamespacedName, record string) { 79 | authorizerRecord := AuthorizerRecord{ 80 | Server: kafkaServer, 81 | } 82 | if err := AclAuthorizerRegex.MatchToTarget(record, &authorizerRecord); errors.Is(err, ®roup.NoMatchFoundError{}) { 83 | return 84 | } else if err != nil { 85 | logrus.Errorf("Error matching authorizer regex: %s", err) 86 | return 87 | } 88 | 89 | b.mu.Lock() 90 | defer b.mu.Unlock() 91 | b.seen[authorizerRecord] = time.Now() 92 | } 93 | -------------------------------------------------------------------------------- /src/kafka-watcher/pkg/prometheus/metrics.go: -------------------------------------------------------------------------------- 1 | package prometheus 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "github.com/prometheus/client_golang/prometheus/promauto" 6 | ) 7 | 8 | var ( 9 | topicReports = promauto.NewCounter(prometheus.CounterOpts{ 10 | Name: "kafka_reported_topics", 11 | Help: "The total number of Kafka topics reported.", 12 | }) 13 | ) 14 | 15 | func IncrementKafkaTopicReports(count int) { 16 | topicReports.Add(float64(count)) 17 | } 18 | -------------------------------------------------------------------------------- /src/mapper/fix-errors-import.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env sh 2 | # This script is a workaround to this issue: https://github.com/99designs/gqlgen/issues/1171 3 | # Until they make it possible to use a custom errors package, we run this replace after gqlgen generate. 4 | 5 | if find . -name "*.resolvers.go" -exec false {} + 6 | then 7 | echo 'no files found' 8 | exit 0 9 | fi 10 | 11 | # Remove blank lines from imports to let gofmt sort all import lines consistently 12 | find . -name "*.resolvers.go" -exec sed -i '' -e ' 13 | /^import/,/)/ { 14 | /^$/ d 15 | } 16 | ' {} + 17 | 18 | # Run gofmt, replacing native "errors" pkg with out own 19 | find . -name "*.resolvers.go" -exec gofmt -w -r '"errors" -> "github.com/otterize/intents-operator/src/shared/errors"' {} + -------------------------------------------------------------------------------- /src/mapper/generate.go: -------------------------------------------------------------------------------- 1 | package mapper 2 | 3 | //go:generate go run github.com/99designs/gqlgen@v0.17.44 4 | //go:generate ./fix-errors-import.sh 5 | //go:generate go run go.uber.org/mock/mockgen@v0.2.0 -destination=./pkg/mocks/mock_k8s_client.go -package=mocks -mock_names Client=K8sClient,SubResourceWriter=K8sStatus sigs.k8s.io/controller-runtime/pkg/client Client,SubResourceWriter 6 | //go:generate go run go.uber.org/mock/mockgen@v0.2.0 -destination=./pkg/mocks/mock_kubefinder.go -package=mocks -source=./pkg/resourcevisibility/svc_reconciler.go KubeFinder 7 | -------------------------------------------------------------------------------- /src/mapper/gqlgen.yml: -------------------------------------------------------------------------------- 1 | # Where are all the schema files located? globs are supported eg src/**/*.graphqls 2 | schema: 3 | - '../mappergraphql/*.graphql' 4 | 5 | # Where should the generated server code go? 
6 | exec: 7 | filename: pkg/graph/generated/generated.go 8 | package: generated 9 | 10 | # Uncomment to enable federation 11 | # federation: 12 | # filename: graph/generated/federation.go 13 | # package: generated 14 | 15 | # Where should any generated models go? 16 | model: 17 | filename: pkg/graph/model/models_gen.go 18 | package: model 19 | 20 | # Where should the resolver implementations go? 21 | resolver: 22 | layout: follow-schema 23 | dir: pkg/resolvers 24 | package: resolvers 25 | 26 | # Optional: turn on use ` + "`" + `gqlgen:"fieldName"` + "`" + ` tags in your models 27 | # struct_tag: json 28 | 29 | # Optional: turn on to use []Thing instead of []*Thing 30 | omit_slice_element_pointers: true 31 | 32 | # Optional: set to speed up generation time by not performing a final validation pass. 33 | skip_validation: true 34 | 35 | # gqlgen will search for any type names in the schema in these go packages 36 | # if they match it will use them, otherwise it will generate them. 37 | # autobind: 38 | 39 | # This section declares type mapping between the GraphQL and go type systems 40 | # 41 | # The first line in each type will be used as defaults for resolver arguments and 42 | # modelgen, the others will be allowed when binding to fields. 
Configure them to 43 | # your liking 44 | models: 45 | ID: 46 | model: 47 | - github.com/99designs/gqlgen/graphql.ID 48 | - github.com/99designs/gqlgen/graphql.Int64 49 | - github.com/99designs/gqlgen/graphql.Int 50 | - github.com/99designs/gqlgen/graphql.Int32 51 | Int: 52 | model: 53 | - github.com/99designs/gqlgen/graphql.Int64 54 | - github.com/99designs/gqlgen/graphql.Int 55 | - github.com/99designs/gqlgen/graphql.Int32 56 | -------------------------------------------------------------------------------- /src/mapper/pkg/awsintentsholder/holder.go: -------------------------------------------------------------------------------- 1 | package awsintentsholder 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 6 | "github.com/samber/lo" 7 | "github.com/sirupsen/logrus" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | type AWSIntent struct { 13 | Client model.OtterizeServiceIdentity `json:"client"` 14 | Actions []string 15 | ARN string 16 | IamRole string 17 | } 18 | 19 | type AWSIntentKey struct { 20 | ClientName string 21 | ClientNamespace string 22 | ARN string 23 | } 24 | 25 | type TimestampedAWSIntent struct { 26 | Timestamp time.Time 27 | AWSIntent 28 | } 29 | 30 | type AWSIntentsHolder struct { 31 | intents map[AWSIntentKey]TimestampedAWSIntent 32 | lock sync.Mutex 33 | callbacks []AWSIntentCallbackFunc 34 | } 35 | 36 | type AWSIntentCallbackFunc func(context.Context, []AWSIntent) 37 | 38 | func New() *AWSIntentsHolder { 39 | notifier := &AWSIntentsHolder{ 40 | intents: make(map[AWSIntentKey]TimestampedAWSIntent), 41 | } 42 | 43 | return notifier 44 | } 45 | 46 | func (h *AWSIntentsHolder) RegisterNotifyIntents(callback AWSIntentCallbackFunc) { 47 | h.callbacks = append(h.callbacks, callback) 48 | } 49 | 50 | func (h *AWSIntentsHolder) AddIntent(intent AWSIntent) { 51 | h.lock.Lock() 52 | defer h.lock.Unlock() 53 | 54 | logrus.Debugf("Adding intent: %+v", intent) 55 | 56 | key := AWSIntentKey{ 57 | ClientName: 
intent.Client.Name, 58 | ClientNamespace: intent.Client.Namespace, 59 | ARN: intent.ARN, 60 | } 61 | 62 | _, found := h.intents[key] 63 | now := time.Now() 64 | 65 | if !found { 66 | h.intents[key] = TimestampedAWSIntent{ 67 | Timestamp: now, 68 | AWSIntent: intent, 69 | } 70 | } 71 | 72 | mergedIntent := h.intents[key] 73 | mergedIntent.Timestamp = now 74 | mergedIntent.Actions = lo.Union(mergedIntent.Actions, intent.Actions) 75 | h.intents[key] = mergedIntent 76 | } 77 | 78 | func (h *AWSIntentsHolder) PeriodicIntentsUpload(ctx context.Context, interval time.Duration) { 79 | for { 80 | select { 81 | case <-ctx.Done(): 82 | return 83 | case <-time.After(interval): 84 | if len(h.callbacks) == 0 { 85 | continue 86 | } 87 | 88 | intents := h.GetNewIntentsSinceLastGet() 89 | if len(intents) == 0 { 90 | continue 91 | } 92 | 93 | for _, callback := range h.callbacks { 94 | callback(ctx, intents) 95 | } 96 | } 97 | } 98 | } 99 | 100 | func (h *AWSIntentsHolder) GetNewIntentsSinceLastGet() []AWSIntent { 101 | h.lock.Lock() 102 | defer h.lock.Unlock() 103 | 104 | intents := make([]AWSIntent, 0, len(h.intents)) 105 | 106 | for _, intent := range h.intents { 107 | intents = append(intents, intent.AWSIntent) 108 | } 109 | 110 | h.intents = make(map[AWSIntentKey]TimestampedAWSIntent) 111 | 112 | return intents 113 | } 114 | -------------------------------------------------------------------------------- /src/mapper/pkg/azureintentsholder/holder.go: -------------------------------------------------------------------------------- 1 | package azureintentsholder 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 6 | "github.com/samber/lo" 7 | "k8s.io/apimachinery/pkg/types" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | type key struct { 13 | client types.NamespacedName 14 | scope string 15 | } 16 | 17 | type AzureIntentsHolder struct { 18 | intents map[key]model.AzureOperation 19 | lock sync.Mutex 20 | callbacks []Callback 21 | } 22 | 23 | 
type Callback func(context.Context, []model.AzureOperation) 24 | 25 | func New() *AzureIntentsHolder { 26 | return &AzureIntentsHolder{ 27 | intents: make(map[key]model.AzureOperation), 28 | } 29 | } 30 | 31 | func (h *AzureIntentsHolder) RegisterNotifyIntents(callback Callback) { 32 | h.callbacks = append(h.callbacks, callback) 33 | } 34 | 35 | func (h *AzureIntentsHolder) AddOperation(serviceId model.OtterizeServiceIdentity, op model.AzureOperation) { 36 | h.lock.Lock() 37 | defer h.lock.Unlock() 38 | 39 | k := key{ 40 | client: types.NamespacedName{ 41 | Namespace: serviceId.Namespace, 42 | Name: serviceId.Name, 43 | }, 44 | scope: op.Scope, 45 | } 46 | 47 | _, found := h.intents[k] 48 | 49 | if !found { 50 | h.intents[k] = model.AzureOperation{ 51 | Scope: op.Scope, 52 | Actions: op.Actions, 53 | DataActions: op.DataActions, 54 | ClientName: serviceId.Name, 55 | ClientNamespace: serviceId.Namespace, 56 | } 57 | } else { 58 | h.intents[k] = model.AzureOperation{ 59 | Scope: op.Scope, 60 | Actions: lo.Union(h.intents[k].Actions, op.Actions), 61 | DataActions: lo.Union(h.intents[k].DataActions, op.DataActions), 62 | ClientName: h.intents[k].ClientName, 63 | ClientNamespace: h.intents[k].ClientNamespace, 64 | } 65 | } 66 | } 67 | 68 | func (h *AzureIntentsHolder) PeriodicIntentsUpload(ctx context.Context, interval time.Duration) { 69 | for { 70 | select { 71 | case <-ctx.Done(): 72 | return 73 | case <-time.After(interval): 74 | if len(h.callbacks) == 0 { 75 | continue 76 | } 77 | 78 | intents := h.getOperations() 79 | 80 | if len(intents) == 0 { 81 | continue 82 | } 83 | 84 | for _, callback := range h.callbacks { 85 | callback(ctx, intents) 86 | } 87 | } 88 | } 89 | } 90 | 91 | func (h *AzureIntentsHolder) getOperations() []model.AzureOperation { 92 | h.lock.Lock() 93 | defer h.lock.Unlock() 94 | 95 | intents := lo.Values(h.intents) 96 | h.intents = make(map[key]model.AzureOperation) 97 | 98 | return intents 99 | } 100 | 
-------------------------------------------------------------------------------- /src/mapper/pkg/cloudclient/generate.go: -------------------------------------------------------------------------------- 1 | package cloudclient 2 | 3 | import _ "github.com/suessflorian/gqlfetch" 4 | 5 | // The check for $CI makes sure we don't redownload the schema in CI. 6 | //go:generate sh -c "if [ -z $CI ]; then go run github.com/suessflorian/gqlfetch/gqlfetch --endpoint https://app.staging.otterize.com/api/graphql/v1beta > schema.graphql; fi" 7 | //go:generate go run github.com/Khan/genqlient ./genqlient.yaml 8 | //go:generate go run go.uber.org/mock/mockgen@v0.2.0 -destination=./mocks/mocks.go -package=cloudclientmocks -source=./cloud_client.go CloudClient 9 | -------------------------------------------------------------------------------- /src/mapper/pkg/cloudclient/genqlient.graphql: -------------------------------------------------------------------------------- 1 | # @genqlient(pointer: true) 2 | mutation ReportDiscoveredIntents($intents: [DiscoveredIntentInput!]!) { 3 | reportDiscoveredIntents(intents: $intents) 4 | } 5 | 6 | mutation ReportExternalTrafficDiscoveredIntents($intents: [ExternalTrafficDiscoveredIntentInput!]!) { 7 | reportExternalTrafficDiscoveredIntents(intents: $intents) 8 | } 9 | 10 | mutation ReportComponentStatus($component: ComponentType!) { 11 | reportIntegrationComponentStatus(component: $component) 12 | } 13 | 14 | mutation ReportIncomingTrafficDiscoveredIntents($intents: [IncomingTrafficDiscoveredIntentInput!]!) { 15 | reportIncomingTrafficDiscoveredIntents(intents: $intents) 16 | } 17 | 18 | mutation ReportK8sServices($namespace: String!, $services: [K8sServiceInput!]!) { 19 | reportK8sServices(namespace: $namespace, services: $services) 20 | } 21 | 22 | mutation ReportK8sIngresses($namespace: String!, $ingresses: [K8sIngressInput!]!) 
{ 23 | reportK8sIngresses(namespace: $namespace, ingresses: $ingresses) 24 | } 25 | 26 | mutation ReportK8sResourceEligibleForMetricsCollection($namespace: String!, $reason: EligibleForMetricsCollectionReason!, $resources: [K8sResourceEligibleForMetricsCollectionInput!]!) { 27 | reportK8sResourceEligibleForMetricsCollection(namespace: $namespace, reason: $reason, resources: $resources) 28 | } 29 | 30 | mutation ReportTrafficLevels( 31 | $trafficLevels: [TrafficLevelInput!]! 32 | ) { 33 | reportTrafficLevels(trafficLevels: $trafficLevels) 34 | } 35 | 36 | mutation ReportNamespaceLabels($name: String!, $labels: [LabelInput!]!) { 37 | reportNamespaceLabels(name: $name, labels: $labels) 38 | } 39 | 40 | mutation ReportWorkloadsMetadata($workloadsLabels: [ReportServiceMetadataInput!]!) { 41 | reportServicesMetadata(servicesMeta: $workloadsLabels) 42 | } 43 | 44 | mutation ReportNetworkPolicies($namespace: String!, $networkPolicies: [NetworkPolicyInput!]!) { 45 | reportNetworkPolicies(namespace: $namespace, networkPolicies: $networkPolicies) 46 | } 47 | 48 | mutation ReportCiliumClusterWideNetworkPolicies($networkPolicies: [NetworkPolicyInput!]!) 
{ 49 | reportNetworkPolicies(networkPolicies: $networkPolicies) 50 | } -------------------------------------------------------------------------------- /src/mapper/pkg/cloudclient/genqlient.yaml: -------------------------------------------------------------------------------- 1 | # genqlient config; for full documentation see: 2 | # https://github.com/Khan/genqlient/blob/main/docs/genqlient.yaml 3 | schema: 4 | - ./schema.graphql 5 | operations: 6 | - genqlient.graphql 7 | generated: ./generated.go 8 | bindings: 9 | Time: 10 | type: time.Time 11 | optional: generic 12 | optional_generic_type: github.com/otterize/nilable.Nilable -------------------------------------------------------------------------------- /src/mapper/pkg/cloudclient/graphql.config.yml: -------------------------------------------------------------------------------- 1 | schema: 2 | - schema.graphql 3 | -------------------------------------------------------------------------------- /src/mapper/pkg/cloudclient/mocks/dummy.go: -------------------------------------------------------------------------------- 1 | package cloudclientmocks 2 | -------------------------------------------------------------------------------- /src/mapper/pkg/clouduploader/cloud_config.go: -------------------------------------------------------------------------------- 1 | package clouduploader 2 | 3 | import ( 4 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 5 | "github.com/spf13/viper" 6 | "time" 7 | ) 8 | 9 | type Config struct { 10 | UploadInterval time.Duration 11 | UploadBatchSize int 12 | } 13 | 14 | func ConfigFromViper() Config { 15 | return Config{ 16 | UploadInterval: time.Duration(viper.GetInt(config.UploadIntervalSecondsKey)) * time.Second, 17 | UploadBatchSize: viper.GetInt(config.UploadBatchSizeKey), 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/mapper/pkg/clouduploader/intents_input_matcher.go: 
-------------------------------------------------------------------------------- 1 | package clouduploader 2 | 3 | import ( 4 | "fmt" 5 | "github.com/google/go-cmp/cmp" 6 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 7 | "golang.org/x/exp/constraints" 8 | "sort" 9 | ) 10 | 11 | // IntentsMatcher Implement gomock.Matcher interface for []cloudclient.IntentInput 12 | type IntentsMatcher struct { 13 | expected []cloudclient.IntentInput 14 | } 15 | 16 | func NilCompare[T constraints.Ordered](a *T, b *T) int { 17 | if a == nil && b == nil { 18 | return 0 19 | } 20 | if a == nil { 21 | return -1 22 | } 23 | if b == nil { 24 | return 1 25 | } 26 | if *a > *b { 27 | return 1 28 | } 29 | if *a < *b { 30 | return -1 31 | } 32 | return 0 33 | } 34 | 35 | func sortIntentInput(intents []cloudclient.IntentInput) { 36 | for _, intent := range intents { 37 | if intent.Type == nil { 38 | continue 39 | } 40 | 41 | switch *intent.Type { 42 | case cloudclient.IntentTypeKafka: 43 | for _, topic := range intent.Topics { 44 | sort.Slice(topic.Operations, func(i, j int) bool { 45 | return NilCompare(topic.Operations[i], topic.Operations[j]) < 0 46 | }) 47 | } 48 | sort.Slice(intent.Topics, func(i, j int) bool { 49 | res := NilCompare(intent.Topics[i].Name, intent.Topics[j].Name) 50 | if res != 0 { 51 | return res < 0 52 | } 53 | 54 | return len(intent.Topics[i].Operations) < len(intent.Topics[j].Operations) 55 | }) 56 | case cloudclient.IntentTypeHttp: 57 | for _, resource := range intent.Resources { 58 | sort.Slice(resource.Methods, func(i, j int) bool { 59 | return NilCompare(resource.Methods[i], resource.Methods[j]) < 0 60 | }) 61 | } 62 | sort.Slice(intent.Resources, func(i, j int) bool { 63 | res := NilCompare(intent.Resources[i].Path, intent.Resources[j].Path) 64 | if res != 0 { 65 | return res < 0 66 | } 67 | 68 | return len(intent.Resources[i].Methods) < len(intent.Resources[j].Methods) 69 | }) 70 | } 71 | } 72 | sort.Slice(intents, func(i, j int) bool { 73 | 
res := NilCompare(intents[i].Namespace, intents[j].Namespace) 74 | if res != 0 { 75 | return res < 0 76 | } 77 | res = NilCompare(intents[i].ClientName, intents[j].ClientName) 78 | if res != 0 { 79 | return res < 0 80 | } 81 | res = NilCompare(intents[i].ServerName, intents[j].ServerName) 82 | if res != 0 { 83 | return res < 0 84 | } 85 | res = NilCompare(intents[i].ServerNamespace, intents[j].ServerNamespace) 86 | if res != 0 { 87 | return res < 0 88 | } 89 | res = NilCompare(intents[i].Type, intents[j].Type) 90 | if res != 0 { 91 | return res < 0 92 | } 93 | switch *intents[i].Type { 94 | case cloudclient.IntentTypeKafka: 95 | return len(intents[i].Topics) < len(intents[j].Topics) 96 | case cloudclient.IntentTypeHttp: 97 | return len(intents[i].Resources) < len(intents[j].Resources) 98 | default: 99 | panic("Unimplemented intent type") 100 | } 101 | }) 102 | } 103 | 104 | func (m IntentsMatcher) Matches(x interface{}) bool { 105 | if x == nil { 106 | return false 107 | } 108 | actualDiscoveredIntents, ok := x.([]*cloudclient.DiscoveredIntentInput) 109 | if !ok { 110 | return false 111 | } 112 | expectedIntents := m.expected 113 | actualIntents := discoveredIntentsPtrToIntents(actualDiscoveredIntents) 114 | 115 | if len(actualIntents) != len(expectedIntents) { 116 | return false 117 | } 118 | 119 | sortIntentInput(actualIntents) 120 | sortIntentInput(expectedIntents) 121 | 122 | diff := cmp.Diff(expectedIntents, actualIntents) 123 | if diff != "" { 124 | fmt.Println(diff) 125 | } 126 | return cmp.Equal(expectedIntents, actualIntents) 127 | } 128 | 129 | func discoveredIntentsPtrToIntents(actualDiscoveredIntents []*cloudclient.DiscoveredIntentInput) []cloudclient.IntentInput { 130 | actualIntents := make([]cloudclient.IntentInput, 0) 131 | for _, intent := range actualDiscoveredIntents { 132 | intentObject := *intent.Intent 133 | actualIntents = append(actualIntents, intentObject) 134 | } 135 | return actualIntents 136 | } 137 | 138 | func (m IntentsMatcher) 
String() string { 139 | return prettyPrint(m) 140 | } 141 | 142 | func prettyPrint(m IntentsMatcher) string { 143 | expected := m.expected 144 | var result string 145 | itemFormat := "IntentInput{ClientName: %s, ServerName: %s, Namespace: %s, ServerNamespace: %s}," 146 | for _, intent := range expected { 147 | var clientName, namespace, serverName, serverNamespace string 148 | if intent.ClientName != nil { 149 | clientName = *intent.ClientName 150 | } 151 | if intent.Namespace != nil { 152 | namespace = *intent.Namespace 153 | } 154 | if intent.ServerName != nil { 155 | serverName = *intent.ServerName 156 | } 157 | if intent.ServerNamespace != nil { 158 | serverNamespace = *intent.ServerNamespace 159 | } 160 | result += fmt.Sprintf(itemFormat, clientName, serverName, namespace, serverNamespace) 161 | } 162 | 163 | return result 164 | } 165 | 166 | func (m IntentsMatcher) Got(got interface{}) string { 167 | actual, ok := got.([]*cloudclient.DiscoveredIntentInput) 168 | if !ok { 169 | return fmt.Sprintf("Not an []*cloudclient.DiscoveredIntentInput, Got: %v", got) 170 | } 171 | 172 | return prettyPrint(IntentsMatcher{discoveredIntentsPtrToIntents(actual)}) 173 | } 174 | 175 | func GetMatcher(expected []cloudclient.IntentInput) IntentsMatcher { 176 | return IntentsMatcher{expected} 177 | } 178 | -------------------------------------------------------------------------------- /src/mapper/pkg/clouduploader/models_to_api.go: -------------------------------------------------------------------------------- 1 | package clouduploader 2 | 3 | import ( 4 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 6 | "github.com/samber/lo" 7 | ) 8 | 9 | var modelMethodToAPIMethodMap = map[model.HTTPMethod]cloudclient.HTTPMethod{ 10 | model.HTTPMethodGet: cloudclient.HTTPMethodGet, 11 | model.HTTPMethodPost: cloudclient.HTTPMethodPost, 12 | model.HTTPMethodPut: cloudclient.HTTPMethodPut, 13 | 
model.HTTPMethodPatch: cloudclient.HTTPMethodPatch, 14 | model.HTTPMethodDelete: cloudclient.HTTPMethodDelete, 15 | model.HTTPMethodConnect: cloudclient.HTTPMethodConnect, 16 | model.HTTPMethodOptions: cloudclient.HTTPMethodOptions, 17 | model.HTTPMethodTrace: cloudclient.HTTPMethodTrace, 18 | model.HTTPMethodAll: cloudclient.HTTPMethodAll, 19 | } 20 | 21 | func modelKafkaOpToAPI(op model.KafkaOperation) cloudclient.KafkaOperation { 22 | return cloudclient.KafkaOperation(op) 23 | } 24 | 25 | func modelKafkaConfToAPI(kc model.KafkaConfig) cloudclient.KafkaConfigInput { 26 | return cloudclient.KafkaConfigInput{ 27 | Name: lo.ToPtr(kc.Name), 28 | Operations: lo.Map(kc.Operations, func(op model.KafkaOperation, _ int) *cloudclient.KafkaOperation { 29 | return lo.ToPtr(modelKafkaOpToAPI(op)) 30 | }), 31 | } 32 | } 33 | 34 | func modelIntentTypeToAPI(it *model.IntentType) *cloudclient.IntentType { 35 | if it == nil { 36 | return nil 37 | } 38 | return lo.ToPtr(cloudclient.IntentType(lo.FromPtr(it))) 39 | } 40 | 41 | func modelHTTPMethodToAPI(method model.HTTPMethod) cloudclient.HTTPMethod { 42 | return modelMethodToAPIMethodMap[method] 43 | } 44 | -------------------------------------------------------------------------------- /src/mapper/pkg/collectors/traffic/collector.go: -------------------------------------------------------------------------------- 1 | package traffic 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/serviceidresolver/serviceidentity" 6 | "time" 7 | ) 8 | 9 | type TrafficLevelKey struct { 10 | SourceName string 11 | SourceNamespace string 12 | DestinationName string 13 | DestinationNamespace string 14 | } 15 | 16 | type TrafficLevelData struct { 17 | Bytes int 18 | Flows int 19 | at time.Time 20 | } 21 | 22 | type TrafficLevelCounter map[TrafficLevelKey][]TrafficLevelData 23 | type TrafficLevelMap map[TrafficLevelKey]TrafficLevelData 24 | type TrafficLevelCallbackFunc func(context.Context, TrafficLevelMap) 25 | 26 | 
type Collector struct { 27 | trafficLevels TrafficLevelCounter 28 | callbacks []TrafficLevelCallbackFunc 29 | } 30 | 31 | func NewCollector() *Collector { 32 | return &Collector{ 33 | trafficLevels: make(TrafficLevelCounter), 34 | } 35 | } 36 | 37 | func (c *Collector) Add(source, destination serviceidentity.ServiceIdentity, bytes, flows int) { 38 | trafficKey := TrafficLevelKey{ 39 | SourceName: source.Name, 40 | SourceNamespace: source.Namespace, 41 | DestinationName: destination.Name, 42 | DestinationNamespace: destination.Namespace, 43 | } 44 | 45 | c.trafficLevels[trafficKey] = append(c.trafficLevels[trafficKey], TrafficLevelData{ 46 | Bytes: bytes, 47 | Flows: flows, 48 | at: time.Now(), 49 | }) 50 | } 51 | 52 | func (c *Collector) RegisterNotifyTraffic(callback TrafficLevelCallbackFunc) { 53 | c.callbacks = append(c.callbacks, callback) 54 | } 55 | 56 | func (c *Collector) PeriodicUpload(ctx context.Context, interval time.Duration) { 57 | for { 58 | select { 59 | case <-ctx.Done(): 60 | return 61 | case <-time.After(interval): 62 | for _, callback := range c.callbacks { 63 | callback(ctx, c.getTrafficMap()) 64 | } 65 | } 66 | } 67 | } 68 | 69 | func (c *Collector) getTrafficMap() TrafficLevelMap { 70 | trafficLevelMap := make(TrafficLevelMap) 71 | 72 | for k, v := range c.trafficLevels { 73 | var sumBytes, sumFlows int 74 | var count int 75 | 76 | for _, data := range v { 77 | if time.Since(data.at) < time.Hour { 78 | // count only data within the last hour 79 | sumBytes += data.Bytes 80 | sumFlows += data.Flows 81 | count++ 82 | } else { 83 | // drop data older than an hour 84 | c.trafficLevels[k] = c.trafficLevels[k][1:] 85 | } 86 | } 87 | 88 | if count > 0 { 89 | trafficLevelMap[k] = TrafficLevelData{ 90 | Bytes: sumBytes / count, 91 | Flows: sumFlows / count, 92 | } 93 | } 94 | } 95 | 96 | return trafficLevelMap 97 | } 98 | -------------------------------------------------------------------------------- 
/src/mapper/pkg/concurrentconnectioncounter/concurrent_connection_counter.go: -------------------------------------------------------------------------------- 1 | package concurrentconnectioncounter 2 | 3 | import ( 4 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 5 | "github.com/samber/lo" 6 | "sync" 7 | ) 8 | 9 | type SourcePortsSet map[int64]struct{} 10 | 11 | type CountMethod int 12 | 13 | const ( 14 | CountMethodUnset CountMethod = 0 15 | CountMethodDNS CountMethod = 1 16 | CountMethodSourcePort CountMethod = 2 17 | ) 18 | 19 | type CounterInput[T CountableIntent] struct { 20 | Intent T 21 | SourcePorts []int64 22 | } 23 | 24 | type ConnectionCounter[T CountableIntent] struct { 25 | SourcePorts SourcePortsSet 26 | DNSCounter int 27 | countMethod CountMethod 28 | lock sync.Mutex 29 | } 30 | 31 | func NewConnectionCounter[T CountableIntent]() *ConnectionCounter[T] { 32 | return &ConnectionCounter[T]{ 33 | SourcePorts: make(SourcePortsSet), 34 | DNSCounter: 0, 35 | countMethod: CountMethodUnset, 36 | lock: sync.Mutex{}, 37 | } 38 | } 39 | 40 | func (c *ConnectionCounter[T]) AddConnection(input CounterInput[T]) { 41 | c.lock.Lock() 42 | defer c.lock.Unlock() 43 | 44 | if input.Intent.ShouldCountUsingSrcPortMethod() { 45 | // TCP source port connections wins over DNS (in terms of connections count) 46 | c.countMethod = CountMethodSourcePort 47 | lo.ForEach(input.SourcePorts, func(port int64, _ int) { 48 | c.SourcePorts[port] = struct{}{} 49 | }) 50 | return 51 | } 52 | 53 | if (c.countMethod == CountMethodUnset || c.countMethod == CountMethodDNS) && input.Intent.ShouldCountUsingDNSMethod() { 54 | c.countMethod = CountMethodDNS 55 | c.DNSCounter++ 56 | return 57 | } 58 | 59 | // otherwise we do not count this intent. 
Either because it is a DNS intent and we are already counting by source ports, 60 | // or because it is an unknown intent type 61 | } 62 | 63 | func (c *ConnectionCounter[T]) GetConnectionCount() (int, bool) { 64 | c.lock.Lock() 65 | defer c.lock.Unlock() 66 | 67 | return c.getConnectionCountUnsafe() 68 | } 69 | 70 | func (c *ConnectionCounter[T]) getConnectionCountUnsafe() (int, bool) { 71 | if c.countMethod == CountMethodSourcePort { 72 | return len(c.SourcePorts), true 73 | } 74 | 75 | if c.countMethod == CountMethodDNS { 76 | return c.DNSCounter, true 77 | } 78 | 79 | return 0, false 80 | } 81 | 82 | func (c *ConnectionCounter[T]) GetConnectionCountDiff(other *ConnectionCounter[T]) (cloudclient.ConnectionsCount, bool) { 83 | c.lock.Lock() 84 | defer c.lock.Unlock() 85 | 86 | if c.countMethod == CountMethodUnset || other.countMethod == CountMethodUnset { 87 | return cloudclient.ConnectionsCount{}, false 88 | } 89 | 90 | // Note that we call the usafe version since we already locked the lock ar the function entrance, wnad mutex lock are 91 | // not reentrant in GO. 
92 | currentCount, _ := c.getConnectionCountUnsafe() 93 | otherCount, _ := other.getConnectionCountUnsafe() 94 | 95 | if c.countMethod != other.countMethod { 96 | return cloudclient.ConnectionsCount{ 97 | Current: lo.ToPtr(currentCount), 98 | Added: lo.ToPtr(currentCount), 99 | Removed: lo.ToPtr(otherCount), 100 | }, true 101 | } 102 | 103 | // For here onwards we can assume that both counters use the same counting method 104 | if c.countMethod == CountMethodDNS { 105 | return cloudclient.ConnectionsCount{ 106 | Current: lo.ToPtr(currentCount), 107 | Added: lo.ToPtr(currentCount), 108 | Removed: lo.ToPtr(otherCount), 109 | }, true 110 | } 111 | 112 | var missingFromSelfCount, missingFromOtherCount int 113 | 114 | for key := range c.SourcePorts { 115 | if _, ok := other.SourcePorts[key]; !ok { 116 | missingFromOtherCount += 1 117 | } 118 | } 119 | 120 | for key := range other.SourcePorts { 121 | if _, ok := c.SourcePorts[key]; !ok { 122 | missingFromSelfCount += 1 123 | } 124 | } 125 | 126 | return cloudclient.ConnectionsCount{ 127 | Current: lo.ToPtr(len(c.SourcePorts)), 128 | Added: lo.ToPtr(missingFromOtherCount), 129 | Removed: lo.ToPtr(missingFromSelfCount), 130 | }, true 131 | 132 | } 133 | -------------------------------------------------------------------------------- /src/mapper/pkg/concurrentconnectioncounter/connection_count_differ.go: -------------------------------------------------------------------------------- 1 | package concurrentconnectioncounter 2 | 3 | import ( 4 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 5 | "github.com/samber/lo" 6 | ) 7 | 8 | type ConnectionCounterMap[K comparable, Countable CountableIntent] map[K]*ConnectionCounter[Countable] 9 | 10 | type ConnectionCountDiffer[K comparable, Countable CountableIntent] struct { 11 | currentCounters ConnectionCounterMap[K, Countable] 12 | previousCounters ConnectionCounterMap[K, Countable] 13 | } 14 | 15 | func NewConnectionCountDiffer[K comparable, Countable 
CountableIntent]() *ConnectionCountDiffer[K, Countable] { 16 | return &ConnectionCountDiffer[K, Countable]{ 17 | currentCounters: make(ConnectionCounterMap[K, Countable]), 18 | previousCounters: make(ConnectionCounterMap[K, Countable]), 19 | } 20 | } 21 | 22 | func (c *ConnectionCountDiffer[K, Countable]) Reset() { 23 | c.previousCounters = c.currentCounters 24 | c.currentCounters = make(ConnectionCounterMap[K, Countable]) 25 | } 26 | 27 | func (c *ConnectionCountDiffer[K, Countable]) Increment(key K, counterInput CounterInput[Countable]) { 28 | 29 | _, existingCounterFound := c.currentCounters[key] 30 | if !existingCounterFound { 31 | c.currentCounters[key] = NewConnectionCounter[Countable]() 32 | } 33 | 34 | c.currentCounters[key].AddConnection(counterInput) 35 | } 36 | 37 | func (c *ConnectionCountDiffer[K, Countable]) GetDiff(key K) (cloudclient.ConnectionsCount, bool) { 38 | currentCounter, hasCurrentValue := c.currentCounters[key] 39 | prevCounter, hasPrevValue := c.previousCounters[key] 40 | 41 | if hasCurrentValue && !hasPrevValue { 42 | connectionsCount, isValid := currentCounter.GetConnectionCount() 43 | if isValid { 44 | return cloudclient.ConnectionsCount{ 45 | Current: lo.ToPtr(connectionsCount), 46 | Added: lo.ToPtr(connectionsCount), 47 | Removed: lo.ToPtr(0), 48 | }, true 49 | } 50 | return cloudclient.ConnectionsCount{}, false 51 | } 52 | 53 | if !hasCurrentValue && hasPrevValue { 54 | connectionsCount, isValid := prevCounter.GetConnectionCount() 55 | if isValid { 56 | return cloudclient.ConnectionsCount{ 57 | Current: lo.ToPtr(0), 58 | Added: lo.ToPtr(0), 59 | Removed: lo.ToPtr(connectionsCount), 60 | }, true 61 | } 62 | return cloudclient.ConnectionsCount{}, false 63 | } 64 | 65 | if hasCurrentValue && hasPrevValue { 66 | connectionDiff, valid := currentCounter.GetConnectionCountDiff(prevCounter) 67 | if valid { 68 | return connectionDiff, true 69 | } 70 | } 71 | 72 | return cloudclient.ConnectionsCount{}, false 73 | } 74 | 
-------------------------------------------------------------------------------- /src/mapper/pkg/concurrentconnectioncounter/connection_count_differ_test.go: -------------------------------------------------------------------------------- 1 | package concurrentconnectioncounter 2 | 3 | import ( 4 | "github.com/stretchr/testify/suite" 5 | "testing" 6 | ) 7 | 8 | type ConnectionCountDifferSuite struct { 9 | suite.Suite 10 | } 11 | 12 | type CountableIntentTCPDummy struct{} 13 | type CountableIntentDNSDummy struct{} 14 | 15 | func NewCountableIntentDummy() *CountableIntentTCPDummy { 16 | return &CountableIntentTCPDummy{} 17 | } 18 | 19 | func (c *CountableIntentTCPDummy) ShouldCountUsingSrcPortMethod() bool { 20 | return true 21 | } 22 | 23 | func (c *CountableIntentTCPDummy) ShouldCountUsingDNSMethod() bool { 24 | return false 25 | } 26 | 27 | func NewCountableIntentDNSDummy() *CountableIntentDNSDummy { 28 | return &CountableIntentDNSDummy{} 29 | } 30 | 31 | func (c *CountableIntentDNSDummy) ShouldCountUsingSrcPortMethod() bool { 32 | return false 33 | } 34 | 35 | func (c *CountableIntentDNSDummy) ShouldCountUsingDNSMethod() bool { 36 | return true 37 | } 38 | 39 | func (s *ConnectionCountDifferSuite) TestTCPDiff_TestNoPrevValue() { 40 | differ := NewConnectionCountDiffer[string, *CountableIntentTCPDummy]() 41 | 42 | // Add a connections 43 | differ.Increment("key1", CounterInput[*CountableIntentTCPDummy]{ 44 | Intent: NewCountableIntentDummy(), 45 | SourcePorts: []int64{1, 2}, 46 | }) 47 | differ.Increment("key1", CounterInput[*CountableIntentTCPDummy]{ 48 | Intent: NewCountableIntentDummy(), 49 | SourcePorts: []int64{2, 3}, 50 | }) 51 | 52 | // Get the diff 53 | diff, ok := differ.GetDiff("key1") 54 | 55 | s.Require().True(ok) 56 | s.Require().Equal(3, *diff.Current) 57 | s.Require().Equal(3, *diff.Added) 58 | s.Require().Equal(0, *diff.Removed) 59 | } 60 | 61 | func (s *ConnectionCountDifferSuite) TestTCPDiff_TestPrevConnectionsAreTheSameAsCurrent() { 62 | differ 
:= NewConnectionCountDiffer[string, *CountableIntentTCPDummy]() 63 | 64 | // Add a connections 65 | differ.Increment("key1", CounterInput[*CountableIntentTCPDummy]{ 66 | Intent: NewCountableIntentDummy(), 67 | SourcePorts: []int64{1, 2, 3}, 68 | }) 69 | 70 | differ.Reset() 71 | 72 | // Add same connections 73 | differ.Increment("key1", CounterInput[*CountableIntentTCPDummy]{ 74 | Intent: NewCountableIntentDummy(), 75 | SourcePorts: []int64{1, 2, 3}, 76 | }) 77 | 78 | // Get the diff 79 | diff, ok := differ.GetDiff("key1") 80 | 81 | s.Require().True(ok) 82 | s.Require().Equal(3, *diff.Current) 83 | s.Require().Equal(0, *diff.Added) 84 | s.Require().Equal(0, *diff.Removed) 85 | } 86 | 87 | func (s *ConnectionCountDifferSuite) TestDNSDiff_TestNoPrevValue() { 88 | differ := NewConnectionCountDiffer[string, *CountableIntentDNSDummy]() 89 | 90 | // Add a connections 91 | differ.Increment("key1", CounterInput[*CountableIntentDNSDummy]{ 92 | Intent: NewCountableIntentDNSDummy(), 93 | SourcePorts: []int64{1, 2}, 94 | }) 95 | differ.Increment("key1", CounterInput[*CountableIntentDNSDummy]{ 96 | Intent: NewCountableIntentDNSDummy(), 97 | SourcePorts: []int64{2, 3}, 98 | }) 99 | 100 | // Get the diff 101 | diff, ok := differ.GetDiff("key1") 102 | 103 | s.Require().True(ok) 104 | s.Require().Equal(2, *diff.Current) 105 | s.Require().Equal(2, *diff.Added) 106 | s.Require().Equal(0, *diff.Removed) 107 | } 108 | 109 | func (s *ConnectionCountDifferSuite) TestDNSDiff_TestWithPrevValue() { 110 | differ := NewConnectionCountDiffer[string, *CountableIntentDNSDummy]() 111 | 112 | // Add a connections 113 | differ.Increment("key1", CounterInput[*CountableIntentDNSDummy]{ 114 | Intent: NewCountableIntentDNSDummy(), 115 | SourcePorts: []int64{1, 2}, 116 | }) 117 | differ.Increment("key1", CounterInput[*CountableIntentDNSDummy]{ 118 | Intent: NewCountableIntentDNSDummy(), 119 | SourcePorts: []int64{2, 3}, 120 | }) 121 | 122 | differ.Reset() 123 | 124 | differ.Increment("key1", 
CounterInput[*CountableIntentDNSDummy]{ 125 | Intent: NewCountableIntentDNSDummy(), 126 | SourcePorts: make([]int64, 0), 127 | }) 128 | 129 | // Get the diff 130 | diff, ok := differ.GetDiff("key1") 131 | 132 | s.Require().True(ok) 133 | s.Require().Equal(1, *diff.Current) 134 | s.Require().Equal(1, *diff.Added) 135 | s.Require().Equal(2, *diff.Removed) 136 | } 137 | 138 | func TestConnectionCountDifferSuite(t *testing.T) { 139 | suite.Run(t, new(ConnectionCountDifferSuite)) 140 | } 141 | -------------------------------------------------------------------------------- /src/mapper/pkg/concurrentconnectioncounter/consts.go: -------------------------------------------------------------------------------- 1 | package concurrentconnectioncounter 2 | 3 | const ( 4 | SocketScanServiceIntentResolution string = "addSocketScanServiceIntent" 5 | SocketScanPodIntentResolution string = "addSocketScanPodIntent" 6 | TCPTrafficIntentResolution string = "handleInternalTrafficTCPResult" 7 | DNSTrafficIntentResolution string = "handleDNSCaptureResultsAsKubernetesPods" 8 | KafkaResultIntentResolution string = "handleReportKafkaMapperResults" 9 | IstioResultIntentResolution string = "handleReportIstioConnectionResults" 10 | ) 11 | -------------------------------------------------------------------------------- /src/mapper/pkg/concurrentconnectioncounter/countable_intent.go: -------------------------------------------------------------------------------- 1 | package concurrentconnectioncounter 2 | 3 | import ( 4 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 5 | ) 6 | 7 | type CountableIntent interface { 8 | ShouldCountUsingSrcPortMethod() bool 9 | ShouldCountUsingDNSMethod() bool 10 | } 11 | 12 | type CountableIntentIntent struct { 13 | intent model.Intent 14 | } 15 | 16 | func NewCountableIntentIntent(intent model.Intent) *CountableIntentIntent { 17 | return &CountableIntentIntent{ 18 | intent: intent, 19 | } 20 | } 21 | 22 | func (c *CountableIntentIntent) 
ShouldCountUsingSrcPortMethod() bool { 23 | return c.intent.ResolutionData != nil && 24 | (*(c.intent.ResolutionData) == SocketScanServiceIntentResolution || 25 | *c.intent.ResolutionData == SocketScanPodIntentResolution || 26 | *c.intent.ResolutionData == TCPTrafficIntentResolution) 27 | } 28 | 29 | func (c *CountableIntentIntent) ShouldCountUsingDNSMethod() bool { 30 | return c.intent.ResolutionData != nil && *(c.intent.ResolutionData) == DNSTrafficIntentResolution 31 | } 32 | 33 | type CountableIntentExternalTrafficIntent struct { 34 | } 35 | 36 | func NewCountableIntentExternalTrafficIntent() *CountableIntentExternalTrafficIntent { 37 | return &CountableIntentExternalTrafficIntent{} 38 | } 39 | 40 | func (c *CountableIntentExternalTrafficIntent) ShouldCountUsingSrcPortMethod() bool { 41 | return false 42 | } 43 | 44 | func (c *CountableIntentExternalTrafficIntent) ShouldCountUsingDNSMethod() bool { 45 | return true 46 | } 47 | 48 | type CountableIncomingInternetTrafficIntent struct { 49 | } 50 | 51 | func NewCountableIncomingInternetTrafficIntent() *CountableIncomingInternetTrafficIntent { 52 | return &CountableIncomingInternetTrafficIntent{} 53 | } 54 | 55 | func (c *CountableIncomingInternetTrafficIntent) ShouldCountUsingSrcPortMethod() bool { 56 | return true 57 | } 58 | 59 | func (c *CountableIncomingInternetTrafficIntent) ShouldCountUsingDNSMethod() bool { 60 | return false 61 | } 62 | -------------------------------------------------------------------------------- /src/mapper/pkg/dnscache/dns_cache.go: -------------------------------------------------------------------------------- 1 | package dnscache 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 6 | "github.com/otterize/network-mapper/src/mapper/pkg/dnscache/ttl_cache" 7 | "github.com/sirupsen/logrus" 8 | "github.com/spf13/viper" 9 | "net" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | type DNSCache struct { 15 | cache *ttl_cache.TTLCache[string, string] 16 | 
} 17 | 18 | type Resolver interface { 19 | LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) 20 | } 21 | 22 | func NewDNSCache() *DNSCache { 23 | capacity := viper.GetInt(config.DNSCacheItemsMaxCapacityKey) 24 | if capacity == 0 { 25 | logrus.Panic("Capacity cannot be 0") 26 | } 27 | dnsRecordCache := ttl_cache.NewTTLCache[string, string](capacity) 28 | 29 | return &DNSCache{ 30 | cache: dnsRecordCache, 31 | } 32 | } 33 | 34 | func (d *DNSCache) AddOrUpdateDNSData(dnsName string, ip string, ttl time.Duration) { 35 | d.cache.Insert(dnsName, ip, ttl) 36 | } 37 | 38 | func (d *DNSCache) GetResolvedIPs(dnsName string) []string { 39 | entry := d.cache.Get(dnsName) 40 | return entry 41 | } 42 | 43 | func (d *DNSCache) GetResolvedIPsForWildcard(dnsName string) []string { 44 | dnsSuffix := strings.TrimPrefix(dnsName, "*") // Strip the wildcard, leave the '.example.com' suffix 45 | result := d.cache.Filter(func(key string) bool { 46 | return strings.HasSuffix(key, dnsSuffix) 47 | }) 48 | 49 | return result 50 | } 51 | -------------------------------------------------------------------------------- /src/mapper/pkg/dnscache/dns_cache_test.go: -------------------------------------------------------------------------------- 1 | package dnscache 2 | 3 | import ( 4 | "fmt" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 6 | "github.com/spf13/viper" 7 | "github.com/stretchr/testify/suite" 8 | "golang.org/x/exp/slices" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | const ( 14 | IP1 = "10.0.0.1" 15 | IP2 = "10.0.0.2" 16 | ) 17 | 18 | type DNSCacheTestSuite struct { 19 | suite.Suite 20 | } 21 | 22 | func (s *DNSCacheTestSuite) TearDownTest() { 23 | viper.Set(config.DNSCacheItemsMaxCapacityKey, config.DNSCacheItemsMaxCapacityDefault) 24 | } 25 | 26 | func (s *DNSCacheTestSuite) TestDNSCache() { 27 | cache := NewDNSCache() 28 | cache.AddOrUpdateDNSData("good-news.com", IP1, 60*time.Second) 29 | ips := cache.GetResolvedIPs("good-news.com") 30 | 
s.Require().Len(ips, 1) 31 | s.Require().Equal(IP1, ips[0]) 32 | 33 | cache.AddOrUpdateDNSData("good-news.com", IP2, 60*time.Second) 34 | ips = cache.GetResolvedIPs("good-news.com") 35 | s.Require().Len(ips, 2) 36 | s.Require().Contains(ips, IP1) 37 | s.Require().Contains(ips, IP2) 38 | 39 | ips = cache.GetResolvedIPs("bad-news.de") 40 | s.Require().Len(ips, 0) 41 | 42 | cache.AddOrUpdateDNSData("bad-news.de", IP1, 60*time.Second) 43 | ips = cache.GetResolvedIPs("bad-news.de") 44 | s.Require().Len(ips, 1) 45 | s.Require().Equal(IP1, ips[0]) 46 | } 47 | 48 | func (s *DNSCacheTestSuite) TestCapacityConfig() { 49 | capacityLimit := 2 50 | viper.Set(config.DNSCacheItemsMaxCapacityKey, capacityLimit) 51 | cache := NewDNSCache() 52 | names := make([]string, 0) 53 | for i := 0; i < capacityLimit+1; i++ { 54 | dnsName := fmt.Sprintf("dns-%d.com", i) 55 | cache.AddOrUpdateDNSData(dnsName, IP1, 60*time.Second) 56 | names = append(names, dnsName) 57 | } 58 | 59 | for i, dnsName := range names { 60 | vals := cache.GetResolvedIPs(dnsName) 61 | if i == 0 { 62 | s.Require().Len(vals, 0) 63 | } else { 64 | s.Require().Len(vals, 1) 65 | } 66 | } 67 | } 68 | 69 | func (s *DNSCacheTestSuite) TestWildcardIP() { 70 | cache := NewDNSCache() 71 | cache.AddOrUpdateDNSData("www.surf-forecast.com", IP1, 60*time.Second) 72 | ips := cache.GetResolvedIPsForWildcard("*.surf-forecast.com") 73 | s.Require().Len(ips, 1) 74 | s.Require().Equal(ips[0], IP1) 75 | } 76 | 77 | func (s *DNSCacheTestSuite) TestMultipleWildcardIPs() { 78 | cache := NewDNSCache() 79 | cache.AddOrUpdateDNSData("www.surf-forecast.com", IP1, 60*time.Second) 80 | cache.AddOrUpdateDNSData("api.surf-forecast.com", IP2, 60*time.Second) 81 | ips := cache.GetResolvedIPsForWildcard("*.surf-forecast.com") 82 | s.Require().Len(ips, 2) 83 | slices.Sort(ips) 84 | compIps := []string{IP1, IP2} 85 | slices.Sort(compIps) 86 | s.Require().Equal(ips, compIps) 87 | } 88 | 89 | func TestDNSCacheTestSuite(t *testing.T) { 90 | suite.Run(t, 
new(DNSCacheTestSuite)) 91 | } 92 | -------------------------------------------------------------------------------- /src/mapper/pkg/dnscache/ttl_cache/ttl_cache_test.go: -------------------------------------------------------------------------------- 1 | package ttl_cache 2 | 3 | import ( 4 | "fmt" 5 | "github.com/stretchr/testify/suite" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | type TTLCacheTestSuite struct { 11 | suite.Suite 12 | } 13 | 14 | func (s *TTLCacheTestSuite) TestCacheFilterWhileWrite() { 15 | cache := NewTTLCache[string, string](100) 16 | stop := make(chan struct{}) 17 | 18 | go func() { 19 | // Intensive write to the cache 20 | for { 21 | cache.Insert("example.com", fmt.Sprintf("192.0.2.%d", time.Now().UnixNano()%255), 5*time.Second) 22 | time.Sleep(1 * time.Millisecond) 23 | } 24 | }() 25 | 26 | go func() { 27 | // Iterating over the cache 28 | for { 29 | _ = cache.Filter(func(key string) bool { 30 | return true 31 | }) 32 | time.Sleep(1 * time.Millisecond) 33 | } 34 | }() 35 | 36 | // Let them race for 15 seconds 37 | // Unfortunately, there isn't a way to make sure that they won't race (which will yield fatal error: concurrent map iteration and map write) 38 | // so we hope that 15 seconds are good enough interval to reproduce the error if it exists 39 | time.Sleep(15 * time.Second) 40 | close(stop) 41 | } 42 | 43 | func (s *TTLCacheTestSuite) TestTTL() { 44 | cache := NewTTLCache[string, string](100) 45 | 46 | cache.Insert("my-future-blog.de", "ip1", 1*time.Second) 47 | ips := cache.Get("my-future-blog.de") 48 | s.Require().Len(ips, 1) 49 | s.Require().Equal("ip1", ips[0]) 50 | 51 | // This is the only place where we sleep in the test, to make sure the TTL works as expected 52 | time.Sleep(2 * time.Second) 53 | 54 | cache.cleanupExpired() 55 | 56 | ips = cache.Get("my-future-blog.de") 57 | s.Require().Len(ips, 0) 58 | } 59 | 60 | func TestTTLCacheTestSuite(t *testing.T) { 61 | suite.Run(t, new(TTLCacheTestSuite)) 62 | } 63 | 
-------------------------------------------------------------------------------- /src/mapper/pkg/dnsintentspublisher/init.go: -------------------------------------------------------------------------------- 1 | package dnsintentspublisher 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/dnscache" 8 | "github.com/sirupsen/logrus" 9 | "github.com/spf13/viper" 10 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 11 | k8serrors "k8s.io/apimachinery/pkg/api/errors" 12 | "k8s.io/apimachinery/pkg/types" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | "sigs.k8s.io/controller-runtime/pkg/manager" 15 | ) 16 | 17 | const ( 18 | clientIntentsCRDName = "clientintents.k8s.otterize.com" 19 | ) 20 | 21 | func InitWithManager(ctx context.Context, mgr manager.Manager, dnsCache *dnscache.DNSCache) (*Publisher, bool, error) { 22 | if !viper.GetBool(config.DNSClientIntentsUpdateEnabledKey) { 23 | return nil, false, nil 24 | } 25 | 26 | installed, err := IsClientIntentsInstalled(ctx, mgr) 27 | if err != nil { 28 | return nil, false, errors.Wrap(err) 29 | } 30 | 31 | if !installed { 32 | logrus.Debugf("DNS client intents publishing is not enabled due to missing CRD %s", clientIntentsCRDName) 33 | return nil, false, nil 34 | } 35 | 36 | dnsPublisher := NewPublisher(mgr.GetClient(), dnsCache) 37 | err = dnsPublisher.InitIndices(ctx, mgr) 38 | if err != nil { 39 | return nil, false, errors.Wrap(err) 40 | } 41 | 42 | return dnsPublisher, true, nil 43 | } 44 | 45 | func IsClientIntentsInstalled(ctx context.Context, mgr manager.Manager) (bool, error) { 46 | directClient, err := client.New(mgr.GetConfig(), client.Options{Scheme: mgr.GetScheme()}) 47 | if err != nil { 48 | logrus.WithError(err).Error("unable to create kubernetes API client") 49 | return false, errors.Wrap(err) 50 | } 51 | 52 | crd := 
apiextensionsv1.CustomResourceDefinition{} 53 | err = directClient.Get(ctx, types.NamespacedName{Name: clientIntentsCRDName}, &crd) 54 | if err != nil && !k8serrors.IsNotFound(err) { 55 | return false, errors.Wrap(err) 56 | } 57 | 58 | if k8serrors.IsNotFound(err) { 59 | return false, nil 60 | } 61 | 62 | return true, nil 63 | } 64 | -------------------------------------------------------------------------------- /src/mapper/pkg/externaltrafficholder/externaltrafficholder.go: -------------------------------------------------------------------------------- 1 | package externaltrafficholder 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 6 | "github.com/otterize/network-mapper/src/mapper/pkg/concurrentconnectioncounter" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 8 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 9 | "github.com/samber/lo" 10 | "github.com/sirupsen/logrus" 11 | "sync" 12 | "time" 13 | ) 14 | 15 | type IP string 16 | 17 | type ExternalTrafficIntent struct { 18 | Client model.OtterizeServiceIdentity `json:"client"` 19 | LastSeen time.Time 20 | DNSName string 21 | IPs map[IP]struct{} 22 | } 23 | 24 | type TimestampedExternalTrafficIntent struct { 25 | Timestamp time.Time 26 | Intent ExternalTrafficIntent 27 | ConnectionsCount *cloudclient.ConnectionsCount 28 | } 29 | 30 | type ExternalTrafficKey struct { 31 | ClientName string 32 | ClientNamespace string 33 | DestDNSName string 34 | } 35 | 36 | type IntentsConnectionCounter map[ExternalTrafficKey]*concurrentconnectioncounter.ConnectionCounter[*concurrentconnectioncounter.CountableIntentExternalTrafficIntent] 37 | 38 | type ExternalTrafficIntentsHolder struct { 39 | intents map[ExternalTrafficKey]TimestampedExternalTrafficIntent 40 | lock sync.Mutex 41 | callbacks []ExternalTrafficCallbackFunc 42 | connectionCountDiffer *concurrentconnectioncounter.ConnectionCountDiffer[ExternalTrafficKey, 
*concurrentconnectioncounter.CountableIntentExternalTrafficIntent] 43 | } 44 | 45 | type ExternalTrafficCallbackFunc func(context.Context, []TimestampedExternalTrafficIntent) 46 | 47 | func NewExternalTrafficIntentsHolder() *ExternalTrafficIntentsHolder { 48 | return &ExternalTrafficIntentsHolder{ 49 | intents: make(map[ExternalTrafficKey]TimestampedExternalTrafficIntent), 50 | connectionCountDiffer: concurrentconnectioncounter.NewConnectionCountDiffer[ExternalTrafficKey, *concurrentconnectioncounter.CountableIntentExternalTrafficIntent](), 51 | } 52 | } 53 | 54 | func (h *ExternalTrafficIntentsHolder) RegisterNotifyIntents(callback ExternalTrafficCallbackFunc) { 55 | h.callbacks = append(h.callbacks, callback) 56 | } 57 | 58 | func (h *ExternalTrafficIntentsHolder) PeriodicIntentsUpload(ctx context.Context, interval time.Duration) { 59 | logrus.Info("Starting periodic external traffic intents upload") 60 | 61 | for { 62 | select { 63 | case <-time.After(interval): 64 | if len(h.callbacks) == 0 { 65 | continue 66 | } 67 | 68 | intents := h.GetNewIntentsSinceLastGet() 69 | if len(intents) == 0 { 70 | continue 71 | } 72 | for _, callback := range h.callbacks { 73 | callback(ctx, intents) 74 | } 75 | 76 | case <-ctx.Done(): 77 | return 78 | } 79 | } 80 | } 81 | 82 | func (h *ExternalTrafficIntentsHolder) GetNewIntentsSinceLastGet() []TimestampedExternalTrafficIntent { 83 | h.lock.Lock() 84 | defer h.lock.Unlock() 85 | 86 | intents := make([]TimestampedExternalTrafficIntent, 0, len(h.intents)) 87 | 88 | for key, intent := range h.intents { 89 | // Add connection count value 90 | connectionsCount, connectionsCountValid := h.connectionCountDiffer.GetDiff(key) 91 | if connectionsCountValid { 92 | intent.ConnectionsCount = lo.ToPtr(connectionsCount) 93 | } 94 | 95 | intents = append(intents, intent) 96 | } 97 | 98 | h.intents = make(map[ExternalTrafficKey]TimestampedExternalTrafficIntent) 99 | h.connectionCountDiffer.Reset() 100 | 101 | return intents 102 | } 103 | 104 | 
func (h *ExternalTrafficIntentsHolder) AddIntent(intent ExternalTrafficIntent) { 105 | if config.ExcludedNamespaces().Contains(intent.Client.Namespace) { 106 | return 107 | } 108 | 109 | h.lock.Lock() 110 | defer h.lock.Unlock() 111 | 112 | key := ExternalTrafficKey{ 113 | ClientName: intent.Client.Name, 114 | ClientNamespace: intent.Client.Namespace, 115 | DestDNSName: intent.DNSName, 116 | } 117 | _, found := h.intents[key] 118 | h.connectionCountDiffer.Increment(key, concurrentconnectioncounter.CounterInput[*concurrentconnectioncounter.CountableIntentExternalTrafficIntent]{ 119 | Intent: concurrentconnectioncounter.NewCountableIntentExternalTrafficIntent(), 120 | SourcePorts: make([]int64, 0), 121 | }) 122 | 123 | if !found { 124 | h.intents[key] = TimestampedExternalTrafficIntent{ 125 | Timestamp: intent.LastSeen, 126 | Intent: intent, 127 | } 128 | return 129 | } 130 | 131 | mergedIntent := h.intents[key] 132 | 133 | for ip := range intent.IPs { 134 | mergedIntent.Intent.IPs[ip] = struct{}{} 135 | } 136 | if intent.LastSeen.After(mergedIntent.Timestamp) { 137 | mergedIntent.Timestamp = intent.LastSeen 138 | } 139 | 140 | h.intents[key] = mergedIntent 141 | } 142 | -------------------------------------------------------------------------------- /src/mapper/pkg/gcpintentsholder/holder.go: -------------------------------------------------------------------------------- 1 | package gcpintentsholder 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 6 | "github.com/samber/lo" 7 | "github.com/sirupsen/logrus" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | type GCPIntent struct { 13 | Client model.OtterizeServiceIdentity `json:"client"` 14 | Permissions []string 15 | Resource string 16 | } 17 | 18 | type GCPIntentKey struct { 19 | ClientName string 20 | ClientNamespace string 21 | Resource string 22 | } 23 | 24 | type TimestampedGCPIntent struct { 25 | Timestamp time.Time 26 | GCPIntent 27 | } 28 | 29 | type GCPIntentsHolder 
struct { 30 | intents map[GCPIntentKey]TimestampedGCPIntent 31 | lock sync.Mutex 32 | callbacks []GCPIntentCallbackFunc 33 | } 34 | 35 | type GCPIntentCallbackFunc func(context.Context, []GCPIntent) 36 | 37 | func New() *GCPIntentsHolder { 38 | notifier := &GCPIntentsHolder{ 39 | intents: make(map[GCPIntentKey]TimestampedGCPIntent), 40 | } 41 | 42 | return notifier 43 | } 44 | 45 | func (h *GCPIntentsHolder) RegisterNotifyIntents(callback GCPIntentCallbackFunc) { 46 | h.callbacks = append(h.callbacks, callback) 47 | } 48 | 49 | func (h *GCPIntentsHolder) AddIntent(intent GCPIntent) { 50 | h.lock.Lock() 51 | defer h.lock.Unlock() 52 | 53 | logrus.Debugf("Adding intent: %+v", intent) 54 | 55 | key := GCPIntentKey{ 56 | ClientName: intent.Client.Name, 57 | ClientNamespace: intent.Client.Namespace, 58 | Resource: intent.Resource, 59 | } 60 | 61 | _, found := h.intents[key] 62 | now := time.Now() 63 | 64 | if !found { 65 | h.intents[key] = TimestampedGCPIntent{ 66 | Timestamp: now, 67 | GCPIntent: intent, 68 | } 69 | } 70 | 71 | mergedIntent := h.intents[key] 72 | mergedIntent.Timestamp = now 73 | mergedIntent.Permissions = lo.Union(mergedIntent.Permissions, intent.Permissions) 74 | h.intents[key] = mergedIntent 75 | } 76 | 77 | func (h *GCPIntentsHolder) PeriodicIntentsUpload(ctx context.Context, interval time.Duration) { 78 | for { 79 | select { 80 | case <-ctx.Done(): 81 | return 82 | case <-time.After(interval): 83 | if len(h.callbacks) == 0 { 84 | continue 85 | } 86 | 87 | intents := h.GetNewIntentsSinceLastGet() 88 | if len(intents) == 0 { 89 | continue 90 | } 91 | 92 | for _, callback := range h.callbacks { 93 | callback(ctx, intents) 94 | } 95 | } 96 | } 97 | } 98 | 99 | func (h *GCPIntentsHolder) GetNewIntentsSinceLastGet() []GCPIntent { 100 | h.lock.Lock() 101 | defer h.lock.Unlock() 102 | 103 | intents := make([]GCPIntent, 0, len(h.intents)) 104 | 105 | for _, intent := range h.intents { 106 | intents = append(intents, intent.GCPIntent) 107 | } 108 | 109 | 
h.intents = make(map[GCPIntentKey]TimestampedGCPIntent) 110 | 111 | return intents 112 | } 113 | -------------------------------------------------------------------------------- /src/mapper/pkg/graph/model/gvk.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import "k8s.io/apimachinery/pkg/runtime/schema" 4 | 5 | func GroupVersionKindFromKubeGVK(kind schema.GroupVersionKind) *GroupVersionKind { 6 | gvk := &GroupVersionKind{ 7 | Version: kind.Version, 8 | Kind: kind.Kind, 9 | } 10 | 11 | if kind.Group != "" { 12 | gvk.Group = &kind.Group 13 | } 14 | 15 | return gvk 16 | } 17 | -------------------------------------------------------------------------------- /src/mapper/pkg/graph/model/kafkaop.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "github.com/otterize/intents-operator/src/shared/errors" 5 | "strings" 6 | ) 7 | 8 | var ( 9 | kafkaOperationToAclOperation = map[string]KafkaOperation{ 10 | "read": KafkaOperationConsume, 11 | "write": KafkaOperationProduce, 12 | "create": KafkaOperationCreate, 13 | "delete": KafkaOperationDelete, 14 | "alter": KafkaOperationAlter, 15 | "describe": KafkaOperationDescribe, 16 | "clusteraction": KafkaOperationClusterAction, 17 | "describeconfigs": KafkaOperationDescribeConfigs, 18 | "alterconfigs": KafkaOperationAlterConfigs, 19 | "idempotentwrite": KafkaOperationIdempotentWrite, 20 | } 21 | ) 22 | 23 | func KafkaOpFromText(text string) (KafkaOperation, error) { 24 | normalized := strings.ToLower(text) 25 | 26 | apiOp, ok := kafkaOperationToAclOperation[normalized] 27 | if !ok { 28 | return "", errors.Errorf("failed parsing op %s", text) 29 | } 30 | return apiOp, nil 31 | } 32 | -------------------------------------------------------------------------------- /src/mapper/pkg/graph/model/results_length.go: -------------------------------------------------------------------------------- 1 | package 
model 2 | 3 | func (c CaptureResults) Length() int { 4 | return len(c.Results) 5 | } 6 | 7 | func (c KafkaMapperResults) Length() int { 8 | return len(c.Results) 9 | } 10 | 11 | func (c SocketScanResults) Length() int { 12 | return len(c.Results) 13 | } 14 | 15 | func (c IstioConnectionResults) Length() int { 16 | return len(c.Results) 17 | } 18 | 19 | type AWSOperationResults []AWSOperation 20 | 21 | func (c AWSOperationResults) Length() int { 22 | return len(c) 23 | } 24 | 25 | type GCPOperationResults []GCPOperation 26 | 27 | func (c GCPOperationResults) Length() int { 28 | return len(c) 29 | } 30 | 31 | type AzureOperationResults []AzureOperation 32 | 33 | func (c AzureOperationResults) Length() int { 34 | return len(c) 35 | } 36 | 37 | func (c CaptureTCPResults) Length() int { 38 | return len(c.Results) 39 | } 40 | 41 | func (c TrafficLevelResults) Length() int { 42 | return len(c.Results) 43 | } 44 | -------------------------------------------------------------------------------- /src/mapper/pkg/graph/model/serviceidentity.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import "k8s.io/apimachinery/pkg/types" 4 | 5 | func (identity OtterizeServiceIdentity) AsNamespacedName() types.NamespacedName { 6 | return types.NamespacedName{ 7 | Name: identity.Name, 8 | Namespace: identity.Namespace, 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/mapper/pkg/incomingtrafficholder/incoming_traffic_holder.go: -------------------------------------------------------------------------------- 1 | package incomingtrafficholder 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 6 | "github.com/otterize/network-mapper/src/mapper/pkg/concurrentconnectioncounter" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 8 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 9 | "github.com/samber/lo" 
10 | "github.com/sirupsen/logrus" 11 | "sync" 12 | "time" 13 | ) 14 | 15 | type IP string 16 | 17 | type IncomingTrafficIntent struct { 18 | Server model.OtterizeServiceIdentity `json:"client"` 19 | LastSeen time.Time 20 | IP string 21 | SrcPorts []int64 22 | } 23 | 24 | type TimestampedIncomingTrafficIntent struct { 25 | Timestamp time.Time 26 | Intent IncomingTrafficIntent 27 | ConnectionsCount *cloudclient.ConnectionsCount 28 | } 29 | 30 | type IncomingTrafficKey struct { 31 | ServerName string 32 | ServerNamespace string 33 | IP string 34 | } 35 | 36 | type IncomingTrafficIntentsHolder struct { 37 | intents map[IncomingTrafficKey]TimestampedIncomingTrafficIntent 38 | lock sync.Mutex 39 | callbacks []IncomingTrafficCallbackFunc 40 | connectionCountDiffer *concurrentconnectioncounter.ConnectionCountDiffer[IncomingTrafficKey, *concurrentconnectioncounter.CountableIncomingInternetTrafficIntent] 41 | } 42 | 43 | type IncomingTrafficCallbackFunc func(context.Context, []TimestampedIncomingTrafficIntent) 44 | type IntentsConnectionCounter map[IncomingTrafficKey]*concurrentconnectioncounter.ConnectionCounter[*concurrentconnectioncounter.CountableIncomingInternetTrafficIntent] 45 | 46 | func NewIncomingTrafficIntentsHolder() *IncomingTrafficIntentsHolder { 47 | return &IncomingTrafficIntentsHolder{ 48 | intents: make(map[IncomingTrafficKey]TimestampedIncomingTrafficIntent), 49 | connectionCountDiffer: concurrentconnectioncounter.NewConnectionCountDiffer[IncomingTrafficKey, *concurrentconnectioncounter.CountableIncomingInternetTrafficIntent](), 50 | } 51 | } 52 | 53 | func (h *IncomingTrafficIntentsHolder) RegisterNotifyIntents(callback IncomingTrafficCallbackFunc) { 54 | h.callbacks = append(h.callbacks, callback) 55 | } 56 | 57 | func (h *IncomingTrafficIntentsHolder) PeriodicIntentsUpload(ctx context.Context, interval time.Duration) { 58 | logrus.Info("Starting periodic external traffic intents upload") 59 | 60 | for { 61 | select { 62 | case <-time.After(interval): 63 
| if len(h.callbacks) == 0 { 64 | continue 65 | } 66 | 67 | intents := h.GetNewIntentsSinceLastGet() 68 | if len(intents) == 0 { 69 | continue 70 | } 71 | for _, callback := range h.callbacks { 72 | callback(ctx, intents) 73 | } 74 | 75 | case <-ctx.Done(): 76 | return 77 | } 78 | } 79 | } 80 | 81 | func (h *IncomingTrafficIntentsHolder) GetNewIntentsSinceLastGet() []TimestampedIncomingTrafficIntent { 82 | h.lock.Lock() 83 | defer h.lock.Unlock() 84 | 85 | intents := make([]TimestampedIncomingTrafficIntent, 0, len(h.intents)) 86 | 87 | for key, intent := range h.intents { 88 | connectionsCount, connectionsCountValid := h.connectionCountDiffer.GetDiff(key) 89 | if connectionsCountValid { 90 | intent.ConnectionsCount = lo.ToPtr(connectionsCount) 91 | } 92 | intents = append(intents, intent) 93 | } 94 | 95 | h.intents = make(map[IncomingTrafficKey]TimestampedIncomingTrafficIntent) 96 | h.connectionCountDiffer.Reset() 97 | 98 | return intents 99 | } 100 | 101 | func (h *IncomingTrafficIntentsHolder) AddIntent(intent IncomingTrafficIntent) { 102 | if config.ExcludedNamespaces().Contains(intent.Server.Namespace) { 103 | return 104 | } 105 | 106 | h.lock.Lock() 107 | defer h.lock.Unlock() 108 | 109 | key := IncomingTrafficKey{ 110 | ServerName: intent.Server.Name, 111 | ServerNamespace: intent.Server.Namespace, 112 | IP: intent.IP, 113 | } 114 | 115 | h.connectionCountDiffer.Increment(key, concurrentconnectioncounter.CounterInput[*concurrentconnectioncounter.CountableIncomingInternetTrafficIntent]{ 116 | Intent: concurrentconnectioncounter.NewCountableIncomingInternetTrafficIntent(), 117 | SourcePorts: intent.SrcPorts, 118 | }) 119 | 120 | mergedIntent, ok := h.intents[key] 121 | if !ok { 122 | h.intents[key] = TimestampedIncomingTrafficIntent{ 123 | Timestamp: intent.LastSeen, 124 | Intent: intent, 125 | } 126 | return 127 | } 128 | 129 | if intent.LastSeen.After(mergedIntent.Timestamp) { 130 | mergedIntent.Timestamp = intent.LastSeen 131 | } 132 | 133 | h.intents[key] = 
mergedIntent 134 | } 135 | -------------------------------------------------------------------------------- /src/mapper/pkg/mapperwebhooks/mapperwebhooks.go: -------------------------------------------------------------------------------- 1 | package mapperwebhooks 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rsa" 6 | "crypto/x509" 7 | "crypto/x509/pkix" 8 | "encoding/base64" 9 | "encoding/pem" 10 | "fmt" 11 | "github.com/sirupsen/logrus" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/types" 14 | "k8s.io/client-go/kubernetes" 15 | "k8s.io/client-go/rest" 16 | "math/big" 17 | "os" 18 | "path/filepath" 19 | "time" 20 | ) 21 | 22 | import ( 23 | "context" 24 | "crypto/rand" 25 | "encoding/json" 26 | "github.com/otterize/intents-operator/src/shared/errors" 27 | ) 28 | 29 | const ( 30 | Year = 365 * 24 * time.Hour 31 | CertDirPath = "/tmp/k8s-webhook-server/serving-certs" 32 | CertFilename = "tls.crt" 33 | PrivateKeyFilename = "tls.key" 34 | ) 35 | 36 | type CertificateBundle struct { 37 | CertPem []byte 38 | PrivateKeyPem []byte 39 | } 40 | 41 | type patchValue struct { 42 | Op string `json:"op"` 43 | Path string `json:"path"` 44 | Value interface{} `json:"value"` 45 | } 46 | 47 | func GenerateSelfSignedCertificate(hostname string, namespace string) (CertificateBundle, error) { 48 | privateKey, err := rsa.GenerateKey(rand.Reader, 4096) 49 | if err != nil { 50 | return CertificateBundle{}, errors.Wrap(err) 51 | } 52 | certificate := x509.Certificate{ 53 | SerialNumber: big.NewInt(time.Now().Unix()), 54 | Subject: pkix.Name{ 55 | CommonName: hostname, 56 | }, 57 | NotBefore: time.Now().Add(-10 * time.Minute), 58 | NotAfter: time.Now().Add(10 * Year), 59 | 60 | KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, 61 | ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, 62 | BasicConstraintsValid: true, 63 | IsCA: true, 64 | DNSNames: []string{hostname, fmt.Sprintf("%s.%s.svc", hostname, 
namespace), fmt.Sprintf("%s.%s.svc.cluster.local", hostname, namespace)}, 65 | } 66 | derCert, err := x509.CreateCertificate(rand.Reader, &certificate, &certificate, &privateKey.PublicKey, privateKey) 67 | if err != nil { 68 | return CertificateBundle{}, errors.Wrap(err) 69 | } 70 | 71 | signedCert := &bytes.Buffer{} 72 | err = pem.Encode(signedCert, &pem.Block{Type: "CERTIFICATE", Bytes: derCert}) 73 | if err != nil { 74 | return CertificateBundle{}, errors.Wrap(err) 75 | } 76 | 77 | privateKeyPem := pem.EncodeToMemory(&pem.Block{ 78 | Type: "RSA PRIVATE KEY", 79 | Bytes: x509.MarshalPKCS1PrivateKey(privateKey), 80 | }) 81 | return CertificateBundle{ 82 | CertPem: signedCert.Bytes(), 83 | PrivateKeyPem: privateKeyPem, 84 | }, nil 85 | } 86 | 87 | func UpdateMutationWebHookCA(ctx context.Context, webHookName string, ca []byte) error { 88 | kubeClient, err := getKubeClient() 89 | if err != nil { 90 | return errors.Wrap(err) 91 | } 92 | 93 | webhookConfig, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, webHookName, metav1.GetOptions{}) 94 | if err != nil { 95 | return errors.Wrap(err) 96 | } 97 | 98 | var newCA []patchValue 99 | for i := range webhookConfig.Webhooks { 100 | newCA = append(newCA, patchValue{ 101 | Op: "replace", 102 | Path: fmt.Sprintf("/webhooks/%d/clientConfig/caBundle", i), 103 | Value: base64.StdEncoding.EncodeToString(ca), 104 | }) 105 | } 106 | 107 | newCAByte, err := json.Marshal(newCA) 108 | if err != nil { 109 | return errors.Wrap(err) 110 | } 111 | 112 | logrus.Infof("Installing new certificate in %s", webHookName) 113 | _, err = kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Patch(ctx, webHookName, types.JSONPatchType, newCAByte, metav1.PatchOptions{}) 114 | return errors.Wrap(err) 115 | } 116 | 117 | func WriteCertToFiles(bundle CertificateBundle) error { 118 | err := os.MkdirAll(CertDirPath, 0600) 119 | if err != nil { 120 | return errors.Wrap(err) 121 | } 122 | 123 | 
certFilePath := filepath.Join(CertDirPath, CertFilename) 124 | privateKeyFilePath := filepath.Join(CertDirPath, PrivateKeyFilename) 125 | 126 | err = os.WriteFile(certFilePath, bundle.CertPem, 0600) 127 | if err != nil { 128 | return errors.Wrap(err) 129 | } 130 | return os.WriteFile(privateKeyFilePath, bundle.PrivateKeyPem, 0600) 131 | } 132 | 133 | func getKubeClient() (*kubernetes.Clientset, error) { 134 | config, err := rest.InClusterConfig() 135 | if err != nil { 136 | return nil, errors.Wrap(err) 137 | } 138 | clientSet, err := kubernetes.NewForConfig(config) 139 | if err != nil { 140 | return nil, errors.Wrap(err) 141 | } 142 | return clientSet, nil 143 | } 144 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/convert_to_cloud_api_types.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "github.com/otterize/intents-operator/src/shared/serviceidresolver/serviceidentity" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 6 | "github.com/otterize/nilable" 7 | "golang.org/x/exp/slices" 8 | ) 9 | 10 | func labelsToLabelInput(labels map[string]string) []cloudclient.LabelInput { 11 | labelsInput := make([]cloudclient.LabelInput, 0) 12 | for key, value := range labels { 13 | labelsInput = append(labelsInput, cloudclient.LabelInput{Key: key, Value: nilable.From(value)}) 14 | } 15 | 16 | slices.SortFunc(labelsInput, func(a, b cloudclient.LabelInput) int { 17 | if a.Key < b.Key { 18 | return -1 19 | } 20 | if a.Key > b.Key { 21 | return 1 22 | } 23 | if !a.Value.Set && !b.Value.Set { 24 | return 0 25 | } 26 | if !a.Value.Set { 27 | return -1 28 | } 29 | if !b.Value.Set { 30 | return 1 31 | } 32 | if a.Value.Item < b.Value.Item { 33 | return -1 34 | } 35 | if a.Value.Item > b.Value.Item { 36 | return 1 37 | } 38 | return 0 39 | }) 40 | return labelsInput 41 | } 42 | 43 | func 
serviceIdentityToServiceIdentityInput(identity serviceidentity.ServiceIdentity) cloudclient.ServiceIdentityInput { 44 | wi := cloudclient.ServiceIdentityInput{ 45 | Namespace: identity.Namespace, 46 | Name: identity.Name, 47 | Kind: identity.Kind, 48 | } 49 | if identity.ResolvedUsingOverrideAnnotation != nil { 50 | wi.NameResolvedUsingAnnotation = nilable.From(*identity.ResolvedUsingOverrideAnnotation) 51 | } 52 | 53 | return wi 54 | } 55 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/endpoints_reconciler.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/intents-operator/src/shared/serviceidresolver" 7 | "github.com/otterize/intents-operator/src/shared/serviceidresolver/serviceidentity" 8 | "github.com/samber/lo" 9 | "github.com/sirupsen/logrus" 10 | corev1 "k8s.io/api/core/v1" 11 | "k8s.io/apimachinery/pkg/types" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | "sigs.k8s.io/controller-runtime/pkg/controller" 15 | "sigs.k8s.io/controller-runtime/pkg/handler" 16 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 17 | ) 18 | 19 | type EndpointsReconciler struct { 20 | client.Client 21 | serviceIDResolver serviceidresolver.ServiceResolver 22 | metadataReporter *MetadataReporter 23 | } 24 | 25 | func NewEndpointsReconciler(client client.Client, resolver serviceidresolver.ServiceResolver, reporter *MetadataReporter) *EndpointsReconciler { 26 | return &EndpointsReconciler{ 27 | Client: client, 28 | serviceIDResolver: resolver, 29 | metadataReporter: reporter, 30 | } 31 | } 32 | 33 | func (r *EndpointsReconciler) SetupWithManager(mgr ctrl.Manager) error { 34 | return ctrl.NewControllerManagedBy(mgr). 35 | For(&corev1.Endpoints{}). 
36 | 		Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(r.mapServicesToEndpoints)).
37 | 		WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}).
38 | 		Complete(r)
39 | }
40 | 
41 | // mapServicesToEndpoints enqueues the same-named Endpoints object whenever a
42 | // Service changes (Endpoints and Service share name/namespace by convention).
43 | func (r *EndpointsReconciler) mapServicesToEndpoints(_ context.Context, obj client.Object) []reconcile.Request {
44 | 	service := obj.(*corev1.Service)
45 | 	logrus.Debugf("Enqueueing endpoints for service %s", service.Name)
46 | 
47 | 	return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: service.GetNamespace(), Name: service.GetName()}}}
48 | }
49 | 
50 | // Reconcile resolves every pod behind the Endpoints object to a service
51 | // identity and reports the deduplicated set of identities to the cloud.
52 | func (r *EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
53 | 	endpoints := &corev1.Endpoints{}
54 | 	err := r.Get(ctx, req.NamespacedName, endpoints)
55 | 	if err != nil && client.IgnoreNotFound(err) == nil {
56 | 		return ctrl.Result{}, nil
57 | 	}
58 | 	if err != nil {
59 | 		return ctrl.Result{}, errors.Wrap(err)
60 | 	}
61 | 
62 | 	if endpoints.DeletionTimestamp != nil {
63 | 		return ctrl.Result{}, nil
64 | 	}
65 | 
66 | 	podNames := r.getPodNamesFromEndpoints(endpoints)
67 | 	serviceIdentities := make(map[string]serviceidentity.ServiceIdentity)
68 | 	for _, podName := range podNames {
69 | 		pod := &corev1.Pod{}
70 | 		err := r.Get(ctx, client.ObjectKey{Namespace: endpoints.Namespace, Name: podName}, pod)
71 | 		if err != nil && client.IgnoreNotFound(err) == nil {
72 | 			// Pod was deleted between the endpoints listing and this Get —
73 | 			// routine churn. Skip it and keep reporting the remaining pods
74 | 			// instead of aborting the whole reconcile.
75 | 			continue
76 | 		}
77 | 		if err != nil {
78 | 			return ctrl.Result{}, errors.Wrap(err)
79 | 		}
80 | 		serviceIdentity, err := r.serviceIDResolver.ResolvePodToServiceIdentity(ctx, pod)
81 | 		if err != nil {
82 | 			return ctrl.Result{}, errors.Wrap(err)
83 | 		}
84 | 		// Keyed by name+kind so multi-pod workloads are reported once.
85 | 		serviceIdentities[serviceIdentity.GetNameWithKind()] = serviceIdentity
86 | 	}
87 | 
88 | 	if len(serviceIdentities) == 0 {
89 | 		return ctrl.Result{}, nil
90 | 	}
91 | 
92 | 	err = r.metadataReporter.ReportMetadata(ctx, lo.Values(serviceIdentities))
93 | 	if err != nil {
94 | 		return ctrl.Result{}, errors.Wrap(err)
95 | 	}
96 | 
97 | 	return ctrl.Result{}, 
nil 90 | } 91 | 92 | func (r *EndpointsReconciler) getPodNamesFromEndpoints(endpoints *corev1.Endpoints) []string { 93 | podNames := make([]string, 0) 94 | for _, subset := range endpoints.Subsets { 95 | for _, address := range subset.Addresses { 96 | if address.TargetRef != nil && address.TargetRef.Kind == "Pod" { 97 | podNames = append(podNames, address.TargetRef.Name) 98 | } 99 | } 100 | for _, address := range subset.NotReadyAddresses { 101 | if address.TargetRef != nil && address.TargetRef.Kind == "Pod" { 102 | podNames = append(podNames, address.TargetRef.Name) 103 | } 104 | } 105 | } 106 | return podNames 107 | } 108 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/metadata_reporter_once.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/intents-operator/src/shared/serviceidresolver/serviceidentity" 7 | "github.com/samber/lo" 8 | corev1 "k8s.io/api/core/v1" 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | ) 11 | 12 | func (r *MetadataReporter) syncAllOnce(ctx context.Context) error { 13 | allNamespaces := &corev1.NamespaceList{} 14 | err := r.List(ctx, allNamespaces) 15 | if err != nil { 16 | return errors.Wrap(err) 17 | } 18 | for _, namespace := range allNamespaces.Items { 19 | err := r.syncNamespace(ctx, namespace.Name) 20 | if err != nil { 21 | return errors.Wrap(err) 22 | } 23 | } 24 | 25 | return nil 26 | } 27 | 28 | func (r *MetadataReporter) syncNamespace(ctx context.Context, namespace string) error { 29 | pods := &corev1.PodList{} 30 | err := r.List(ctx, pods, client.InNamespace(namespace)) 31 | if err != nil { 32 | return errors.Wrap(err) 33 | } 34 | 35 | serviceIdentityToReportInput := make(map[serviceIdentityKey]serviceidentity.ServiceIdentity) 36 | 37 | for _, pod := range pods.Items { 38 | 
serviceIdentity, err := r.serviceIDResolver.ResolvePodToServiceIdentity(ctx, &pod) 39 | if err != nil { 40 | return errors.Wrap(err) 41 | } 42 | if _, ok := serviceIdentityToReportInput[serviceIdentityToCacheKey(serviceIdentity)]; ok { 43 | // For multi-pod workloads, we only need to report the metadata once 44 | continue 45 | } 46 | serviceIdentityToReportInput[serviceIdentityToCacheKey(serviceIdentity)] = serviceIdentity 47 | } 48 | 49 | identities := lo.Values(serviceIdentityToReportInput) 50 | return errors.Wrap(r.reportWorkloadMetadataWithCache(ctx, identities)) 51 | } 52 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/namespace_reconciler.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 7 | "github.com/samber/lo" 8 | corev1 "k8s.io/api/core/v1" 9 | ctrl "sigs.k8s.io/controller-runtime" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | "sigs.k8s.io/controller-runtime/pkg/controller" 12 | ) 13 | 14 | type NamespaceReconciler struct { 15 | client.Client 16 | cloudClient cloudclient.CloudClient 17 | } 18 | 19 | func NewNamespaceReconciler(client client.Client, cloudClient cloudclient.CloudClient) *NamespaceReconciler { 20 | return &NamespaceReconciler{ 21 | Client: client, 22 | cloudClient: cloudClient, 23 | } 24 | } 25 | 26 | func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { 27 | return ctrl.NewControllerManagedBy(mgr). 28 | For(&corev1.Namespace{}). 29 | WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}). 
30 | Complete(r) 31 | } 32 | 33 | func (r *NamespaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 34 | namespace := &corev1.Namespace{} 35 | err := r.Get(ctx, req.NamespacedName, namespace) 36 | if err != nil && client.IgnoreNotFound(err) == nil { 37 | return ctrl.Result{}, nil 38 | } 39 | if err != nil { 40 | return ctrl.Result{}, errors.Wrap(err) 41 | } 42 | 43 | if !namespace.DeletionTimestamp.IsZero() { 44 | return ctrl.Result{}, nil 45 | } 46 | 47 | labels := labelsToLabelInput(namespace.Labels) 48 | 49 | err = r.cloudClient.ReportNamespaceLabels(ctx, namespace.Name, labels) 50 | if err != nil { 51 | return ctrl.Result{}, errors.Wrap(err) 52 | } 53 | return ctrl.Result{}, nil 54 | } 55 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/namespace_reconciler_test.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 6 | cloudclientmocks "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient/mocks" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/mocks" 8 | "github.com/otterize/nilable" 9 | "github.com/stretchr/testify/suite" 10 | "go.uber.org/mock/gomock" 11 | corev1 "k8s.io/api/core/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/types" 14 | ctrl "sigs.k8s.io/controller-runtime" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | "testing" 17 | ) 18 | 19 | type NamespaceReconcilerTestSuite struct { 20 | suite.Suite 21 | cloudClient *cloudclientmocks.MockCloudClient 22 | k8sClient *mocks.K8sClient 23 | reconciler *NamespaceReconciler 24 | } 25 | 26 | func (s *NamespaceReconcilerTestSuite) SetupTest() { 27 | controller := gomock.NewController(s.T()) 28 | s.cloudClient = cloudclientmocks.NewMockCloudClient(controller) 29 | s.k8sClient = 
mocks.NewK8sClient(controller) 30 | s.reconciler = NewNamespaceReconciler(s.k8sClient, s.cloudClient) 31 | } 32 | 33 | func (s *NamespaceReconcilerTestSuite) TestNamespaceReconciler_Reconcile() { 34 | testNamespaceName := "test-namespace" 35 | testNS := &corev1.Namespace{ 36 | ObjectMeta: metav1.ObjectMeta{ 37 | Name: testNamespaceName, 38 | Labels: map[string]string{"key1": "value1", "key2": "value2"}, 39 | }, 40 | } 41 | 42 | req := ctrl.Request{ 43 | NamespacedName: client.ObjectKey{Name: testNamespaceName}, 44 | } 45 | 46 | s.k8sClient.EXPECT().Get(gomock.Any(), req.NamespacedName, gomock.Any()).DoAndReturn( 47 | func(ctx context.Context, name types.NamespacedName, namespace *corev1.Namespace, _ ...any) error { 48 | testNS.DeepCopyInto(namespace) 49 | return nil 50 | }) 51 | 52 | s.cloudClient.EXPECT().ReportNamespaceLabels(gomock.Any(), testNamespaceName, []cloudclient.LabelInput{ 53 | {Key: "key1", Value: nilable.From("value1")}, 54 | {Key: "key2", Value: nilable.From("value2")}, 55 | }).Return(nil) 56 | 57 | res, err := s.reconciler.Reconcile(context.Background(), req) 58 | s.NoError(err) 59 | s.Require().True(res.IsZero()) 60 | } 61 | 62 | func TestNamespaceReconcilerTestSuite(t *testing.T) { 63 | suite.Run(t, new(NamespaceReconcilerTestSuite)) 64 | } 65 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/pod_reconciler.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/intents-operator/src/shared/serviceidresolver" 7 | "github.com/otterize/intents-operator/src/shared/serviceidresolver/serviceidentity" 8 | "github.com/samber/lo" 9 | corev1 "k8s.io/api/core/v1" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/controller" 13 | ) 14 | 15 
| type PodReconciler struct { 16 | client.Client 17 | serviceIDResolver serviceidresolver.ServiceResolver 18 | metadataReporter *MetadataReporter 19 | } 20 | 21 | func NewPodReconciler(client client.Client, resolver serviceidresolver.ServiceResolver, reporter *MetadataReporter) *PodReconciler { 22 | return &PodReconciler{ 23 | Client: client, 24 | serviceIDResolver: resolver, 25 | metadataReporter: reporter, 26 | } 27 | } 28 | 29 | func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { 30 | return ctrl.NewControllerManagedBy(mgr). 31 | For(&corev1.Pod{}). 32 | WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}). 33 | Complete(r) 34 | } 35 | 36 | func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 37 | pod := &corev1.Pod{} 38 | err := r.Get(ctx, req.NamespacedName, pod) 39 | if err != nil && client.IgnoreNotFound(err) == nil { 40 | return ctrl.Result{}, nil 41 | } 42 | if err != nil { 43 | return ctrl.Result{}, errors.Wrap(err) 44 | } 45 | 46 | if pod.DeletionTimestamp != nil { 47 | return ctrl.Result{}, nil 48 | } 49 | 50 | serviceIdentity, err := r.serviceIDResolver.ResolvePodToServiceIdentity(ctx, pod) 51 | if err != nil { 52 | return ctrl.Result{}, errors.Wrap(err) 53 | } 54 | 55 | err = r.metadataReporter.ReportMetadata(ctx, []serviceidentity.ServiceIdentity{serviceIdentity}) 56 | if err != nil { 57 | return ctrl.Result{}, errors.Wrap(err) 58 | } 59 | 60 | return ctrl.Result{}, nil 61 | } 62 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/setup.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/intents-operator/src/shared/serviceidresolver" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 8 | corev1 "k8s.io/api/core/v1" 9 | ctrl 
"sigs.k8s.io/controller-runtime" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | ) 12 | 13 | func Setup(client client.Client, cloudClient cloudclient.CloudClient, resolver serviceidresolver.ServiceResolver, mgr ctrl.Manager) error { 14 | reporter := NewMetadataReporter(client, cloudClient, resolver) 15 | 16 | // Initialize indexes 17 | if err := initIndexes(mgr); err != nil { 18 | return errors.Wrap(err) 19 | } 20 | 21 | // Initialize the EndpointsReconciler 22 | endpointsReconciler := NewEndpointsReconciler(client, resolver, reporter) 23 | if err := endpointsReconciler.SetupWithManager(mgr); err != nil { 24 | return errors.Wrap(err) 25 | } 26 | 27 | // Initialize the PodReconciler 28 | podReconciler := NewPodReconciler(client, resolver, reporter) 29 | if err := podReconciler.SetupWithManager(mgr); err != nil { 30 | return errors.Wrap(err) 31 | } 32 | 33 | // Initialize the NamespaceReconciler 34 | namespaceReconciler := NewNamespaceReconciler(mgr.GetClient(), cloudClient) 35 | if err := namespaceReconciler.SetupWithManager(mgr); err != nil { 36 | return errors.Wrap(err) 37 | } 38 | 39 | return nil 40 | } 41 | func initIndexes(mgr ctrl.Manager) error { 42 | if err := mgr.GetFieldIndexer().IndexField( 43 | context.Background(), 44 | &corev1.Endpoints{}, 45 | endpointsPodNamesIndexField, 46 | func(object client.Object) []string { 47 | var res []string 48 | endpoints := object.(*corev1.Endpoints) 49 | addresses := make([]corev1.EndpointAddress, 0) 50 | for _, subset := range endpoints.Subsets { 51 | addresses = append(addresses, subset.Addresses...) 52 | addresses = append(addresses, subset.NotReadyAddresses...) 
53 | } 54 | 55 | for _, address := range addresses { 56 | if address.TargetRef == nil || address.TargetRef.Kind != "Pod" { 57 | continue 58 | } 59 | 60 | res = append(res, address.TargetRef.Name) 61 | } 62 | 63 | return res 64 | }); err != nil { 65 | return errors.Wrap(err) 66 | } 67 | return nil 68 | } 69 | -------------------------------------------------------------------------------- /src/mapper/pkg/metadatareporter/workload_metadata_cache.go: -------------------------------------------------------------------------------- 1 | package metadatareporter 2 | 3 | import ( 4 | "github.com/hashicorp/golang-lru/v2/expirable" 5 | "github.com/otterize/intents-operator/src/shared/serviceidresolver/serviceidentity" 6 | "hash/crc32" 7 | "sort" 8 | "strings" 9 | "time" 10 | ) 11 | 12 | type serviceIdentityKey string 13 | type metadataChecksum uint32 14 | 15 | type workloadMetadataCache struct { 16 | cache *expirable.LRU[serviceIdentityKey, metadataChecksum] 17 | } 18 | 19 | func newWorkloadMetadataCache(size int, ttl time.Duration) *workloadMetadataCache { 20 | cache := expirable.NewLRU[serviceIdentityKey, metadataChecksum](size, nil, ttl) 21 | return &workloadMetadataCache{ 22 | cache: cache, 23 | } 24 | } 25 | 26 | func (c *workloadMetadataCache) Add(key serviceIdentityKey, value metadataChecksum) { 27 | c.cache.Add(key, value) 28 | } 29 | 30 | func (c *workloadMetadataCache) IsCached(key serviceIdentityKey, value metadataChecksum) bool { 31 | cachedValue, ok := c.cache.Get(key) 32 | if !ok { 33 | return false 34 | } 35 | return cachedValue == value 36 | } 37 | 38 | func checksumMetadata(labels map[string]string, podIps []string, serviceIps []string) metadataChecksum { 39 | labelKeys := make([]string, 0, len(labels)) 40 | for key := range labels { 41 | labelKeys = append(labelKeys, key) 42 | } 43 | sort.Strings(labelKeys) 44 | labelString := "" 45 | for _, key := range labelKeys { 46 | labelString += key + labels[key] 47 | } 48 | sort.Strings(podIps) 49 | 
sort.Strings(serviceIps) 50 | 51 | ipsString := strings.Join(append(podIps, serviceIps...), ",") 52 | 53 | hash := crc32.ChecksumIEEE([]byte(labelString + ipsString)) 54 | return metadataChecksum(hash) 55 | } 56 | 57 | func serviceIdentityToCacheKey(identity serviceidentity.ServiceIdentity) serviceIdentityKey { 58 | return serviceIdentityKey(identity.GetNameWithKind()) 59 | } 60 | -------------------------------------------------------------------------------- /src/mapper/pkg/metricexporter/edge_metric.go: -------------------------------------------------------------------------------- 1 | package metricexporter 2 | 3 | import "context" 4 | 5 | type EdgeMetric interface { 6 | Record(ctx context.Context, from string, to string) 7 | } 8 | -------------------------------------------------------------------------------- /src/mapper/pkg/metricexporter/metric_exporter.go: -------------------------------------------------------------------------------- 1 | package metricexporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | 7 | "github.com/otterize/network-mapper/src/mapper/pkg/intentsstore" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | type MetricExporter struct { 12 | edgeMetric EdgeMetric 13 | } 14 | 15 | func NewMetricExporter(ctx context.Context) (*MetricExporter, error) { 16 | em, err := NewOtelEdgeMetric(ctx) 17 | if err != nil { 18 | return nil, errors.Wrap(err) 19 | } 20 | 21 | return &MetricExporter{ 22 | edgeMetric: em, 23 | }, nil 24 | } 25 | 26 | func (o *MetricExporter) NotifyIntents(ctx context.Context, intents []intentsstore.TimestampedIntent) { 27 | for _, intent := range intents { 28 | clientName := intent.Intent.Client.Name 29 | serverName := intent.Intent.Server.Name 30 | logrus.Debugf("recording metric counter: %s -> %s", clientName, serverName) 31 | o.edgeMetric.Record(ctx, clientName, serverName) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- 
/src/mapper/pkg/metricexporter/metric_exporter_test.go: -------------------------------------------------------------------------------- 1 | package metricexporter 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 9 | "github.com/otterize/network-mapper/src/mapper/pkg/intentsstore" 10 | "github.com/stretchr/testify/suite" 11 | "go.uber.org/mock/gomock" 12 | ) 13 | 14 | type MetricExporterTestSuite struct { 15 | suite.Suite 16 | testNamespace string 17 | intentsHolder *intentsstore.IntentsHolder 18 | edgeMock *MockEdgeMetric 19 | metricExporter *MetricExporter 20 | } 21 | 22 | var ( 23 | testTimestamp = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) 24 | ) 25 | 26 | func (o *MetricExporterTestSuite) SetupTest() { 27 | o.testNamespace = "default" 28 | o.intentsHolder = intentsstore.NewIntentsHolder() 29 | } 30 | 31 | func (o *MetricExporterTestSuite) BeforeTest(s, testName string) { 32 | controller := gomock.NewController(o.T()) 33 | o.edgeMock = NewMockEdgeMetric(controller) 34 | 35 | metricExporter, err := NewMetricExporter(context.Background()) 36 | o.Require().NoError(err) 37 | metricExporter.edgeMetric = o.edgeMock 38 | o.metricExporter = metricExporter 39 | } 40 | 41 | func (o *MetricExporterTestSuite) addIntent(source string, srcNamespace string, destination string, dstNamespace string) { 42 | o.intentsHolder.AddIntent( 43 | testTimestamp, 44 | model.Intent{ 45 | Client: &model.OtterizeServiceIdentity{Name: source, Namespace: srcNamespace}, 46 | Server: &model.OtterizeServiceIdentity{Name: destination, Namespace: dstNamespace}, 47 | }, 48 | make([]int64, 0), 49 | ) 50 | } 51 | 52 | func (o *MetricExporterTestSuite) TestExportIntents() { 53 | o.addIntent("client1", o.testNamespace, "server1", o.testNamespace) 54 | o.addIntent("client1", o.testNamespace, "server2", "external-namespace") 55 | o.edgeMock.EXPECT().Record(context.Background(), "client1", "server1").Times(1) 56 | 
o.edgeMock.EXPECT().Record(context.Background(), "client1", "server2").Times(1) 57 | o.metricExporter.NotifyIntents(context.Background(), o.intentsHolder.GetNewIntentsSinceLastGet()) 58 | } 59 | 60 | func TestRunSuite(t *testing.T) { 61 | suite.Run(t, new(MetricExporterTestSuite)) 62 | } 63 | -------------------------------------------------------------------------------- /src/mapper/pkg/metricexporter/mock_edge_metric.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: network-mapper/src/mapper/pkg/otelexporter/edge_metric.go 3 | 4 | // Package mock_otelexporter is a generated GoMock package. 5 | package metricexporter 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | 11 | "go.uber.org/mock/gomock" 12 | ) 13 | 14 | // MockEdgeMetric is a mock of EdgeMetric interface. 15 | type MockEdgeMetric struct { 16 | ctrl *gomock.Controller 17 | recorder *MockEdgeMetricMockRecorder 18 | } 19 | 20 | // MockEdgeMetricMockRecorder is the mock recorder for MockEdgeMetric. 21 | type MockEdgeMetricMockRecorder struct { 22 | mock *MockEdgeMetric 23 | } 24 | 25 | // NewMockEdgeMetric creates a new mock instance. 26 | func NewMockEdgeMetric(ctrl *gomock.Controller) *MockEdgeMetric { 27 | mock := &MockEdgeMetric{ctrl: ctrl} 28 | mock.recorder = &MockEdgeMetricMockRecorder{mock} 29 | return mock 30 | } 31 | 32 | // EXPECT returns an object that allows the caller to indicate expected use. 33 | func (m *MockEdgeMetric) EXPECT() *MockEdgeMetricMockRecorder { 34 | return m.recorder 35 | } 36 | 37 | // Record mocks base method. 38 | func (m *MockEdgeMetric) Record(ctx context.Context, from, to string) { 39 | m.ctrl.T.Helper() 40 | m.ctrl.Call(m, "Record", ctx, from, to) 41 | } 42 | 43 | // Record indicates an expected call of Record. 
44 | func (mr *MockEdgeMetricMockRecorder) Record(ctx, from, to interface{}) *gomock.Call { 45 | mr.mock.ctrl.T.Helper() 46 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Record", reflect.TypeOf((*MockEdgeMetric)(nil).Record), ctx, from, to) 47 | } -------------------------------------------------------------------------------- /src/mapper/pkg/metricexporter/otel_edge_metric.go: -------------------------------------------------------------------------------- 1 | package metricexporter 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "time" 7 | 8 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 9 | sharedconfig "github.com/otterize/network-mapper/src/shared/config" 10 | "github.com/spf13/viper" 11 | "go.opentelemetry.io/otel/attribute" 12 | "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" 13 | "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" 14 | "go.opentelemetry.io/otel/metric" 15 | sdk "go.opentelemetry.io/otel/sdk/metric" 16 | "go.opentelemetry.io/otel/sdk/resource" 17 | semconv "go.opentelemetry.io/otel/semconv/v1.26.0" 18 | ) 19 | 20 | type OtelEdgeMetric struct { 21 | meterProvider metric.MeterProvider 22 | counter metric.Int64Counter 23 | } 24 | 25 | func newResource() (*resource.Resource, error) { 26 | return resource.Merge(resource.Default(), 27 | resource.NewWithAttributes(semconv.SchemaURL, 28 | semconv.OTelScopeName("otterize/network-mapper"), 29 | )) 30 | } 31 | 32 | const ClientAttributeName = "client" 33 | const ServerAttributeName = "server" 34 | 35 | func newMeterProvider(ctx context.Context, res *resource.Resource) (*sdk.MeterProvider, error) { 36 | // SDK automatically configured via environment variables: 37 | // - OTEL_EXPORTER_OTLP_ENDPOINT 38 | // - OTEL_EXPORTER_OTLP_HEADERS 39 | // - OTEL_EXPORTER_OTLP_TIMEOUT (...) 
40 | metricExporter, err := otlpmetricgrpc.New(ctx) 41 | if err != nil { 42 | return nil, errors.Wrap(err) 43 | } 44 | 45 | if viper.GetBool(sharedconfig.DebugKey) { 46 | stdOutExporter, err := stdoutmetric.New() 47 | if err != nil { 48 | return nil, errors.Wrap(err) 49 | } 50 | meterProvider := sdk.NewMeterProvider( 51 | sdk.WithResource(res), 52 | sdk.WithReader(sdk.NewPeriodicReader(stdOutExporter, 53 | sdk.WithInterval(1*time.Second))), 54 | sdk.WithReader(sdk.NewPeriodicReader(metricExporter)), 55 | ) 56 | return meterProvider, nil 57 | } 58 | 59 | meterProvider := sdk.NewMeterProvider( 60 | sdk.WithResource(res), 61 | sdk.WithReader(sdk.NewPeriodicReader(metricExporter)), 62 | ) 63 | return meterProvider, nil 64 | } 65 | 66 | func (o *OtelEdgeMetric) Record(ctx context.Context, from string, to string) { 67 | o.counter.Add(ctx, 1, metric.WithAttributes(attribute.String(ClientAttributeName, from), attribute.String(ServerAttributeName, to))) 68 | } 69 | 70 | func NewOtelEdgeMetric(ctx context.Context) (*OtelEdgeMetric, error) { 71 | res, err := newResource() 72 | if err != nil { 73 | return nil, errors.Wrap(err) 74 | } 75 | 76 | meterProvider, err := newMeterProvider(ctx, res) 77 | if err != nil { 78 | return nil, errors.Wrap(err) 79 | } 80 | 81 | var meter = meterProvider.Meter("otelexporter") 82 | edgeCounter, err := meter.Int64Counter( 83 | viper.GetString(config.OTelMetricKey), 84 | metric.WithDescription("Count of edges between two nodes"), 85 | ) 86 | if err != nil { 87 | return nil, errors.Wrap(err) 88 | } 89 | 90 | return &OtelEdgeMetric{ 91 | counter: edgeCounter, 92 | meterProvider: meterProvider, 93 | }, nil 94 | } 95 | -------------------------------------------------------------------------------- /src/mapper/pkg/metrics_collection_traffic/endpoints_reconciler.go: -------------------------------------------------------------------------------- 1 | package metrics_collection_traffic 2 | 3 | import ( 4 | "context" 5 | 
"github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/intents-operator/src/shared/injectablerecorder" 7 | "github.com/samber/lo" 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/client-go/tools/record" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/controller" 13 | ) 14 | 15 | type EndpointsReconciler struct { 16 | client.Client 17 | injectablerecorder.InjectableRecorder 18 | metricsCollectionTrafficHandler *MetricsCollectionTrafficHandler 19 | } 20 | 21 | func NewEndpointsReconciler(metricsCollectionTrafficHandler *MetricsCollectionTrafficHandler) *EndpointsReconciler { 22 | return &EndpointsReconciler{ 23 | metricsCollectionTrafficHandler: metricsCollectionTrafficHandler, 24 | } 25 | } 26 | 27 | func (r *EndpointsReconciler) SetupWithManager(mgr ctrl.Manager) error { 28 | recorder := mgr.GetEventRecorderFor("intents-operator") 29 | r.InjectRecorder(recorder) 30 | 31 | return ctrl.NewControllerManagedBy(mgr). 32 | For(&corev1.Endpoints{}). 33 | WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}). 
34 | Complete(r) 35 | } 36 | 37 | func (r *EndpointsReconciler) InjectRecorder(recorder record.EventRecorder) { 38 | r.Recorder = recorder 39 | } 40 | 41 | func (r *EndpointsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 42 | err := r.metricsCollectionTrafficHandler.HandleAllServicesInNamespace(ctx, req) 43 | if err != nil { 44 | return ctrl.Result{}, errors.Wrap(err) 45 | } 46 | 47 | return ctrl.Result{}, nil 48 | } 49 | -------------------------------------------------------------------------------- /src/mapper/pkg/metrics_collection_traffic/metrics_collection_traffic_cache.go: -------------------------------------------------------------------------------- 1 | package metrics_collection_traffic 2 | 3 | import ( 4 | "fmt" 5 | "github.com/hashicorp/golang-lru/v2/expirable" 6 | "github.com/otterize/intents-operator/src/shared/errors" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 8 | "github.com/otterize/network-mapper/src/mapper/pkg/config" 9 | "github.com/samber/lo" 10 | "github.com/sirupsen/logrus" 11 | "github.com/spf13/viper" 12 | "golang.org/x/exp/slices" 13 | "hash/crc32" 14 | ) 15 | 16 | type CacheValue []byte 17 | 18 | type MetricsCollectionTrafficCache struct { 19 | cache *expirable.LRU[string, CacheValue] 20 | } 21 | 22 | func NewMetricsCollectionTrafficCache() *MetricsCollectionTrafficCache { 23 | size := viper.GetInt(config.MetricsCollectionTrafficCacheSizeKey) 24 | // We don't want the cache to expire. It does not contain a lot of data, and we want to keep it as long as possible 25 | // so we won't send unnecessary requests to the cloud. 
26 | cache := expirable.NewLRU[string, CacheValue](size, OnEvict, 0) 27 | 28 | return &MetricsCollectionTrafficCache{ 29 | cache: cache, 30 | } 31 | } 32 | 33 | func (c *MetricsCollectionTrafficCache) Get(namespace string, reason cloudclient.EligibleForMetricsCollectionReason) (CacheValue, bool) { 34 | return c.cache.Get(c.key(namespace, reason)) 35 | } 36 | 37 | func (c *MetricsCollectionTrafficCache) Set(namespace string, reason cloudclient.EligibleForMetricsCollectionReason, value CacheValue) bool { 38 | return c.cache.Add(c.key(namespace, reason), value) 39 | } 40 | 41 | func (c *MetricsCollectionTrafficCache) key(namespace string, reason cloudclient.EligibleForMetricsCollectionReason) string { 42 | return fmt.Sprintf("%s#%s", namespace, reason) 43 | } 44 | 45 | func (c *MetricsCollectionTrafficCache) GenerateValue(pods []cloudclient.K8sResourceEligibleForMetricsCollectionInput) (CacheValue, error) { 46 | values := lo.Map(pods, func(resource cloudclient.K8sResourceEligibleForMetricsCollectionInput, _ int) string { 47 | return fmt.Sprintf("%s#%s", resource.Name, resource.Kind) 48 | }) 49 | 50 | slices.Sort(values) 51 | 52 | hash := crc32.NewIEEE() 53 | for _, value := range values { 54 | _, err := hash.Write([]byte(value)) 55 | if err != nil { 56 | return nil, errors.Wrap(err) 57 | } 58 | } 59 | hashSum := hash.Sum(nil) 60 | 61 | return hashSum, nil 62 | } 63 | 64 | func OnEvict(key string, _ CacheValue) { 65 | logrus.WithField("namespace", key).Debug("key evicted from cache, you may change configuration to increase cache size") 66 | } 67 | -------------------------------------------------------------------------------- /src/mapper/pkg/metrics_collection_traffic/pod_reconciler.go: -------------------------------------------------------------------------------- 1 | package metrics_collection_traffic 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/intents-operator/src/shared/injectablerecorder" 7 
| "github.com/samber/lo" 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/client-go/tools/record" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/controller" 13 | ) 14 | 15 | type PodReconciler struct { 16 | client.Client 17 | injectablerecorder.InjectableRecorder 18 | metricsCollectionTrafficHandler *MetricsCollectionTrafficHandler 19 | } 20 | 21 | func NewPodReconciler(metricsCollectionTrafficHandler *MetricsCollectionTrafficHandler) *PodReconciler { 22 | return &PodReconciler{ 23 | metricsCollectionTrafficHandler: metricsCollectionTrafficHandler, 24 | } 25 | } 26 | 27 | func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { 28 | recorder := mgr.GetEventRecorderFor("intents-operator") 29 | r.InjectRecorder(recorder) 30 | 31 | return ctrl.NewControllerManagedBy(mgr). 32 | For(&corev1.Pod{}). 33 | WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}). 34 | Complete(r) 35 | } 36 | 37 | func (r *PodReconciler) InjectRecorder(recorder record.EventRecorder) { 38 | r.Recorder = recorder 39 | } 40 | 41 | func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 42 | err := r.metricsCollectionTrafficHandler.HandleAllPodsInNamespace(ctx, req) 43 | if err != nil { 44 | return ctrl.Result{}, errors.Wrap(err) 45 | } 46 | 47 | return ctrl.Result{}, nil 48 | } 49 | -------------------------------------------------------------------------------- /src/mapper/pkg/metrics_collection_traffic/service_reconciler.go: -------------------------------------------------------------------------------- 1 | package metrics_collection_traffic 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | "github.com/otterize/intents-operator/src/shared/injectablerecorder" 7 | "github.com/samber/lo" 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/client-go/tools/record" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | 
"sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/controller" 13 | ) 14 | 15 | type ServiceReconciler struct { 16 | client.Client 17 | injectablerecorder.InjectableRecorder 18 | metricsCollectionTrafficHandler *MetricsCollectionTrafficHandler 19 | } 20 | 21 | func NewServiceReconciler(metricsCollectionTrafficHandler *MetricsCollectionTrafficHandler) *ServiceReconciler { 22 | return &ServiceReconciler{ 23 | metricsCollectionTrafficHandler: metricsCollectionTrafficHandler, 24 | } 25 | } 26 | 27 | func (r *ServiceReconciler) SetupWithManager(mgr ctrl.Manager) error { 28 | recorder := mgr.GetEventRecorderFor("intents-operator") 29 | r.InjectRecorder(recorder) 30 | 31 | return ctrl.NewControllerManagedBy(mgr). 32 | For(&corev1.Service{}). 33 | WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}). 34 | Complete(r) 35 | } 36 | 37 | func (r *ServiceReconciler) InjectRecorder(recorder record.EventRecorder) { 38 | r.Recorder = recorder 39 | } 40 | 41 | func (r *ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 42 | err := r.metricsCollectionTrafficHandler.HandleAllServicesInNamespace(ctx, req) 43 | if err != nil { 44 | return ctrl.Result{}, errors.Wrap(err) 45 | } 46 | 47 | return ctrl.Result{}, nil 48 | } 49 | -------------------------------------------------------------------------------- /src/mapper/pkg/mocks/mock_kubefinder.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ./pkg/resourcevisibility/svc_reconciler.go 3 | 4 | // Package mocks is a generated GoMock package. 5 | package mocks 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | time "time" 11 | 12 | model "github.com/otterize/network-mapper/src/mapper/pkg/graph/model" 13 | gomock "go.uber.org/mock/gomock" 14 | v1 "k8s.io/api/core/v1" 15 | ) 16 | 17 | // MockKubeFinder is a mock of KubeFinder interface. 
18 | type MockKubeFinder struct { 19 | ctrl *gomock.Controller 20 | recorder *MockKubeFinderMockRecorder 21 | } 22 | 23 | // MockKubeFinderMockRecorder is the mock recorder for MockKubeFinder. 24 | type MockKubeFinderMockRecorder struct { 25 | mock *MockKubeFinder 26 | } 27 | 28 | // NewMockKubeFinder creates a new mock instance. 29 | func NewMockKubeFinder(ctrl *gomock.Controller) *MockKubeFinder { 30 | mock := &MockKubeFinder{ctrl: ctrl} 31 | mock.recorder = &MockKubeFinderMockRecorder{mock} 32 | return mock 33 | } 34 | 35 | // EXPECT returns an object that allows the caller to indicate expected use. 36 | func (m *MockKubeFinder) EXPECT() *MockKubeFinderMockRecorder { 37 | return m.recorder 38 | } 39 | 40 | // ResolveOtterizeIdentityForService mocks base method. 41 | func (m *MockKubeFinder) ResolveOtterizeIdentityForService(ctx context.Context, service *v1.Service, now time.Time) (model.OtterizeServiceIdentity, bool, error) { 42 | m.ctrl.T.Helper() 43 | ret := m.ctrl.Call(m, "ResolveOtterizeIdentityForService", ctx, service, now) 44 | ret0, _ := ret[0].(model.OtterizeServiceIdentity) 45 | ret1, _ := ret[1].(bool) 46 | ret2, _ := ret[2].(error) 47 | return ret0, ret1, ret2 48 | } 49 | 50 | // ResolveOtterizeIdentityForService indicates an expected call of ResolveOtterizeIdentityForService. 
package networkpolicyreport

import (
	"context"
	ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
	"github.com/otterize/intents-operator/src/shared/errors"
	"github.com/otterize/network-mapper/src/mapper/pkg/cloudclient"
	"github.com/samber/lo"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/yaml"
)

// CiliumClusterWideNetworkPolicyReconciler watches CiliumClusterwideNetworkPolicy
// resources and, on every change, reports the full current set of cluster-wide
// policies (rendered as YAML) to Otterize Cloud.
type CiliumClusterWideNetworkPolicyReconciler struct {
	client.Client
	otterizeCloud cloudclient.CloudClient
}

// NewCiliumClusterWideNetworkPolicyReconciler constructs a reconciler backed by
// the given Kubernetes client and Otterize Cloud client.
func NewCiliumClusterWideNetworkPolicyReconciler(client client.Client, otterizeCloudClient cloudclient.CloudClient) *CiliumClusterWideNetworkPolicyReconciler {
	return &CiliumClusterWideNetworkPolicyReconciler{
		Client:        client,
		otterizeCloud: otterizeCloudClient,
	}
}

// SetupWithManager registers the reconciler with the controller manager,
// watching CiliumClusterwideNetworkPolicy objects. RecoverPanic prevents a
// panicking reconcile from crashing the whole manager process.
func (r *CiliumClusterWideNetworkPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&ciliumv2.CiliumClusterwideNetworkPolicy{}).
		WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}).
		Complete(r)
}

// Reconcile ignores the specific request object and re-lists ALL cluster-wide
// Cilium policies, reporting the complete set to the cloud. Because the whole
// set is re-reported each time, deletions are reflected implicitly on the next
// event for any remaining policy.
func (r *CiliumClusterWideNetworkPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var ciliumClusterWideNetpolList ciliumv2.CiliumClusterwideNetworkPolicyList
	err := r.List(ctx, &ciliumClusterWideNetpolList)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err)
	}

	ciliumClusterWideNetpolsToReport, err := r.convertToNetworkPolicyInputs(ciliumClusterWideNetpolList.Items)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err)
	}

	err = r.otterizeCloud.ReportCiliumClusterWideNetworkPolicies(ctx, ciliumClusterWideNetpolsToReport)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err)
	}
	return ctrl.Result{}, nil
}

// convertToNetworkPolicyInputs converts every listed policy to the cloud API
// input type, failing fast on the first conversion error.
func (r *CiliumClusterWideNetworkPolicyReconciler) convertToNetworkPolicyInputs(ciliumClusterWideNetpols []ciliumv2.CiliumClusterwideNetworkPolicy) ([]cloudclient.NetworkPolicyInput, error) {
	ciliumClusterWideNetpolsToReport := make([]cloudclient.NetworkPolicyInput, 0)
	for _, ciliumClusterWideNetpol := range ciliumClusterWideNetpols {
		ciliumClusterWideNetpolToReport, err := r.convertToNetworkPolicyInput(ciliumClusterWideNetpol)
		if err != nil {
			return nil, errors.Wrap(err)
		}
		ciliumClusterWideNetpolsToReport = append(ciliumClusterWideNetpolsToReport, ciliumClusterWideNetpolToReport)
	}

	return ciliumClusterWideNetpolsToReport, nil
}
| } 77 | return cloudclient.NetworkPolicyInput{ 78 | Name: ciliumClusterWideNetpol.Name, 79 | Yaml: string(yamlString), 80 | }, nil 81 | } 82 | 83 | func IsCiliumClusterWideInstalledInstalled(ctx context.Context, client client.Client) (bool, error) { 84 | clusterWideCRDName := "ciliumclusterwidenetworkpolicies.cilium.io" 85 | crd := apiextensionsv1.CustomResourceDefinition{} 86 | err := client.Get(ctx, types.NamespacedName{Name: clusterWideCRDName}, &crd) 87 | if err != nil && !k8serrors.IsNotFound(err) { 88 | return false, errors.Wrap(err) 89 | } 90 | 91 | if k8serrors.IsNotFound(err) { 92 | return false, nil 93 | } 94 | 95 | return true, nil 96 | } 97 | -------------------------------------------------------------------------------- /src/mapper/pkg/networkpolicyreport/cilium_clusterwide_policies_reconciler_test.go: -------------------------------------------------------------------------------- 1 | package networkpolicyreport 2 | 3 | import ( 4 | "context" 5 | v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" 6 | "github.com/cilium/cilium/pkg/policy/api" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 8 | cloudclientmocks "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient/mocks" 9 | "github.com/otterize/network-mapper/src/mapper/pkg/mocks" 10 | "github.com/stretchr/testify/suite" 11 | "go.uber.org/mock/gomock" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/types" 14 | ctrl "sigs.k8s.io/controller-runtime" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | "sigs.k8s.io/yaml" 17 | "strings" 18 | "testing" 19 | ) 20 | 21 | type CiliumClusterwidePolicyReconcilerTestSuite struct { 22 | suite.Suite 23 | cloudClient *cloudclientmocks.MockCloudClient 24 | k8sClient *mocks.K8sClient 25 | reconciler *CiliumClusterWideNetworkPolicyReconciler 26 | } 27 | 28 | func (s *CiliumClusterwidePolicyReconcilerTestSuite) SetupTest() { 29 | controller := gomock.NewController(s.T()) 30 | s.cloudClient = 
// TestCiliumClusterWidePolicyUpload verifies that a listed cluster-wide policy
// is reported to the cloud with oversized annotations stripped from its YAML.
func (s *CiliumClusterwidePolicyReconcilerTestSuite) TestCiliumClusterWidePolicyUpload() {
	resourceName := "test-cilium-policy"
	policy := v2.CiliumClusterwideNetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: resourceName,
			// "keyLarge" (1000 chars) is expected to be filtered out before
			// reporting; "keySmall" must survive.
			Annotations: map[string]string{
				"keyLarge": strings.Repeat("a", 1000),
				"keySmall": "value",
			},
		},
		Spec: &api.Rule{
			Ingress: make([]api.IngressRule, 0),
		},
	}
	// The reconciler lists ALL cluster-wide policies; stub List to return ours.
	s.k8sClient.EXPECT().List(gomock.Any(), gomock.Eq(&v2.CiliumClusterwideNetworkPolicyList{})).DoAndReturn(
		func(ctx context.Context, list *v2.CiliumClusterwideNetworkPolicyList, opts ...client.ListOption) error {
			list.Items = append(list.Items, policy)
			return nil
		})

	expectedPolicy := policy.DeepCopy()
	// filter large annotation
	delete(expectedPolicy.Annotations, "keyLarge")
	expectedContent, err := yaml.Marshal(expectedPolicy)
	s.Require().NoError(err)
	cloudInput := cloudclient.NetworkPolicyInput{
		Name: resourceName,
		Yaml: string(expectedContent),
	}

	s.cloudClient.EXPECT().ReportCiliumClusterWideNetworkPolicies(gomock.Any(), gomock.Eq([]cloudclient.NetworkPolicyInput{cloudInput})).Return(nil)

	res, err := s.reconciler.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{Name: resourceName}})
	s.NoError(err)
	// A zero Result means no requeue was requested.
	s.True(res.IsZero())
}

func TestCiliumClusterwidePolicyReconcilerTestSuite(t *testing.T) {
	suite.Run(t, new(CiliumClusterwidePolicyReconcilerTestSuite))
}
// filterLargeAnnotations removes, in place, every annotation whose value is
// longer than maxAnnotationValueLength characters, keeping the reported policy
// YAML small. Annotations with values of 100 characters or fewer are kept.
// Safe to call with a nil map (ranging over nil is a no-op).
//
// The original inline comment ("Skip annotations with values longer than 100
// characters") described the opposite of the code's keep-branch; the logic
// below is unchanged, only stated correctly.
func filterLargeAnnotations(annotations map[string]string) {
	const maxAnnotationValueLength = 100
	for key, value := range annotations {
		if len(value) > maxAnnotationValueLength {
			// Deleting the key currently being iterated is safe per the Go spec.
			delete(annotations, key)
		}
	}
}
// Reconcile re-lists ALL NetworkPolicies in the request's namespace and reports
// the complete set to Otterize Cloud. Reporting the whole namespace each time
// means deletions are reflected implicitly on the next event in that namespace.
func (r *NetworkPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	namespace := req.Namespace
	var netpolList networkingv1.NetworkPolicyList
	err := r.List(ctx, &netpolList, client.InNamespace(namespace))
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err)
	}

	netpolsToReport, err := r.convertToNetworkPolicyInputs(netpolList.Items)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err)
	}

	err = r.otterizeCloud.ReportNetworkPolicies(ctx, namespace, netpolsToReport)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err)
	}
	return ctrl.Result{}, nil
}

// convertToNetworkPolicyInputs converts each listed policy to the cloud API
// input type, failing fast on the first conversion error.
func (r *NetworkPolicyReconciler) convertToNetworkPolicyInputs(netpols []networkingv1.NetworkPolicy) ([]cloudclient.NetworkPolicyInput, error) {
	netpolsToReport := make([]cloudclient.NetworkPolicyInput, 0)
	for _, netpol := range netpols {
		netpolToReport, err := r.convertToNetworkPolicyInput(netpol)
		if err != nil {
			return nil, errors.Wrap(err)
		}
		netpolsToReport = append(netpolsToReport, netpolToReport)
	}
	return netpolsToReport, nil
}

// convertToNetworkPolicyInput strips noisy metadata (finalizers, managed
// fields, owner refs, oversized annotations) and renders the policy as YAML.
// NOTE(review): unlike the Cilium counterpart, Status is not cleared before
// marshaling here — confirm whether status should be part of the reported YAML.
func (r *NetworkPolicyReconciler) convertToNetworkPolicyInput(netpol networkingv1.NetworkPolicy) (cloudclient.NetworkPolicyInput, error) {
	netpol.ObjectMeta = filterObjectMetadata(netpol.ObjectMeta)
	yamlBytes, err := yaml.Marshal(netpol)
	if err != nil {
		return cloudclient.NetworkPolicyInput{}, errors.Wrap(err)
	}
	return cloudclient.NetworkPolicyInput{
		Name: netpol.Name,
		Yaml: string(yamlBytes),
	}, nil
}
"github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 6 | cloudclientmocks "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient/mocks" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/mocks" 8 | "github.com/stretchr/testify/suite" 9 | "go.uber.org/mock/gomock" 10 | networkingv1 "k8s.io/api/networking/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/types" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/yaml" 16 | "strings" 17 | "testing" 18 | ) 19 | 20 | type NetworkPolicyReconcilerTestSuite struct { 21 | suite.Suite 22 | cloudClient *cloudclientmocks.MockCloudClient 23 | k8sClient *mocks.K8sClient 24 | reconciler *NetworkPolicyReconciler 25 | } 26 | 27 | func (s *NetworkPolicyReconcilerTestSuite) SetupTest() { 28 | controller := gomock.NewController(s.T()) 29 | s.cloudClient = cloudclientmocks.NewMockCloudClient(controller) 30 | s.k8sClient = mocks.NewK8sClient(controller) 31 | s.reconciler = NewNetworkPolicyReconciler(s.k8sClient, s.cloudClient) 32 | } 33 | 34 | func (s *NetworkPolicyReconcilerTestSuite) TestNetworkPolicyUpload() { 35 | resourceName := "test-networkpolicy" 36 | testNamespace := "test-namespace" 37 | networkPolicy := networkingv1.NetworkPolicy{ 38 | ObjectMeta: metav1.ObjectMeta{ 39 | Name: resourceName, 40 | Namespace: testNamespace, 41 | Annotations: map[string]string{ 42 | "keyLarge": strings.Repeat("a", 1000), 43 | "keySmall": "value", 44 | }, 45 | }, 46 | Spec: networkingv1.NetworkPolicySpec{ 47 | PodSelector: metav1.LabelSelector{ 48 | MatchLabels: map[string]string{ 49 | "app": "test-app", 50 | }, 51 | }, 52 | PolicyTypes: []networkingv1.PolicyType{ 53 | networkingv1.PolicyTypeIngress, 54 | }, 55 | Ingress: []networkingv1.NetworkPolicyIngressRule{ 56 | { 57 | From: []networkingv1.NetworkPolicyPeer{ 58 | { 59 | PodSelector: &metav1.LabelSelector{ 60 | MatchLabels: map[string]string{ 61 | "app": "test-app", 62 | }, 63 | 
}, 64 | }, 65 | }, 66 | }, 67 | }, 68 | }, 69 | } 70 | s.k8sClient.EXPECT().List(gomock.Any(), gomock.Eq(&networkingv1.NetworkPolicyList{}), gomock.Eq(client.InNamespace(testNamespace))).DoAndReturn( 71 | func(ctx context.Context, list *networkingv1.NetworkPolicyList, opts ...client.ListOption) error { 72 | list.Items = append(list.Items, networkPolicy) 73 | return nil 74 | }) 75 | 76 | expectedPolicy := networkPolicy.DeepCopy() 77 | // filter large annotation 78 | delete(expectedPolicy.Annotations, "keyLarge") 79 | 80 | expectedContent, err := yaml.Marshal(expectedPolicy) 81 | s.Require().NoError(err) 82 | cloudInput := cloudclient.NetworkPolicyInput{ 83 | Name: resourceName, 84 | Yaml: string(expectedContent), 85 | } 86 | 87 | s.cloudClient.EXPECT().ReportNetworkPolicies(gomock.Any(), testNamespace, gomock.Eq([]cloudclient.NetworkPolicyInput{cloudInput})).Return(nil) 88 | 89 | res, err := s.reconciler.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{Name: resourceName, Namespace: testNamespace}}) 90 | s.NoError(err) 91 | s.True(res.IsZero()) 92 | } 93 | 94 | func (s *NetworkPolicyReconcilerTestSuite) TestNetworkPolicyUpload_EmptyNamespace() { 95 | emptyList := networkingv1.NetworkPolicyList{} 96 | s.k8sClient.EXPECT().List(gomock.Any(), gomock.Eq(&emptyList), gomock.Eq(client.InNamespace("test-namespace"))).Return(nil) 97 | s.cloudClient.EXPECT().ReportNetworkPolicies(gomock.Any(), "test-namespace", gomock.Eq(make([]cloudclient.NetworkPolicyInput, 0))).Return(nil) 98 | 99 | res, err := s.reconciler.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{Name: "test-networkpolicy", Namespace: "test-namespace"}}) 100 | s.NoError(err) 101 | s.True(res.IsZero()) 102 | } 103 | 104 | func TestNetworkPolicyReconcilerTestSuite(t *testing.T) { 105 | suite.Run(t, new(NetworkPolicyReconcilerTestSuite)) 106 | } 107 | -------------------------------------------------------------------------------- 
// Package prometheus holds the mapper's Prometheus counters: one
// "reported" / "dropped" counter pair per traffic source.
package prometheus

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// promauto registers each counter with the default registry at package init;
// a duplicate registration (e.g. importing this package twice under different
// paths) would panic there.
var (
	socketScanReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "socketscan_reported_connections",
		Help: "The total number of socket scan-sourced reported connections",
	})
	dnsCaptureReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "dns_reported_connections",
		Help: "The total number of DNS-sourced reported connections",
	})
	tcpCaptureReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "tcp_reported_connections",
		Help: "The total number of TCP-sourced reported connections",
	})
	kafkaReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "kafka_reported_topics",
		Help: "The total number of Kafka-sourced topics",
	})
	istioReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "istio_reported_connections",
		Help: "The total number of Istio-sourced connections",
	})

	socketScanDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "socketscan_dropped_connections",
		Help: "The total number of socket scan-sourced reported connections that were dropped for performance",
	})
	dnsCaptureDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "dns_dropped_connections",
		Help: "The total number of DNS-sourced reported connections that were dropped for performance",
	})
	tcpCaptureDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "tcp_dropped_connections",
		Help: "The total number of TCP-sourced reported connections that were dropped for performance",
	})
	kafkaReportsDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "kafka_dropped_topics",
		Help: "The total number of Kafka-sourced reported topics that were dropped for performance",
	})
	istioReportsDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "istio_dropped_connections",
		Help: "The total number of Istio-sourced reported connections that were dropped for performance",
	})

	awsReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "aws_reports",
		Help: "The total number of AWS operations reported",
	})
	awsReportsDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "aws_dropped_reports",
		Help: "The total number of AWS operations reported that were dropped for performance",
	})

	gcpReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "gcp_reports",
		Help: "The total number of GCP operations reported",
	})
	gcpReportsDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "gcp_dropped_reports",
		Help: "The total number of GCP operations reported that were dropped for performance",
	})

	azureReports = promauto.NewCounter(prometheus.CounterOpts{
		Name: "azure_reports",
		Help: "The total number of Azure operations reported",
	})
	azureReportsDrops = promauto.NewCounter(prometheus.CounterOpts{
		Name: "azure_dropped_reports",
		Help: "The total number of Azure operations reported that were dropped for performance",
	})
)

// IncrementTCPCaptureReports adds count to the TCP reported-connections counter.
func IncrementTCPCaptureReports(count int) {
	tcpCaptureReports.Add(float64(count))
}

// IncrementTCPCaptureDrops adds count to the TCP dropped-connections counter.
func IncrementTCPCaptureDrops(count int) {
	tcpCaptureDrops.Add(float64(count))
}

// IncrementSocketScanReports adds count to the socket-scan reported-connections counter.
func IncrementSocketScanReports(count int) {
	socketScanReports.Add(float64(count))
}

// IncrementDNSCaptureReports adds count to the DNS reported-connections counter.
func IncrementDNSCaptureReports(count int) {
	dnsCaptureReports.Add(float64(count))
}

// IncrementKafkaReports adds count to the Kafka reported-topics counter.
func IncrementKafkaReports(count int) {
	kafkaReports.Add(float64(count))
}

// IncrementIstioReports adds count to the Istio reported-connections counter.
func IncrementIstioReports(count int) {
	istioReports.Add(float64(count))
}

// IncrementAWSOperationReports adds count to the AWS reported-operations counter.
func IncrementAWSOperationReports(count int) {
	awsReports.Add(float64(count))
}

// IncrementGCPOperationReports adds count to the GCP reported-operations counter.
func IncrementGCPOperationReports(count int) {
	gcpReports.Add(float64(count))
}

// IncrementSocketScanDrops adds count to the socket-scan dropped-connections counter.
func IncrementSocketScanDrops(count int) {
	socketScanDrops.Add(float64(count))
}

// IncrementDNSCaptureDrops adds count to the DNS dropped-connections counter.
func IncrementDNSCaptureDrops(count int) {
	dnsCaptureDrops.Add(float64(count))
}

// IncrementKafkaDrops adds count to the Kafka dropped-topics counter.
func IncrementKafkaDrops(count int) {
	kafkaReportsDrops.Add(float64(count))
}

// IncrementIstioDrops adds count to the Istio dropped-connections counter.
func IncrementIstioDrops(count int) {
	istioReportsDrops.Add(float64(count))
}

// IncrementAWSOperationDrops adds count to the AWS dropped-operations counter.
func IncrementAWSOperationDrops(count int) {
	awsReportsDrops.Add(float64(count))
}

// IncrementGCPOperationDrops adds count to the GCP dropped-operations counter.
func IncrementGCPOperationDrops(count int) {
	gcpReportsDrops.Add(float64(count))
}

// IncrementAzureOperationReports adds count to the Azure reported-operations counter.
func IncrementAzureOperationReports(count int) {
	azureReports.Add(float64(count))
}

// IncrementAzureOperationDrops adds count to the Azure dropped-operations counter.
func IncrementAzureOperationDrops(count int) {
	azureReportsDrops.Add(float64(count))
}
$excludeServiceWithLabels: [String!], 25 | $includeAllLabels: Boolean, 26 | # @genqlient(pointer: true) 27 | $server: ServerFilter, 28 | ) { 29 | intents( 30 | namespaces: $namespaces, 31 | includeLabels: $includeLabels, 32 | excludeServiceWithLabels: $excludeServiceWithLabels, 33 | includeAllLabels: $includeAllLabels, 34 | server: $server, 35 | ) { 36 | client { 37 | name 38 | namespace 39 | podOwnerKind { 40 | group 41 | kind 42 | version 43 | } 44 | } 45 | server { 46 | name 47 | namespace 48 | podOwnerKind { 49 | group 50 | kind 51 | version 52 | } 53 | kubernetesService 54 | } 55 | } 56 | } 57 | 58 | mutation ReportCaptureResults($results: CaptureResults!) { 59 | reportCaptureResults(results: $results) 60 | } 61 | 62 | mutation ReportSocketScanResults($results: SocketScanResults!) { 63 | reportSocketScanResults(results: $results) 64 | } 65 | 66 | mutation ReportTCPCaptureResults($results: CaptureTCPResults!) { 67 | reportTCPCaptureResults(results: $results) 68 | } 69 | -------------------------------------------------------------------------------- /src/mapper/pkg/resolvers/test_gql_client/genqlient.yaml: -------------------------------------------------------------------------------- 1 | # genqlient config; for full documentation see: 2 | # https://github.com/Khan/genqlient/blob/main/docs/genqlient.yaml 3 | schema: 4 | - ../../../../mappergraphql/schema.graphql 5 | operations: 6 | - genqlient.graphql 7 | generated: generated.go 8 | 9 | bindings: 10 | Time: 11 | type: time.Time 12 | optional: generic 13 | optional_generic_type: github.com/otterize/nilable.Nilable -------------------------------------------------------------------------------- /src/mapper/pkg/resourcevisibility/ingress_reconciler.go: -------------------------------------------------------------------------------- 1 | package resourcevisibility 2 | 3 | import ( 4 | "context" 5 | "github.com/otterize/intents-operator/src/shared/errors" 6 | 
"github.com/otterize/intents-operator/src/shared/injectablerecorder" 7 | "github.com/otterize/network-mapper/src/mapper/pkg/cloudclient" 8 | "github.com/samber/lo" 9 | networkingv1 "k8s.io/api/networking/v1" 10 | "k8s.io/client-go/tools/record" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/controller" 14 | ) 15 | 16 | type IngressReconciler struct { 17 | client.Client 18 | injectablerecorder.InjectableRecorder 19 | otterizeCloud cloudclient.CloudClient 20 | } 21 | 22 | func NewIngressReconciler(client client.Client, otterizeCloudClient cloudclient.CloudClient) *IngressReconciler { 23 | return &IngressReconciler{ 24 | Client: client, 25 | otterizeCloud: otterizeCloudClient, 26 | } 27 | } 28 | 29 | func (r *IngressReconciler) SetupWithManager(mgr ctrl.Manager) error { 30 | recorder := mgr.GetEventRecorderFor("intents-operator") 31 | r.InjectRecorder(recorder) 32 | 33 | return ctrl.NewControllerManagedBy(mgr). 34 | For(&networkingv1.Ingress{}). 35 | WithOptions(controller.Options{RecoverPanic: lo.ToPtr(true)}). 
36 | Complete(r) 37 | } 38 | 39 | func (r *IngressReconciler) InjectRecorder(recorder record.EventRecorder) { 40 | r.Recorder = recorder 41 | } 42 | 43 | func (r *IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 44 | namespace := req.Namespace 45 | var IngressList networkingv1.IngressList 46 | err := r.List(ctx, &IngressList, client.InNamespace(namespace)) 47 | if err != nil { 48 | return ctrl.Result{}, errors.Wrap(err) 49 | } 50 | 51 | ingressesToReport, err := r.convertToCloudIngresses(IngressList.Items) 52 | if err != nil { 53 | return ctrl.Result{}, errors.Wrap(err) 54 | } 55 | 56 | err = r.otterizeCloud.ReportK8sIngresses(ctx, namespace, ingressesToReport) 57 | if err != nil { 58 | return ctrl.Result{}, errors.Wrap(err) 59 | } 60 | return ctrl.Result{}, nil 61 | } 62 | 63 | func (r *IngressReconciler) convertToCloudIngresses(ingresses []networkingv1.Ingress) ([]cloudclient.K8sIngressInput, error) { 64 | ingressesToReport := make([]cloudclient.K8sIngressInput, 0) 65 | for _, ingress := range ingresses { 66 | ingressInput, ok, err := convertIngressResource(ingress) 67 | if err != nil { 68 | return nil, errors.Wrap(err) 69 | } 70 | if !ok { 71 | continue 72 | } 73 | 74 | ingressesToReport = append(ingressesToReport, cloudclient.K8sIngressInput{ 75 | Namespace: ingress.Namespace, 76 | Name: ingress.Name, 77 | Ingress: ingressInput, 78 | }) 79 | } 80 | return ingressesToReport, nil 81 | } 82 | -------------------------------------------------------------------------------- /src/mapperclient/client.go: -------------------------------------------------------------------------------- 1 | package mapperclient 2 | 3 | import ( 4 | "context" 5 | "github.com/Khan/genqlient/graphql" 6 | "github.com/otterize/intents-operator/src/shared/errors" 7 | "github.com/sirupsen/logrus" 8 | "net/http" 9 | "strings" 10 | ) 11 | 12 | type Client struct { 13 | client graphql.Client 14 | } 15 | 16 | func New(address string) *Client { 17 | // some 
usages of this lib pass /query, some don't 18 | if !strings.HasSuffix(address, "/query") { 19 | address = address + "/query" 20 | } 21 | 22 | logrus.Infof("Connecting to network-mapper at %s", address) 23 | 24 | return &Client{ 25 | client: graphql.NewClient(address, http.DefaultClient), 26 | } 27 | } 28 | 29 | func (c *Client) ReportAWSOperation(ctx context.Context, operation []AWSOperation) error { 30 | _, err := reportAWSOperation(ctx, c.client, operation) 31 | return errors.Wrap(err) 32 | } 33 | 34 | func (c *Client) ReportGCPOperation(ctx context.Context, operation []GCPOperation) error { 35 | _, err := reportGCPOperation(ctx, c.client, operation) 36 | return errors.Wrap(err) 37 | } 38 | 39 | func (c *Client) ReportAzureOperation(ctx context.Context, operation []AzureOperation) error { 40 | _, err := reportAzureOperation(ctx, c.client, operation) 41 | return errors.Wrap(err) 42 | } 43 | 44 | func (c *Client) ReportKafkaMapperResults(ctx context.Context, results KafkaMapperResults) error { 45 | _, err := reportKafkaMapperResults(ctx, c.client, results) 46 | return errors.Wrap(err) 47 | } 48 | 49 | func (c *Client) ReportCaptureResults(ctx context.Context, results CaptureResults) error { 50 | _, err := reportCaptureResults(ctx, c.client, results) 51 | return errors.Wrap(err) 52 | } 53 | 54 | func (c *Client) ReportTCPCaptureResults(ctx context.Context, results CaptureTCPResults) error { 55 | _, err := reportTCPCaptureResults(ctx, c.client, results) 56 | return err 57 | } 58 | 59 | func (c *Client) ReportSocketScanResults(ctx context.Context, results SocketScanResults) error { 60 | _, err := reportSocketScanResults(ctx, c.client, results) 61 | return errors.Wrap(err) 62 | } 63 | 64 | func (c *Client) ReportTrafficLevels(ctx context.Context, results TrafficLevelResults) error { 65 | _, err := reportTrafficLevelResults(ctx, c.client, results) 66 | return errors.Wrap(err) 67 | } 68 | 69 | func (c *Client) Health(ctx context.Context) error { 70 | _, err := 
Health(ctx, c.client) 71 | return errors.Wrap(err) 72 | } 73 | -------------------------------------------------------------------------------- /src/mapperclient/generate.go: -------------------------------------------------------------------------------- 1 | package mapperclient 2 | 3 | //go:generate go run go.uber.org/mock/mockgen -source=client.go -destination=mockclient/mocks.go 4 | //go:generate go run github.com/Khan/genqlient ./genqlient.yaml 5 | -------------------------------------------------------------------------------- /src/mapperclient/genqlient.yaml: -------------------------------------------------------------------------------- 1 | schema: 2 | - ../mappergraphql/schema.graphql 3 | operations: 4 | - operations.graphql 5 | generated: generated.go 6 | bindings: 7 | Time: 8 | type: time.Time 9 | optional: generic 10 | optional_generic_type: github.com/otterize/nilable.Nilable -------------------------------------------------------------------------------- /src/mapperclient/mockclient/mocks.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: client.go 3 | 4 | // Package mock_mapperclient is a generated GoMock package. 5 | package mock_mapperclient 6 | -------------------------------------------------------------------------------- /src/mapperclient/operations.graphql: -------------------------------------------------------------------------------- 1 | mutation reportCaptureResults($results: CaptureResults!) { 2 | reportCaptureResults(results: $results) 3 | } 4 | 5 | mutation reportTCPCaptureResults($results: CaptureTCPResults!) { 6 | reportTCPCaptureResults(results: $results) 7 | } 8 | 9 | mutation reportSocketScanResults($results: SocketScanResults!) { 10 | reportSocketScanResults(results: $results) 11 | } 12 | 13 | mutation reportKafkaMapperResults($results: KafkaMapperResults!) 
{ 14 | reportKafkaMapperResults(results: $results) 15 | } 16 | 17 | mutation reportAWSOperation($operation: [AWSOperation!]!) { 18 | reportAWSOperation(operation: $operation) 19 | } 20 | 21 | mutation reportGCPOperation($operation: [GCPOperation!]!) { 22 | reportGCPOperation(operation: $operation) 23 | } 24 | 25 | mutation reportAzureOperation($operation: [AzureOperation!]!) { 26 | reportAzureOperation(operation: $operation) 27 | } 28 | 29 | mutation reportTrafficLevelResults($results: TrafficLevelResults!) { 30 | reportTrafficLevelResults(results: $results) 31 | } 32 | 33 | 34 | query Health { 35 | health 36 | } -------------------------------------------------------------------------------- /src/shared/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "github.com/spf13/viper" 6 | "strings" 7 | ) 8 | 9 | /* 10 | Shared config keys between all reporter components - DNS Sniffer, Kafka watcher, Istio watcher 11 | */ 12 | 13 | const ( 14 | MapperApiUrlKey = "mapper-api-url" 15 | MapperApiUrlDefault = "http://mapper:9090/query" 16 | DebugKey = "debug" 17 | DebugDefault = false 18 | PrometheusMetricsPortKey = "metrics-port" 19 | PrometheusMetricsPortDefault = 2112 20 | HealthProbesPortKey = "health-probes-port" 21 | HealthProbesPortDefault = "9090" 22 | TelemetryErrorsAPIKeyKey = "telemetry-errors-api-key" 23 | TelemetryErrorsAPIKeyDefault = "d86195588a41fa03aa6711993bb1c765" 24 | EnableTCPKey = "enable-tcp" 25 | EnableTCPSnifferDefault = true 26 | EnableSocketScannerKey = "enable-socket-scanner" 27 | EnableSocketScannerDefault = true 28 | EnableDNSKey = "enable-dns" 29 | EnableDNSSnifferDefault = true 30 | 31 | EnvPodKey = "pod" 32 | EnvNamespaceKey = "namespace" 33 | 34 | envPrefix = "OTTERIZE" 35 | ) 36 | 37 | var replacer = strings.NewReplacer("-", "_") 38 | 39 | func GetEnvVarForKey(key string) string { 40 | return fmt.Sprintf("%s_%s", envPrefix, 
replacer.Replace(key)) 41 | } 42 | 43 | func init() { 44 | viper.SetDefault(MapperApiUrlKey, MapperApiUrlDefault) 45 | viper.SetDefault(DebugKey, DebugDefault) 46 | viper.SetDefault(PrometheusMetricsPortKey, PrometheusMetricsPortDefault) 47 | viper.SetDefault(HealthProbesPortKey, HealthProbesPortDefault) 48 | viper.SetDefault(TelemetryErrorsAPIKeyKey, TelemetryErrorsAPIKeyDefault) 49 | viper.SetDefault(EnableTCPKey, EnableTCPSnifferDefault) 50 | viper.SetDefault(EnableSocketScannerKey, EnableSocketScannerDefault) 51 | viper.SetDefault(EnableDNSKey, EnableDNSSnifferDefault) 52 | viper.SetEnvPrefix(envPrefix) 53 | viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) 54 | viper.AutomaticEnv() 55 | } 56 | -------------------------------------------------------------------------------- /src/shared/echologrus/echologrus.go: -------------------------------------------------------------------------------- 1 | package echologrus 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | "github.com/labstack/gommon/log" 6 | "github.com/sirupsen/logrus" 7 | "io" 8 | "strconv" 9 | "time" 10 | ) 11 | 12 | // Logrus : implement Logger 13 | type Logrus struct { 14 | *logrus.Logger 15 | } 16 | 17 | // Logger ... 
18 | var Logger *logrus.Logger 19 | 20 | // GetEchoLogger for e.Logger 21 | func GetEchoLogger() Logrus { 22 | return Logrus{Logger} 23 | } 24 | 25 | // Level returns logger level 26 | func (l Logrus) Level() log.Lvl { 27 | switch l.Logger.Level { 28 | case logrus.DebugLevel: 29 | return log.DEBUG 30 | case logrus.WarnLevel: 31 | return log.WARN 32 | case logrus.ErrorLevel: 33 | return log.ERROR 34 | case logrus.InfoLevel: 35 | return log.INFO 36 | default: 37 | l.Panic("Invalid level") 38 | } 39 | 40 | return log.OFF 41 | } 42 | 43 | // SetHeader is a stub to satisfy interface 44 | // It's controlled by Logger 45 | func (l Logrus) SetHeader(_ string) {} 46 | 47 | // SetPrefix It's controlled by Logger 48 | func (l Logrus) SetPrefix(s string) {} 49 | 50 | // Prefix It's controlled by Logger 51 | func (l Logrus) Prefix() string { 52 | return "" 53 | } 54 | 55 | // SetLevel set level to logger from given log.Lvl 56 | func (l Logrus) SetLevel(lvl log.Lvl) { 57 | switch lvl { 58 | case log.DEBUG: 59 | Logger.SetLevel(logrus.DebugLevel) 60 | case log.WARN: 61 | Logger.SetLevel(logrus.WarnLevel) 62 | case log.ERROR: 63 | Logger.SetLevel(logrus.ErrorLevel) 64 | case log.INFO: 65 | Logger.SetLevel(logrus.InfoLevel) 66 | default: 67 | l.Panic("Invalid level") 68 | } 69 | } 70 | 71 | // Output logger output func 72 | func (l Logrus) Output() io.Writer { 73 | return l.Out 74 | } 75 | 76 | // SetOutput change output, default os.Stdout 77 | func (l Logrus) SetOutput(w io.Writer) { 78 | Logger.SetOutput(w) 79 | } 80 | 81 | // Printj print json log 82 | func (l Logrus) Printj(j log.JSON) { 83 | Logger.WithFields(logrus.Fields(j)).Print() 84 | } 85 | 86 | // Debugj debug json log 87 | func (l Logrus) Debugj(j log.JSON) { 88 | Logger.WithFields(logrus.Fields(j)).Debug() 89 | } 90 | 91 | // Infoj info json log 92 | func (l Logrus) Infoj(j log.JSON) { 93 | Logger.WithFields(logrus.Fields(j)).Info() 94 | } 95 | 96 | // Warnj warning json log 97 | func (l Logrus) Warnj(j log.JSON) { 98 
| Logger.WithFields(logrus.Fields(j)).Warn() 99 | } 100 | 101 | // Errorj error json log 102 | func (l Logrus) Errorj(j log.JSON) { 103 | Logger.WithFields(logrus.Fields(j)).Error() 104 | } 105 | 106 | // Fatalj fatal json log 107 | func (l Logrus) Fatalj(j log.JSON) { 108 | Logger.WithFields(logrus.Fields(j)).Fatal() 109 | } 110 | 111 | // Panicj panic json log 112 | func (l Logrus) Panicj(j log.JSON) { 113 | Logger.WithFields(logrus.Fields(j)).Panic() 114 | } 115 | 116 | // Print string log 117 | func (l Logrus) Print(i ...interface{}) { 118 | Logger.Print(i[0].(string)) 119 | } 120 | 121 | // Debug string log 122 | func (l Logrus) Debug(i ...interface{}) { 123 | Logger.Debug(i[0].(string)) 124 | } 125 | 126 | // Info string log 127 | func (l Logrus) Info(i ...interface{}) { 128 | Logger.Info(i[0].(string)) 129 | } 130 | 131 | // Warn string log 132 | func (l Logrus) Warn(i ...interface{}) { 133 | Logger.Warn(i[0].(string)) 134 | } 135 | 136 | // Error string log 137 | func (l Logrus) Error(i ...interface{}) { 138 | Logger.Error(i[0].(string)) 139 | } 140 | 141 | // Fatal string log 142 | func (l Logrus) Fatal(i ...interface{}) { 143 | Logger.Fatal(i[0].(string)) 144 | } 145 | 146 | // Panic string log 147 | func (l Logrus) Panic(i ...interface{}) { 148 | Logger.Panic(i[0].(string)) 149 | } 150 | 151 | func logrusMiddlewareHandler(c echo.Context, next echo.HandlerFunc) error { 152 | req := c.Request() 153 | res := c.Response() 154 | start := time.Now() 155 | if err := next(c); err != nil { 156 | c.Error(err) 157 | } 158 | stop := time.Now() 159 | 160 | p := req.URL.Path 161 | 162 | bytesIn := req.Header.Get(echo.HeaderContentLength) 163 | 164 | Logger.WithFields(map[string]interface{}{ 165 | "time_rfc3339": time.Now().Format(time.RFC3339), 166 | "remote_ip": c.RealIP(), 167 | "host": req.Host, 168 | "uri": req.RequestURI, 169 | "method": req.Method, 170 | "path": p, 171 | "referer": req.Referer(), 172 | "user_agent": req.UserAgent(), 173 | "status": res.Status, 
174 | "latency": strconv.FormatInt(stop.Sub(start).Nanoseconds()/1000, 10), 175 | "latency_human": stop.Sub(start).String(), 176 | "bytes_in": bytesIn, 177 | "bytes_out": strconv.FormatInt(res.Size, 10), 178 | }).Debug("Handled request") 179 | 180 | return nil 181 | } 182 | 183 | func logger(next echo.HandlerFunc) echo.HandlerFunc { 184 | return func(c echo.Context) error { 185 | return logrusMiddlewareHandler(c, next) 186 | } 187 | } 188 | 189 | // Hook is a function to process middleware. 190 | func Hook() echo.MiddlewareFunc { 191 | return logger 192 | } 193 | -------------------------------------------------------------------------------- /src/shared/isrunningonaws/check.go: -------------------------------------------------------------------------------- 1 | package isrunningonaws 2 | 3 | import ( 4 | "context" 5 | awsconfig "github.com/aws/aws-sdk-go-v2/config" 6 | "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" 7 | "github.com/aws/smithy-go/logging" 8 | "github.com/sirupsen/logrus" 9 | "time" 10 | ) 11 | 12 | func Check() bool { 13 | ctxTimeout, cancel := context.WithTimeout(context.Background(), 5*time.Second) 14 | defer cancel() 15 | cfg, err := awsconfig.LoadDefaultConfig(ctxTimeout) 16 | if err != nil { 17 | logrus.Debug("Autodetect AWS (an error here is fine): Failed to load AWS config") 18 | return false 19 | } 20 | cfg.Logger = logging.Nop{} 21 | 22 | client := imds.NewFromConfig(cfg) 23 | 24 | result, err := client.GetInstanceIdentityDocument(ctxTimeout, &imds.GetInstanceIdentityDocumentInput{}) 25 | if err != nil { 26 | logrus.Debug("Autodetect AWS (an error here is fine): Failed to get instance identity document") 27 | return false 28 | } 29 | 30 | logrus.WithField("region", result.Region).Debug("Autodetect AWS: Running on AWS") 31 | return true 32 | } 33 | -------------------------------------------------------------------------------- /src/shared/kubeutils/kubeutils.go: 
-------------------------------------------------------------------------------- 1 | package kubeutils 2 | 3 | import ( 4 | "github.com/otterize/intents-operator/src/shared/errors" 5 | "os" 6 | "strings" 7 | ) 8 | 9 | const ( 10 | DefaultClusterDomain = "cluster.local" 11 | namespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" 12 | resolvFile = "/etc/resolv.conf" 13 | ) 14 | 15 | func GetCurrentNamespace() (string, error) { 16 | data, err := os.ReadFile(namespaceFile) 17 | if err != nil { 18 | return "", errors.Wrap(err) 19 | } 20 | return strings.TrimSpace(string(data)), nil 21 | } 22 | 23 | func GetClusterDomain() (string, error) { 24 | namespace, err := GetCurrentNamespace() 25 | if err != nil { 26 | return "", errors.Wrap(err) 27 | } 28 | data, err := os.ReadFile(resolvFile) 29 | if err != nil { 30 | return "", errors.Wrap(err) 31 | } 32 | expectedSearchDomainPrefix := namespace + ".svc." 33 | for _, line := range strings.Split(string(data), "\n") { 34 | line = strings.TrimSpace(line) 35 | words := strings.Split(line, " ") 36 | if len(words) == 0 || words[0] != "search" { 37 | continue 38 | } 39 | for _, searchDomain := range words { 40 | if strings.HasPrefix(searchDomain, expectedSearchDomainPrefix) { 41 | return searchDomain[len(expectedSearchDomainPrefix):], nil 42 | } 43 | } 44 | } 45 | return "", errors.Errorf("could not deduce cluster domain from %s", resolvFile) 46 | } 47 | -------------------------------------------------------------------------------- /src/shared/testbase/README.md: -------------------------------------------------------------------------------- 1 | # Testing instructions 2 | 3 | In order to run the tests locally, “envtest” requires some k8s binaries. 
To install them, run once: 4 | 5 | ```shell 6 | go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest 7 | source <(setup-envtest use -p env) 8 | sudo mkdir -p /usr/local/kubebuilder 9 | sudo ln -s "$KUBEBUILDER_ASSETS" /usr/local/kubebuilder/bin 10 | ``` 11 | -------------------------------------------------------------------------------- /src/shared/testbase/patch_matcher.go: -------------------------------------------------------------------------------- 1 | package testbase 2 | 3 | import ( 4 | "go.uber.org/mock/gomock" 5 | "sigs.k8s.io/controller-runtime/pkg/client" 6 | ) 7 | 8 | type ClientPatch struct { 9 | client.Patch 10 | } 11 | 12 | func (p ClientPatch) Matches(x interface{}) bool { 13 | patch := x.(client.Patch) 14 | actualData, err := patch.Data(nil) 15 | if err != nil { 16 | return false 17 | } 18 | 19 | expectedData, err := p.Data(nil) 20 | if err != nil { 21 | return false 22 | } 23 | 24 | return string(actualData) == string(expectedData) && patch.Type() == p.Type() 25 | } 26 | 27 | func (p ClientPatch) String() string { 28 | data, err := p.Data(nil) 29 | if err != nil { 30 | return "format error" 31 | } 32 | return string(data) 33 | } 34 | 35 | func MatchPatch(patch client.Patch) gomock.Matcher { 36 | return ClientPatch{patch} 37 | } 38 | -------------------------------------------------------------------------------- /src/shared/version/version: -------------------------------------------------------------------------------- 1 | 0-local -------------------------------------------------------------------------------- /src/shared/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "os" 5 | "sync" 6 | ) 7 | 8 | var ( 9 | version string 10 | once sync.Once 11 | ) 12 | 13 | // Version returns the current version. 14 | // It is implemented by reading a file instead of go:embed to avoid cache busting the Dockerfile before the build. 
15 | func Version() string { 16 | once.Do(func() { 17 | data, err := os.ReadFile("./shared/version/version") 18 | if err == nil { 19 | // only in development mode 20 | version = string(data) 21 | return 22 | } 23 | 24 | // only in production 25 | data, err = os.ReadFile("./version") 26 | if err != nil { 27 | panic(err) 28 | } 29 | version = string(data) 30 | }) 31 | 32 | return version 33 | } 34 | -------------------------------------------------------------------------------- /src/sniffer/cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/bombsimon/logrusr/v3" 7 | "github.com/labstack/echo/v4" 8 | "github.com/otterize/intents-operator/src/shared" 9 | "github.com/otterize/intents-operator/src/shared/clusterutils" 10 | "github.com/otterize/intents-operator/src/shared/errors" 11 | "github.com/otterize/intents-operator/src/shared/telemetries/componentinfo" 12 | "github.com/otterize/intents-operator/src/shared/telemetries/errorreporter" 13 | "github.com/otterize/intents-operator/src/shared/telemetries/telemetriesgql" 14 | "github.com/otterize/intents-operator/src/shared/telemetries/telemetrysender" 15 | "github.com/otterize/network-mapper/src/mapperclient" 16 | "github.com/otterize/network-mapper/src/shared/version" 17 | "golang.org/x/sync/errgroup" 18 | "net/http" 19 | ctrl "sigs.k8s.io/controller-runtime" 20 | "sigs.k8s.io/controller-runtime/pkg/manager/signals" 21 | "time" 22 | 23 | "github.com/labstack/echo-contrib/echoprometheus" 24 | sharedconfig "github.com/otterize/network-mapper/src/shared/config" 25 | "github.com/otterize/network-mapper/src/sniffer/pkg/sniffer" 26 | "github.com/sirupsen/logrus" 27 | "github.com/spf13/viper" 28 | ) 29 | 30 | func main() { 31 | logrus.SetLevel(logrus.InfoLevel) 32 | if viper.GetBool(sharedconfig.DebugKey) { 33 | logrus.SetLevel(logrus.DebugLevel) 34 | } 35 | logrus.SetFormatter(&logrus.JSONFormatter{ 36 | 
TimestampFormat: time.RFC3339, 37 | }) 38 | errgrp, errGroupCtx := errgroup.WithContext(signals.SetupSignalHandler()) 39 | clusterUID := clusterutils.GetOrCreateClusterUID(errGroupCtx) 40 | componentinfo.SetGlobalContextId(telemetrysender.Anonymize(clusterUID)) 41 | errorreporter.Init(telemetriesgql.TelemetryComponentTypeNetworkMapper, version.Version()) 42 | defer errorreporter.AutoNotify() 43 | shared.RegisterPanicHandlers() 44 | 45 | ctrl.SetLogger(logrusr.New(logrus.StandardLogger())) 46 | 47 | mapperClient := mapperclient.New(viper.GetString(sharedconfig.MapperApiUrlKey)) 48 | healthProbesPort := viper.GetInt(sharedconfig.HealthProbesPortKey) 49 | 50 | healthServer := echo.New() 51 | healthServer.HideBanner = true 52 | healthServer.GET("/healthz", func(c echo.Context) error { 53 | defer errorreporter.AutoNotify() 54 | err := mapperClient.Health(c.Request().Context()) 55 | if err != nil { 56 | return errors.Wrap(err) 57 | } 58 | return c.NoContent(http.StatusOK) 59 | }) 60 | 61 | metricsServer := echo.New() 62 | metricsServer.HideBanner = true 63 | 64 | metricsServer.GET("/metrics", echoprometheus.NewHandler()) 65 | 66 | componentinfo.SetGlobalContextId(telemetrysender.Anonymize(clusterUID)) 67 | logrus.Debug("Starting metrics server") 68 | errgrp.Go(func() error { 69 | logrus.Debug("Started metrics server") 70 | defer errorreporter.AutoNotify() 71 | err := metricsServer.Start(fmt.Sprintf(":%d", viper.GetInt(sharedconfig.PrometheusMetricsPortKey))) 72 | if err != nil { 73 | logrus.WithError(err).Error("Error when starting metrics server, however not returning an error as this may be due to the same port being used on the host node, and the sniffer runs in hostNetwork mode.") 74 | } 75 | return nil 76 | }) 77 | logrus.Debug("Starting health server") 78 | errgrp.Go(func() error { 79 | logrus.Debug("Started health server") 80 | defer errorreporter.AutoNotify() 81 | return healthServer.Start(fmt.Sprintf(":%d", healthProbesPort)) 82 | }) 83 | 84 | 
logrus.Debug("Starting sniffer") 85 | 86 | errgrp.Go(func() error { 87 | logrus.Debug("Started sniffer") 88 | defer errorreporter.AutoNotify() 89 | snifferInstance := sniffer.NewSniffer(mapperClient) 90 | return snifferInstance.RunForever(errGroupCtx) 91 | }) 92 | <-errGroupCtx.Done() 93 | timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 94 | defer cancel() 95 | err := healthServer.Shutdown(timeoutCtx) 96 | if err != nil { 97 | logrus.WithError(err).Panic("Error when shutting down") 98 | } 99 | 100 | err = metricsServer.Shutdown(timeoutCtx) 101 | if err != nil { 102 | logrus.WithError(err).Panic("Error when shutting down") 103 | } 104 | 105 | err = errgrp.Wait() 106 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 107 | logrus.WithError(err).Panic("Error when running server or HTTP server") 108 | } 109 | 110 | logrus.Info("Sniffer stopped") 111 | } 112 | -------------------------------------------------------------------------------- /src/sniffer/pkg/collectors/collector.go: -------------------------------------------------------------------------------- 1 | package collectors 2 | 3 | import ( 4 | "github.com/otterize/network-mapper/src/mapperclient" 5 | "github.com/otterize/nilable" 6 | "github.com/samber/lo" 7 | "github.com/sirupsen/logrus" 8 | "time" 9 | ) 10 | 11 | type UniqueRequest struct { 12 | srcIP string 13 | srcHostname string 14 | destHostnameOrIP string // IP or hostname 15 | destIP string 16 | destPort nilable.Nilable[int] 17 | } 18 | 19 | type TimeAndTTL struct { 20 | lastSeen time.Time 21 | ttl nilable.Nilable[int] 22 | srcPorts *SourcePortsSet 23 | } 24 | 25 | // For each unique request info, we store the time of the last request (no need to report duplicates) and last seen TTL. 
26 | type capturesMap map[UniqueRequest]TimeAndTTL 27 | 28 | type SourcePortsSet map[int]struct{} 29 | 30 | type NetworkCollector struct { 31 | capturedRequests capturesMap 32 | } 33 | 34 | func (c *NetworkCollector) resetData() { 35 | c.capturedRequests = make(capturesMap) 36 | } 37 | 38 | func (c *NetworkCollector) addCapturedRequest(srcIp string, srcHost string, destNameOrIP string, destIP string, seenAt time.Time, ttl nilable.Nilable[int], destPort *int, srcPort *int) { 39 | req := UniqueRequest{srcIp, srcHost, destNameOrIP, destIP, nilable.FromPtr(destPort)} 40 | existingRequest, requestFound := c.capturedRequests[req] 41 | if requestFound { 42 | existingSet := existingRequest.srcPorts 43 | if srcPort != nil { 44 | (*existingSet)[*srcPort] = struct{}{} 45 | } 46 | c.capturedRequests[req] = TimeAndTTL{seenAt, ttl, existingSet} 47 | return 48 | } 49 | 50 | newSet := make(SourcePortsSet) 51 | 52 | // if we reach here - the request is not found 53 | if srcPort != nil { 54 | newSet[*srcPort] = struct{}{} 55 | } 56 | 57 | c.capturedRequests[req] = TimeAndTTL{seenAt, ttl, lo.ToPtr(newSet)} 58 | } 59 | 60 | func (c *NetworkCollector) CollectResults() []mapperclient.RecordedDestinationsForSrc { 61 | type srcInfo struct { 62 | Ip string 63 | Hostname string 64 | } 65 | srcToDests := make(map[srcInfo][]mapperclient.Destination) 66 | 67 | for reqInfo, timeAndTTL := range c.capturedRequests { 68 | src := srcInfo{Ip: reqInfo.srcIP, Hostname: reqInfo.srcHostname} 69 | 70 | if _, ok := srcToDests[src]; !ok { 71 | srcToDests[src] = make([]mapperclient.Destination, 0) 72 | } 73 | 74 | destination := mapperclient.Destination{ 75 | Destination: reqInfo.destHostnameOrIP, 76 | DestinationIP: nilable.From(reqInfo.destIP), 77 | DestinationPort: reqInfo.destPort, 78 | LastSeen: timeAndTTL.lastSeen, 79 | TTL: timeAndTTL.ttl, 80 | SrcPorts: lo.Keys(*timeAndTTL.srcPorts), 81 | } 82 | srcToDests[src] = append(srcToDests[src], destination) 83 | } 84 | 85 | results := 
make([]mapperclient.RecordedDestinationsForSrc, 0) 86 | for src, destinations := range srcToDests { 87 | // Debug print the results 88 | logrus.Debugf("%s (%s):\n", src.Ip, src.Hostname) 89 | for _, dest := range destinations { 90 | logrus.Debugf(" %s, %s", dest.Destination, dest.LastSeen) 91 | } 92 | 93 | results = append(results, mapperclient.RecordedDestinationsForSrc{SrcIp: src.Ip, SrcHostname: src.Hostname, Destinations: destinations}) 94 | } 95 | 96 | c.resetData() 97 | 98 | return results 99 | } 100 | -------------------------------------------------------------------------------- /src/sniffer/pkg/collectors/dnssniffer_test.go: -------------------------------------------------------------------------------- 1 | package collectors 2 | 3 | import ( 4 | "encoding/hex" 5 | "github.com/otterize/network-mapper/src/mapperclient" 6 | "github.com/otterize/network-mapper/src/sniffer/pkg/ipresolver" 7 | "github.com/otterize/nilable" 8 | "testing" 9 | "time" 10 | 11 | "github.com/google/gopacket" 12 | "github.com/google/gopacket/layers" 13 | "github.com/stretchr/testify/suite" 14 | ) 15 | 16 | type SnifferTestSuite struct { 17 | suite.Suite 18 | } 19 | 20 | func (s *SnifferTestSuite) SetupSuite() { 21 | } 22 | 23 | func (s *SnifferTestSuite) TestHandlePacket() { 24 | sniffer := NewDNSSniffer(&ipresolver.MockIPResolver{}, false) 25 | 26 | rawDnsResponse, err := hex.DecodeString("f84d8969309600090f090002080045000059eb6c40004011b325d05b70340a65510d0035fcb40045a621339681800001000100000000037374730975732d656173742d3109616d617a6f6e61777303636f6d0000010001c00c000100010000003c00044815ce60") 27 | if err != nil { 28 | s.Require().NoError(err) 29 | } 30 | packet := gopacket.NewPacket(rawDnsResponse, layers.LayerTypeEthernet, gopacket.Default) 31 | timestamp := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) 32 | packet.Metadata().CaptureInfo.Timestamp = timestamp 33 | sniffer.HandlePacket(packet) 34 | _ = sniffer.RefreshHostsMapping() 35 | 36 | 
s.Require().Equal([]mapperclient.RecordedDestinationsForSrc{ 37 | { 38 | SrcIp: "10.101.81.13", 39 | Destinations: []mapperclient.Destination{ 40 | { 41 | Destination: "sts.us-east-1.amazonaws.com", 42 | DestinationIP: nilable.From("72.21.206.96"), 43 | LastSeen: timestamp, 44 | TTL: nilable.From(60), 45 | SrcPorts: []int{}, 46 | }, 47 | }, 48 | }, 49 | }, sniffer.CollectResults()) 50 | } 51 | 52 | func (s *SnifferTestSuite) TestHandlePacketWithCNAME() { 53 | sniffer := NewDNSSniffer(&ipresolver.MockIPResolver{}, false) 54 | 55 | rawDnsResponse, err := hex.DecodeString("92e72b05f87b02af9e5f513c0800450000b1443940004011e0100af400020af4000900359c2b009d16a123e085800001000200000001036170690c6f74746572697a652d64657603636f6d0000010001036170690c6f74746572697a652d64657603636f6d000005000100000006001b08696e7465726e616c0c6f74746572697a652d64657603636f6d0008696e7465726e616c0c6f74746572697a652d64657603636f6d00000100010000000600040bdc020000002904d0000000000000") 56 | if err != nil { 57 | s.Require().NoError(err) 58 | } 59 | packet := gopacket.NewPacket(rawDnsResponse, layers.LayerTypeEthernet, gopacket.Default) 60 | timestamp := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) 61 | packet.Metadata().CaptureInfo.Timestamp = timestamp 62 | sniffer.HandlePacket(packet) 63 | _ = sniffer.RefreshHostsMapping() 64 | 65 | s.Require().Equal([]mapperclient.RecordedDestinationsForSrc{ 66 | { 67 | SrcIp: "10.244.0.9", 68 | Destinations: []mapperclient.Destination{ 69 | { 70 | Destination: "api.otterize-dev.com", 71 | DestinationIP: nilable.From("11.220.2.0"), 72 | LastSeen: timestamp, 73 | TTL: nilable.From(6), 74 | SrcPorts: []int{}, 75 | }, 76 | }, 77 | }, 78 | }, sniffer.CollectResults()) 79 | } 80 | 81 | func TestDNSSnifferSuite(t *testing.T) { 82 | suite.Run(t, new(SnifferTestSuite)) 83 | } 84 | -------------------------------------------------------------------------------- /src/sniffer/pkg/collectors/socketscanner.go: 
-------------------------------------------------------------------------------- 1 | package collectors 2 | 3 | import ( 4 | "fmt" 5 | "github.com/otterize/go-procnet/procnet" 6 | sharedconfig "github.com/otterize/network-mapper/src/shared/config" 7 | "github.com/otterize/network-mapper/src/sniffer/pkg/utils" 8 | "github.com/otterize/nilable" 9 | "github.com/samber/lo" 10 | "github.com/spf13/viper" 11 | 12 | "time" 13 | ) 14 | 15 | type SocketScanner struct { 16 | NetworkCollector 17 | } 18 | 19 | func NewSocketScanner() *SocketScanner { 20 | s := SocketScanner{ 21 | NetworkCollector{}, 22 | } 23 | s.resetData() 24 | return &s 25 | } 26 | 27 | func (s *SocketScanner) scanTcpFile(hostname string, path string) { 28 | if !viper.GetBool(sharedconfig.EnableSocketScannerKey) { 29 | return 30 | } 31 | socks, err := procnet.SocksFromPath(path) 32 | if err != nil { 33 | // it's likely that some files will be deleted during our iteration, so we ignore errors reading the file. 34 | return 35 | } 36 | listenPorts := make(map[uint16]bool) 37 | for _, sock := range socks { 38 | if sock.State == procnet.Listen { 39 | // LISTEN ports always appear first 40 | listenPorts[sock.LocalAddr.Port] = true 41 | continue 42 | } 43 | if sock.LocalAddr.IP.IsLoopback() || sock.RemoteAddr.IP.IsLoopback() { 44 | // ignore localhost connections as they are irrelevant to the mapping 45 | continue 46 | } 47 | 48 | if sock.State != procnet.Established { 49 | // Skip sockets that are not in ESTABLISHED state, to avoid reporting stale connections (such as connections in TIME_WAIT). 50 | continue 51 | } 52 | 53 | // Only report sockets from the client-side by checking if the local port for this socket is the same port as a listen socket. 54 | if _, isServersideSocket := listenPorts[sock.LocalAddr.Port]; !isServersideSocket { 55 | // The hostname we have here is the hostname for the client. 
56 | s.addCapturedRequest(sock.LocalAddr.IP.String(), hostname, sock.RemoteAddr.IP.String(), sock.RemoteAddr.IP.String(), time.Now(), nilable.Nilable[int]{}, lo.ToPtr(int(sock.LocalAddr.Port)), lo.ToPtr(int(sock.RemoteAddr.Port))) 57 | } 58 | } 59 | } 60 | 61 | func (s *SocketScanner) ScanProcDir() error { 62 | return utils.ScanProcDirProcesses(func(_ int64, pDir string) { 63 | hostname, err := utils.ExtractProcessHostname(pDir) 64 | if err != nil { 65 | return 66 | } 67 | s.scanTcpFile(hostname, fmt.Sprintf("%s/net/tcp", pDir)) 68 | s.scanTcpFile(hostname, fmt.Sprintf("%s/net/tcp6", pDir)) 69 | }) 70 | } 71 | -------------------------------------------------------------------------------- /src/sniffer/pkg/collectors/tcpsniffer_test.go: -------------------------------------------------------------------------------- 1 | package collectors 2 | 3 | import ( 4 | "encoding/hex" 5 | "github.com/otterize/network-mapper/src/mapperclient" 6 | "github.com/otterize/network-mapper/src/sniffer/pkg/ipresolver" 7 | "github.com/otterize/nilable" 8 | "github.com/stretchr/testify/require" 9 | "go.uber.org/mock/gomock" 10 | "testing" 11 | "time" 12 | 13 | "github.com/google/gopacket" 14 | "github.com/google/gopacket/layers" 15 | ) 16 | 17 | func TestTCPSniffer_TestHandlePacketAWS(t *testing.T) { 18 | controller := gomock.NewController(t) 19 | mockResolver := ipresolver.NewMockIPResolver(controller) 20 | mockResolver.EXPECT().ResolveIP("10.0.2.48").Return("client-1", true).Times(2) // once for the initial check, and then another for verification 21 | mockResolver.EXPECT().Refresh().Return(nil).Times(1) 22 | 23 | sniffer := NewTCPSniffer(mockResolver, true) 24 | 25 | tcpSYN, err := hex.DecodeString("4500004000004000400600000a0002300af4784ed93d1f40a16450e500000000b002fffffe34000002043fd8010303060101080ab6a645bc0000000004020000") 26 | if err != nil { 27 | require.NoError(t, err) 28 | } 29 | packet := gopacket.NewPacket(tcpSYN, layers.LayerTypeIPv4, gopacket.Default) 30 | timestamp := 
time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) 31 | packet.Metadata().CaptureInfo.Timestamp = timestamp 32 | sniffer.HandlePacket(packet) 33 | require.NoError(t, sniffer.RefreshHostsMapping()) 34 | 35 | require.Equal(t, []mapperclient.RecordedDestinationsForSrc{ 36 | { 37 | SrcIp: "10.0.2.48", 38 | SrcHostname: "client-1", 39 | Destinations: []mapperclient.Destination{ 40 | { 41 | Destination: "10.244.120.78", 42 | DestinationIP: nilable.From("10.244.120.78"), 43 | DestinationPort: nilable.From(8000), 44 | SrcPorts: []int{55613}, 45 | LastSeen: timestamp, 46 | }, 47 | }, 48 | }, 49 | }, sniffer.CollectResults()) 50 | } 51 | 52 | func TestTCPSniffer_TestHandlePacketNonAWS(t *testing.T) { 53 | controller := gomock.NewController(t) 54 | mockResolver := ipresolver.NewMockIPResolver(controller) 55 | 56 | sniffer := NewTCPSniffer(mockResolver, false) 57 | 58 | tcpSYN, err := hex.DecodeString("4500004000004000400600000a0002300af4784ed93d1f40a16450e500000000b002fffffe34000002043fd8010303060101080ab6a645bc0000000004020000") 59 | if err != nil { 60 | require.NoError(t, err) 61 | } 62 | packet := gopacket.NewPacket(tcpSYN, layers.LayerTypeIPv4, gopacket.Default) 63 | timestamp := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) 64 | packet.Metadata().CaptureInfo.Timestamp = timestamp 65 | sniffer.HandlePacket(packet) 66 | require.NoError(t, sniffer.RefreshHostsMapping()) 67 | 68 | require.Equal(t, []mapperclient.RecordedDestinationsForSrc{ 69 | { 70 | SrcIp: "10.0.2.48", 71 | SrcHostname: "", 72 | Destinations: []mapperclient.Destination{ 73 | { 74 | Destination: "10.244.120.78", 75 | DestinationIP: nilable.From("10.244.120.78"), 76 | DestinationPort: nilable.From(8000), 77 | SrcPorts: []int{55613}, 78 | LastSeen: timestamp, 79 | }, 80 | }, 81 | }, 82 | }, sniffer.CollectResults()) 83 | } 84 | -------------------------------------------------------------------------------- /src/sniffer/pkg/config/config.go: 
-------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "github.com/spf13/viper" 5 | "time" 6 | ) 7 | 8 | const ( 9 | HostProcDirKey = "host-proc-dir" 10 | HostProcDirDefault = "/hostproc" 11 | CallsTimeoutKey = "calls-timeout" 12 | CallsTimeoutDefault = 60 * time.Second 13 | SnifferReportIntervalKey = "sniffer-report-interval" 14 | SnifferReportIntervalDefault = 1 * time.Second 15 | PacketsBufferLengthKey = "packets-buffer-length" 16 | PacketsBufferLengthDefault = 4096 17 | HostsMappingRefreshIntervalKey = "hosts-mapping-refresh-interval" 18 | HostsMappingRefreshIntervalDefault = 500 * time.Millisecond 19 | UseExtendedProcfsResolutionKey = "use-extended-procfs-resolution" 20 | UseExtendedProcfsResolutionDefault = false 21 | ) 22 | 23 | func init() { 24 | viper.SetDefault(SnifferReportIntervalKey, SnifferReportIntervalDefault) 25 | viper.SetDefault(PacketsBufferLengthKey, PacketsBufferLengthDefault) 26 | viper.SetDefault(CallsTimeoutKey, CallsTimeoutDefault) 27 | viper.SetDefault(HostProcDirKey, HostProcDirDefault) 28 | viper.SetDefault(HostsMappingRefreshIntervalKey, HostsMappingRefreshIntervalDefault) 29 | viper.SetDefault(UseExtendedProcfsResolutionKey, UseExtendedProcfsResolutionDefault) 30 | } 31 | -------------------------------------------------------------------------------- /src/sniffer/pkg/ipresolver/ipresolver.go: -------------------------------------------------------------------------------- 1 | package ipresolver 2 | 3 | import ( 4 | "go.uber.org/mock/gomock" 5 | "reflect" 6 | ) 7 | 8 | type IPResolver interface { 9 | Refresh() error 10 | ResolveIP(ipaddr string) (hostname string, ok bool) 11 | } 12 | 13 | // NewMockIPResolver creates a new mock instance. 
14 | func NewMockIPResolver(ctrl *gomock.Controller) *MockIPResolver { 15 | mock := &MockIPResolver{ctrl: ctrl} 16 | mock.recorder = &MockIPResolverMockRecorder{mock} 17 | return mock 18 | } 19 | 20 | // MockMapperClient is a mock of IPResolver interface. 21 | type MockIPResolver struct { 22 | ctrl *gomock.Controller 23 | recorder *MockIPResolverMockRecorder 24 | } 25 | 26 | // MockMapperClientMockRecorder is the mock recorder for MockMapperClient. 27 | type MockIPResolverMockRecorder struct { 28 | mock *MockIPResolver 29 | } 30 | 31 | // EXPECT returns an object that allows the caller to indicate expected use. 32 | func (m *MockIPResolver) EXPECT() *MockIPResolverMockRecorder { 33 | return m.recorder 34 | } 35 | 36 | // Refresh mocks base method. 37 | func (m *MockIPResolver) Refresh() error { 38 | m.ctrl.T.Helper() 39 | ret := m.ctrl.Call(m, "Refresh") 40 | ret0, _ := ret[0].(error) 41 | return ret0 42 | } 43 | 44 | // ReportCaptureResults indicates an expected call of ReportCaptureResults. 45 | func (mr *MockIPResolverMockRecorder) Refresh() *gomock.Call { 46 | mr.mock.ctrl.T.Helper() 47 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Refresh", reflect.TypeOf((*MockIPResolver)(nil).Refresh)) 48 | } 49 | 50 | // ResolveIP mocks base method. 51 | func (m *MockIPResolver) ResolveIP(ipaddr string) (string, bool) { 52 | m.ctrl.T.Helper() 53 | ret := m.ctrl.Call(m, "ResolveIP", ipaddr) 54 | ret0, _ := ret[0].(string) 55 | ret1, _ := ret[1].(bool) 56 | return ret0, ret1 57 | } 58 | 59 | // ResolveIP indicates an expected call of ResolveIP. 
60 | func (mr *MockIPResolverMockRecorder) ResolveIP(ipaddr interface{}) *gomock.Call {
61 | 	mr.mock.ctrl.T.Helper()
62 | 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveIP", reflect.TypeOf((*MockIPResolver)(nil).ResolveIP), ipaddr)
63 | }
64 | 
--------------------------------------------------------------------------------
/src/sniffer/pkg/ipresolver/process_monitor.go:
--------------------------------------------------------------------------------
1 | package ipresolver
2 | 
3 | import (
4 | 	"github.com/otterize/intents-operator/src/shared/errors"
5 | 	"github.com/otterize/network-mapper/src/sniffer/pkg/utils"
6 | 	"github.com/sirupsen/logrus"
7 | 	"k8s.io/apimachinery/pkg/util/sets"
8 | )
9 | 
10 | // MaxRetries bounds how many polls a failing onProcNew callback is retried before giving up.
11 | const MaxRetries = 3
12 | 
13 | // ProcessMonitorCallback Should be idempotent on failures because retried on error
14 | type ProcessMonitorCallback func(pid int64, pDir string) error
15 | 
16 | // ProcessMonitor diffs consecutive process scans and invokes callbacks for
17 | // processes that appeared or disappeared between polls.
18 | type ProcessMonitor struct {
19 | 	processes        sets.Set[int64] // pids handled (or given up on) in the latest poll
20 | 	failingProcesses map[int64]int   // pid -> consecutive onProcNew failure count, pending retry
21 | 	onProcNew        ProcessMonitorCallback
22 | 	onProcExit       ProcessMonitorCallback
23 | 	forEachProcess   utils.ProcessScanner
24 | }
25 | 
26 | // NewProcessMonitor returns a monitor that reports new/exited processes through
27 | // the given callbacks, enumerating processes with forEachProcess on each Poll.
28 | func NewProcessMonitor(
29 | 	onProcNew ProcessMonitorCallback,
30 | 	onProcExit ProcessMonitorCallback,
31 | 	forEachProcess utils.ProcessScanner,
32 | ) *ProcessMonitor {
33 | 	return &ProcessMonitor{
34 | 		processes:        sets.New[int64](),
35 | 		failingProcesses: make(map[int64]int),
36 | 		onProcNew:        onProcNew,
37 | 		onProcExit:       onProcExit,
38 | 		forEachProcess:   forEachProcess,
39 | 	}
40 | }
41 | 
42 | // Poll scans the current processes, calling onProcNew for processes not seen in
43 | // the previous poll (retrying a failing callback for up to MaxRetries polls) and
44 | // onProcExit for processes that disappeared since the previous poll.
45 | func (pm *ProcessMonitor) Poll() error {
46 | 	processesSeenLastTime := pm.processes.Clone()
47 | 	pm.processes = sets.New[int64]()
48 | 	seenThisPoll := sets.New[int64]()
49 | 
50 | 	if err := pm.forEachProcess(func(pid int64, pDir string) {
51 | 		seenThisPoll.Insert(pid)
52 | 		if !processesSeenLastTime.Has(pid) {
53 | 			if err := pm.onProcNew(pid, pDir); err != nil {
54 | 				// Failed to handle; missing map entries read as 0
55 | 				failures := pm.failingProcesses[pid] + 1
56 | 				if failures <= MaxRetries {
57 | 					// Try again next interval
58 | 					pm.failingProcesses[pid] = failures
59 | 					return // Don't insert pid to handled set
60 | 				}
61 | 				logrus.Debugf("Giving up failing process: %d", pid)
62 | 				delete(pm.failingProcesses, pid)
63 | 			} else {
64 | 				// Succeeded (possibly after earlier failures): drop any stale retry counter
65 | 				delete(pm.failingProcesses, pid)
66 | 			}
67 | 		}
68 | 		// Shouldn't handle again
69 | 		pm.processes.Insert(pid)
70 | 	}); err != nil {
71 | 		return errors.Wrap(err)
72 | 	}
73 | 
74 | 	// Drop retry counters of processes that exited before ever being handled,
75 | 	// so failingProcesses doesn't accumulate stale entries forever.
76 | 	for pid := range pm.failingProcesses {
77 | 		if !seenThisPoll.Has(pid) {
78 | 			delete(pm.failingProcesses, pid)
79 | 		}
80 | 	}
81 | 
82 | 	exitedProcesses := processesSeenLastTime.Difference(pm.processes)
83 | 	for _, pid := range exitedProcesses.UnsortedList() {
84 | 		_ = pm.onProcExit(pid, "") // best-effort: an exited pid has no proc dir left to read
85 | 	}
86 | 
87 | 	return nil
88 | }
--------------------------------------------------------------------------------
/src/sniffer/pkg/ipresolver/process_montior_test.go:
--------------------------------------------------------------------------------
1 | package ipresolver
2 | 
3 | import (
4 | 	"github.com/otterize/network-mapper/src/sniffer/pkg/utils"
5 | 	"github.com/stretchr/testify/suite"
6 | 	"golang.org/x/exp/slices"
7 | 	"testing"
8 | )
9 | 
10 | type ProcessMonitorTestSuite struct {
11 | 	suite.Suite
12 | 	processMonitor *ProcessMonitor
13 | 	pidCalledNew   []int64
14 | 	pidCalledExit  []int64
15 | 	currenPids     []int64
16 | }
17 | 
18 | func (s *ProcessMonitorTestSuite) SetupTest() {
19 | 	s.processMonitor = NewProcessMonitor(s.onNew, s.onExit, s.scanPids)
20 | }
21 | 
22 | func (s *ProcessMonitorTestSuite) onNew(pid int64, _ string) error {
23 | 	s.pidCalledNew = append(s.pidCalledNew, pid)
24 | 	return nil
25 | }
26 | 
27 | func (s *ProcessMonitorTestSuite) onExit(pid int64, _ string) error {
28 | 	s.pidCalledExit = append(s.pidCalledExit, pid)
29 | 	return nil
30 | }
31 | 
32 | func (s *ProcessMonitorTestSuite) scanPids(callback utils.ProcessScanCallback) error {
33 | 	for _, pid := range s.currenPids {
34 | 		callback(pid, "testDir")
35 | 	}
36 | 	return nil
37 | }
38 | 
39 | func (s *ProcessMonitorTestSuite) TestNewProcess() {
40 | 	s.resetMockPid()
41 | 
42 | 	s.currenPids = []int64{10, 20, 30}
43 | 	err := s.processMonitor.Poll()
44 | 	s.NoError(err)
45 | 	slices.Sort(s.pidCalledNew)
46 | 	slices.Sort(s.pidCalledExit)
47 | 
48 | 	s.Require().Equal(s.currenPids, s.pidCalledNew)
49 | 	s.Require().Empty(s.pidCalledExit)
50 | 	s.resetMockPid()
51 | 
52 | 	s.currenPids = []int64{10, 20, 30, 40}
53 | 	err = s.processMonitor.Poll()
54 | 	s.NoError(err)
55 | 	s.Require().Equal([]int64{40}, s.pidCalledNew)
56 | 	s.Require().Empty(s.pidCalledExit)
57 | 	s.resetMockPid()
58 | 
59 | 	s.currenPids = []int64{40}
60 | 	err = s.processMonitor.Poll()
61 | 	s.NoError(err)
62 | 	s.Require().Empty(s.pidCalledNew)
63 | 	slices.Sort(s.pidCalledExit)
64 | 	s.Require().Equal([]int64{10, 20, 30}, s.pidCalledExit)
65 | 	s.resetMockPid()
66 | 
67 | 	s.currenPids = []int64{40}
68 | 	err = s.processMonitor.Poll()
69 | 	s.NoError(err)
70 | 	s.Require().Empty(s.pidCalledNew)
71 | 	s.Require().Empty(s.pidCalledExit)
72 | }
73 | 
74 | func (s *ProcessMonitorTestSuite) resetMockPid() {
75 | 	s.pidCalledNew = []int64{}
76 | 	s.pidCalledExit = []int64{}
77 | }
78 | 
79 | func TestProcessMonitorTestSuite(t *testing.T) {
80 | 	suite.Run(t, new(ProcessMonitorTestSuite))
81 | }
82 | 
--------------------------------------------------------------------------------
/src/sniffer/pkg/ipresolver/procfs_resolver.go:
--------------------------------------------------------------------------------
1 | package ipresolver
2 | 
3 | import (
4 | 	"github.com/otterize/intents-operator/src/shared/errors"
5 | 	"github.com/otterize/network-mapper/src/sniffer/pkg/utils"
6 | 	"github.com/sirupsen/logrus"
7 | )
8 | 
9 | type ProcFSIPResolverEntry struct {
10 | 	IPAddr          string
11 | 	Hostname        string
12 | 	ProcessRefCount int // number of live processes currently backing this IP->hostname mapping
13 | }
14 | 
15 | type ProcFSIPResolver struct {
16 | 	byAddr  map[string]*ProcFSIPResolverEntry
17 | 	byPid   map[int64]*ProcFSIPResolverEntry
18 | 	monitor *ProcessMonitor
19 | }
20 | 
21 | func NewProcFSIPResolver() *ProcFSIPResolver {
22 | 	r := ProcFSIPResolver{
23 | 		monitor: nil, // set below: the monitor's callbacks need the resolver to exist first
24 | 		byAddr:  make(map[string]*ProcFSIPResolverEntry),
25 | 		byPid:   make(map[int64]*ProcFSIPResolverEntry),
26 | 	}
27 | 	r.monitor = NewProcessMonitor(r.onProcessNew, r.onProcessExit, utils.ScanProcDirProcesses)
28 | 
29 | 	return &r
30 | }
31 | 
32 | func (r *ProcFSIPResolver) ResolveIP(ipaddr string) (hostname string, ok bool) {
33 | 	if hostInfo, ok := r.byAddr[ipaddr]; ok {
34 | 		return hostInfo.Hostname, true
35 | 	}
36 | 	return "", false
37 | }
38 | 
39 | func (r *ProcFSIPResolver) Refresh() error {
40 | 	return r.monitor.Poll()
41 | }
42 | 
43 | func (r *ProcFSIPResolver) onProcessNew(pid int64, pDir string) (err error) {
44 | 	var hostname, ipaddr string
45 | 	hostname, err = utils.ExtractProcessHostname(pDir)
46 | 	if err != nil {
47 | 		logrus.Debugf("Failed to extract hostname for process %d: %v", pid, err)
48 | 		return errors.Wrap(err)
49 | 	}
50 | 
51 | 	ipaddr, err = utils.ExtractProcessIPAddr(pDir)
52 | 	if err != nil {
53 | 		logrus.Debugf("Failed to extract IP address for process %d: %v", pid, err)
54 | 		return errors.Wrap(err)
55 | 	}
56 | 
57 | 	if entry, ok := r.byAddr[ipaddr]; ok {
58 | 		if entry.Hostname == hostname {
59 | 			// Already mapped to this hostname, add another process reference
60 | 			r.byPid[pid] = entry
61 | 			entry.ProcessRefCount++
62 | 			logrus.Debugf("Mapping %s:%s already exists, increased refcount to %d", ipaddr, hostname, entry.ProcessRefCount)
63 | 			return nil
64 | 		} else {
65 | 			// Unlikely race: the IP was reassigned to a new pod so quickly that this scan sees the new process while the old one is still tracked under the same IP
66 | 			logrus.Warnf("IP mapping conflict: %s got new hostname %s, but already mapped to %s. Would use the newer hostname", ipaddr, hostname, entry.Hostname)
67 | 			// For now, treat it as a new IP mapping (make sure at exit to decrement ref count only if hostname matches)
68 | 		}
69 | 	}
70 | 
71 | 	logrus.Debugf("Found new mapping %s:%s", ipaddr, hostname)
72 | 	newEntry := &ProcFSIPResolverEntry{
73 | 		IPAddr:          ipaddr,
74 | 		Hostname:        hostname,
75 | 		ProcessRefCount: 1,
76 | 	}
77 | 	r.byPid[pid] = newEntry
78 | 	r.byAddr[ipaddr] = newEntry
79 | 	return nil
80 | }
81 | 
82 | func (r *ProcFSIPResolver) onProcessExit(pid int64, _ string) error {
83 | 	if entry, ok := r.byPid[pid]; !ok {
84 | 		// Shouldn't happen: exit reported for a pid that was never successfully mapped
85 | 		logrus.Debugf("Unknown process %d exited", pid)
86 | 		return nil
87 | 	} else {
88 | 		entry.ProcessRefCount--
89 | 		if entry.ProcessRefCount == 0 {
90 | 			// Should remove mapping, but validate this process actually holds the newest mapping
91 | 			if r.byAddr[entry.IPAddr] == entry {
92 | 				logrus.Debugf("Removing IP mapping %s:%s", entry.IPAddr, entry.Hostname)
93 | 				delete(r.byAddr, entry.IPAddr)
94 | 			}
95 | 		}
96 | 
97 | 		// Remove process from pid map
98 | 		delete(r.byPid, pid)
99 | 	}
100 | 	return nil
101 | }
--------------------------------------------------------------------------------
/src/sniffer/pkg/prometheus/metrics.go:
--------------------------------------------------------------------------------
1 | package prometheus
2 | 
3 | import (
4 | 	"github.com/prometheus/client_golang/prometheus"
5 | 	"github.com/prometheus/client_golang/prometheus/promauto"
6 | )
7 | 
8 | var (
9 | 	socketScanReports = promauto.NewCounter(prometheus.CounterOpts{
10 | 		Name: "socketscan_reported_connections",
11 | 		Help: "The total number of socket scan-based reported connections",
12 | 	})
13 | 	dnsCaptureReports = promauto.NewCounter(prometheus.CounterOpts{
14 | 		Name: "dns_reported_connections",
15 | 		Help: "The total number of DNS-based reported connections",
16 | 	})
17 | )
18 | 
19 | func IncrementSocketScanReports(count int) {
20 | 	socketScanReports.Add(float64(count))
21 | }
22 | 
23
| func IncrementDNSCaptureReports(count int) {
24 | 	dnsCaptureReports.Add(float64(count))
25 | }
26 | 
--------------------------------------------------------------------------------
/src/sniffer/pkg/utils/procfs.go:
--------------------------------------------------------------------------------
1 | package utils
2 | 
3 | import (
4 | 	"fmt"
5 | 	"github.com/mpvl/unique"
6 | 	"github.com/otterize/intents-operator/src/shared/errors"
7 | 	"github.com/sirupsen/logrus"
8 | 	"os"
9 | 	"regexp"
10 | 	"strconv"
11 | 	"strings"
12 | 
13 | 	"github.com/otterize/network-mapper/src/sniffer/pkg/config"
14 | 	"github.com/spf13/viper"
15 | )
16 | 
17 | type ProcessScanCallback func(pid int64, pDir string)
18 | type ProcessScanner func(callback ProcessScanCallback) error
19 | 
20 | func ScanProcDirProcesses(callback ProcessScanCallback) error {
21 | 	hostProcDir := viper.GetString(config.HostProcDirKey)
22 | 	files, err := os.ReadDir(hostProcDir)
23 | 	if err != nil {
24 | 		return errors.Wrap(err)
25 | 	}
26 | 
27 | 	for _, f := range files {
28 | 		pid, err := strconv.ParseInt(f.Name(), 10, 64)
29 | 		if err != nil {
30 | 			// name is not a number, meaning it's not a process dir, skip
31 | 			continue
32 | 		}
33 | 		callback(pid, fmt.Sprintf("%s/%s", hostProcDir, f.Name()))
34 | 	}
35 | 	return nil
36 | }
37 | 
38 | func ExtractProcessHostname(pDir string) (string, error) {
39 | 	if viper.GetBool(config.UseExtendedProcfsResolutionKey) {
40 | 		hostname, found, err := extractProcessHostnameUsingEtcHostname(pDir)
41 | 		if err != nil {
42 | 			return "", errors.Wrap(err)
43 | 		}
44 | 		if found {
45 | 			return hostname, nil
46 | 		}
47 | 	}
48 | 	return extractProcessHostnameUsingEnviron(pDir)
49 | 
50 | }
51 | 
52 | func extractProcessHostnameUsingEtcHostname(pDir string) (string, bool, error) {
53 | 	// Read the process's /etc/hostname through its root mount in procfs; a missing file is not an error
54 | 	data, err := os.ReadFile(fmt.Sprintf("%s/root/etc/hostname", pDir))
55 | 	if os.IsNotExist(err) {
56 | 		return "", false, nil
57 | 	}
58 | 	if err != nil {
59 | 		return "", false, errors.Wrap(err)
60 | 	}
61 | 
62 | 	return strings.TrimSpace(string(data)), true, nil
63 | }
64 | 
65 | func extractProcessHostnameUsingEnviron(pDir string) (string, error) {
66 | 	// Read the environment variables from the proc filesystem
67 | 	data, err := os.ReadFile(fmt.Sprintf("%s/environ", pDir))
68 | 	if err != nil {
69 | 		return "", errors.Wrap(err)
70 | 	}
71 | 
72 | 	// Split the environment variables by null byte
73 | 	envVars := strings.Split(string(data), "\x00")
74 | 	for _, envVarLine := range envVars {
75 | 		// Split the environment variable line into a name and value
76 | 		parts := strings.SplitN(envVarLine, "=", 2)
77 | 		if len(parts) != 2 {
78 | 			continue
79 | 		}
80 | 
81 | 		// If the environment variable name matches the requested one, return its value
82 | 		if parts[0] == "HOSTNAME" {
83 | 			return parts[1], nil
84 | 		}
85 | 	}
86 | 
87 | 	return "", errors.Errorf("couldn't find hostname in %s/environ", pDir)
88 | }
89 | 
90 | func ExtractProcessIPAddr(pDir string) (string, error) {
91 | 	contentBytes, err := os.ReadFile(fmt.Sprintf("%s/net/fib_trie", pDir))
92 | 	if err != nil {
93 | 		return "", errors.Wrap(err)
94 | 	}
95 | 
96 | 	content := string(contentBytes)
97 | 
98 | 	// Regular expression to match IP addresses labelled '/32 host LOCAL'; loopback addresses are filtered out by the loop below
99 | 	re := regexp.MustCompile(`(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*/32 host LOCAL`)
100 | 
101 | 	matches := re.FindAllStringSubmatch(content, -1)
102 | 
103 | 	ips := make([]string, 0)
104 | 
105 | 	for _, match := range matches {
106 | 		if len(match) > 1 && !strings.HasPrefix(match[1], "127.") {
107 | 			ips = append(ips, match[1])
108 | 		}
109 | 	}
110 | 	unique.Strings(&ips)
111 | 
112 | 	if len(ips) == 0 {
113 | 		return "", errors.New("no IP addresses found")
114 | 	}
115 | 	if len(ips) > 1 {
116 | 		logrus.Warnf("Found multiple IP addresses (%s) in %s", ips, pDir)
117 | 	}
118 | 
119 | 	return ips[0], nil
120 | }
121 | 
--------------------------------------------------------------------------------
/src/tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | // +build tools 3 | 4 | package networkmapper 5 | 6 | import _ "github.com/99designs/gqlgen" 7 | import _ "github.com/Khan/genqlient/generate" 8 | -------------------------------------------------------------------------------- /visualize-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/otterize/network-mapper/24ee0adf90a8036bf524419736c3f18a2b80dee4/visualize-example.png --------------------------------------------------------------------------------