├── .dockerfiles └── build_core.sh ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml └── workflows │ ├── build.yml │ ├── release.yml │ └── review.yml ├── .gitignore ├── .gitmodules ├── .yamllint.yml ├── Dockerfile.glibc.core ├── Makefile ├── README.md ├── artifacts └── .gitkeep ├── build.sh ├── includes ├── .gitkeep ├── netdata_core.h ├── vmlinux_508.h └── vmlinux_519.h ├── src ├── DEVELOPER.md ├── Makefile ├── cachestat.bpf.c ├── cachestat.c ├── dc.bpf.c ├── dc.c ├── disk.bpf.c ├── disk.c ├── fd.bpf.c ├── fd.c ├── filesystem.bpf.c ├── filesystem.c ├── hardirq.bpf.c ├── hardirq.c ├── includes │ ├── .gitkeep │ ├── cachestat.skel.h │ ├── dc.skel.h │ ├── disk.skel.h │ ├── fd.skel.h │ ├── filesystem.skel.h │ ├── hardirq.skel.h │ ├── mdflush.skel.h │ ├── mount.skel.h │ ├── oomkill.skel.h │ ├── process.skel.h │ ├── shm.skel.h │ ├── socket.skel.h │ ├── softirq.skel.h │ ├── swap.skel.h │ ├── sync.skel.h │ └── vfs.skel.h ├── mdflush.bpf.c ├── mdflush.c ├── mount.bpf.c ├── mount.c ├── netdata_core_common.h ├── networkviewer.bpf.c ├── networkviewer.c ├── nfs.bpf.c ├── oomkill.bpf.c ├── oomkill.c ├── process.bpf.c ├── process.c ├── rename_header.sh ├── shm.bpf.c ├── shm.c ├── socket.bpf.c ├── socket.c ├── softirq.bpf.c ├── softirq.c ├── swap.bpf.c ├── swap.c ├── sync.bpf.c ├── sync.c ├── tests │ ├── .gitkeep │ └── run_tests.sh ├── vfs.bpf.c └── vfs.c └── tools └── check-kernel-core.sh /.dockerfiles/build_core.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | build() { 6 | echo "[XXX]: Building against Kernel 5.15 for libc ${_LIBC} ..." 7 | ( 8 | cd co-re 9 | make CC=clang 10 | ) || return 1 11 | } 12 | 13 | _main() { 14 | if ! build; then 15 | echo "ERROR: Build failed ..." 16 | if [ -t 1 ]; then 17 | echo "Dropping into a shell ..." 
18 | exec /bin/sh 19 | else 20 | exit 1 21 | fi 22 | fi 23 | } 24 | 25 | if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then 26 | _main "$@" 27 | fi 28 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # By Default the team working on this owns/reviews everything 2 | * @thiagoftsm @Ferroin @tkatsoulas 3 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 12 | 13 | ##### Summary 14 | 15 | ##### Test Plan 16 | 1. Clone this branch 17 | 2. Run the following commands: 18 | ```sh 19 | # git submodule update --init --recursive 20 | # make clean; make 21 | # cd src/tests 22 | # sh run_tests.sh 23 | ``` 24 | 3. Verify that you do not have any `libbpf` error inside `error.log`. 25 | 26 | ##### Additional information 27 | 28 | | Linux Distribution | Environment |Kernel Version | Error | Success | 29 | |--------------------|----------------|---------------|-------------|---------| 30 | | LINUX DISTRIBUTION | Bare metal/VM | uname -r | | | 31 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: github-actions 5 | directory: / 6 | schedule: 7 | interval: weekly 8 | 9 | - package-ecosystem: docker 10 | directory: / 11 | schedule: 12 | interval: weekly 13 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: CI 3 | on: 4 | pull_request: null 5 | concurrency: 6 | group: ci-${{ github.ref }} 7 | cancel-in-progress: true 8 | jobs: 9 | build-artifacts: 10 | 
name: Build Artifacts 11 | strategy: 12 | fail-fast: false 13 | matrix: 14 | kernel_version: 15 | - '6.6.17' 16 | libc: 17 | - glibc 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v4 22 | with: 23 | submodules: true 24 | - name: Run build.sh 25 | run: | 26 | os=core 27 | ./build.sh ${{ matrix.kernel_version }} ${{ matrix.libc }} "${os}" 28 | - name: List Artifacts 29 | run: | 30 | ls -lah artifacts 31 | test -f artifacts/netdata_ebpf-*.tar.xz 32 | - name: Upload Artifacts 33 | uses: actions/upload-artifact@v4 34 | if: success() 35 | with: 36 | name: artifacts-${{ matrix.kernel_version }}-${{ matrix.libc }} 37 | path: artifacts 38 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Builds and Releases packages for eBPF to be consumed by Netdata Agent 3 | name: CD 4 | on: 5 | push: 6 | tags: 7 | - 'v*' 8 | concurrency: 9 | group: cd-${{ github.ref }} 10 | cancel-in-progress: true 11 | jobs: 12 | build-artifacts: 13 | name: Build Artifacts 14 | strategy: 15 | matrix: 16 | kernel_version: 17 | - '6.6.17' 18 | libc: 19 | - glibc 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v4 24 | with: 25 | submodules: true 26 | - name: Run build.sh 27 | run: | 28 | os=core 29 | ./build.sh ${{ matrix.kernel_version }} ${{ matrix.libc }} "${os}" 30 | - name: List Artifacts 31 | run: | 32 | ls -lah artifacts 33 | test -f artifacts/netdata_ebpf-co-re-*.tar.xz 34 | - name: Upload Artifacts 35 | uses: actions/upload-artifact@v4 36 | if: success() 37 | with: 38 | name: artifacts 39 | path: artifacts 40 | 41 | create-release: 42 | name: Create Release 43 | needs: build-artifacts 44 | runs-on: ubuntu-latest 45 | steps: 46 | - name: Download all Artifacts 47 | uses: actions/download-artifact@v4 48 | with: 49 | name: artifacts 50 | path: artifacts 51 | - 
name: Set release_tag 52 | run: | 53 | echo "RELEASE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV 54 | - name: Consolidate Artifacts 55 | run: | 56 | mkdir -p final-artifacts 57 | for libc in glibc; do 58 | mkdir -p "packages/netdata-ebpf-co-re-${libc}-${RELEASE_TAG}" 59 | tar -C packages/netdata-ebpf-co-re-"$libc"-${RELEASE_TAG} -xvf artifacts/*.tar.xz 60 | tar -C packages/netdata-ebpf-co-re-"$libc"-${RELEASE_TAG} -Jcvf final-artifacts/netdata-ebpf-co-re-"$libc"-${RELEASE_TAG}.tar.xz ./ 61 | done 62 | cd final-artifacts && sha256sum *.tar.xz > sha256sums.txt 63 | - name: Create Release 64 | uses: ncipollo/release-action@v1 65 | with: 66 | allowUpdates: false 67 | artifactErrorsFailBuild: true 68 | artifacts: 'final-artifacts/*' 69 | draft: true 70 | token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }} 71 | -------------------------------------------------------------------------------- /.github/workflows/review.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Runs various ReviewDog based checks against PR with suggested changes to improve quality 3 | name: Review 4 | on: 5 | pull_request: 6 | env: 7 | DO_NOT_TRACK: 1 8 | concurrency: 9 | group: review-${{ github.ref }} 10 | cancel-in-progress: true 11 | jobs: 12 | prep-review: 13 | name: Prepare Review Jobs 14 | runs-on: ubuntu-latest 15 | outputs: 16 | actionlint: ${{ steps.actionlint.outputs.run }} 17 | hadolint: ${{ steps.hadolint.outputs.run }} 18 | shellcheck: ${{ steps.shellcheck.outputs.run }} 19 | yamllint: ${{ steps.yamllint.outputs.run }} 20 | steps: 21 | - name: Clone repository 22 | uses: actions/checkout@v4 23 | with: 24 | submodules: recursive 25 | fetch-depth: 0 26 | - name: Check files for actionlint 27 | id: actionlint 28 | run: | 29 | if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '\.github/workflows/.*' ; then 30 | echo '::set-output name=run::true' 31 | echo 'GitHub Actions workflows have changed, need to run actionlint.' 
32 | else 33 | echo '::set-output name=run::false' 34 | fi 35 | - name: Check files for hadolint 36 | id: hadolint 37 | run: | 38 | if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '.*Dockerfile.*' ; then 39 | echo '::set-output name=run::true' 40 | echo 'Dockerfiles have changed, need to run Hadolint.' 41 | else 42 | echo '::set-output name=run::false' 43 | fi 44 | - name: Check files for shellcheck 45 | id: shellcheck 46 | run: | 47 | if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '.*\.sh.*' ; then 48 | echo '::set-output name=run::true' 49 | echo 'Shell scripts have changed, need to run shellcheck.' 50 | else 51 | echo '::set-output name=run::false' 52 | fi 53 | - name: Check files for yamllint 54 | id: yamllint 55 | run: | 56 | if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '.*\.ya?ml' ; then 57 | echo '::set-output name=run::true' 58 | echo 'YAML files have changed, need to run yamllint.' 59 | else 60 | echo '::set-output name=run::false' 61 | fi 62 | 63 | actionlint: 64 | name: actionlint 65 | needs: prep-review 66 | if: needs.prep-review.outputs.actionlint == 'true' 67 | runs-on: ubuntu-latest 68 | steps: 69 | - name: Git clone repository 70 | uses: actions/checkout@v4 71 | with: 72 | submodules: recursive 73 | fetch-depth: 0 74 | - name: Run actionlint 75 | uses: reviewdog/action-actionlint@v1 76 | with: 77 | github_token: ${{ secrets.GITHUB_TOKEN }} 78 | reporter: github-pr-check 79 | 80 | hadolint: 81 | name: hadolint 82 | needs: prep-review 83 | if: needs.prep-review.outputs.hadolint == 'true' 84 | runs-on: ubuntu-latest 85 | steps: 86 | - name: Git clone repository 87 | uses: actions/checkout@v4 88 | with: 89 | fetch-depth: 0 90 | - name: Run hadolint 91 | uses: reviewdog/action-hadolint@v1 92 | with: 93 | github_token: ${{ secrets.GITHUB_TOKEN }} 94 | reporter: github-pr-check 95 | 96 | shellcheck: 97 | name: shellcheck 98 | needs: prep-review 99 | if: 
needs.prep-review.outputs.shellcheck == 'true' 100 | runs-on: ubuntu-latest 101 | steps: 102 | - name: Git clone repository 103 | uses: actions/checkout@v4 104 | with: 105 | submodules: recursive 106 | fetch-depth: 0 107 | - name: Run shellcheck 108 | uses: reviewdog/action-shellcheck@v1 109 | with: 110 | github_token: ${{ secrets.GITHUB_TOKEN }} 111 | reporter: github-pr-check 112 | path: "." 113 | pattern: "*.sh*" 114 | exclude: "./.git/*" 115 | 116 | yamllint: 117 | name: yamllint 118 | needs: prep-review 119 | if: needs.prep-review.outputs.yamllint == 'true' 120 | runs-on: ubuntu-latest 121 | steps: 122 | - name: Git clone repository 123 | uses: actions/checkout@v4 124 | with: 125 | submodules: recursive 126 | fetch-depth: 0 127 | - name: Run yamllint 128 | uses: reviewdog/action-yamllint@v1 129 | with: 130 | github_token: ${{ secrets.GITHUB_TOKEN }} 131 | reporter: github-pr-check 132 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.bak 3 | *.log 4 | 5 | *.tar.* 6 | *.tar 7 | 8 | *.ll 9 | *.o* 10 | 11 | *.skel.h 12 | *.a 13 | 14 | src/tests/* 15 | ./src/*.o 16 | .local_libbpf/bpf 17 | .local_libbpf/pkgconfig 18 | !src/tests/*.sh 19 | 20 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "libbpf"] 2 | path = libbpf 3 | url = https://github.com/netdata/libbpf 4 | [submodule "kernel-collector"] 5 | path = kernel-collector 6 | url = https://github.com/netdata/kernel-collector.git 7 | -------------------------------------------------------------------------------- /.yamllint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | yaml-files: 3 | - '*.yaml' 4 | - '*.yml' 5 | - '.yamllint' 6 | - 'collectors/python.d.plugin/*.conf' 7 | - 
'collectors/python.d.plugin/*/*.conf' 8 | 9 | ignore: | 10 | mqtt_websockets/ 11 | packaging/makeself/tmp/ 12 | 13 | rules: 14 | braces: enable 15 | brackets: enable 16 | colons: enable 17 | commas: enable 18 | comments: disable 19 | comments-indentation: disable 20 | document-end: disable 21 | document-start: disable 22 | empty-lines: enable 23 | empty-values: enable 24 | hyphens: enable 25 | indentation: enable 26 | line-length: 27 | max: 150 28 | level: warning 29 | allow-non-breakable-words: true 30 | allow-non-breakable-inline-mappings: true 31 | key-duplicates: enable 32 | key-ordering: disable 33 | new-line-at-end-of-file: enable 34 | new-lines: enable 35 | octal-values: enable 36 | quoted-strings: disable 37 | trailing-spaces: enable 38 | truthy: 39 | check-keys: false 40 | allowed-values: ["true", "false", "yes", "no"] 41 | -------------------------------------------------------------------------------- /Dockerfile.glibc.core: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 AS build 2 | 3 | ARG ARCH=x86 4 | ENV ARCH=$ARCH 5 | 6 | ARG LOCAL_KERNEL_VERSION=6.6.17 7 | 8 | ENV _LIBC=glibc 9 | 10 | ENV DEBIAN_FRONTEND=noninteractive 11 | # hadolint ignore=DL3018,DL3015,DL3008,DL3009 12 | RUN apt-get update && \ 13 | apt-get install -y build-essential autoconf automake coreutils pkg-config \ 14 | bc libelf-dev libssl-dev clang-tools-16 libclang-16-dev \ 15 | llvm-16 rsync bison flex tar xz-utils wget libbfd-dev \ 16 | libcap-dev 17 | 18 | # hadolint ignore=DL3059 19 | RUN ln -s /usr/bin/clang-16 /usr/bin/clang && \ 20 | ln -s /usr/bin/llvm-strip-16 /usr/bin/llvm-strip 21 | 22 | # hadolint ignore=DL3003,SC3009,DL4006,SC2046 23 | RUN mkdir -p /usr/src && \ 24 | cd /usr/src && \ 25 | wget -q https://cdn.kernel.org/pub/linux/kernel/v$(echo "$LOCAL_KERNEL_VERSION" | cut -f 1 -d '.').x/linux-${LOCAL_KERNEL_VERSION}.tar.xz && \ 26 | tar -xf linux-${LOCAL_KERNEL_VERSION}.tar.xz && \ 27 | make -C 
linux-${LOCAL_KERNEL_VERSION}/tools/bpf/bpftool/ && \ 28 | cp linux-${LOCAL_KERNEL_VERSION}/tools/bpf/bpftool/bpftool /usr/bin/ 29 | 30 | WORKDIR /ebpf-co-re 31 | 32 | COPY .dockerfiles/build_core.sh /build.sh 33 | COPY . . 34 | 35 | CMD ["/build.sh"] 36 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | CC=gcc 2 | 3 | SOURCE_DIR = src/ 4 | _LIBC ?= glibc 5 | KERNEL_VERSION="$(shell cat /proc/sys/kernel/osrelease)" 6 | FIRST_KERNEL_VERSION=$(shell sh ./tools/complement.sh "$(KERNEL_VERSION)") 7 | VER_MAJOR=$(shell echo $(KERNEL_VERSION) | cut -d. -f1) 8 | VER_MINOR=$(shell echo $(KERNEL_VERSION) | cut -d. -f2) 9 | VER_PATCH=$(shell echo $(KERNEL_VERSION) | cut -d. -f3 | cut -d\- -f1) 10 | RUNNING_VERSION_CODE=$(shell echo $$(( $(VER_MAJOR) * 65536 + $(VER_MINOR) * 256 + $(VER_PATCH))) ) 11 | 12 | 13 | EXTRA_CFLAGS += -fno-stack-protector 14 | 15 | all: 16 | cd $(SOURCE_DIR) && $(MAKE) all; 17 | tar -cf artifacts/netdata_ebpf-co-re-$(_LIBC)-${VER_MAJOR}.${VER_MINOR}.${VER_PATCH}.tar includes/*.skel.h 18 | if [ "$${DEBUG:-0}" -eq 1 ]; then tar -uvf artifacts/netdata_ebpf-co-re-$(_LIBC)-${VER_MAJOR}.${VER_MINOR}.${VER_PATCH}.tar tools/check-kernel-core.sh; fi 19 | xz -f artifacts/netdata_ebpf-co-re-$(_LIBC)-${VER_MAJOR}.${VER_MINOR}.${VER_PATCH}.tar 20 | ( cd artifacts; sha256sum netdata_ebpf-co-re-$(_LIBC)-${VER_MAJOR}.${VER_MINOR}.${VER_PATCH}.tar.xz > netdata_ebpf-co-re-$(_LIBC)-${VER_MAJOR}.${VER_MINOR}.${VER_PATCH}.tar.xz.sha256sum ) 21 | 22 | clean: 23 | cd $(SOURCE_DIR) && $(MAKE) clean; 24 | rm -f artifacts/* 25 | rm -f includes/*skel.h 26 | rm -rf .local_libbpf 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Linux Kernel eBPF CO-RE 2 | 3 | ## Directory structure 4 | 5 | The respository has the 
following directory structure: 6 | 7 | - `artifacts`: directory that will have the eBPF programs when the compilation 8 | process ends. 9 | - `includes`: headers used to compile `eBPF.plugin`. 10 | - `kernel-collector`: this is a submodule'd fork of 11 | [netdata/libbpf](https://github.com/netdata/kernel-collector). 12 | - `libbpf`: this is a submodule'd fork of 13 | [netdata/libbpf](https://github.com/netdata/libbpf) which is itself a fork of 14 | the official `libbpf` package, the user-space side of eBPF system calls. 15 | 16 | ## Requirements 17 | 18 | #### Packages 19 | 20 | To compile the eBPF CO-RE, it will be necessary to have the following 21 | packages: 22 | 23 | - libelf headers 24 | - LLVM/Clang; this is because GCC prior to 10.0 cannot compile eBPF code. 25 | - `bpftool`: used to generate source codes. 26 | 27 | #### Initializing Submodules 28 | 29 | `libbpf` directory is included as a git submodule and it is necessary to fetch contents with the git command below: 30 | ```bash 31 | git submodule update --init --recursive 32 | 33 | -------------------------------------------------------------------------------- /artifacts/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netdata/ebpf-co-re/c74ab79e3150663b5ab65cc9f9f3ee727dc64857/artifacts/.gitkeep -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | log() { 6 | printf "%s\n" "${1}" 7 | } 8 | 9 | error() { 10 | log "ERROR: ${1}" 11 | } 12 | 13 | fail() { 14 | log "FATAL: ${1}" 15 | exit 1 16 | } 17 | 18 | if [ "$#" -eq 0 ]; then 19 | log "Usage: $(basename "$0") [] []" 20 | exit 1 21 | fi 22 | 23 | KERNEL_VERSION="${1}" 24 | LIBC="${2:-glibc}" 25 | OS="${3:-generic}" 26 | 27 | TAG="ebpf-co-re:$(echo "${KERNEL_VERSION}" | tr '.' 
'_')_${LIBC}_${OS}" 28 | 29 | # Treat a LIBC=static as a static build (STATIC=1) 30 | if [ "$LIBC" = "static" ]; then 31 | STATIC=1 32 | export STATIC 33 | fi 34 | 35 | git clean -d -f -x 36 | 37 | docker build \ 38 | -f Dockerfile."${LIBC}"."${OS}" \ 39 | -t "${TAG}" \ 40 | --build-arg KERNEL_VERSION="${KERNEL_VERSION}" \ 41 | ./ | tee prepare.log 42 | 43 | if [ -t 1 ]; then 44 | docker run \ 45 | -i -t --rm \ 46 | -v "$PWD":/ebpf-co-re \ 47 | -w /ebpf-co-re \ 48 | --security-opt seccomp=unconfined \ 49 | -e DEBUG \ 50 | -e STATIC \ 51 | "${TAG}" | tee build.log 52 | else 53 | docker run \ 54 | --rm \ 55 | -v "$PWD":/ebpf-co-re \ 56 | -w /ebpf-co-re \ 57 | --security-opt seccomp=unconfined \ 58 | -e DEBUG \ 59 | -e STATIC \ 60 | "${TAG}" | tee build.log 61 | fi 62 | -------------------------------------------------------------------------------- /includes/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netdata/ebpf-co-re/c74ab79e3150663b5ab65cc9f9f3ee727dc64857/includes/.gitkeep -------------------------------------------------------------------------------- /includes/netdata_core.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-3.0-or-later 2 | 3 | #ifndef _NETDATA_CORE_ 4 | #define _NETDATA_CORE_ 1 5 | 6 | #ifndef TASK_COMM_LEN 7 | #define TASK_COMM_LEN 16 8 | #endif 9 | 10 | #ifndef PID_MAX_DEFAULT 11 | #define PID_MAX_DEFAULT 0x8000 12 | #endif 13 | 14 | // Any header must be included after this line 15 | 16 | #include "netdata_common.h" 17 | 18 | #endif /* _NETDATA_CORE_ */ 19 | -------------------------------------------------------------------------------- /src/DEVELOPER.md: -------------------------------------------------------------------------------- 1 | # Developers 2 | 3 | This MD file was added to help developers starting with eBPF development. 
4 | 5 | In this repo we are using the same [pattern](https://elixir.bootlin.com/linux/v6.3-rc2/source/samples/bpf) that was used with 6 | latest [BTF](https://docs.kernel.org/bpf/btf.html) code. All source files ending with `.bpf.c` are eBPF code converted to 7 | `.skel.h` files (These are headers used to load eBPF code). We have independent source files `*.c` to demonstrate 8 | the usage of `skel.h` files, these files are used with [eBPF.plugin](https://github.com/netdata/netdata/tree/master/collectors/ebpf.plugin) 9 | to load specific eBPF programs. 10 | 11 | ## Libbpf 12 | 13 | This repo using only the latest [latest](https://github.com/netdata/libbpf) libbpf version. 14 | 15 | ## Compiling kernel 16 | 17 | To be able to test and compile the repo code, your kernel needs to be compiled with at least the following options: 18 | 19 | ```sh 20 | CONFIG_DEBUG_INFO_BTF=y 21 | CONFIG_PAHOLE_HAS_SPLIT_BTF=y 22 | CONFIG_DEBUG_INFO_BTF_MODULES=y 23 | CONFIG_MODULE_ALLOW_BTF_MISMATCH=y 24 | ``` 25 | 26 | Your environment also needs to have [pahole](https://lwn.net/Articles/335942/) installed. Install it using package management or with the following steps: 27 | 28 | ```sh 29 | # git clone https://git.kernel.org/pub/scm/devel/pahole/pahole.git 30 | # cd pahole 31 | # git submodule update --init 32 | # mkdir build 33 | # cd build 34 | # cmake -D__LIB=lib64 -DCMAKE_INSTALL_PREFIX=/usr .. 35 | # make 36 | # make install 37 | 38 | ``` 39 | 40 | ## Internal Code division 41 | 42 | The code division for `CO-RE` code (`bpf.c`) is the same used for [legacy code](https://github.com/netdata/kernel-collector/blob/master/kernel/DEVELOPER.md#internal-code-division). 
43 | 44 | ## Headers 45 | 46 | By default `eBPF CO-RE` code needs a header generated with the following `bpftool` command: 47 | 48 | ```sh 49 | # bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h 50 | ``` 51 | 52 | ## Skel files 53 | 54 | When all compilation is finished, the `skel.c` files are stored inside `includes/` directory. These are the files used with [eBPF.plugin](https://github.com/netdata/netdata/tree/master/collectors/ebpf.plugin). 55 | 56 | ### Skel code division 57 | 58 | Inside these headers we have: 59 | 60 | - A `structure` that defines eveyrything insie `bpf.c` files (maps and eBPF programs). 61 | - Functions to work with a specific eBPF code. To explain better I will use `NAME` to define these 'specific' code: 62 | - `NAME_bpf__open`: function that open the `CO-RE` code. 63 | - `NAME_bpf__load`: function that loads the binary code without to attach to final target. 64 | - `NAME_bpf__attach`: Attach `CO-RE` code to targets that can be `trampolines`, and `tracepoints`. For probes it is preferred to use `bpf_program__attach_kprobe`. 65 | Anything that we want to modify in the code needs to be done before to call it. 66 | - `NAME_bpf__destroy`: function that unloads the `CO-RE` code 67 | 68 | -------------------------------------------------------------------------------- /src/Makefile: -------------------------------------------------------------------------------- 1 | LIBBPF = ../libbpf 2 | CFLAGS = -Wall -ggdb 3 | INCLUDES = -I../.local_libbpf/ -I../includes/ -I$(LIBBPF)/src/ -I. 
-I../kernel-collector/includes/ -I$(LIBBPF)/include/uapi/ 4 | CLANG ?= clang 5 | LLVM_STRIP ?= llvm-strip 6 | OUTPUT = tests/ 7 | ARCH := $(shell uname -m | sed 's/x86_64/x86/') 8 | CLANG_BPF_SYS_INCLUDES = $(shell $(CLANG) -v -E - &1 \ 9 | | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') 10 | KERNEL_VERSION="$(shell cat /proc/sys/kernel/osrelease)" 11 | FIRST_KERNEL_VERSION=$(shell sh ../kernel-collector/tools/complement.sh "$(KERNEL_VERSION)") 12 | VER_MAJOR=$(shell echo $(KERNEL_VERSION) | cut -d. -f1) 13 | VER_MINOR=$(shell echo $(KERNEL_VERSION) | cut -d. -f2) 14 | VER_PATCH=$(shell echo $(KERNEL_VERSION) | cut -d. -f3 | cut -d\- -f1) 15 | RUNNING_VERSION_CODE=$(shell echo $$(( $(VER_MAJOR) * 65536 + $(VER_MINOR) * 256 + $(VER_PATCH))) ) 16 | 17 | _LIBC ?= glibc 18 | 19 | APPS = cachestat \ 20 | dc \ 21 | disk \ 22 | fd \ 23 | filesystem \ 24 | hardirq \ 25 | networkviewer \ 26 | mdflush \ 27 | mount \ 28 | oomkill \ 29 | process \ 30 | shm \ 31 | socket \ 32 | softirq \ 33 | swap \ 34 | sync \ 35 | vfs \ 36 | # 37 | 38 | all: compress 39 | 40 | libbpf: 41 | cd $(LIBBPF)/src && $(MAKE) BUILD_STATIC_ONLY=1 DESTDIR=../../.local_libbpf INCLUDEDIR= LIBDIR= UAPIDIR= install \ 42 | 43 | %.bpf.o: %.bpf.c libbpf 44 | $(CLANG) -fno-stack-protector $(INCLUDES) -ggdb -O2 -target bpf -DMY_LINUX_VERSION_CODE=$(RUNNING_VERSION_CODE) -D__TARGET_ARCH_$(ARCH) $(CLANG_BPF_SYS_INCLUDES) -c $(filter %.c,$^) -o $@ 45 | $(LLVM_STRIP) -g $@ # strip useless DWARF info 46 | 47 | %.skel.h: %.bpf.o 48 | bpftool gen skeleton $< > ../includes/$@ 49 | 50 | $(patsubst %,%.o,$(APPS)): %.o: %.skel.h 51 | 52 | %.o: %.c 53 | $(CC) $(CFLAGS) -DMY_LINUX_VERSION_CODE=$(RUNNING_VERSION_CODE) $(INCLUDES) -c $(filter %.c,$^) -o $@ 54 | 55 | $(APPS): %: %.o 56 | $(CC) $(CFLAGS) -L../.local_libbpf $^ -lbpf -lelf -lz -o $(OUTPUT)$@ 57 | 58 | compress: $(APPS) 59 | 60 | clean: 61 | rm -f ../artifacts/netdata_ebpf-CO-RE-*.tar.xz 62 | rm -f 
../artifacts/netdata_ebpf-CO-RE-*.tar.xz.sha256sum 63 | cd $(LIBBPF)/src/ && make clean 64 | cd $(OUTPUT) && find . -type f ! -name '*.sh' ! -name '.git*' -delete 65 | rm -f *.o 66 | -------------------------------------------------------------------------------- /src/cachestat.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_cache.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 16 | __type(key, __u32); 17 | __type(value, __u64); 18 | __uint(max_entries, NETDATA_CACHESTAT_END); 19 | } cstat_global SEC(".maps"); 20 | 21 | struct { 22 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 23 | __type(key, __u32); 24 | __type(value, netdata_cachestat_t); 25 | __uint(max_entries, PID_MAX_DEFAULT); 26 | } cstat_pid SEC(".maps"); 27 | 28 | struct { 29 | __uint(type, BPF_MAP_TYPE_ARRAY); 30 | __type(key, __u32); 31 | __type(value, __u64); 32 | __uint(max_entries, NETDATA_CONTROLLER_END); 33 | } cstat_ctrl SEC(".maps"); 34 | 35 | /************************************************************************************ 36 | * 37 | * CACHESTAT Common 38 | * 39 | ***********************************************************************************/ 40 | 41 | static __always_inline int netdata_cachetat_not_update_apps(__u32 idx) 42 | { 43 | libnetdata_update_global(&cstat_global, idx, 1); 44 | 45 | __u32 key = NETDATA_CONTROLLER_APPS_ENABLED; 46 | __u32 *apps = bpf_map_lookup_elem(&cstat_ctrl ,&key); 47 | if (apps && *apps) 48 | return 0; 49 | 50 | return 1; 51 | } 52 | 53 | static __always_inline int netdata_common_page_cache_lru() 54 | { 55 | netdata_cachestat_t *fill, data = {}; 56 | 57 | 
if (netdata_cachetat_not_update_apps(NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU)) 58 | return 0; 59 | 60 | __u32 key = 0; 61 | __u32 tgid = 0; 62 | fill = netdata_get_pid_structure(&key, &tgid, &cstat_ctrl, &cstat_pid); 63 | if (fill) { 64 | libnetdata_update_u32(&fill->add_to_page_cache_lru, 1); 65 | } else { 66 | data.ct = bpf_ktime_get_ns(); 67 | libnetdata_update_uid_gid(&data.uid, &data.gid); 68 | data.tgid = tgid; 69 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 70 | 71 | data.add_to_page_cache_lru = 1; 72 | bpf_map_update_elem(&cstat_pid, &key, &data, BPF_ANY); 73 | 74 | libnetdata_update_global(&cstat_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 75 | } 76 | 77 | return 0; 78 | } 79 | 80 | static __always_inline int netdata_common_page_accessed() 81 | { 82 | netdata_cachestat_t *fill, data = {}; 83 | if (netdata_cachetat_not_update_apps(NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED)) 84 | return 0; 85 | 86 | __u32 key = 0; 87 | __u32 tgid = 0; 88 | fill = netdata_get_pid_structure(&key, &tgid, &cstat_ctrl, &cstat_pid); 89 | if (fill) { 90 | libnetdata_update_u32(&fill->mark_page_accessed, 1); 91 | } else { 92 | data.ct = bpf_ktime_get_ns(); 93 | data.tgid = tgid; 94 | libnetdata_update_uid_gid(&data.uid, &data.gid); 95 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 96 | 97 | data.mark_page_accessed = 1; 98 | bpf_map_update_elem(&cstat_pid, &key, &data, BPF_ANY); 99 | 100 | libnetdata_update_global(&cstat_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 101 | } 102 | 103 | return 0; 104 | } 105 | 106 | static __always_inline int netdata_common_page_dirtied() 107 | { 108 | netdata_cachestat_t *fill, data = {}; 109 | 110 | if (netdata_cachetat_not_update_apps(NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED)) 111 | return 0; 112 | 113 | __u32 key = 0; 114 | __u32 tgid = 0; 115 | fill = netdata_get_pid_structure(&key, &tgid, &cstat_ctrl, &cstat_pid); 116 | if (fill) { 117 | libnetdata_update_u32(&fill->account_page_dirtied, 1); 118 | } else { 119 | data.ct = bpf_ktime_get_ns(); 
120 | data.tgid = tgid; 121 | libnetdata_update_uid_gid(&data.uid, &data.gid); 122 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 123 | 124 | data.account_page_dirtied = 1; 125 | bpf_map_update_elem(&cstat_pid, &key, &data, BPF_ANY); 126 | 127 | libnetdata_update_global(&cstat_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 128 | } 129 | 130 | return 0; 131 | } 132 | 133 | static __always_inline int netdata_common_buffer_dirty() 134 | { 135 | netdata_cachestat_t *fill, data = {}; 136 | 137 | if (netdata_cachetat_not_update_apps(NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY)) 138 | return 0; 139 | 140 | __u32 key = 0; 141 | __u32 tgid = 0; 142 | fill = netdata_get_pid_structure(&key, &tgid, &cstat_ctrl, &cstat_pid); 143 | if (fill) { 144 | libnetdata_update_u32(&fill->mark_buffer_dirty, 1); 145 | } else { 146 | data.ct = bpf_ktime_get_ns(); 147 | data.tgid = tgid; 148 | libnetdata_update_uid_gid(&data.uid, &data.gid); 149 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 150 | 151 | data.mark_buffer_dirty = 1; 152 | bpf_map_update_elem(&cstat_pid, &key, &data, BPF_ANY); 153 | 154 | libnetdata_update_global(&cstat_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 155 | } 156 | 157 | return 0; 158 | } 159 | 160 | /************************************************************************************ 161 | * 162 | * CACHESTAT Section (Probe) 163 | * 164 | ***********************************************************************************/ 165 | 166 | SEC("kprobe/add_to_page_cache_lru") 167 | int BPF_KPROBE(netdata_add_to_page_cache_lru_kprobe) 168 | { 169 | return netdata_common_page_cache_lru(); 170 | } 171 | 172 | SEC("kprobe/mark_page_accessed") 173 | int BPF_KPROBE(netdata_mark_page_accessed_kprobe) 174 | { 175 | return netdata_common_page_accessed(); 176 | } 177 | 178 | // When kernel 5.16.0 was released the function __set_page_dirty became static 179 | // and a new function was created. 
180 | SEC("kprobe/__folio_mark_dirty") 181 | int BPF_KPROBE(netdata_folio_mark_dirty_kprobe) 182 | { 183 | return netdata_common_page_dirtied(); 184 | } 185 | 186 | // When kernel 5.15.0 was released the function account_page_dirtied became static 187 | // https://elixir.bootlin.com/linux/v5.15/source/mm/page-writeback.c#L2441 188 | // as consequence of this, we are monitoring the function from caller. 189 | SEC("kprobe/__set_page_dirty") 190 | int BPF_KPROBE(netdata_set_page_dirty_kprobe) 191 | { 192 | struct page *page = (struct page *)PT_REGS_PARM1(ctx) ; 193 | struct address_space *mapping = _(page->mapping); 194 | 195 | if (!mapping) 196 | return 0; 197 | 198 | return netdata_common_page_dirtied(); 199 | } 200 | 201 | SEC("kprobe/account_page_dirtied") 202 | int BPF_KPROBE(netdata_account_page_dirtied_kprobe) 203 | { 204 | return netdata_common_page_dirtied(); 205 | } 206 | 207 | SEC("kprobe/mark_buffer_dirty") 208 | int BPF_KPROBE(netdata_mark_buffer_dirty_kprobe) 209 | { 210 | return netdata_common_buffer_dirty(); 211 | } 212 | 213 | /************************************************************************************ 214 | * 215 | * CACHESTAT Section (Probe) 216 | * 217 | ***********************************************************************************/ 218 | 219 | SEC("fentry/add_to_page_cache_lru") 220 | int BPF_PROG(netdata_add_to_page_cache_lru_fentry) 221 | { 222 | return netdata_common_page_cache_lru(); 223 | } 224 | 225 | SEC("fentry/mark_page_accessed") 226 | int BPF_PROG(netdata_mark_page_accessed_fentry) 227 | { 228 | return netdata_common_page_accessed(); 229 | } 230 | 231 | // When kernel 5.16.0 was released the function __set_page_dirty became static 232 | // and a new function was created. 
233 | SEC("fentry/__folio_mark_dirty") 234 | int BPF_PROG(netdata_folio_mark_dirty_fentry) 235 | { 236 | return netdata_common_page_dirtied(); 237 | } 238 | 239 | // When kernel 5.15.0 was released the function account_page_dirtied became static 240 | // https://elixir.bootlin.com/linux/v5.15/source/mm/page-writeback.c#L2441 241 | // as consequence of this, we are monitoring the function from caller. 242 | SEC("fentry/__set_page_dirty") 243 | int BPF_PROG(netdata_set_page_dirty_fentry, struct page *page) 244 | { 245 | if (!page->mapping) 246 | return 0; 247 | 248 | return netdata_common_page_dirtied(); 249 | } 250 | 251 | SEC("fentry/account_page_dirtied") 252 | int BPF_PROG(netdata_account_page_dirtied_fentry) 253 | { 254 | return netdata_common_page_dirtied(); 255 | } 256 | 257 | SEC("fentry/mark_buffer_dirty") 258 | int BPF_PROG(netdata_mark_buffer_dirty_fentry) 259 | { 260 | return netdata_common_buffer_dirty(); 261 | } 262 | 263 | char _license[] SEC("license") = "GPL"; 264 | 265 | -------------------------------------------------------------------------------- /src/dc.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_dc.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 16 | __type(key, __u32); 17 | __type(value, __u64); 18 | __uint(max_entries, NETDATA_DIRECTORY_CACHE_END); 19 | } dcstat_global SEC(".maps"); 20 | 21 | struct { 22 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 23 | __type(key, __u32); 24 | __type(value, netdata_dc_stat_t); 25 | __uint(max_entries, PID_MAX_DEFAULT); 26 | } dcstat_pid SEC(".maps"); 27 | 28 | struct { 29 | __uint(type, 
BPF_MAP_TYPE_PERCPU_ARRAY); 30 | __type(key, __u32); 31 | __type(value, __u64); 32 | __uint(max_entries, NETDATA_CONTROLLER_END); 33 | } dcstat_ctrl SEC(".maps"); 34 | 35 | /*********************************************************************************** 36 | * 37 | * DC COMMON 38 | * 39 | ***********************************************************************************/ 40 | 41 | static __always_inline int netdata_dc_not_update_apps() 42 | { 43 | __u32 key = NETDATA_CONTROLLER_APPS_ENABLED; 44 | __u32 *apps = bpf_map_lookup_elem(&dcstat_ctrl ,&key); 45 | if (apps && *apps) 46 | return 0; 47 | 48 | return 1; 49 | } 50 | 51 | static __always_inline int netdata_common_lookup_fast() 52 | { 53 | netdata_dc_stat_t *fill, data = {}; 54 | __u32 key = 0; 55 | __u32 tgid = 0; 56 | 57 | libnetdata_update_global(&dcstat_global, NETDATA_KEY_DC_REFERENCE, 1); 58 | 59 | if (netdata_dc_not_update_apps()) 60 | return 0; 61 | 62 | fill = netdata_get_pid_structure(&key, &tgid, &dcstat_ctrl, &dcstat_pid); 63 | if (fill) { 64 | libnetdata_update_u32(&fill->references, 1); 65 | } else { 66 | data.references = 1; 67 | data.tgid = tgid; 68 | libnetdata_update_uid_gid(&data.uid, &data.gid); 69 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 70 | bpf_map_update_elem(&dcstat_pid, &key, &data, BPF_ANY); 71 | 72 | libnetdata_update_global(&dcstat_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 73 | } 74 | 75 | return 0; 76 | } 77 | 78 | static __always_inline int netdata_common_d_lookup(long ret) 79 | { 80 | netdata_dc_stat_t *fill, data = {}; 81 | __u32 key = 0; 82 | __u32 tgid = 0; 83 | 84 | libnetdata_update_global(&dcstat_global, NETDATA_KEY_DC_SLOW, 1); 85 | 86 | if (netdata_dc_not_update_apps()) 87 | return 0; 88 | 89 | fill = netdata_get_pid_structure(&key, &tgid, &dcstat_ctrl, &dcstat_pid); 90 | if (fill) { 91 | libnetdata_update_u32(&fill->slow, 1); 92 | } else { 93 | data.slow = 1; 94 | data.tgid = tgid; 95 | libnetdata_update_uid_gid(&data.uid, &data.gid); 96 | 
bpf_get_current_comm(&data.name, TASK_COMM_LEN); 97 | bpf_map_update_elem(&dcstat_pid, &key, &data, BPF_ANY); 98 | 99 | libnetdata_update_global(&dcstat_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 100 | } 101 | 102 | // file not found 103 | if (!ret) { 104 | libnetdata_update_global(&dcstat_global, NETDATA_KEY_DC_MISS, 1); 105 | fill = netdata_get_pid_structure(&key, &tgid, &dcstat_ctrl, &dcstat_pid); 106 | if (fill) { 107 | libnetdata_update_u32(&fill->missed, 1); 108 | } 109 | } 110 | 111 | return 0; 112 | } 113 | 114 | /*********************************************************************************** 115 | * 116 | * DC SECTION(kprobe) 117 | * 118 | ***********************************************************************************/ 119 | 120 | SEC("kprobe/lookup_fast") 121 | int BPF_KPROBE(netdata_lookup_fast_kprobe) 122 | { 123 | return netdata_common_lookup_fast(); 124 | } 125 | 126 | SEC("kretprobe/d_lookup") 127 | int BPF_KRETPROBE(netdata_d_lookup_kretprobe) 128 | { 129 | long ret = PT_REGS_RC(ctx); 130 | 131 | return netdata_common_d_lookup(ret); 132 | } 133 | 134 | /*********************************************************************************** 135 | * 136 | * DC SECTION(trampoline) 137 | * 138 | ***********************************************************************************/ 139 | 140 | SEC("fentry/lookup_fast") 141 | int BPF_PROG(netdata_lookup_fast_fentry) 142 | { 143 | return netdata_common_lookup_fast(); 144 | } 145 | 146 | SEC("fexit/d_lookup") 147 | int BPF_PROG(netdata_d_lookup_fexit, const struct dentry *parent, const struct qstr *name, 148 | struct dentry *ret) 149 | { 150 | return netdata_common_d_lookup((long)ret); 151 | } 152 | 153 | char _license[] SEC("license") = "GPL"; 154 | 155 | -------------------------------------------------------------------------------- /src/dc.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE 
/* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include "netdata_defs.h" 12 | #include "netdata_tests.h" 13 | #include "netdata_core_common.h" 14 | #include "netdata_dc.h" 15 | 16 | #include "dc.skel.h" 17 | 18 | char *function_list[] = { "lookup_fast", 19 | "d_lookup" 20 | }; 21 | // This preprocessor is defined here, because it is not useful in kernel-colector 22 | #define NETDATA_DCSTAT_RELEASE_TASK 2 23 | 24 | static inline void ebpf_disable_probes(struct dc_bpf *obj) 25 | { 26 | bpf_program__set_autoload(obj->progs.netdata_lookup_fast_kprobe, false); 27 | bpf_program__set_autoload(obj->progs.netdata_d_lookup_kretprobe, false); 28 | } 29 | 30 | static inline void ebpf_disable_trampoline(struct dc_bpf *obj) 31 | { 32 | bpf_program__set_autoload(obj->progs.netdata_lookup_fast_fentry, false); 33 | bpf_program__set_autoload(obj->progs.netdata_d_lookup_fexit, false); 34 | } 35 | 36 | static void ebpf_set_trampoline_target(struct dc_bpf *obj) 37 | { 38 | bpf_program__set_attach_target(obj->progs.netdata_lookup_fast_fentry, 0, 39 | function_list[NETDATA_LOOKUP_FAST]); 40 | 41 | bpf_program__set_attach_target(obj->progs.netdata_d_lookup_fexit, 0, 42 | function_list[NETDATA_D_LOOKUP]); 43 | } 44 | 45 | static int ebpf_attach_probes(struct dc_bpf *obj) 46 | { 47 | obj->links.netdata_d_lookup_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_d_lookup_kretprobe, 48 | true, function_list[NETDATA_D_LOOKUP]); 49 | int ret = libbpf_get_error(obj->links.netdata_d_lookup_kretprobe); 50 | if (ret) 51 | return -1; 52 | 53 | obj->links.netdata_lookup_fast_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_lookup_fast_kprobe, 54 | false, function_list[NETDATA_LOOKUP_FAST]); 55 | ret = libbpf_get_error(obj->links.netdata_lookup_fast_kprobe); 56 | if (ret) 57 | return -1; 58 | 59 | return 0; 60 | } 61 | 62 | static inline int ebpf_load_and_attach(struct dc_bpf *obj, int selector) 63 | { 64 | // Adjust memory 65 | int ret; 66 | if 
(!selector) { // trampoline 67 | ebpf_disable_probes(obj); 68 | 69 | ebpf_set_trampoline_target(obj); 70 | } else if (selector == NETDATA_MODE_PROBE) { // kprobe 71 | ebpf_disable_trampoline(obj); 72 | } 73 | 74 | ret = dc_bpf__load(obj); 75 | if (ret) { 76 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 77 | return -1; 78 | } 79 | 80 | if (!selector) { 81 | ret = dc_bpf__attach(obj); 82 | } else { 83 | ret = ebpf_attach_probes(obj); 84 | } 85 | 86 | if (!ret) { 87 | fprintf(stdout, "Directory Cache loaded with success\n"); 88 | } 89 | 90 | return ret; 91 | } 92 | 93 | static int dc_read_apps_array(int fd, int ebpf_nprocs) 94 | { 95 | netdata_dc_stat_t stored[ebpf_nprocs]; 96 | 97 | uint32_t key, next_key; 98 | uint64_t counter = 0; 99 | key = next_key = 0; 100 | 101 | while (!bpf_map_get_next_key(fd, &key, &next_key)) { 102 | if (!bpf_map_lookup_elem(fd, &key, stored)) { 103 | counter++; 104 | } 105 | memset(stored, 0, ebpf_nprocs*sizeof(netdata_dc_stat_t)); 106 | 107 | key = next_key; 108 | } 109 | 110 | if (counter) { 111 | fprintf(stdout, "Apps data stored with success. 
It collected %lu pids\n", counter); 112 | return 0; 113 | } 114 | 115 | return 2; 116 | } 117 | 118 | static pid_t ebpf_update_tables(int global, int apps) 119 | { 120 | pid_t pid = ebpf_fill_global(global); 121 | 122 | netdata_dc_stat_t stats = { .references = 1, .slow = 1, .missed = 1}; 123 | 124 | uint32_t idx = (uint32_t)pid; 125 | int ret = bpf_map_update_elem(apps, &idx, &stats, 0); 126 | if (ret) 127 | fprintf(stderr, "Cannot insert value to apps table."); 128 | 129 | return pid; 130 | } 131 | 132 | static int ebpf_dc_tests(int selector, enum netdata_apps_level map_level) 133 | { 134 | struct dc_bpf *obj = NULL; 135 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 136 | if (ebpf_nprocs < 0) 137 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 138 | 139 | obj = dc_bpf__open(); 140 | if (!obj) { 141 | goto load_error; 142 | } 143 | 144 | int ret = ebpf_load_and_attach(obj, selector); 145 | if (ret && selector != NETDATA_MODE_PROBE) { 146 | dc_bpf__destroy(obj); 147 | 148 | obj = dc_bpf__open(); 149 | if (!obj) { 150 | goto load_error; 151 | } 152 | 153 | selector = NETDATA_MODE_PROBE; 154 | ret = ebpf_load_and_attach(obj, selector); 155 | } 156 | 157 | if (!ret) { 158 | int fd = bpf_map__fd(obj->maps.dcstat_ctrl); 159 | ebpf_core_fill_ctrl(obj->maps.dcstat_ctrl, map_level); 160 | 161 | fd = bpf_map__fd(obj->maps.dcstat_global); 162 | int fd2 = bpf_map__fd(obj->maps.dcstat_pid); 163 | (void)ebpf_update_tables(fd, fd2); 164 | sleep(60); 165 | 166 | ret = ebpf_read_global_array(fd, ebpf_nprocs, NETDATA_DIRECTORY_CACHE_END); 167 | if (!ret) { 168 | ret = dc_read_apps_array(fd2, ebpf_nprocs); 169 | if (ret) 170 | fprintf(stdout, "Empty apps table\n"); 171 | } else 172 | fprintf(stderr, "Cannot read global table\n"); 173 | } else { 174 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 175 | ret = 3; 176 | } 177 | 178 | dc_bpf__destroy(obj); 179 | 180 | return ret; 181 | load_error: 182 | fprintf(stderr, "Cannot open or load BPF object\n"); 183 | return 2; 184 
| } 185 | 186 | int main(int argc, char **argv) 187 | { 188 | static struct option long_options[] = { 189 | {"help", no_argument, 0, 0 }, 190 | {"probe", no_argument, 0, 0 }, 191 | {"tracepoint", no_argument, 0, 0 }, 192 | {"trampoline", no_argument, 0, 0 }, 193 | {"pid", required_argument, 0, 0 }, 194 | {0, no_argument, 0, 0} 195 | }; 196 | 197 | int selector = NETDATA_MODE_TRAMPOLINE; 198 | int option_index = 0; 199 | enum netdata_apps_level map_level = NETDATA_APPS_LEVEL_REAL_PARENT; 200 | while (1) { 201 | int c = getopt_long_only(argc, argv, "", long_options, &option_index); 202 | if (c == -1) 203 | break; 204 | 205 | switch (option_index) { 206 | case NETDATA_EBPF_CORE_IDX_HELP: { 207 | ebpf_core_print_help(argv[0], "dc", 1, 1); 208 | exit(0); 209 | } 210 | case NETDATA_EBPF_CORE_IDX_PROBE: { 211 | selector = NETDATA_MODE_PROBE; 212 | break; 213 | } 214 | case NETDATA_EBPF_CORE_IDX_TRACEPOINT: { 215 | selector = NETDATA_MODE_PROBE; 216 | fprintf(stdout, "This specific software does not have tracepoint, using kprobe instead\n"); 217 | break; 218 | } 219 | case NETDATA_EBPF_CORE_IDX_TRAMPOLINE: { 220 | selector = NETDATA_MODE_TRAMPOLINE; 221 | break; 222 | } 223 | case NETDATA_EBPF_CORE_IDX_PID: { 224 | int user_input = (int)strtol(optarg, NULL, 10); 225 | map_level = ebpf_check_map_level(user_input); 226 | break; 227 | } 228 | default: { 229 | break; 230 | } 231 | } 232 | } 233 | 234 | int ret = netdata_ebf_memlock_limit(); 235 | if (ret) { 236 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 237 | return 1; 238 | } 239 | 240 | libbpf_set_print(netdata_libbpf_vfprintf); 241 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 242 | 243 | char *lookup_fast = netdata_update_name(function_list[NETDATA_LOOKUP_FAST]); 244 | if (!lookup_fast) { 245 | return 2; 246 | } 247 | function_list[NETDATA_LOOKUP_FAST] = lookup_fast; 248 | 249 | struct btf *bf = NULL; 250 | if (!selector) { 251 | bf = netdata_parse_btf_file((const char *)NETDATA_BTF_FILE); 252 | if (bf) 
{ 253 | selector = ebpf_find_functions(bf, selector, function_list, NETDATA_DC_COUNTER); 254 | btf__free(bf); 255 | } 256 | } 257 | 258 | int stop_software = 0; 259 | while (stop_software < 2) { 260 | if (ebpf_dc_tests(selector, map_level) && !stop_software) { 261 | selector = 1; 262 | stop_software++; 263 | } else 264 | stop_software = 2; 265 | } 266 | 267 | free(lookup_fast); 268 | 269 | return 0; 270 | } 271 | 272 | -------------------------------------------------------------------------------- /src/disk.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_disk.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | //Hardware 15 | struct { 16 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 17 | __type(key, block_key_t); 18 | __type(value, __u64); 19 | __uint(max_entries, NETDATA_DISK_HISTOGRAM_LENGTH); 20 | } tbl_disk_iocall SEC(".maps"); 21 | 22 | // Temporary use only 23 | struct { 24 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 25 | __type(key, netdata_disk_key_t); 26 | __type(value, __u64); 27 | __uint(max_entries, 8192); 28 | } tmp_disk_tp_stat SEC(".maps"); 29 | 30 | struct { 31 | __uint(type, BPF_MAP_TYPE_ARRAY); 32 | __type(key, __u32); 33 | __type(value, __u64); 34 | __uint(max_entries, NETDATA_CONTROLLER_END); 35 | } disk_ctrl SEC(".maps"); 36 | 37 | 38 | /************************************************************************************ 39 | * 40 | * DISK SECTION 41 | * 42 | ***********************************************************************************/ 43 | 44 | SEC("tracepoint/block/block_rq_issue") 45 | int netdata_block_rq_issue(struct netdata_block_rq_issue *ptr) 46 | { 47 | // blkid generates these and we're 
not interested in them 48 | if (!ptr->dev) 49 | return 0; 50 | 51 | netdata_disk_key_t key = {}; 52 | key.dev = ptr->dev; 53 | key.sector = ptr->sector; 54 | 55 | if (key.sector < 0) 56 | key.sector = 0; 57 | 58 | __u64 value = bpf_ktime_get_ns(); 59 | 60 | bpf_map_update_elem(&tmp_disk_tp_stat, &key, &value, BPF_ANY); 61 | 62 | libnetdata_update_global(&disk_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 63 | 64 | return 0; 65 | } 66 | 67 | SEC("tracepoint/block/block_rq_complete") 68 | int netdata_block_rq_complete(struct netdata_block_rq_complete *ptr) 69 | { 70 | __u64 *fill; 71 | netdata_disk_key_t key = {}; 72 | block_key_t blk = {}; 73 | key.dev = ptr->dev; 74 | key.sector = ptr->sector; 75 | 76 | if (key.sector < 0) 77 | key.sector = 0; 78 | 79 | fill = bpf_map_lookup_elem(&tmp_disk_tp_stat ,&key); 80 | if (!fill) 81 | return 0; 82 | 83 | // calculate and convert to microsecond 84 | u64 curr = bpf_ktime_get_ns(); 85 | __u64 data, *update; 86 | curr -= *fill; 87 | curr /= 1000; 88 | 89 | blk.bin = libnetdata_select_idx(curr, NETDATA_FS_MAX_BINS_POS); 90 | blk.dev = netdata_new_encode_dev(ptr->dev); 91 | 92 | // Update IOPS 93 | update = bpf_map_lookup_elem(&tbl_disk_iocall ,&blk); 94 | if (update) { 95 | libnetdata_update_u64(update, 1); 96 | } else { 97 | data = 1; 98 | bpf_map_update_elem(&tbl_disk_iocall, &blk, &data, BPF_ANY); 99 | } 100 | 101 | bpf_map_delete_elem(&tmp_disk_tp_stat, &key); 102 | 103 | libnetdata_update_global(&disk_ctrl, NETDATA_CONTROLLER_PID_TABLE_DEL, 1); 104 | 105 | return 0; 106 | } 107 | 108 | char _license[] SEC("license") = "GPL"; 109 | 110 | -------------------------------------------------------------------------------- /src/disk.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include "netdata_defs.h" 12 | #include 
"netdata_core_common.h" 13 | #include "netdata_tests.h" 14 | 15 | #include "disk.skel.h" 16 | 17 | // Copied and redefined from ../include/netdata_disk.h 18 | typedef struct block_key { 19 | uint32_t bin; 20 | uint32_t dev; 21 | } block_key_t; 22 | 23 | static inline int ebpf_load_and_attach(struct disk_bpf *obj) 24 | { 25 | int ret = disk_bpf__load(obj); 26 | if (ret) { 27 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 28 | return -1; 29 | } 30 | 31 | ret = disk_bpf__attach(obj); 32 | if (!ret) { 33 | fprintf(stdout, "Disk loaded with success\n"); 34 | } 35 | 36 | return ret; 37 | } 38 | 39 | static void ebpf_update_table(int global) 40 | { 41 | block_key_t idx = { .bin = 0, .dev = 0}; 42 | unsigned char value = 'a'; 43 | int ret = bpf_map_update_elem(global, &idx, &value, 0); 44 | if (ret) 45 | fprintf(stderr, "Cannot insert value to global table."); 46 | } 47 | 48 | static int disk_read_array(int fd, int ebpf_nprocs) 49 | { 50 | uint64_t stored[ebpf_nprocs]; 51 | 52 | uint64_t counter = 0; 53 | block_key_t idx = { .bin = 0, .dev = 0}; 54 | if (!bpf_map_lookup_elem(fd, &idx, stored)) { 55 | int j; 56 | for (j = 0; j < ebpf_nprocs; j++) { 57 | counter += stored[j]; 58 | } 59 | } 60 | 61 | if (counter) { 62 | fprintf(stdout, "Data stored with success\n"); 63 | return 0; 64 | } 65 | 66 | return 2; 67 | } 68 | 69 | static int ebpf_disk_tests() 70 | { 71 | struct disk_bpf *obj = NULL; 72 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 73 | if (ebpf_nprocs < 0) 74 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 75 | 76 | obj = disk_bpf__open(); 77 | if (!obj) { 78 | fprintf(stderr, "Cannot open or load BPF object\n"); 79 | 80 | return 2; 81 | } 82 | 83 | int ret = ebpf_load_and_attach(obj); 84 | if (!ret) { 85 | int fd = bpf_map__fd(obj->maps.tbl_disk_iocall); 86 | ebpf_update_table(fd); 87 | 88 | ret = disk_read_array(fd, ebpf_nprocs); 89 | if (ret) 90 | fprintf(stderr, "Cannot read global table\n"); 91 | } else { 92 | ret = 3; 93 | fprintf(stderr 
,"%s", NETDATA_CORE_DEFAULT_ERROR); 94 | } 95 | 96 | disk_bpf__destroy(obj); 97 | 98 | return ret; 99 | } 100 | 101 | int main(int argc, char **argv) 102 | { 103 | static struct option long_options[] = { 104 | {"help", no_argument, 0, 'h' }, 105 | {0, 0, 0, 0} 106 | }; 107 | 108 | int option_index = 0; 109 | while (1) { 110 | int c = getopt_long(argc, argv, "", long_options, &option_index); 111 | if (c == -1) 112 | break; 113 | 114 | switch (c) { 115 | case 'h': { 116 | ebpf_tracepoint_help("Disk"); 117 | exit(0); 118 | } 119 | default: { 120 | break; 121 | } 122 | } 123 | } 124 | 125 | int ret = netdata_ebf_memlock_limit(); 126 | if (ret) { 127 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 128 | return 1; 129 | } 130 | 131 | libbpf_set_print(netdata_libbpf_vfprintf); 132 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 133 | 134 | return ebpf_disk_tests(); 135 | } 136 | 137 | -------------------------------------------------------------------------------- /src/fd.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_fd.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_HASH); 16 | __type(key, __u32); 17 | __type(value, struct netdata_fd_stat_t); 18 | __uint(max_entries, PID_MAX_DEFAULT); 19 | } tbl_fd_pid SEC(".maps"); 20 | 21 | struct { 22 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 23 | __type(key, __u32); 24 | __type(value, __u64); 25 | __uint(max_entries, NETDATA_FD_COUNTER); 26 | } tbl_fd_global SEC(".maps"); 27 | 28 | struct { 29 | __uint(type, BPF_MAP_TYPE_ARRAY); 30 | __type(key, __u32); 31 | __type(value, __u64); 32 | __uint(max_entries, NETDATA_CONTROLLER_END); 
33 | } fd_ctrl SEC(".maps"); 34 | 35 | /************************************************************************************ 36 | * 37 | * COMMON SECTION 38 | * 39 | ***********************************************************************************/ 40 | 41 | static __always_inline int netdata_are_apps_enabled() 42 | { 43 | __u32 key = NETDATA_CONTROLLER_APPS_ENABLED; 44 | __u32 *apps = bpf_map_lookup_elem(&fd_ctrl ,&key); 45 | if (apps) 46 | if (*apps == 0) 47 | return 0; 48 | 49 | return 1; 50 | } 51 | 52 | /************************************************************************************ 53 | * 54 | * KPROBE SECTION 55 | * 56 | ***********************************************************************************/ 57 | static __always_inline int netdata_apps_do_sys_openat2(long ret) 58 | { 59 | struct netdata_fd_stat_t *fill; 60 | struct netdata_fd_stat_t data = { }; 61 | 62 | if (!netdata_are_apps_enabled()) 63 | return 0; 64 | 65 | __u32 key = 0; 66 | __u32 tgid = 0; 67 | fill = netdata_get_pid_structure(&key, &tgid, &fd_ctrl, &tbl_fd_pid); 68 | if (fill) { 69 | libnetdata_update_u32(&fill->open_call, 1) ; 70 | if (ret < 0) 71 | libnetdata_update_u32(&fill->open_err, 1) ; 72 | } else { 73 | data.ct = bpf_ktime_get_ns(); 74 | data.tgid = tgid; 75 | libnetdata_update_uid_gid(&data.uid, &data.gid); 76 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 77 | data.open_call = 1; 78 | if (ret < 0) 79 | data.open_err = 1; 80 | 81 | bpf_map_update_elem(&tbl_fd_pid, &key, &data, BPF_ANY); 82 | 83 | libnetdata_update_global(&fd_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 84 | } 85 | 86 | 87 | return 0; 88 | } 89 | 90 | static __always_inline void netdata_sys_open_global(long ret) 91 | { 92 | if (ret < 0) 93 | libnetdata_update_global(&tbl_fd_global, NETDATA_KEY_ERROR_DO_SYS_OPEN, 1); 94 | 95 | libnetdata_update_global(&tbl_fd_global, NETDATA_KEY_CALLS_DO_SYS_OPEN, 1); 96 | } 97 | 98 | static __always_inline int netdata_apps_close_fd(int ret) 99 | { 100 | struct 
netdata_fd_stat_t data = { }; 101 | struct netdata_fd_stat_t *fill; 102 | 103 | if (!netdata_are_apps_enabled()) 104 | return 0; 105 | 106 | __u32 key = 0; 107 | __u32 tgid = 0; 108 | fill = netdata_get_pid_structure(&key, &tgid, &fd_ctrl, &tbl_fd_pid); 109 | if (fill) { 110 | libnetdata_update_u32(&fill->close_call, 1) ; 111 | if (ret < 0) 112 | libnetdata_update_u32(&fill->close_err, 1) ; 113 | } else { 114 | data.ct = bpf_ktime_get_ns(); 115 | data.tgid = tgid; 116 | libnetdata_update_uid_gid(&data.uid, &data.gid); 117 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 118 | data.close_call = 1; 119 | if (ret < 0) 120 | data.close_err = 1; 121 | 122 | bpf_map_update_elem(&tbl_fd_pid, &key, &data, BPF_ANY); 123 | 124 | libnetdata_update_global(&fd_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 125 | } 126 | 127 | return 0; 128 | } 129 | 130 | static __always_inline void netdata_close_global(int ret) 131 | { 132 | if (ret < 0) 133 | libnetdata_update_global(&tbl_fd_global, NETDATA_KEY_ERROR_CLOSE_FD, 1); 134 | 135 | libnetdata_update_global(&tbl_fd_global, NETDATA_KEY_CALLS_CLOSE_FD, 1); 136 | } 137 | 138 | /************************************************************************************ 139 | * 140 | * FD SECTION(kprobe) 141 | * 142 | ***********************************************************************************/ 143 | 144 | SEC("kretprobe/do_sys_openat2") 145 | int BPF_KRETPROBE(netdata_sys_open_kretprobe) 146 | { 147 | long ret = (long)PT_REGS_RC(ctx); 148 | netdata_sys_open_global(ret); 149 | 150 | return netdata_apps_do_sys_openat2(ret); 151 | } 152 | 153 | SEC("kprobe/do_sys_openat2") 154 | int BPF_KPROBE(netdata_sys_open_kprobe) 155 | { 156 | netdata_sys_open_global(0); 157 | 158 | return netdata_apps_do_sys_openat2(0); 159 | } 160 | 161 | SEC("kretprobe/close_fd") 162 | int BPF_KRETPROBE(netdata_close_fd_kretprobe) 163 | { 164 | int ret = (ssize_t)PT_REGS_RC(ctx); 165 | netdata_close_global(ret); 166 | 167 | return netdata_apps_close_fd(ret); 168 | } 
169 | 170 | SEC("kprobe/close_fd") 171 | int BPF_KPROBE(netdata_close_fd_kprobe) 172 | { 173 | netdata_close_global(0); 174 | 175 | return netdata_apps_close_fd(0); 176 | } 177 | 178 | SEC("kretprobe/__close_fd") 179 | int BPF_KRETPROBE(netdata___close_fd_kretprobe) 180 | { 181 | int ret = (ssize_t)PT_REGS_RC(ctx); 182 | netdata_close_global(ret); 183 | 184 | return netdata_apps_close_fd(ret); 185 | } 186 | 187 | SEC("kprobe/__close_fd") 188 | int BPF_KPROBE(netdata___close_fd_kprobe) 189 | { 190 | netdata_close_global(0); 191 | 192 | return netdata_apps_close_fd(0); 193 | } 194 | 195 | /************************************************************************************ 196 | * 197 | * FD SECTION(trampoline) 198 | * 199 | ***********************************************************************************/ 200 | 201 | SEC("fexit/do_sys_openat2") 202 | int BPF_PROG(netdata_sys_open_fexit, int dfd, const char *filename, struct open_how *how, long ret) 203 | { 204 | netdata_sys_open_global(ret); 205 | 206 | return netdata_apps_do_sys_openat2(ret); 207 | } 208 | 209 | SEC("fentry/do_sys_openat2") 210 | int BPF_PROG(netdata_sys_open_fentry) 211 | { 212 | netdata_sys_open_global(0); 213 | 214 | return netdata_apps_do_sys_openat2(0); 215 | } 216 | 217 | SEC("fentry/close_fd") 218 | int BPF_PROG(netdata_close_fd_fentry) 219 | { 220 | netdata_close_global(0); 221 | 222 | return netdata_apps_close_fd(0); 223 | } 224 | 225 | SEC("fexit/close_fd") 226 | int BPF_PROG(netdata_close_fd_fexit, unsigned fd, int ret) 227 | { 228 | netdata_close_global(ret); 229 | 230 | return netdata_apps_close_fd(ret); 231 | } 232 | 233 | SEC("fentry/__close_fd") 234 | int BPF_PROG(netdata___close_fd_fentry) 235 | { 236 | netdata_close_global(0); 237 | 238 | return netdata_apps_close_fd(0); 239 | } 240 | 241 | SEC("fexit/__close_fd") 242 | int BPF_PROG(netdata___close_fd_fexit, struct files_struct *files, unsigned fd, int ret) 243 | { 244 | netdata_close_global(ret); 245 | 246 | return 
netdata_apps_close_fd(ret); 247 | } 248 | 249 | char _license[] SEC("license") = "GPL"; 250 | 251 | -------------------------------------------------------------------------------- /src/filesystem.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_core_read.h" 4 | #include "bpf_helpers.h" 5 | 6 | #include "netdata_core.h" 7 | #include "netdata_fs.h" 8 | 9 | /************************************************************************************ 10 | * 11 | * MAP Section 12 | * 13 | ***********************************************************************************/ 14 | 15 | struct { 16 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 17 | __type(key, __u32); 18 | __type(value, __u64); 19 | __uint(max_entries, NETDATA_FS_MAX_ELEMENTS); 20 | } tbl_fs SEC(".maps"); 21 | 22 | struct { 23 | __uint(type, BPF_MAP_TYPE_ARRAY); 24 | __type(key, __u32); 25 | __type(value, __u64); 26 | __uint(max_entries, NETDATA_CONTROLLER_END); 27 | } fs_ctrl SEC(".maps"); 28 | 29 | struct { 30 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 31 | __type(key, __u32); 32 | __type(value, __u64); 33 | __uint(max_entries, 4192); 34 | } tmp_fs SEC(".maps"); 35 | 36 | 37 | /************************************************************************************ 38 | * 39 | * COMMON 40 | * 41 | ***********************************************************************************/ 42 | 43 | static __always_inline int netdata_fs_entry() 44 | { 45 | __u64 pid_tgid = bpf_get_current_pid_tgid(); 46 | __u32 pid = (__u32)(pid_tgid >> 32); 47 | __u64 ts = bpf_ktime_get_ns(); 48 | 49 | bpf_map_update_elem(&tmp_fs, &pid, &ts, BPF_ANY); 50 | 51 | libnetdata_update_global(&fs_ctrl, NETDATA_CONTROLLER_TEMP_TABLE_ADD, 1); 52 | 53 | return 0; 54 | } 55 | 56 | static __always_inline int netdata_fs_store_bin(__u32 selection) 57 | { 58 | __u64 *fill, data; 59 | __u64 pid_tgid = bpf_get_current_pid_tgid(); 60 | __u32 bin, pid 
= (__u32)(pid_tgid >> 32); 61 | 62 | fill = bpf_map_lookup_elem(&tmp_fs, &pid); 63 | if (!fill) 64 | return 0; 65 | 66 | data = bpf_ktime_get_ns() - *fill; 67 | bpf_map_delete_elem(&tmp_fs, &pid); 68 | 69 | libnetdata_update_global(&fs_ctrl, NETDATA_CONTROLLER_TEMP_TABLE_DEL, 1); 70 | 71 | // Skip entries with backward time 72 | if ( (s64)data < 0) 73 | return 0; 74 | 75 | // convert to microseconds 76 | data /= 1000; 77 | bin = libnetdata_select_idx(data, NETDATA_FS_MAX_BINS_POS); 78 | __u32 idx = selection * NETDATA_FS_MAX_BINS + bin; 79 | if (idx >= NETDATA_FS_MAX_ELEMENTS) 80 | return 0; 81 | 82 | fill = bpf_map_lookup_elem(&tbl_fs, &idx); 83 | if (fill) { 84 | libnetdata_update_u64(fill, 1); 85 | return 0; 86 | } 87 | 88 | data = 1; 89 | bpf_map_update_elem(&tbl_fs, &idx, &data, BPF_ANY); 90 | 91 | return 0; 92 | } 93 | 94 | /************************************************************************************ 95 | * 96 | * ENTRY SECTION (trampoline) 97 | * 98 | ***********************************************************************************/ 99 | 100 | SEC("fentry/fs_file_read") 101 | int BPF_PROG(netdata_fs_file_read_entry, struct kiocb *iocb) 102 | { 103 | struct file *fp = iocb->ki_filp; 104 | if (!fp) 105 | return 0; 106 | 107 | return netdata_fs_entry(); 108 | } 109 | 110 | SEC("fentry/fs_file_write") 111 | int BPF_PROG(netdata_fs_file_write_entry, struct kiocb *iocb) 112 | { 113 | struct file *fp = iocb->ki_filp; 114 | if (!fp) 115 | return 0; 116 | 117 | return netdata_fs_entry(); 118 | } 119 | 120 | SEC("fentry/fs_file_open") 121 | int BPF_PROG(netdata_fs_file_open_entry, struct inode *inode, struct file *filp) 122 | { 123 | if (!filp) 124 | return 0; 125 | 126 | return netdata_fs_entry(); 127 | } 128 | 129 | SEC("fentry/fs_2nd_file_open") 130 | int BPF_PROG(netdata_fs_2nd_file_open_entry, struct inode *inode, struct file *filp) 131 | { 132 | if (!filp) 133 | return 0; 134 | 135 | return netdata_fs_entry(); 136 | } 137 | 138 | 
// getattr has no useful argument to validate, so it goes straight to the
// timestamp helper.
SEC("fentry/fs_getattr")
int BPF_PROG(netdata_fs_getattr_entry)
{
    return netdata_fs_entry();
}

/************************************************************************************
 *
 *                            END SECTION (trampoline)
 *
 ***********************************************************************************/

// Each fexit program pairs with the fentry program of the same kernel function:
// it closes the timestamp stored by netdata_fs_entry() and accumulates the
// latency into the histogram slot selected by the NETDATA_KEY_CALLS_* value.

SEC("fexit/fs_file_read")
int BPF_PROG(netdata_fs_file_read_exit)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_READ);
}

SEC("fexit/fs_file_write")
int BPF_PROG(netdata_fs_file_write_exit)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_WRITE);
}

SEC("fexit/fs_file_open")
int BPF_PROG(netdata_fs_file_open_exit)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_OPEN);
}

// Second open entry point: some filesystems expose two open functions, and
// both are accounted under the same OPEN histogram.
SEC("fexit/fs_2nd_file_open")
int BPF_PROG(netdata_fs_2nd_file_open_exit)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_OPEN);
}

SEC("fexit/fs_getattr")
int BPF_PROG(netdata_fs_getattr_exit)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_SYNC);
}

/************************************************************************************
 *
 *                            ENTRY SECTION (kprobe)
 *
 ***********************************************************************************/

// kprobe fallbacks for kernels without fentry/fexit support. Unlike the
// trampoline versions, arguments must be read with BPF_CORE_READ because
// kprobe arguments are raw register values.

SEC("kprobe/fs_file_read")
int BPF_KPROBE(netdata_fs_file_read_probe, struct kiocb *iocb)
{
    struct file *fp = BPF_CORE_READ(iocb, ki_filp);
    if (!fp)
        return 0;

    return netdata_fs_entry();
}

SEC("kprobe/fs_file_write")
int BPF_KPROBE(netdata_fs_file_write_probe, struct kiocb *iocb)
{
    struct file *fp = BPF_CORE_READ(iocb, ki_filp);
    if (!fp)
        return 0;

    return netdata_fs_entry();
}

SEC("kprobe/fs_file_open")
int BPF_KPROBE(netdata_fs_file_open_probe, struct inode *inode, struct file *filp)
{
    if (!filp)
        return 0;

    return netdata_fs_entry();
}

SEC("kprobe/fs_2nd_file_open")
int BPF_KPROBE(netdata_fs_2nd_file_open_probe, struct inode *inode, struct file *filp)
{
    if (!filp)
        return 0;

    return netdata_fs_entry();
}

SEC("kprobe/fs_getattr")
int BPF_KPROBE(netdata_fs_getattr_probe)
{
    return netdata_fs_entry();
}

/************************************************************************************
 *
 *                            END SECTION (kretprobe)
 *
 ***********************************************************************************/

SEC("kretprobe/fs_file_read")
int BPF_KRETPROBE(netdata_fs_file_read_retprobe)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_READ);
}

SEC("kretprobe/fs_file_write")
int BPF_KRETPROBE(netdata_fs_file_write_retprobe)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_WRITE);
}

SEC("kretprobe/fs_file_open")
int BPF_KRETPROBE(netdata_fs_file_open_retprobe)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_OPEN);
}

SEC("kretprobe/fs_2nd_file_open")
int BPF_KRETPROBE(netdata_fs_2nd_file_open_retprobe)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_OPEN);
}

SEC("kretprobe/fs_getattr")
int BPF_KRETPROBE(netdata_fs_getattr_retprobe)
{
    return netdata_fs_store_bin(NETDATA_KEY_CALLS_SYNC);
}

char _license[] SEC("license") = "GPL";


--------------------------------------------------------------------------------
/src/hardirq.bpf.c:
--------------------------------------------------------------------------------

#include "vmlinux_508.h"
#include "bpf_tracing.h"
#include "bpf_helpers.h"

#include "netdata_core.h"
#include "netdata_hardirq.h"

/************************************************************************************
 *
 *                                 MAPS
 *
 ***********************************************************************************/

// Latency state for dynamically numbered device IRQs, keyed by IRQ number.
// PERCPU_HASH: the entry and exit tracepoints for a hard IRQ fire on the same
// CPU, so per-CPU slots avoid cross-CPU races without atomics.
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
    __type(key, hardirq_key_t);
    __type(value, hardirq_val_t);
    __uint(max_entries, NETDATA_HARDIRQ_MAX_IRQS);
} tbl_hardirq SEC(".maps");

// Latency state for the fixed set of architecture interrupt vectors
// (thermal/threshold/error APIC, IPIs, local timer, ...), indexed by the
// NETDATA_HARDIRQ_STATIC_* enum values used below.
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __type(key, __u32);
    __type(value, hardirq_val_t);
    __uint(max_entries, NETDATA_HARDIRQ_STATIC_END);
} tbl_hardirq_static SEC(".maps");


/************************************************************************************
 *
 *                                 HARDIRQ SECTION
 *
 ***********************************************************************************/

// Fired when an IRQ handler starts: record the entry timestamp (ns) under the
// IRQ number so the exit handler can compute the handler's latency.
SEC("tracepoint/irq/irq_handler_entry")
int netdata_irq_handler_entry(struct netdata_irq_handler_entry *ptr)
{
    hardirq_key_t key = {};
    hardirq_val_t *valp, val = {};

    key.irq = ptr->irq;
    valp = bpf_map_lookup_elem(&tbl_hardirq, &key);
    if (valp) {
        // IRQ already tracked on this CPU: refresh the entry timestamp only,
        // preserving the accumulated latency.
        valp->ts = bpf_ktime_get_ns();
    } else {
        // First observation of this IRQ on this CPU: start with zero latency.
        val.latency = 0;
        val.ts = bpf_ktime_get_ns();
        bpf_map_update_elem(&tbl_hardirq, &key, &val, BPF_ANY);
    }

    return 0;
}

// Fired when an IRQ handler returns: add the time elapsed since the matching
// entry event to the per-IRQ latency accumulator.
SEC("tracepoint/irq/irq_handler_exit")
int netdata_irq_handler_exit(struct netdata_irq_handler_exit *ptr)
{
    hardirq_key_t key = {};
    hardirq_val_t *valp;

    key.irq = ptr->irq;
    valp = bpf_map_lookup_elem(&tbl_hardirq, &key);
    if (!valp) {
        // No entry event recorded (e.g. program attached while the IRQ was
        // already in flight); nothing to accumulate.
        return 0;
    }

    // get time diff and convert to microseconds.
    u64 latency = (bpf_ktime_get_ns() - valp->ts) / 1000;
    libnetdata_update_u64(&valp->latency, latency);

    return 0;
}

/************************************************************************************
 *
 *                                 HARDIRQ STATIC
 *
 ***********************************************************************************/

// Generates the entry-side tracepoint body for one static vector: stores the
// entry timestamp at slot __enum_idx of tbl_hardirq_static (same shape as
// netdata_irq_handler_entry above, but keyed by enum index instead of IRQ
// number). The SEC() annotation is supplied at each instantiation site.
#define HARDIRQ_STATIC_GEN_ENTRY(__type, __enum_idx) \
int netdata_irq_ ##__type(struct netdata_irq_vectors_entry *ptr) \
{ \
    u32 idx; \
    hardirq_val_t *valp, val = {}; \
\
    idx = __enum_idx; \
    valp = bpf_map_lookup_elem(&tbl_hardirq_static, &idx); \
    if (valp) { \
        valp->ts = bpf_ktime_get_ns(); \
    } else { \
        val.latency = 0; \
        val.ts = bpf_ktime_get_ns(); \
        bpf_map_update_elem(&tbl_hardirq_static, &idx, &val, BPF_ANY); \
    } \
\
    return 0; \
}

// Generates the matching exit-side body: accumulates the elapsed time since
// the entry event into the vector's latency counter (microseconds).
#define HARDIRQ_STATIC_GEN_EXIT(__type, __enum_idx) \
int netdata_irq_ ##__type(struct netdata_irq_vectors_exit *ptr) \
{ \
    u32 idx; \
    hardirq_val_t *valp; \
\
    idx = __enum_idx; \
    valp = bpf_map_lookup_elem(&tbl_hardirq_static, &idx); \
    if (!valp) { \
        return 0; \
    } \
\
    /* get time diff and convert to microseconds. */ \
    u64 latency = (bpf_ktime_get_ns() - valp->ts) / 1000; \
    libnetdata_update_u64(&valp->latency, latency); \
\
    return 0; \
}

// One entry/exit handler pair per static x86 interrupt vector tracepoint.

SEC("tracepoint/irq_vectors/thermal_apic_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    thermal_apic_entry,
    NETDATA_HARDIRQ_STATIC_APIC_THERMAL
)
SEC("tracepoint/irq_vectors/thermal_apic_exit")
HARDIRQ_STATIC_GEN_EXIT(
    thermal_apic_exit,
    NETDATA_HARDIRQ_STATIC_APIC_THERMAL
)

SEC("tracepoint/irq_vectors/threshold_apic_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    threshold_apic_entry,
    NETDATA_HARDIRQ_STATIC_APIC_THRESHOLD
)
SEC("tracepoint/irq_vectors/threshold_apic_exit")
HARDIRQ_STATIC_GEN_EXIT(
    threshold_apic_exit,
    NETDATA_HARDIRQ_STATIC_APIC_THRESHOLD
)

SEC("tracepoint/irq_vectors/error_apic_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    error_apic_entry,
    NETDATA_HARDIRQ_STATIC_APIC_ERROR
)
SEC("tracepoint/irq_vectors/error_apic_exit")
HARDIRQ_STATIC_GEN_EXIT(
    error_apic_exit,
    NETDATA_HARDIRQ_STATIC_APIC_ERROR
)

SEC("tracepoint/irq_vectors/deferred_error_apic_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    deferred_error_apic_entry,
    NETDATA_HARDIRQ_STATIC_APIC_DEFERRED_ERROR
)
SEC("tracepoint/irq_vectors/deferred_error_apic_exit")
HARDIRQ_STATIC_GEN_EXIT(
    deferred_error_apic_exit,
    NETDATA_HARDIRQ_STATIC_APIC_DEFERRED_ERROR
)

SEC("tracepoint/irq_vectors/spurious_apic_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    spurious_apic_entry,
    NETDATA_HARDIRQ_STATIC_APIC_SPURIOUS
)
SEC("tracepoint/irq_vectors/spurious_apic_exit")
HARDIRQ_STATIC_GEN_EXIT(
    spurious_apic_exit,
    NETDATA_HARDIRQ_STATIC_APIC_SPURIOUS
)

SEC("tracepoint/irq_vectors/call_function_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    call_function_entry,
    NETDATA_HARDIRQ_STATIC_FUNC_CALL
)
SEC("tracepoint/irq_vectors/call_function_exit")
HARDIRQ_STATIC_GEN_EXIT(
    call_function_exit,
    NETDATA_HARDIRQ_STATIC_FUNC_CALL
)

SEC("tracepoint/irq_vectors/call_function_single_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    call_function_single_entry,
    NETDATA_HARDIRQ_STATIC_FUNC_CALL_SINGLE
)
SEC("tracepoint/irq_vectors/call_function_single_exit")
HARDIRQ_STATIC_GEN_EXIT(
    call_function_single_exit,
    NETDATA_HARDIRQ_STATIC_FUNC_CALL_SINGLE
)

SEC("tracepoint/irq_vectors/reschedule_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    reschedule_entry,
    NETDATA_HARDIRQ_STATIC_RESCHEDULE
)
SEC("tracepoint/irq_vectors/reschedule_exit")
HARDIRQ_STATIC_GEN_EXIT(
    reschedule_exit,
    NETDATA_HARDIRQ_STATIC_RESCHEDULE
)

SEC("tracepoint/irq_vectors/local_timer_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    local_timer_entry,
    NETDATA_HARDIRQ_STATIC_LOCAL_TIMER
)
SEC("tracepoint/irq_vectors/local_timer_exit")
HARDIRQ_STATIC_GEN_EXIT(
    local_timer_exit,
    NETDATA_HARDIRQ_STATIC_LOCAL_TIMER
)

SEC("tracepoint/irq_vectors/irq_work_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    irq_work_entry,
    NETDATA_HARDIRQ_STATIC_IRQ_WORK
)
SEC("tracepoint/irq_vectors/irq_work_exit")
HARDIRQ_STATIC_GEN_EXIT(
    irq_work_exit,
    NETDATA_HARDIRQ_STATIC_IRQ_WORK
)

SEC("tracepoint/irq_vectors/x86_platform_ipi_entry")
HARDIRQ_STATIC_GEN_ENTRY(
    x86_platform_ipi_entry,
    NETDATA_HARDIRQ_STATIC_X86_PLATFORM_IPI
)
SEC("tracepoint/irq_vectors/x86_platform_ipi_exit")
HARDIRQ_STATIC_GEN_EXIT(
    x86_platform_ipi_exit,
    NETDATA_HARDIRQ_STATIC_X86_PLATFORM_IPI
)

char _license[] SEC("license") = "GPL";
-------------------------------------------------------------------------------- /src/hardirq.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include "netdata_defs.h" 12 | #include "netdata_tests.h" 13 | #include "netdata_core_common.h" 14 | 15 | #include "hardirq.skel.h" 16 | 17 | // Copied and redefined from ../include/netdata_hardirq.h 18 | #define NETDATA_HARDIRQ_NAME_LEN 32 19 | typedef struct hardirq_val { 20 | // incremental counter storing the total latency so far. 21 | uint64_t latency; 22 | 23 | // temporary timestamp stored at the IRQ entry handler, to be diff'd with a 24 | // timestamp at the IRQ exit handler, to get the latency to add to the 25 | // `latency` field. 26 | uint64_t ts; 27 | 28 | // identifies the IRQ with a human-readable string. 29 | char name[NETDATA_HARDIRQ_NAME_LEN]; 30 | } hardirq_val_t; 31 | 32 | static inline int ebpf_load_and_attach(struct hardirq_bpf *obj) 33 | { 34 | int ret = hardirq_bpf__load(obj); 35 | if (ret) { 36 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 37 | return -1; 38 | } 39 | 40 | ret = hardirq_bpf__attach(obj); 41 | if (!ret) { 42 | fprintf(stdout, "Hardirq loaded with success\n"); 43 | } 44 | 45 | return ret; 46 | } 47 | 48 | static void ebpf_update_table(int global) 49 | { 50 | uint32_t idx = 0; 51 | hardirq_val_t value = { .ts = 1, .latency = 1, .name = "netdata_testing" }; 52 | int ret = bpf_map_update_elem(global, &idx, &value, 0); 53 | if (ret) 54 | fprintf(stderr, "Cannot insert value to global table."); 55 | } 56 | 57 | static int hardirq_read_array(int fd, int ebpf_nprocs) 58 | { 59 | hardirq_val_t stored[ebpf_nprocs]; 60 | 61 | uint64_t counter = 0; 62 | int idx = 0; 63 | if (!bpf_map_lookup_elem(fd, &idx, stored)) { 64 | int j; 65 | for (j = 0; j < ebpf_nprocs; j++) { 66 | counter += 
stored[j].ts + stored[j].latency; 67 | } 68 | } 69 | 70 | if (counter) { 71 | fprintf(stdout, "Data stored with success\n"); 72 | return 0; 73 | } 74 | 75 | return 2; 76 | } 77 | 78 | static int ebpf_hardirq_tests() 79 | { 80 | struct hardirq_bpf *obj = NULL; 81 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 82 | if (ebpf_nprocs < 0) 83 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 84 | 85 | obj = hardirq_bpf__open(); 86 | if (!obj) { 87 | fprintf(stderr, "Cannot open or load BPF object\n"); 88 | 89 | return 2; 90 | } 91 | 92 | int ret = ebpf_load_and_attach(obj); 93 | if (!ret) { 94 | int fd = bpf_map__fd(obj->maps.tbl_hardirq); 95 | ebpf_update_table(fd); 96 | 97 | ret = hardirq_read_array(fd, ebpf_nprocs); 98 | if (ret) 99 | fprintf(stderr, "Cannot read global table\n"); 100 | } else { 101 | ret = 3; 102 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 103 | } 104 | 105 | hardirq_bpf__destroy(obj); 106 | 107 | return ret; 108 | } 109 | 110 | int main(int argc, char **argv) 111 | { 112 | static struct option long_options[] = { 113 | {"help", no_argument, 0, 'h' }, 114 | {0, 0, 0, 0} 115 | }; 116 | 117 | int option_index = 0; 118 | while (1) { 119 | int c = getopt_long(argc, argv, "", long_options, &option_index); 120 | if (c == -1) 121 | break; 122 | 123 | switch (c) { 124 | case 'h': { 125 | ebpf_tracepoint_help("hardirq"); 126 | exit(0); 127 | } 128 | default: { 129 | break; 130 | } 131 | } 132 | } 133 | 134 | int ret = netdata_ebf_memlock_limit(); 135 | if (ret) { 136 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 137 | return 1; 138 | } 139 | 140 | libbpf_set_print(netdata_libbpf_vfprintf); 141 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 142 | 143 | return ebpf_hardirq_tests(); 144 | } 145 | 146 | -------------------------------------------------------------------------------- /src/includes/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/netdata/ebpf-co-re/c74ab79e3150663b5ab65cc9f9f3ee727dc64857/src/includes/.gitkeep -------------------------------------------------------------------------------- /src/includes/oomkill.skel.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 | 3 | /* THIS FILE IS AUTOGENERATED! */ 4 | #ifndef __OOMKILL_BPF_SKEL_H__ 5 | #define __OOMKILL_BPF_SKEL_H__ 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | struct oomkill_bpf { 12 | struct bpf_object_skeleton *skeleton; 13 | struct bpf_object *obj; 14 | struct { 15 | struct bpf_map *tbl_oomkill; 16 | } maps; 17 | struct { 18 | struct bpf_program *netdata_oom_mark_victim; 19 | } progs; 20 | struct { 21 | struct bpf_link *netdata_oom_mark_victim; 22 | } links; 23 | }; 24 | 25 | static void 26 | oomkill_bpf__destroy(struct oomkill_bpf *obj) 27 | { 28 | if (!obj) 29 | return; 30 | if (obj->skeleton) 31 | bpf_object__destroy_skeleton(obj->skeleton); 32 | free(obj); 33 | } 34 | 35 | static inline int 36 | oomkill_bpf__create_skeleton(struct oomkill_bpf *obj); 37 | 38 | static inline struct oomkill_bpf * 39 | oomkill_bpf__open_opts(const struct bpf_object_open_opts *opts) 40 | { 41 | struct oomkill_bpf *obj; 42 | int err; 43 | 44 | obj = (struct oomkill_bpf *)calloc(1, sizeof(*obj)); 45 | if (!obj) { 46 | errno = ENOMEM; 47 | return NULL; 48 | } 49 | 50 | err = oomkill_bpf__create_skeleton(obj); 51 | if (err) 52 | goto err_out; 53 | 54 | err = bpf_object__open_skeleton(obj->skeleton, opts); 55 | if (err) 56 | goto err_out; 57 | 58 | return obj; 59 | err_out: 60 | oomkill_bpf__destroy(obj); 61 | errno = -err; 62 | return NULL; 63 | } 64 | 65 | static inline struct oomkill_bpf * 66 | oomkill_bpf__open(void) 67 | { 68 | return oomkill_bpf__open_opts(NULL); 69 | } 70 | 71 | static inline int 72 | oomkill_bpf__load(struct oomkill_bpf *obj) 73 | { 74 | return bpf_object__load_skeleton(obj->skeleton); 75 | 
} 76 | 77 | static inline struct oomkill_bpf * 78 | oomkill_bpf__open_and_load(void) 79 | { 80 | struct oomkill_bpf *obj; 81 | int err; 82 | 83 | obj = oomkill_bpf__open(); 84 | if (!obj) 85 | return NULL; 86 | err = oomkill_bpf__load(obj); 87 | if (err) { 88 | oomkill_bpf__destroy(obj); 89 | errno = -err; 90 | return NULL; 91 | } 92 | return obj; 93 | } 94 | 95 | static inline int 96 | oomkill_bpf__attach(struct oomkill_bpf *obj) 97 | { 98 | return bpf_object__attach_skeleton(obj->skeleton); 99 | } 100 | 101 | static inline void 102 | oomkill_bpf__detach(struct oomkill_bpf *obj) 103 | { 104 | return bpf_object__detach_skeleton(obj->skeleton); 105 | } 106 | 107 | static inline const void *oomkill_bpf__elf_bytes(size_t *sz); 108 | 109 | static inline int 110 | oomkill_bpf__create_skeleton(struct oomkill_bpf *obj) 111 | { 112 | struct bpf_object_skeleton *s; 113 | 114 | s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s)); 115 | if (!s) 116 | goto err; 117 | obj->skeleton = s; 118 | 119 | s->sz = sizeof(*s); 120 | s->name = "oomkill_bpf"; 121 | s->obj = &obj->obj; 122 | 123 | /* maps */ 124 | s->map_cnt = 1; 125 | s->map_skel_sz = sizeof(*s->maps); 126 | s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz); 127 | if (!s->maps) 128 | goto err; 129 | 130 | s->maps[0].name = "tbl_oomkill"; 131 | s->maps[0].map = &obj->maps.tbl_oomkill; 132 | 133 | /* programs */ 134 | s->prog_cnt = 1; 135 | s->prog_skel_sz = sizeof(*s->progs); 136 | s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz); 137 | if (!s->progs) 138 | goto err; 139 | 140 | s->progs[0].name = "netdata_oom_mark_victim"; 141 | s->progs[0].prog = &obj->progs.netdata_oom_mark_victim; 142 | s->progs[0].link = &obj->links.netdata_oom_mark_victim; 143 | 144 | s->data = (void *)oomkill_bpf__elf_bytes(&s->data_sz); 145 | 146 | return 0; 147 | err: 148 | bpf_object__destroy_skeleton(s); 149 | return -ENOMEM; 150 | } 151 | 152 | static inline const void 
*oomkill_bpf__elf_bytes(size_t *sz) 153 | { 154 | *sz = 2520; 155 | return (const void *)"\ 156 | \x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\ 157 | \0\0\0\0\0\0\0\0\0\0\0\x98\x06\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0d\0\ 158 | \x0c\0\x61\x11\x08\0\0\0\0\0\x63\x1a\xfc\xff\0\0\0\0\xb7\x01\0\0\0\0\0\0\x73\ 159 | \x1a\xfb\xff\0\0\0\0\xbf\xa2\0\0\0\0\0\0\x07\x02\0\0\xfc\xff\xff\xff\xbf\xa3\0\ 160 | \0\0\0\0\0\x07\x03\0\0\xfb\xff\xff\xff\x18\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb7\ 161 | \x04\0\0\0\0\0\0\x85\0\0\0\x02\0\0\0\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\0\0\0\ 162 | \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x47\x50\x4c\0\x9f\ 163 | \xeb\x01\0\x18\0\0\0\0\0\0\0\xc8\x01\0\0\xc8\x01\0\0\x99\x01\0\0\0\0\0\0\0\0\0\ 164 | \x02\x03\0\0\0\x01\0\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\x01\0\0\0\0\0\0\0\x03\0\0\ 165 | \0\0\x02\0\0\0\x04\0\0\0\x05\0\0\0\x05\0\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\ 166 | \0\0\0\0\0\x02\x02\0\0\0\0\0\0\0\0\0\0\x02\x07\0\0\0\x19\0\0\0\0\0\0\x08\x08\0\ 167 | \0\0\x1e\0\0\0\0\0\0\x01\x01\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\x02\x0a\0\0\0\0\0\0\ 168 | \0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x40\0\0\0\0\0\0\0\x04\0\0\x04\x20\0\0\ 169 | \0\x2c\0\0\0\x01\0\0\0\0\0\0\0\x31\0\0\0\x05\0\0\0\x40\0\0\0\x35\0\0\0\x06\0\0\ 170 | \0\x80\0\0\0\x3b\0\0\0\x09\0\0\0\xc0\0\0\0\x47\0\0\0\0\0\0\x0e\x0b\0\0\0\x01\0\ 171 | \0\0\0\0\0\0\0\0\0\x02\x0e\0\0\0\x53\0\0\0\x02\0\0\x04\x10\0\0\0\x71\0\0\0\x0f\ 172 | \0\0\0\0\0\0\0\x75\0\0\0\x02\0\0\0\x40\0\0\0\x79\0\0\0\0\0\0\x08\x10\0\0\0\x7d\ 173 | \0\0\0\0\0\0\x08\x11\0\0\0\x83\0\0\0\0\0\0\x01\x08\0\0\0\x40\0\0\0\0\0\0\0\x01\ 174 | \0\0\x0d\x02\0\0\0\x9a\0\0\0\x0d\0\0\0\x9e\0\0\0\x01\0\0\x0c\x12\0\0\0\x7d\x01\ 175 | \0\0\0\0\0\x01\x01\0\0\0\x08\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x14\0\0\0\x04\0\ 176 | \0\0\x04\0\0\0\x82\x01\0\0\0\0\0\x0e\x15\0\0\0\x01\0\0\0\x8b\x01\0\0\x01\0\0\ 177 | \x0f\0\0\0\0\x0c\0\0\0\0\0\0\0\x20\0\0\0\x91\x01\0\0\x01\0\0\x0f\0\0\0\0\x16\0\ 178 | 
\0\0\0\0\0\0\x04\0\0\0\0\x69\x6e\x74\0\x5f\x5f\x41\x52\x52\x41\x59\x5f\x53\x49\ 179 | \x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x5f\x5f\x75\x38\0\x75\x6e\x73\x69\x67\ 180 | \x6e\x65\x64\x20\x63\x68\x61\x72\0\x74\x79\x70\x65\0\x6b\x65\x79\0\x76\x61\x6c\ 181 | \x75\x65\0\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\0\x74\x62\x6c\x5f\x6f\ 182 | \x6f\x6d\x6b\x69\x6c\x6c\0\x6e\x65\x74\x64\x61\x74\x61\x5f\x6f\x6f\x6d\x5f\x6d\ 183 | \x61\x72\x6b\x5f\x76\x69\x63\x74\x69\x6d\x5f\x65\x6e\x74\x72\x79\0\x70\x61\x64\ 184 | \0\x70\x69\x64\0\x75\x36\x34\0\x5f\x5f\x75\x36\x34\0\x6c\x6f\x6e\x67\x20\x6c\ 185 | \x6f\x6e\x67\x20\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x70\x74\x72\ 186 | \0\x6e\x65\x74\x64\x61\x74\x61\x5f\x6f\x6f\x6d\x5f\x6d\x61\x72\x6b\x5f\x76\x69\ 187 | \x63\x74\x69\x6d\0\x74\x72\x61\x63\x65\x70\x6f\x69\x6e\x74\x2f\x6f\x6f\x6d\x2f\ 188 | \x6d\x61\x72\x6b\x5f\x76\x69\x63\x74\x69\x6d\0\x2f\x68\x6f\x6d\x65\x2f\x74\x68\ 189 | \x69\x61\x67\x6f\x2f\x4e\x65\x74\x64\x61\x74\x61\x2f\x6b\x65\x72\x6e\x65\x6c\ 190 | \x2d\x63\x6f\x6c\x6c\x65\x63\x74\x6f\x72\x2f\x63\x6f\x2d\x72\x65\x2f\x6f\x6f\ 191 | \x6d\x6b\x69\x6c\x6c\x2e\x62\x70\x66\x2e\x63\0\x20\x20\x20\x20\x69\x6e\x74\x20\ 192 | \x6b\x65\x79\x20\x3d\x20\x70\x74\x72\x2d\x3e\x70\x69\x64\x3b\0\x20\x20\x20\x20\ 193 | \x75\x38\x20\x76\x61\x6c\x20\x3d\x20\x30\x3b\0\x20\x20\x20\x20\x62\x70\x66\x5f\ 194 | \x6d\x61\x70\x5f\x75\x70\x64\x61\x74\x65\x5f\x65\x6c\x65\x6d\x28\x26\x74\x62\ 195 | \x6c\x5f\x6f\x6f\x6d\x6b\x69\x6c\x6c\x2c\x20\x26\x6b\x65\x79\x2c\x20\x26\x76\ 196 | \x61\x6c\x2c\x20\x42\x50\x46\x5f\x41\x4e\x59\x29\x3b\0\x20\x20\x20\x20\x72\x65\ 197 | \x74\x75\x72\x6e\x20\x30\x3b\0\x63\x68\x61\x72\0\x5f\x6c\x69\x63\x65\x6e\x73\ 198 | \x65\0\x2e\x6d\x61\x70\x73\0\x6c\x69\x63\x65\x6e\x73\x65\0\x9f\xeb\x01\0\x20\0\ 199 | \0\0\0\0\0\0\x14\0\0\0\x14\0\0\0\x6c\0\0\0\x80\0\0\0\0\0\0\0\x08\0\0\0\xb6\0\0\ 200 | \0\x01\0\0\0\0\0\0\0\x13\0\0\0\x10\0\0\0\xb6\0\0\0\x06\0\0\0\0\0\0\0\xd1\0\0\0\ 201 | 
\x0b\x01\0\0\x14\x74\0\0\x08\0\0\0\xd1\0\0\0\x0b\x01\0\0\x09\x74\0\0\x18\0\0\0\ 202 | \xd1\0\0\0\x23\x01\0\0\x08\x78\0\0\x28\0\0\0\xd1\0\0\0\0\0\0\0\0\0\0\0\x40\0\0\ 203 | \0\xd1\0\0\0\x33\x01\0\0\x05\x7c\0\0\x60\0\0\0\xd1\0\0\0\x6f\x01\0\0\x05\x80\0\ 204 | \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\x02\0\0\ 205 | \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x1a\0\0\0\x12\0\x02\0\0\0\0\0\0\0\0\0\x70\0\0\0\ 206 | \0\0\0\0\x51\0\0\0\x11\0\x03\0\0\0\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\x6b\0\0\0\x11\ 207 | \0\x04\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\x01\0\0\0\x03\0\0\ 208 | \0\xc0\x01\0\0\0\0\0\0\x04\0\0\0\x03\0\0\0\xd8\x01\0\0\0\0\0\0\x04\0\0\0\x04\0\ 209 | \0\0\x2c\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x40\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\ 210 | \0\x50\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x60\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ 211 | \x70\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x80\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\ 212 | \x90\0\0\0\0\0\0\0\x04\0\0\0\x01\0\0\0\x08\x09\x0a\0\x2e\x74\x65\x78\x74\0\x2e\ 213 | \x72\x65\x6c\x2e\x42\x54\x46\x2e\x65\x78\x74\0\x2e\x6d\x61\x70\x73\0\x6e\x65\ 214 | \x74\x64\x61\x74\x61\x5f\x6f\x6f\x6d\x5f\x6d\x61\x72\x6b\x5f\x76\x69\x63\x74\ 215 | \x69\x6d\0\x2e\x72\x65\x6c\x74\x72\x61\x63\x65\x70\x6f\x69\x6e\x74\x2f\x6f\x6f\ 216 | \x6d\x2f\x6d\x61\x72\x6b\x5f\x76\x69\x63\x74\x69\x6d\0\x74\x62\x6c\x5f\x6f\x6f\ 217 | \x6d\x6b\x69\x6c\x6c\0\x2e\x6c\x6c\x76\x6d\x5f\x61\x64\x64\x72\x73\x69\x67\0\ 218 | \x5f\x6c\x69\x63\x65\x6e\x73\x65\0\x2e\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\ 219 | \x6d\x74\x61\x62\0\x2e\x72\x65\x6c\x2e\x42\x54\x46\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ 220 | \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ 221 | \0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ 222 | \x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\ 223 | \0\0\0\x36\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\ 224 | 
\x70\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x14\0\0\0\ 225 | \x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xb0\0\0\0\0\0\0\0\x20\0\0\0\0\0\0\ 226 | \0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x6c\0\0\0\x01\0\0\0\x03\0\ 227 | \0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd0\0\0\0\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ 228 | \0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x88\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ 229 | \0\0\0\0\0\xd4\0\0\0\0\0\0\0\x79\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\ 230 | \0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4d\ 231 | \x04\0\0\0\0\0\0\xa0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\ 232 | \0\0\0\x7c\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf0\x04\0\0\0\0\0\0\ 233 | \x78\0\0\0\0\0\0\0\x0c\0\0\0\x02\0\0\0\x08\0\0\0\0\0\0\0\x18\0\0\0\0\0\0\0\x32\ 234 | \0\0\0\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x05\0\0\0\0\0\0\x10\0\0\0\ 235 | \0\0\0\0\x07\0\0\0\x02\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x84\0\0\0\x09\ 236 | \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x78\x05\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\ 237 | \x07\0\0\0\x05\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x07\0\0\0\x09\0\0\0\0\ 238 | \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x05\0\0\0\0\0\0\x70\0\0\0\0\0\0\0\x07\0\0\0\ 239 | \x06\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x5d\0\0\0\x03\x4c\xff\x6f\0\0\0\ 240 | \x80\0\0\0\0\0\0\0\0\0\0\0\0\x08\x06\0\0\0\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\ 241 | \0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x74\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\ 242 | \0\0\0\0\0\0\x0b\x06\0\0\0\0\0\0\x8d\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\ 243 | \0\0\0\0\0\0\0\0\0\0\0"; 244 | } 245 | 246 | #endif /* __OOMKILL_BPF_SKEL_H__ */ 247 | -------------------------------------------------------------------------------- /src/mdflush.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | 
#include "netdata_core.h" 6 | #include "netdata_mdflush.h" 7 | 8 | // Copied from https://elixir.bootlin.com/linux/v5.16-rc2/source/include/uapi/linux/major.h 9 | // that has the same value of https://elixir.bootlin.com/linux/v4.14/source/include/uapi/linux/major.h#L25 10 | #define NETDATA_MD_MAJOR 9 11 | 12 | 13 | // Preprocessors copied from https://elixir.bootlin.com/linux/v4.14/source/include/linux/kdev_t.h#L10 14 | // they are the same in https://elixir.bootlin.com/linux/v5.16-rc2/source/include/linux/kdev_t.h 15 | #define NETDATA_MINORBITS 20 16 | #define NETDATA_MINORMASK ((1U << NETDATA_MINORBITS) - 1) 17 | 18 | #define NETDATA_MAJOR(dev) ((unsigned int) ((dev) >> NETDATA_MINORBITS)) 19 | #define NETDATA_MINOR(dev) ((unsigned int) ((dev) & NETDATA_MINORMASK)) 20 | 21 | // Preprocessor copied from https://elixir.bootlin.com/linux/v5.16-rc2/source/include/uapi/linux/raid/md_u.h#L69 22 | // Like the previous value is not changing between versions 23 | /* 63 partitions with the alternate major number (mdp) */ 24 | #define Netdata_MdpMinorShift 6 25 | 26 | /************************************************************************************ 27 | * 28 | * MAPS 29 | * 30 | ***********************************************************************************/ 31 | 32 | struct { 33 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 34 | __type(key, mdflush_key_t); 35 | __type(value, mdflush_val_t); 36 | __uint(max_entries, 1024); 37 | } tbl_mdflush SEC(".maps"); 38 | 39 | /************************************************************************************ 40 | * 41 | * COMMON SECTION 42 | * 43 | ***********************************************************************************/ 44 | 45 | static __always_inline int netdata_md_common(struct mddev *mddev) 46 | { 47 | mdflush_key_t key = 0; 48 | mdflush_val_t *valp, val; 49 | 50 | // get correct key. 
51 | // this essentially does the logic here: 52 | // https://elixir.bootlin.com/linux/v4.14/source/drivers/md/md.c#L5256 53 | bpf_probe_read(&key, sizeof(key), &mddev->unit); 54 | int partitioned = (NETDATA_MAJOR(key) != NETDATA_MD_MAJOR); 55 | int shift = partitioned ? Netdata_MdpMinorShift : 0; 56 | key = NETDATA_MINOR(key) >> shift; 57 | 58 | valp = bpf_map_lookup_elem(&tbl_mdflush, &key); 59 | if (valp) { 60 | *valp += 1; 61 | } else { 62 | val = 1; 63 | bpf_map_update_elem(&tbl_mdflush, &key, &val, BPF_ANY); 64 | } 65 | 66 | return 0; 67 | } 68 | 69 | /************************************************************************************ 70 | * 71 | * MDFLUSH SECTION(kprobe) 72 | * 73 | ***********************************************************************************/ 74 | 75 | SEC("kprobe/md_flush_request") 76 | int BPF_KPROBE(netdata_md_flush_request_kprobe) 77 | { 78 | struct mddev *mddev = (struct mddev *)PT_REGS_PARM1(ctx); 79 | 80 | return netdata_md_common(mddev); 81 | } 82 | 83 | /************************************************************************************ 84 | * 85 | * MDFLUSH SECTION(trampoline) 86 | * 87 | ***********************************************************************************/ 88 | 89 | SEC("fentry/md_flush_request") 90 | int BPF_PROG(netdata_md_flush_request_fentry, struct mddev *mddev) 91 | { 92 | return netdata_md_common(mddev); 93 | } 94 | 95 | char _license[] SEC("license") = "GPL"; 96 | 97 | -------------------------------------------------------------------------------- /src/mdflush.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include "netdata_defs.h" 12 | #include "netdata_tests.h" 13 | #include "netdata_core_common.h" 14 | 15 | #include "mdflush.skel.h" 16 | 17 | enum netdata_md_function_list { 18 | 
NETDATA_MD_FLUSH_REQUEST, 19 | 20 | // Always insert before this value 21 | NETDATA_MD_END 22 | }; 23 | 24 | char *function_list[] = { "md_flush_request" }; 25 | 26 | static inline void ebpf_disable_probes(struct mdflush_bpf *obj) 27 | { 28 | bpf_program__set_autoload(obj->progs.netdata_md_flush_request_kprobe, false); 29 | } 30 | 31 | static inline void ebpf_disable_trampoline(struct mdflush_bpf *obj) 32 | { 33 | bpf_program__set_autoload(obj->progs.netdata_md_flush_request_fentry, false); 34 | } 35 | 36 | static void ebpf_set_trampoline_target(struct mdflush_bpf *obj) 37 | { 38 | bpf_program__set_attach_target(obj->progs.netdata_md_flush_request_fentry, 0, 39 | function_list[NETDATA_MD_FLUSH_REQUEST]); 40 | } 41 | 42 | static int ebpf_load_probes(struct mdflush_bpf *obj) 43 | { 44 | obj->links.netdata_md_flush_request_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_md_flush_request_kprobe, 45 | false, function_list[NETDATA_MD_FLUSH_REQUEST]); 46 | int ret = libbpf_get_error(obj->links.netdata_md_flush_request_kprobe); 47 | if (ret) 48 | return -1; 49 | 50 | return 0; 51 | } 52 | 53 | static inline int ebpf_load_and_attach(struct mdflush_bpf *obj, int selector) 54 | { 55 | if (!selector) { // trampoline 56 | ebpf_disable_probes(obj); 57 | 58 | ebpf_set_trampoline_target(obj); 59 | } else if (selector == NETDATA_MODE_PROBE) { // kprobe 60 | ebpf_disable_trampoline(obj); 61 | } 62 | 63 | int ret = mdflush_bpf__load(obj); 64 | if (ret) { 65 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 66 | return -1; 67 | } 68 | 69 | if (!selector) 70 | ret = mdflush_bpf__attach(obj); 71 | else 72 | ret = ebpf_load_probes(obj); 73 | 74 | if (!ret) { 75 | fprintf(stdout, "md_flush_request loaded with success\n"); 76 | } 77 | 78 | return ret; 79 | } 80 | 81 | static void ebpf_update_tables(int global) 82 | { 83 | (void)ebpf_fill_global(global); 84 | } 85 | 86 | static int ebpf_mdflush_tests(int selector) 87 | { 88 | struct mdflush_bpf *obj = NULL; 89 | int 
ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 90 | if (ebpf_nprocs < 0) 91 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 92 | 93 | obj = mdflush_bpf__open(); 94 | if (!obj) { 95 | fprintf(stderr, "Cannot open or load BPF object\n"); 96 | 97 | return 2; 98 | } 99 | 100 | int ret = ebpf_load_and_attach(obj, selector); 101 | if (!ret) { 102 | int fd = bpf_map__fd(obj->maps.tbl_mdflush); 103 | ebpf_update_tables(fd); 104 | 105 | ret = ebpf_read_global_array(fd, ebpf_nprocs, 1); 106 | if (ret) 107 | fprintf(stderr, "Cannot read global table\n"); 108 | } else { 109 | ret = 3; 110 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 111 | } 112 | 113 | mdflush_bpf__destroy(obj); 114 | 115 | return ret; 116 | } 117 | 118 | int main(int argc, char **argv) 119 | { 120 | static struct option long_options[] = { 121 | {"help", no_argument, 0, 'h' }, 122 | {"probe", no_argument, 0, 'p' }, 123 | {"tracepoint", no_argument, 0, 'r' }, 124 | {"trampoline", no_argument, 0, 't' }, 125 | {0, 0, 0, 0} 126 | }; 127 | 128 | int selector = NETDATA_MODE_TRAMPOLINE; 129 | int option_index = 0; 130 | while (1) { 131 | int c = getopt_long(argc, argv, "", long_options, &option_index); 132 | if (c == -1) 133 | break; 134 | 135 | switch (c) { 136 | case 'h': { 137 | ebpf_core_print_help(argv[0], "mdflush", 1, 0); 138 | exit(0); 139 | } 140 | case 'p': { 141 | selector = NETDATA_MODE_PROBE; 142 | break; 143 | } 144 | case 'r': { 145 | selector = NETDATA_MODE_PROBE; 146 | fprintf(stdout, "This specific software does not have tracepoint, using kprobe instead\n"); 147 | break; 148 | } 149 | case 't': { 150 | selector = NETDATA_MODE_TRAMPOLINE; 151 | break; 152 | } 153 | default: { 154 | break; 155 | } 156 | } 157 | } 158 | 159 | // Adjust memory 160 | int ret = netdata_ebf_memlock_limit(); 161 | if (ret) { 162 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 163 | return 1; 164 | } 165 | 166 | libbpf_set_print(netdata_libbpf_vfprintf); 167 | 
libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 168 | 169 | char *md_flush_request = netdata_update_name(function_list[NETDATA_MD_FLUSH_REQUEST]); 170 | if (!md_flush_request) { 171 | fprintf(stderr, "Module `md` is not loaded, so it is not possible to monitor calls for md_flush_request.\n"); 172 | return -1; 173 | } 174 | function_list[NETDATA_MD_FLUSH_REQUEST] = md_flush_request; 175 | 176 | struct btf *bf = NULL; 177 | if (!selector) { 178 | bf = netdata_parse_btf_file((const char *)NETDATA_BTF_FILE); 179 | if (bf) { 180 | selector = ebpf_find_functions(bf, selector, function_list, NETDATA_MD_END); 181 | btf__free(bf); 182 | } 183 | } 184 | 185 | int stop_software = 0; 186 | while (stop_software < 2) { 187 | if (ebpf_mdflush_tests(selector) && !stop_software) { 188 | selector = 1; 189 | stop_software++; 190 | } else 191 | stop_software = 2; 192 | } 193 | 194 | free(md_flush_request); 195 | 196 | return 0; 197 | } 198 | 199 | -------------------------------------------------------------------------------- /src/mount.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_mount.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 16 | __type(key, __u32); 17 | __type(value, __u64); 18 | __uint(max_entries, NETDATA_MOUNT_END); 19 | } tbl_mount SEC(".maps"); 20 | 21 | /************************************************************************************ 22 | * 23 | * MOUNT SECTION (tracepoint) 24 | * 25 | ***********************************************************************************/ 26 | 27 | SEC("tracepoint/syscalls/sys_exit_mount") 28 | int netdata_mount_exit(struct 
trace_event_raw_sys_exit *arg) 29 | { 30 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_MOUNT_CALL, 1); 31 | 32 | int ret = (int)arg->ret; 33 | if (ret < 0) 34 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_MOUNT_ERROR, 1); 35 | 36 | return 0; 37 | } 38 | 39 | SEC("tracepoint/syscalls/sys_exit_umount") 40 | int netdata_umount_exit(struct trace_event_raw_sys_exit *arg) 41 | { 42 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_UMOUNT_CALL, 1); 43 | 44 | int ret = (int)arg->ret; 45 | if (ret < 0) 46 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_UMOUNT_ERROR, 1); 47 | 48 | return 0; 49 | } 50 | 51 | /************************************************************************************ 52 | * 53 | * MOUNT SECTION (kprobe) 54 | * 55 | ***********************************************************************************/ 56 | 57 | SEC("kprobe/netdata_mount_probe") 58 | int BPF_KPROBE(netdata_mount_probe) 59 | { 60 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_MOUNT_CALL, 1); 61 | 62 | return 0; 63 | } 64 | 65 | SEC("kretprobe/netdata_mount_retprobe") 66 | int BPF_KRETPROBE(netdata_mount_retprobe) 67 | { 68 | int ret = (int)PT_REGS_RC(ctx); 69 | if (ret < 0) 70 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_MOUNT_ERROR, 1); 71 | 72 | return 0; 73 | } 74 | 75 | SEC("kprobe/netdata_umount_probe") 76 | int BPF_KPROBE(netdata_umount_probe) 77 | { 78 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_UMOUNT_CALL, 1); 79 | 80 | return 0; 81 | } 82 | 83 | SEC("kretprobe/netdata_umount_retprobe") 84 | int BPF_KRETPROBE(netdata_umount_retprobe) 85 | { 86 | int ret = (int)PT_REGS_RC(ctx); 87 | if (ret < 0) 88 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_UMOUNT_ERROR, 1); 89 | 90 | return 0; 91 | } 92 | 93 | /************************************************************************************ 94 | * 95 | * MOUNT SECTION (trampoline) 96 | * 97 | ***********************************************************************************/ 98 | 99 | 
SEC("fentry/netdata_mount") 100 | int BPF_PROG(netdata_mount_fentry) 101 | { 102 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_MOUNT_CALL, 1); 103 | 104 | return 0; 105 | } 106 | 107 | SEC("fexit/netdata_mount") 108 | int BPF_PROG(netdata_mount_fexit, const struct pt_regs *regs) 109 | { 110 | int ret = (int)PT_REGS_RC(regs); 111 | if (ret < 0) 112 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_MOUNT_ERROR, 1); 113 | 114 | return 0; 115 | } 116 | 117 | SEC("fentry/netdata_umount") 118 | int BPF_PROG(netdata_umount_fentry) 119 | { 120 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_UMOUNT_CALL, 1); 121 | 122 | return 0; 123 | } 124 | 125 | SEC("fexit/netdata_umount") 126 | int BPF_PROG(netdata_umount_fexit, const struct pt_regs *regs) 127 | { 128 | int ret = (int)PT_REGS_RC(regs); 129 | if (ret < 0) 130 | libnetdata_update_global(&tbl_mount, NETDATA_KEY_UMOUNT_ERROR, 1); 131 | 132 | return 0; 133 | } 134 | 135 | char _license[] SEC("license") = "GPL"; 136 | 137 | -------------------------------------------------------------------------------- /src/mount.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | #include "netdata_defs.h" 15 | #include "netdata_tests.h" 16 | #include "netdata_core_common.h" 17 | #include "netdata_mount.h" 18 | 19 | #include "mount.skel.h" 20 | 21 | enum netdata_mount_syscalls { 22 | NETDATA_MOUNT_SYSCALL, 23 | NETDATA_UMOUNT_SYSCALL, 24 | 25 | NETDATA_MOUNT_SYSCALLS_END 26 | }; 27 | 28 | char *syscalls[] = { "__x64_sys_mount", 29 | "__x64_sys_umount" }; 30 | 31 | static int attach_probe(struct mount_bpf *obj) 32 | { 33 | obj->links.netdata_mount_probe = bpf_program__attach_kprobe(obj->progs.netdata_mount_probe, 34 | false, syscalls[NETDATA_MOUNT_SYSCALL]); 35 | int ret = 
libbpf_get_error(obj->links.netdata_mount_probe); 36 | if (ret) 37 | return -1; 38 | 39 | obj->links.netdata_mount_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_mount_retprobe, 40 | true, syscalls[NETDATA_MOUNT_SYSCALL]); 41 | ret = libbpf_get_error(obj->links.netdata_mount_retprobe); 42 | if (ret) 43 | return -1; 44 | 45 | obj->links.netdata_umount_probe = bpf_program__attach_kprobe(obj->progs.netdata_umount_probe, 46 | false, syscalls[NETDATA_UMOUNT_SYSCALL]); 47 | ret = libbpf_get_error(obj->links.netdata_umount_probe); 48 | if (ret) 49 | return -1; 50 | 51 | obj->links.netdata_umount_retprobe = bpf_program__attach_kprobe(obj->progs.netdata_umount_retprobe, 52 | true, syscalls[NETDATA_UMOUNT_SYSCALL]); 53 | ret = libbpf_get_error(obj->links.netdata_umount_retprobe); 54 | if (ret) 55 | return -1; 56 | 57 | return 0; 58 | } 59 | 60 | static inline void netdata_ebpf_disable_probe(struct mount_bpf *obj) 61 | { 62 | bpf_program__set_autoload(obj->progs.netdata_mount_probe, false); 63 | bpf_program__set_autoload(obj->progs.netdata_mount_retprobe, false); 64 | bpf_program__set_autoload(obj->progs.netdata_umount_probe, false); 65 | bpf_program__set_autoload(obj->progs.netdata_umount_retprobe, false); 66 | } 67 | 68 | static inline void netdata_ebpf_disable_tracepoint(struct mount_bpf *obj) 69 | { 70 | bpf_program__set_autoload(obj->progs.netdata_mount_exit, false); 71 | bpf_program__set_autoload(obj->progs.netdata_umount_exit, false); 72 | } 73 | 74 | static inline void netdata_ebpf_disable_trampoline(struct mount_bpf *obj) 75 | { 76 | bpf_program__set_autoload(obj->progs.netdata_mount_fentry, false); 77 | bpf_program__set_autoload(obj->progs.netdata_umount_fentry, false); 78 | bpf_program__set_autoload(obj->progs.netdata_mount_fexit, false); 79 | bpf_program__set_autoload(obj->progs.netdata_umount_fexit, false); 80 | } 81 | 82 | static inline void netdata_set_trampoline_target(struct mount_bpf *obj) 83 | { 84 | 
bpf_program__set_attach_target(obj->progs.netdata_mount_fentry, 0, 85 | syscalls[NETDATA_MOUNT_SYSCALL]); 86 | 87 | bpf_program__set_attach_target(obj->progs.netdata_mount_fexit, 0, 88 | syscalls[NETDATA_MOUNT_SYSCALL]); 89 | 90 | bpf_program__set_attach_target(obj->progs.netdata_umount_fentry, 0, 91 | syscalls[NETDATA_UMOUNT_SYSCALL]); 92 | 93 | bpf_program__set_attach_target(obj->progs.netdata_umount_fexit, 0, 94 | syscalls[NETDATA_UMOUNT_SYSCALL]); 95 | } 96 | 97 | static inline int ebpf_load_and_attach(struct mount_bpf *obj, int selector) 98 | { 99 | if (!selector) { //trampoline 100 | netdata_ebpf_disable_probe(obj); 101 | netdata_ebpf_disable_tracepoint(obj); 102 | 103 | netdata_set_trampoline_target(obj); 104 | } else if (selector == 1) { // probe 105 | netdata_ebpf_disable_trampoline(obj); 106 | netdata_ebpf_disable_tracepoint(obj); 107 | } else { // tracepoint 108 | netdata_ebpf_disable_probe(obj); 109 | netdata_ebpf_disable_trampoline(obj); 110 | } 111 | 112 | int ret = mount_bpf__load(obj); 113 | if (ret) { 114 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 115 | return -1; 116 | } 117 | 118 | if (selector == 1) // attach kprobe 119 | ret = attach_probe(obj); 120 | else { 121 | ret = mount_bpf__attach(obj); 122 | } 123 | 124 | if (!ret) { 125 | fprintf(stdout, "%s loaded with success\n", (selector) ? 
"tracepoint" : "probe"); 126 | } 127 | 128 | return ret; 129 | } 130 | 131 | static int call_syscalls() 132 | { 133 | char *dst = { "./mydst" }; 134 | if (mkdir(dst, 0777)) { 135 | fprintf(stdout, "Cannot create directory\n"); 136 | return -1; 137 | } 138 | 139 | // I am not testing return, because errors are also stored at hash map 140 | (void)mount("none", dst, "tmpfs", 0, "mode=0777"); 141 | (void)umount(dst); 142 | 143 | rmdir(dst); 144 | 145 | return 0; 146 | } 147 | 148 | static int mount_read_array(int fd) 149 | { 150 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 151 | if (ebpf_nprocs < 0) 152 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 153 | uint64_t stored[ebpf_nprocs]; 154 | 155 | uint32_t idx; 156 | uint64_t counter = 0; 157 | for (idx = 0; idx < NETDATA_MOUNT_END; idx++) { 158 | if (!bpf_map_lookup_elem(fd, &idx, stored)) { 159 | int j; 160 | for (j = 0; j < ebpf_nprocs; j++) { 161 | counter += stored[j]; 162 | } 163 | } 164 | 165 | memset(stored, 0, sizeof(uint64_t) * ebpf_nprocs); 166 | } 167 | 168 | if (counter >= 2) { 169 | fprintf(stdout, "Data stored with success\n"); 170 | return 0; 171 | } 172 | 173 | return 2; 174 | } 175 | 176 | static int ebpf_mount_tests(int selector) 177 | { 178 | struct mount_bpf *obj = NULL; 179 | 180 | obj = mount_bpf__open(); 181 | if (!obj) { 182 | goto load_error; 183 | } 184 | 185 | int ret = ebpf_load_and_attach(obj, selector); 186 | if (ret && selector != NETDATA_MODE_PROBE) { 187 | mount_bpf__destroy(obj); 188 | 189 | obj = mount_bpf__open(); 190 | if (!obj) { 191 | goto load_error; 192 | } 193 | 194 | selector = NETDATA_MODE_PROBE; 195 | ret = ebpf_load_and_attach(obj, selector); 196 | } 197 | 198 | if (!ret) { 199 | ret = call_syscalls(); 200 | if (!ret) { 201 | int fd = bpf_map__fd(obj->maps.tbl_mount); 202 | ret = mount_read_array(fd); 203 | } 204 | } else { 205 | ret = 3; 206 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 207 | } 208 | 209 | mount_bpf__destroy(obj); 210 | 211 | return ret; 
212 | load_error: 213 | fprintf(stderr, "Cannot open or load BPF object\n"); 214 | return 2; 215 | } 216 | 217 | int main(int argc, char **argv) 218 | { 219 | static struct option long_options[] = { 220 | {"help", no_argument, 0, 'h' }, 221 | {"probe", no_argument, 0, 'p' }, 222 | {"tracepoint", no_argument, 0, 'r' }, 223 | {"trampoline", no_argument, 0, 't' }, 224 | {0, 0, 0, 0} 225 | }; 226 | 227 | int selector = 0; 228 | int option_index = 0; 229 | while (1) { 230 | int c = getopt_long(argc, argv, "", long_options, &option_index); 231 | if (c == -1) 232 | break; 233 | 234 | switch (option_index) { 235 | case NETDATA_EBPF_CORE_IDX_HELP: { 236 | ebpf_core_print_help(argv[0], "mount", 1, 0); 237 | exit(0); 238 | } 239 | case NETDATA_EBPF_CORE_IDX_PROBE: { 240 | selector = NETDATA_MODE_PROBE; 241 | break; 242 | } 243 | case NETDATA_EBPF_CORE_IDX_TRACEPOINT: { 244 | fprintf(stdout, "This specific software does not have tracepoint, using kprobe instead.\n"); 245 | selector = NETDATA_MODE_PROBE; 246 | break; 247 | } 248 | case NETDATA_EBPF_CORE_IDX_TRAMPOLINE: { 249 | selector = NETDATA_MODE_TRAMPOLINE; 250 | break; 251 | } 252 | default: { 253 | break; 254 | } 255 | } 256 | } 257 | 258 | // Adjust memory 259 | int ret = netdata_ebf_memlock_limit(); 260 | if (ret) { 261 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 262 | return 1; 263 | } 264 | 265 | libbpf_set_print(netdata_libbpf_vfprintf); 266 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 267 | 268 | struct btf *bf = NULL; 269 | if (!selector) { 270 | bf = netdata_parse_btf_file((const char *)NETDATA_BTF_FILE); 271 | if (bf) 272 | selector = ebpf_find_functions(bf, selector, syscalls, NETDATA_MOUNT_SYSCALLS_END); 273 | } 274 | 275 | int stop_software = 0; 276 | while (stop_software < 2) { 277 | if (ebpf_mount_tests(selector) && !stop_software) { 278 | selector = 1; 279 | stop_software++; 280 | } else 281 | stop_software = 2; 282 | } 283 | 284 | return 0; 285 | } 286 | 287 | 
-------------------------------------------------------------------------------- /src/netdata_core_common.h: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-3.0-or-later 2 | 3 | #ifndef _NETDATA_CORE_COMMON_H_ 4 | #define _NETDATA_CORE_COMMON_H_ 1 5 | 6 | #include "netdata_defs.h" 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #ifndef TASK_COMM_LEN 13 | #define TASK_COMM_LEN 16 14 | #endif 15 | 16 | enum NETDATA_EBPF_CORE_IDX { 17 | NETDATA_EBPF_CORE_IDX_HELP, 18 | NETDATA_EBPF_CORE_IDX_PROBE, 19 | NETDATA_EBPF_CORE_IDX_TRACEPOINT, 20 | NETDATA_EBPF_CORE_IDX_TRAMPOLINE, 21 | NETDATA_EBPF_CORE_IDX_PID 22 | }; 23 | 24 | #define NETDATA_EBPF_CORE_MIN_STORE 128 25 | 26 | #define NETDATA_EBPF_KERNEL_5_19_0 332544 27 | 28 | #define NETDATA_CORE_PROCESS_NUMBER 4096 29 | 30 | typedef struct ebpf_specify_name { 31 | char *program_name; 32 | char *function_to_attach; 33 | size_t length; 34 | char *optional; 35 | bool retprobe; 36 | } ebpf_specify_name_t; 37 | 38 | /** 39 | * Update names 40 | * 41 | * Open /proc/kallsyms and update the name for specific function 42 | * 43 | * THIS FUNCTION IS ALSO PRESENT IN `kernel-collector` REPO, AS SOON IT IS TRANSFERRED FROM TEST TO 44 | * COMMON FILE, IT NEEDS TO BE REMOVED FROM HERE. 45 | * 46 | * @param names vector with names to modify target. 
47 | */ 48 | static inline void ebpf_update_names(ebpf_specify_name_t *names) 49 | { 50 | if (!names) 51 | return; 52 | 53 | char line[256]; 54 | FILE *fp = fopen("/proc/kallsyms", "r"); 55 | if (!fp) 56 | return; 57 | 58 | int total; 59 | for (total = 0; names[total].program_name; total++); 60 | 61 | if (!total) 62 | return; 63 | 64 | char *data; 65 | int all_filled = 0; 66 | while ( (data = fgets(line, 255, fp))) { 67 | data += 19; 68 | ebpf_specify_name_t *name; 69 | int i; 70 | for (i = 0, name = &names[i]; name->program_name; i++, name = &names[i]) { 71 | if (name->optional) 72 | continue; 73 | 74 | char *end = strchr(data, ' '); 75 | if (!end) 76 | end = strchr(data, '\n'); 77 | 78 | if (end) 79 | *end = '\0'; 80 | 81 | if (!strcmp(name->function_to_attach, data)) { 82 | all_filled++; 83 | name->optional = strdup(data); 84 | break; 85 | } 86 | } 87 | 88 | if (all_filled == total) 89 | break; 90 | } 91 | 92 | fclose(fp); 93 | } 94 | 95 | /** 96 | * Fill Control table 97 | * 98 | * Fill control table with data allowing eBPF collectors to store specific data. 99 | * 100 | * @param map the loaded map 101 | * @param map_level how are we going to store PIDs 102 | */ 103 | static inline void ebpf_core_fill_ctrl(struct bpf_map *map, enum netdata_apps_level map_level) 104 | { 105 | int fd = bpf_map__fd(map); 106 | 107 | unsigned int i, end = bpf_map__max_entries(map); 108 | uint64_t values[NETDATA_CONTROLLER_END] = { 1, map_level, 0, 0, 0, 0}; 109 | for (i = 0; i < end; i++) { 110 | int ret = bpf_map_update_elem(fd, &i, &values[i], 0); 111 | if (ret) 112 | fprintf(stderr, "\"error\" : \"Add key(%u) for controller table failed.\",", i); 113 | } 114 | } 115 | 116 | /** 117 | * Check map level 118 | * 119 | * Verify if the given value is one of expected values to store inside hash table 120 | * 121 | * @param value is the value given 122 | * 123 | * @return It returns the given value when there is no error, or it returns the default when value is 124 | * invalid. 
125 | */ 126 | static inline enum netdata_apps_level ebpf_check_map_level(int value) 127 | { 128 | if (value < NETDATA_APPS_LEVEL_REAL_PARENT || value > NETDATA_APPS_LEVEL_IGNORE) { 129 | fprintf(stderr, "\"Error\" : \"Value given (%d) is not valid, resetting to default 0 (Real Parent).\",\n", 130 | value); 131 | value = NETDATA_APPS_LEVEL_REAL_PARENT; 132 | } 133 | 134 | return value; 135 | } 136 | 137 | static inline void ebpf_core_print_help(char *name, char *info, int has_trampoline, int has_integration) { 138 | fprintf(stdout, "%s tests if it is possible to monitor %s on host\n\n" 139 | "The following options are available:\n\n" 140 | "--help : Prints this help.\n" 141 | "--probe : Use probe and do no try to use trampolines (fentry/fexit).\n" 142 | "--tracepoint : Use tracepoint.\n" 143 | , name, info); 144 | if (has_trampoline) 145 | fprintf(stdout, "--trampoline : Try to use trampoline(fentry/fexit). If this is not possible" 146 | " probes will be used.\n"); 147 | if (has_integration) 148 | fprintf(stdout, "--pid : Store PID according argument given. Values can be:\n" 149 | "\t\t0 - Real parents\n\t\t1 - Parents\n\t\t2 - All PIDs\n\t\t3 - Ignore PIDs\n"); 150 | } 151 | 152 | /** 153 | * Liibbpf vfprintf 154 | * 155 | * We use this function to filter separate software error from libbpf errors. 156 | * 157 | * @param level message level. 158 | * @param format is the second argument used with vfprintf; 159 | * @param args is tthe list of args used with format. 160 | * 161 | * @return it returns number of bytes written. 162 | */ 163 | static inline int netdata_libbpf_vfprintf(enum libbpf_print_level level, const char *format, va_list args) 164 | { 165 | // FOR DEVELOPERS: To avoid generation of a lot of messages we are not printing all debug messages. 166 | // When some software is developed, we strongly suggest to comment next two lines to take a look 167 | // in all messages. 
168 | if (level == LIBBPF_DEBUG) 169 | return 0; 170 | 171 | static FILE *libbpf_err = NULL; 172 | if (!libbpf_err) { 173 | libbpf_err = fopen("libbpf.log", "a"); 174 | if (!libbpf_err) { 175 | fprintf(stderr, "Cannot open libbpf.log"); 176 | exit(1); 177 | } 178 | } 179 | 180 | return vfprintf(libbpf_err, format, args); 181 | } 182 | 183 | #endif /* _NETDATA_CORE_COMMON_H_ */ 184 | 185 | -------------------------------------------------------------------------------- /src/networkviewer.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 10 | #define __USE_GNU 11 | #include 12 | #include 13 | 14 | #include "netdata_defs.h" 15 | #include "netdata_tests.h" 16 | #include "netdata_core_common.h" 17 | #include "netdata_socket.h" 18 | 19 | #include "networkviewer.skel.h" 20 | 21 | // Socket functions 22 | char *function_list[] = { "inet_csk_accept", 23 | "tcp_retransmit_skb", 24 | "tcp_cleanup_rbuf", 25 | "tcp_close", 26 | "udp_recvmsg", 27 | "tcp_sendmsg", 28 | "udp_sendmsg", 29 | "tcp_v4_connect", 30 | "tcp_v6_connect", 31 | "tcp_set_state"}; 32 | 33 | #define NETDATA_IPV4 4 34 | #define NETDATA_IPV6 6 35 | 36 | static int ebpf_attach_probes(struct networkviewer_bpf *obj) 37 | { 38 | obj->links.netdata_nv_inet_csk_accept_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_inet_csk_accept_kretprobe, 39 | true, function_list[NETDATA_FCNT_INET_CSK_ACCEPT]); 40 | int ret = libbpf_get_error(obj->links.netdata_nv_inet_csk_accept_kretprobe); 41 | if (ret) 42 | return -1; 43 | 44 | obj->links.netdata_nv_tcp_v4_connect_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_tcp_v4_connect_kprobe, 45 | false, function_list[NETDATA_FCNT_TCP_V4_CONNECT]); 46 | ret = libbpf_get_error(obj->links.netdata_nv_tcp_v4_connect_kprobe); 47 | if (ret) 48 | return -1; 49 | 50 | /* 51 | 
obj->links.netdata_nv_tcp_v6_connect_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_tcp_v6_connect_kprobe, 52 | false, function_list[NETDATA_FCNT_TCP_V6_CONNECT]); 53 | ret = libbpf_get_error(obj->links.netdata_nv_tcp_v6_connect_kprobe); 54 | if (ret) 55 | return -1; 56 | */ 57 | 58 | obj->links.netdata_nv_tcp_retransmit_skb_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_tcp_retransmit_skb_kprobe, 59 | false, function_list[NETDATA_FCNT_TCP_RETRANSMIT]); 60 | ret = libbpf_get_error(obj->links.netdata_nv_tcp_retransmit_skb_kprobe); 61 | if (ret) 62 | return -1; 63 | 64 | obj->links.netdata_nv_tcp_cleanup_rbuf_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_tcp_cleanup_rbuf_kprobe, 65 | false, function_list[NETDATA_FCNT_TCP_CLOSE]); 66 | ret = libbpf_get_error(obj->links.netdata_nv_tcp_cleanup_rbuf_kprobe); 67 | if (ret) 68 | return -1; 69 | 70 | obj->links.netdata_nv_tcp_close_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_tcp_close_kprobe, 71 | false, function_list[NETDATA_FCNT_CLEANUP_RBUF]); 72 | ret = libbpf_get_error(obj->links.netdata_nv_tcp_close_kprobe); 73 | if (ret) 74 | return -1; 75 | 76 | obj->links.netdata_nv_udp_recvmsg_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_udp_recvmsg_kprobe, 77 | false, function_list[NETDATA_FCNT_UDP_RECEVMSG]); 78 | ret = libbpf_get_error(obj->links.netdata_nv_udp_recvmsg_kprobe); 79 | if (ret) 80 | return -1; 81 | 82 | obj->links.netdata_nv_tcp_sendmsg_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_tcp_sendmsg_kprobe, 83 | false, function_list[NETDATA_FCNT_TCP_SENDMSG]); 84 | ret = libbpf_get_error(obj->links.netdata_nv_tcp_sendmsg_kprobe); 85 | if (ret) 86 | return -1; 87 | 88 | obj->links.netdata_nv_udp_sendmsg_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_udp_sendmsg_kprobe, 89 | false, function_list[NETDATA_FCNT_UDP_SENDMSG]); 90 | ret = libbpf_get_error(obj->links.netdata_nv_udp_sendmsg_kprobe); 91 | if (ret) 92 | return -1; 93 | 94 | 
obj->links.netdata_nv_tcp_set_state_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_nv_tcp_set_state_kprobe, 95 | true, function_list[NETDATA_FCNT_TCP_SET_STATE]); 96 | ret = libbpf_get_error(obj->links.netdata_nv_tcp_set_state_kprobe); 97 | if (ret) 98 | return -1; 99 | 100 | return 0; 101 | } 102 | 103 | static void ebpf_disable_probes(struct networkviewer_bpf *obj) 104 | { 105 | bpf_program__set_autoload(obj->progs.netdata_nv_inet_csk_accept_kretprobe, false); 106 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_v4_connect_kprobe, false); 107 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_v6_connect_kprobe, false); 108 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_retransmit_skb_kprobe, false); 109 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_cleanup_rbuf_kprobe, false); 110 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_close_kprobe, false); 111 | bpf_program__set_autoload(obj->progs.netdata_nv_udp_recvmsg_kprobe, false); 112 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_sendmsg_kprobe, false); 113 | bpf_program__set_autoload(obj->progs.netdata_nv_udp_sendmsg_kprobe, false); 114 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_set_state_kprobe, false); 115 | } 116 | 117 | static void ebpf_disable_trampoline(struct networkviewer_bpf *obj) 118 | { 119 | bpf_program__set_autoload(obj->progs.netdata_nv_inet_csk_accept_fexit, false); 120 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_v4_connect_fentry, false); 121 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_v6_connect_fentry, false); 122 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_retransmit_skb_fentry, false); 123 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_cleanup_rbuf_fentry, false); 124 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_close_fentry, false); 125 | bpf_program__set_autoload(obj->progs.netdata_nv_udp_recvmsg_fentry, false); 126 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_sendmsg_fentry, 
false); 127 | bpf_program__set_autoload(obj->progs.netdata_nv_udp_sendmsg_fentry, false); 128 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_set_state_fentry, false); 129 | } 130 | 131 | static void ebpf_set_trampoline_target(struct networkviewer_bpf *obj) 132 | { 133 | bpf_program__set_attach_target(obj->progs.netdata_nv_inet_csk_accept_fexit, 0, 134 | function_list[NETDATA_FCNT_INET_CSK_ACCEPT]); 135 | 136 | bpf_program__set_attach_target(obj->progs.netdata_nv_tcp_v4_connect_fentry, 0, 137 | function_list[NETDATA_FCNT_TCP_V4_CONNECT]); 138 | 139 | bpf_program__set_attach_target(obj->progs.netdata_nv_tcp_v6_connect_fentry, 0, 140 | function_list[NETDATA_FCNT_TCP_V6_CONNECT]); 141 | 142 | bpf_program__set_attach_target(obj->progs.netdata_nv_tcp_retransmit_skb_fentry, 0, 143 | function_list[NETDATA_FCNT_TCP_RETRANSMIT]); 144 | 145 | bpf_program__set_attach_target(obj->progs.netdata_nv_tcp_cleanup_rbuf_fentry, 0, 146 | function_list[NETDATA_FCNT_CLEANUP_RBUF]); 147 | 148 | bpf_program__set_attach_target(obj->progs.netdata_nv_tcp_close_fentry, 0, 149 | function_list[NETDATA_FCNT_TCP_CLOSE]); 150 | 151 | bpf_program__set_attach_target(obj->progs.netdata_nv_udp_recvmsg_fentry, 0, 152 | function_list[NETDATA_FCNT_UDP_RECEVMSG]); 153 | 154 | bpf_program__set_attach_target(obj->progs.netdata_nv_tcp_sendmsg_fentry, 0, 155 | function_list[NETDATA_FCNT_TCP_SENDMSG]); 156 | 157 | bpf_program__set_attach_target(obj->progs.netdata_nv_udp_sendmsg_fentry, 0, 158 | function_list[NETDATA_FCNT_UDP_SENDMSG]); 159 | 160 | bpf_program__set_attach_target(obj->progs.netdata_nv_tcp_set_state_fentry, 0, 161 | function_list[NETDATA_FCNT_TCP_SET_STATE]); 162 | } 163 | 164 | static inline int ebpf_load_and_attach(struct networkviewer_bpf *obj, int selector) 165 | { 166 | // Adjust memory 167 | int ret; 168 | if (!selector) { // trampoline 169 | ebpf_disable_probes(obj); 170 | 171 | ebpf_set_trampoline_target(obj); 172 | } else if (selector == NETDATA_MODE_PROBE) { // kprobe 173 | 
ebpf_disable_trampoline(obj); 174 | } 175 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_v6_connect_kprobe, false); 176 | bpf_program__set_autoload(obj->progs.netdata_nv_tcp_v6_connect_fentry, false); 177 | 178 | ret = networkviewer_bpf__load(obj); 179 | if (ret) { 180 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 181 | return -1; 182 | } 183 | 184 | if (!selector) { 185 | ret = networkviewer_bpf__attach(obj); 186 | } else { 187 | ret = ebpf_attach_probes(obj); 188 | } 189 | 190 | if (!ret) { 191 | fprintf(stdout, "Socket loaded with success\n"); 192 | } 193 | 194 | return ret; 195 | } 196 | 197 | static int netdata_read_socket(struct networkviewer_bpf *obj, int ebpf_nprocs) 198 | { 199 | netdata_socket_t stored[ebpf_nprocs]; 200 | 201 | uint64_t counter = 0; 202 | int fd = bpf_map__fd(obj->maps.tbl_nv_socket); 203 | netdata_nv_idx_t key = { }; 204 | netdata_nv_idx_t next_key = { }; 205 | while (!bpf_map_get_next_key(fd, &key, &next_key)) { 206 | if (!bpf_map_lookup_elem(fd, &key, stored)) { 207 | counter++; 208 | } 209 | 210 | key = next_key; 211 | } 212 | 213 | if (counter) { 214 | fprintf(stdout, "Socket data stored with success. 
It collected %lu sockets\n", counter); 215 | return 0; 216 | } 217 | 218 | fprintf(stdout, "Cannot read socket data.\n"); 219 | 220 | return 2; 221 | } 222 | 223 | 224 | int ebpf_networkviewer_tests(int selector, enum netdata_apps_level map_level) 225 | { 226 | struct networkviewer_bpf *obj = NULL; 227 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 228 | if (ebpf_nprocs < 0) 229 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 230 | 231 | obj = networkviewer_bpf__open(); 232 | if (!obj) { 233 | goto load_error; 234 | } 235 | 236 | // obj->rodata->collect_everything = true; 237 | 238 | int ret = ebpf_load_and_attach(obj, selector); 239 | if (ret && selector != NETDATA_MODE_PROBE) { 240 | networkviewer_bpf__destroy(obj); 241 | 242 | obj = networkviewer_bpf__open(); 243 | if (!obj) { 244 | goto load_error; 245 | } 246 | 247 | selector = NETDATA_MODE_PROBE; 248 | ret = ebpf_load_and_attach(obj, selector); 249 | } 250 | 251 | if (!ret) { 252 | ebpf_core_fill_ctrl(obj->maps.nv_ctrl, map_level); 253 | 254 | sleep(60); 255 | 256 | // Separator between load and result 257 | fprintf(stdout, "\n================= READ DATA =================\n\n"); 258 | if (!ret) { 259 | 260 | ret += netdata_read_socket(obj, ebpf_nprocs); 261 | 262 | if (!ret) 263 | fprintf(stdout, "All stored data were retrieved with success!\n"); 264 | } else 265 | fprintf(stderr, "Cannot read global table\n"); 266 | } else { 267 | ret = 3; 268 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 269 | } 270 | 271 | 272 | networkviewer_bpf__destroy(obj); 273 | 274 | return ret; 275 | 276 | load_error: 277 | fprintf(stderr, "Cannot open or load BPF object\n"); 278 | return 2; 279 | } 280 | 281 | int main(int argc, char **argv) 282 | { 283 | static struct option long_options[] = { 284 | {"help", no_argument, 0, 0 }, 285 | {"probe", no_argument, 0, 0 }, 286 | {"tracepoint", no_argument, 0, 0 }, 287 | {"trampoline", no_argument, 0, 0 }, 288 | {"pid", required_argument, 0, 0 }, 289 | {0, no_argument, 0, 0} 
290 | }; 291 | 292 | int selector = NETDATA_MODE_TRAMPOLINE; 293 | int option_index = 0; 294 | enum netdata_apps_level map_level = NETDATA_APPS_LEVEL_REAL_PARENT; 295 | while (1) { 296 | int c = getopt_long_only(argc, argv, "", long_options, &option_index); 297 | if (c == -1) 298 | break; 299 | 300 | switch (option_index) { 301 | case NETDATA_EBPF_CORE_IDX_HELP: { 302 | ebpf_core_print_help(argv[0], "networkviewer", 1, 1); 303 | exit(0); 304 | } 305 | case NETDATA_EBPF_CORE_IDX_PROBE: { 306 | selector = NETDATA_MODE_PROBE; 307 | break; 308 | } 309 | case NETDATA_EBPF_CORE_IDX_TRACEPOINT: { 310 | selector = NETDATA_MODE_PROBE; 311 | fprintf(stdout, "This specific software does not have tracepoint, using kprobe instead\n"); 312 | break; 313 | } 314 | case NETDATA_EBPF_CORE_IDX_TRAMPOLINE: { 315 | selector = NETDATA_MODE_TRAMPOLINE; 316 | break; 317 | } 318 | case NETDATA_EBPF_CORE_IDX_PID: { 319 | int user_input = (int)strtol(optarg, NULL, 10); 320 | map_level = ebpf_check_map_level(user_input); 321 | break; 322 | } 323 | default: { 324 | break; 325 | } 326 | } 327 | } 328 | 329 | // Adjust memory 330 | int ret = netdata_ebf_memlock_limit(); 331 | if (ret) { 332 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 333 | return 1; 334 | } 335 | 336 | libbpf_set_print(netdata_libbpf_vfprintf); 337 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 338 | 339 | int stop_software = 0; 340 | while (stop_software < 2) { 341 | if (ebpf_networkviewer_tests(selector, map_level) && !stop_software) { 342 | selector = 1; 343 | stop_software++; 344 | } else 345 | stop_software = 2; 346 | } 347 | 348 | return 0; 349 | } 350 | 351 | -------------------------------------------------------------------------------- /src/nfs.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_core_read.h" 4 | #include "bpf_helpers.h" 5 | 6 | #include "netdata_core.h" 7 | #include 
"netdata_fs.h"

/************************************************************************************
 *
 *                                 MAP Section
 *
 ***********************************************************************************/

// Histogram of NFS call latencies: one slot per (operation, latency bin) pair.
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __type(key, __u32);
    __type(value, __u64);
    __uint(max_entries, NETDATA_FS_MAX_ELEMENTS);
} tbl_nfs SEC(".maps");

// Scratch map: entry timestamp keyed by TGID, consumed by the matching exit probe.
// NOTE(review): 4192 looks like a typo for 4096 — confirm before changing, since
// resizing the map alters runtime behavior.
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
    __type(key, __u32);
    __type(value, __u64);
    __uint(max_entries, 4192);
} tmp_nfs SEC(".maps");


/************************************************************************************
 *
 *                                 COMMON
 *
 ***********************************************************************************/

// Record the entry timestamp for the current task, keyed by TGID.
// NOTE(review): because the key is the TGID, concurrent NFS calls from threads of
// the same process overwrite each other's timestamps — confirm this is acceptable.
static __always_inline int netdata_nfs_entry()
{
    __u64 pid_tgid = bpf_get_current_pid_tgid();
    __u32 pid = (__u32)(pid_tgid >> 32); // upper 32 bits hold the TGID
    __u64 ts = bpf_ktime_get_ns();

    bpf_map_update_elem(&tmp_nfs, &pid, &ts, BPF_ANY);

    return 0;
}

// Compute the elapsed time since the matching entry probe and bump the histogram
// bin for `selection` (the NFS operation being measured). Does nothing when no
// entry timestamp exists for the current TGID.
static __always_inline int netdata_nfs_store_bin(__u32 selection)
{
    __u64 *fill, data;
    __u64 pid_tgid = bpf_get_current_pid_tgid();
    __u32 bin, pid = (__u32)(pid_tgid >> 32);

    fill = bpf_map_lookup_elem(&tmp_nfs, &pid);
    if (!fill)
        return 0;

    data = bpf_ktime_get_ns() - *fill;
    bpf_map_delete_elem(&tmp_nfs, &pid);

    // Skip entries with backward time
    if ( (s64)data < 0)
        return 0;

    // convert to microseconds
    data /= 1000;
    bin = libnetdata_select_idx(data, NETDATA_FS_MAX_BINS_POS);
    // Flatten (operation, bin) into the per-CPU array index; drop anything that
    // would land outside the table.
    __u32 idx = selection * NETDATA_FS_MAX_BINS + bin;
    if (idx >= NETDATA_FS_MAX_ELEMENTS)
        return 0;

    fill = bpf_map_lookup_elem(&tbl_nfs, &idx);
    if (fill) {
        libnetdata_update_u64(fill, 1);
        return 0;
    }

    // First hit for this slot: seed it with 1.
    data = 1;
    bpf_map_update_elem(&tbl_nfs, &idx, &data,
BPF_ANY); 79 | 80 | return 0; 81 | } 82 | 83 | /************************************************************************************ 84 | * 85 | * ENTRY SECTION (trampoline) 86 | * 87 | ***********************************************************************************/ 88 | 89 | SEC("fentry/nfs_file_read") 90 | int BPF_PROG(netdata_nfs_file_read_entry, struct kiocb *iocb) 91 | { 92 | struct file *fp = iocb->ki_filp; 93 | if (!fp) 94 | return 0; 95 | 96 | return netdata_nfs_entry(); 97 | } 98 | 99 | SEC("fentry/nfs_file_write") 100 | int BPF_PROG(netdata_nfs_file_write_entry, struct kiocb *iocb) 101 | { 102 | struct file *fp = iocb->ki_filp; 103 | if (!fp) 104 | return 0; 105 | 106 | return netdata_nfs_entry(); 107 | } 108 | 109 | SEC("fentry/nfs_file_open") 110 | int BPF_PROG(netdata_nfs_file_open_entry, struct inode *inode, struct file *filp) 111 | { 112 | if (!filp) 113 | return 0; 114 | 115 | return netdata_nfs_entry(); 116 | } 117 | 118 | SEC("fentry/nfs4_file_open") 119 | int BPF_PROG(netdata_nfs4_file_open_entry, struct inode *inode, struct file *filp) 120 | { 121 | if (!filp) 122 | return 0; 123 | 124 | return netdata_nfs_entry(); 125 | } 126 | 127 | SEC("fentry/nfs_getattr") 128 | int BPF_PROG(netdata_nfs_getattr_entry) 129 | { 130 | return netdata_nfs_entry(); 131 | } 132 | 133 | /************************************************************************************ 134 | * 135 | * END SECTION (trampoline) 136 | * 137 | ***********************************************************************************/ 138 | 139 | SEC("fexit/nfs_file_read") 140 | int BPF_PROG(netdata_nfs_file_read_exit) 141 | { 142 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_READ); 143 | } 144 | 145 | SEC("fexit/nfs_file_write") 146 | int BPF_PROG(netdata_nfs_file_write_exit) 147 | { 148 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_WRITE); 149 | } 150 | 151 | SEC("fexit/nfs_file_open") 152 | int BPF_PROG(netdata_nfs_file_open_exit) 153 | { 154 | return 
netdata_nfs_store_bin(NETDATA_KEY_CALLS_OPEN); 155 | } 156 | 157 | SEC("fexit/nfs4_file_open") 158 | int BPF_PROG(netdata_nfs4_file_open_exit) 159 | { 160 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_OPEN); 161 | } 162 | 163 | SEC("fexit/nfs_getattr") 164 | int BPF_PROG(netdata_nfs_getattr_exit) 165 | { 166 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_SYNC); 167 | } 168 | 169 | /************************************************************************************ 170 | * 171 | * ENTRY SECTION (kprobe) 172 | * 173 | ***********************************************************************************/ 174 | 175 | SEC("kprobe/nfs_file_read") 176 | int BPF_KPROBE(netdata_nfs_file_read_probe, struct kiocb *iocb) 177 | { 178 | struct file *fp = BPF_CORE_READ(iocb, ki_filp); 179 | if (!fp) 180 | return 0; 181 | 182 | return netdata_nfs_entry(); 183 | } 184 | 185 | SEC("kprobe/nfs_file_write") 186 | int BPF_KPROBE(netdata_nfs_file_write_probe, struct kiocb *iocb) 187 | { 188 | struct file *fp = BPF_CORE_READ(iocb, ki_filp); 189 | if (!fp) 190 | return 0; 191 | 192 | return netdata_nfs_entry(); 193 | } 194 | 195 | SEC("kprobe/nfs_file_open") 196 | int BPF_KPROBE(netdata_nfs_file_open_probe, struct inode *inode, struct file *filp) 197 | { 198 | if (!filp) 199 | return 0; 200 | 201 | return netdata_nfs_entry(); 202 | } 203 | 204 | SEC("kprobe/nfs4_file_open") 205 | int BPF_KPROBE(netdata_nfs4_file_open_probe, struct inode *inode, struct file *filp) 206 | { 207 | if (!filp) 208 | return 0; 209 | 210 | return netdata_nfs_entry(); 211 | } 212 | 213 | SEC("kprobe/nfs_getattr") 214 | int BPF_KPROBE(netdata_nfs_getattr_probe) 215 | { 216 | return netdata_nfs_entry(); 217 | } 218 | 219 | /************************************************************************************ 220 | * 221 | * END SECTION (kretprobe) 222 | * 223 | ***********************************************************************************/ 224 | 225 | SEC("kretprobe/nfs_file_read") 226 | int 
BPF_KRETPROBE(netdata_nfs_file_read_retprobe) 227 | { 228 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_READ); 229 | } 230 | 231 | SEC("kretprobe/nfs_file_write") 232 | int BPF_KRETPROBE(netdata_nfs_file_write_retprobe) 233 | { 234 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_WRITE); 235 | } 236 | 237 | SEC("kretprobe/nfs_file_open") 238 | int BPF_KRETPROBE(netdata_nfs_file_open_retprobe) 239 | { 240 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_OPEN); 241 | } 242 | 243 | SEC("kretprobe/nfs4_file_open") 244 | int BPF_KRETPROBE(netdata_nfs4_file_open_retprobe) 245 | { 246 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_OPEN); 247 | } 248 | 249 | SEC("kretprobe/nfs_getattr") 250 | int BPF_KRETPROBE(netdata_nfs_getattr_retprobe) 251 | { 252 | return netdata_nfs_store_bin(NETDATA_KEY_CALLS_SYNC); 253 | } 254 | 255 | char _license[] SEC("license") = "GPL"; 256 | 257 | 258 | -------------------------------------------------------------------------------- /src/oomkill.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_oomkill.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 16 | __type(key, int); 17 | __type(value, __u8); 18 | __uint(max_entries, NETDATA_OOMKILL_MAX_ENTRIES); 19 | } tbl_oomkill SEC(".maps"); 20 | 21 | /*********************************************************************************** 22 | * 23 | * OOMKILL SECTION(tracepoint) 24 | * 25 | ***********************************************************************************/ 26 | 27 | SEC("tracepoint/oom/mark_victim") 28 | int netdata_oom_mark_victim(struct netdata_oom_mark_victim_entry *ptr) { 29 | 
int key = ptr->pid; 30 | u8 val = 0; 31 | bpf_map_update_elem(&tbl_oomkill, &key, &val, BPF_ANY); 32 | return 0; 33 | } 34 | 35 | char _license[] SEC("license") = "GPL"; 36 | 37 | -------------------------------------------------------------------------------- /src/oomkill.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include "netdata_defs.h" 12 | #include "netdata_tests.h" 13 | #include "netdata_core_common.h" 14 | 15 | #include "oomkill.skel.h" 16 | 17 | static inline int ebpf_load_and_attach(struct oomkill_bpf *obj) 18 | { 19 | int ret = oomkill_bpf__load(obj); 20 | if (ret) { 21 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 22 | return -1; 23 | } 24 | 25 | ret = oomkill_bpf__attach(obj); 26 | if (!ret) { 27 | fprintf(stdout, "OOMkill loaded with success\n"); 28 | } 29 | 30 | return ret; 31 | } 32 | 33 | static void ebpf_update_table(int global) 34 | { 35 | int idx = 0; 36 | unsigned char value = 'a'; 37 | int ret = bpf_map_update_elem(global, &idx, &value, 0); 38 | if (ret) 39 | fprintf(stderr, "Cannot insert value to global table."); 40 | } 41 | 42 | static int oomkill_read_array(int fd, int ebpf_nprocs) 43 | { 44 | unsigned char stored[ebpf_nprocs]; 45 | 46 | unsigned char counter = 0; 47 | int idx = 0; 48 | if (!bpf_map_lookup_elem(fd, &idx, stored)) { 49 | int j; 50 | for (j = 0; j < ebpf_nprocs; j++) { 51 | counter += stored[j]; 52 | } 53 | } 54 | 55 | if (counter) { 56 | fprintf(stdout, "Data stored with success\n"); 57 | return 0; 58 | } 59 | 60 | return 2; 61 | } 62 | 63 | static int ebpf_oomkill_tests() 64 | { 65 | struct oomkill_bpf *obj = NULL; 66 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 67 | if (ebpf_nprocs < 0) 68 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 69 | 70 | obj = oomkill_bpf__open(); 71 | if (!obj) { 72 
| fprintf(stderr, "Cannot open or load BPF object\n"); 73 | 74 | return 2; 75 | } 76 | 77 | int ret = ebpf_load_and_attach(obj); 78 | if (!ret) { 79 | int fd = bpf_map__fd(obj->maps.tbl_oomkill); 80 | ebpf_update_table(fd); 81 | 82 | ret = oomkill_read_array(fd, ebpf_nprocs); 83 | if (ret) 84 | fprintf(stderr, "Cannot read global table\n"); 85 | } else { 86 | ret = 3; 87 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 88 | } 89 | 90 | oomkill_bpf__destroy(obj); 91 | 92 | return ret; 93 | } 94 | 95 | int main(int argc, char **argv) 96 | { 97 | static struct option long_options[] = { 98 | {"help", no_argument, 0, 'h' }, 99 | {0, 0, 0, 0} 100 | }; 101 | 102 | int option_index = 0; 103 | while (1) { 104 | int c = getopt_long(argc, argv, "", long_options, &option_index); 105 | if (c == -1) 106 | break; 107 | 108 | switch (c) { 109 | case 'h': { 110 | ebpf_tracepoint_help("OOMkill"); 111 | exit(0); 112 | } 113 | default: { 114 | break; 115 | } 116 | } 117 | } 118 | 119 | int ret = netdata_ebf_memlock_limit(); 120 | if (ret) { 121 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 122 | return 1; 123 | } 124 | 125 | libbpf_set_print(netdata_libbpf_vfprintf); 126 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 127 | 128 | return ebpf_oomkill_tests(); 129 | } 130 | 131 | -------------------------------------------------------------------------------- /src/process.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_process.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_HASH); 16 | __type(key, __u32); 17 | __type(value, struct netdata_pid_stat_t); 18 | __uint(max_entries, 
PID_MAX_DEFAULT); 19 | } tbl_pid_stats SEC(".maps"); 20 | 21 | struct { 22 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 23 | __type(key, __u32); 24 | __type(value, __u64); 25 | __uint(max_entries, NETDATA_GLOBAL_COUNTER); 26 | } tbl_total_stats SEC(".maps"); 27 | 28 | struct { 29 | __uint(type, BPF_MAP_TYPE_ARRAY); 30 | __type(key, __u32); 31 | __type(value, __u64); 32 | __uint(max_entries, NETDATA_CONTROLLER_END); 33 | } process_ctrl SEC(".maps"); 34 | 35 | /************************************************************************************ 36 | * 37 | * COMMON SECTION 38 | * 39 | ***********************************************************************************/ 40 | 41 | static __always_inline void netdata_fill_common_process_data(struct netdata_pid_stat_t *data) 42 | { 43 | data->ct = bpf_ktime_get_ns(); 44 | bpf_get_current_comm(&data->name, TASK_COMM_LEN); 45 | 46 | __u64 pid_tgid = bpf_get_current_pid_tgid(); 47 | __u32 tgid = (__u32)( 0x00000000FFFFFFFF & pid_tgid); 48 | __u32 pid = (0xFFFFFFFF00000000 & pid_tgid)>>32; 49 | 50 | data->tgid = tgid; 51 | data->pid = pid; 52 | } 53 | 54 | static __always_inline int netdata_process_not_update_apps() 55 | { 56 | __u32 key = NETDATA_CONTROLLER_APPS_ENABLED; 57 | __u32 *apps = bpf_map_lookup_elem(&process_ctrl ,&key); 58 | if (apps && *apps) 59 | return 0; 60 | 61 | return 1; 62 | } 63 | 64 | 65 | static __always_inline int netdata_common_release_task() 66 | { 67 | struct netdata_pid_stat_t *fill; 68 | __u32 key = 0; 69 | __u32 tgid = 0; 70 | 71 | libnetdata_update_global(&tbl_total_stats, NETDATA_KEY_CALLS_RELEASE_TASK, 1); 72 | if (netdata_process_not_update_apps()) 73 | return 0; 74 | 75 | fill = netdata_get_pid_structure(&key, &tgid, &process_ctrl, &tbl_pid_stats); 76 | if (fill) { 77 | libnetdata_update_u32(&fill->release_call, 1) ; 78 | 79 | libnetdata_update_global(&process_ctrl, NETDATA_CONTROLLER_PID_TABLE_DEL, 1); 80 | } 81 | 82 | return 0; 83 | } 84 | 85 | static __always_inline int 
netdata_common_fork_clone(int ret) 86 | { 87 | __u32 key = 0; 88 | __u32 tgid = 0; 89 | struct netdata_pid_stat_t data = { }; 90 | struct netdata_pid_stat_t *fill; 91 | 92 | if (ret < 0) { 93 | libnetdata_update_global(&tbl_total_stats, NETDATA_KEY_ERROR_PROCESS, 1); 94 | } 95 | 96 | if (netdata_process_not_update_apps()) 97 | return 0; 98 | 99 | fill = netdata_get_pid_structure(&key, &tgid, &process_ctrl, &tbl_pid_stats); 100 | if (fill) { 101 | fill->release_call = 0; 102 | 103 | if (ret < 0) { 104 | libnetdata_update_u32(&fill->task_err, 1) ; 105 | } 106 | } else { 107 | netdata_fill_common_process_data(&data); 108 | data.tgid = tgid; 109 | if (ret < 0) { 110 | data.task_err = 1; 111 | } 112 | bpf_map_update_elem(&tbl_pid_stats, &key, &data, BPF_ANY); 113 | 114 | libnetdata_update_global(&process_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 115 | } 116 | 117 | return 0; 118 | } 119 | 120 | /************************************************************************************ 121 | * 122 | * PROCESS SECTION (tracepoints) 123 | * 124 | ***********************************************************************************/ 125 | 126 | // It must be always enabled 127 | SEC("tracepoint/sched/sched_process_exit") 128 | int netdata_tracepoint_sched_process_exit(struct netdata_sched_process_exit *ptr) 129 | { 130 | struct netdata_pid_stat_t *fill; 131 | __u32 key = 0; 132 | __u32 tgid = 0; 133 | 134 | libnetdata_update_global(&tbl_total_stats, NETDATA_KEY_CALLS_DO_EXIT, 1); 135 | if (netdata_process_not_update_apps()) 136 | return 0; 137 | 138 | fill = netdata_get_pid_structure(&key, &tgid, &process_ctrl, &tbl_pid_stats); 139 | if (fill) { 140 | libnetdata_update_u32(&fill->exit_call, 1) ; 141 | } 142 | 143 | return 0; 144 | } 145 | 146 | // It must be always enabled 147 | SEC("tracepoint/sched/sched_process_exec") 148 | int netdata_tracepoint_sched_process_exec(struct netdata_sched_process_exec *ptr) 149 | { 150 | struct netdata_pid_stat_t data = { }; 151 | struct 
netdata_pid_stat_t *fill; 152 | __u32 key = 0; 153 | __u32 tgid = 0; 154 | // This is necessary, because it represents the main function to start a thread 155 | libnetdata_update_global(&tbl_total_stats, NETDATA_KEY_CALLS_PROCESS, 1); 156 | 157 | libnetdata_update_global(&tbl_total_stats, NETDATA_KEY_CALLS_DO_EXIT, 1); 158 | if (netdata_process_not_update_apps()) 159 | return 0; 160 | 161 | fill = netdata_get_pid_structure(&key, &tgid, &process_ctrl, &tbl_pid_stats); 162 | if (fill) { 163 | fill->release_call = 0; 164 | libnetdata_update_u32(&fill->create_process, 1) ; 165 | } else { 166 | netdata_fill_common_process_data(&data); 167 | data.tgid = tgid; 168 | data.create_process = 1; 169 | 170 | bpf_map_update_elem(&tbl_pid_stats, &key, &data, BPF_ANY); 171 | 172 | libnetdata_update_global(&process_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 173 | } 174 | 175 | return 0; 176 | } 177 | 178 | // It must be always enabled 179 | SEC("tracepoint/sched/sched_process_fork") 180 | int netdata_tracepoint_sched_process_fork(struct netdata_sched_process_fork *ptr) 181 | { 182 | struct netdata_pid_stat_t data = { }; 183 | struct netdata_pid_stat_t *fill; 184 | __u32 key = 0; 185 | __u32 tgid = 0; 186 | 187 | libnetdata_update_global(&tbl_total_stats, NETDATA_KEY_CALLS_PROCESS, 1); 188 | 189 | // Parent ID = 1 means that init called process/thread creation 190 | int thread = 0; 191 | if (ptr->parent_pid != ptr->child_pid && ptr->parent_pid != 1) { 192 | thread = 1; 193 | libnetdata_update_global(&tbl_total_stats, NETDATA_KEY_CALLS_THREAD, 1); 194 | } 195 | 196 | if (netdata_process_not_update_apps()) 197 | return 0; 198 | 199 | fill = netdata_get_pid_structure(&key, &tgid, &process_ctrl, &tbl_pid_stats); 200 | if (fill) { 201 | fill->release_call = 0; 202 | libnetdata_update_u32(&fill->create_process, 1); 203 | if (thread) 204 | libnetdata_update_u32(&fill->create_thread, 1); 205 | } else { 206 | netdata_fill_common_process_data(&data); 207 | data.tgid = tgid; 208 | 
data.create_process = 1; 209 | if (thread) 210 | data.create_thread = 1; 211 | 212 | bpf_map_update_elem(&tbl_pid_stats, &key, &data, BPF_ANY); 213 | 214 | libnetdata_update_global(&process_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 215 | } 216 | 217 | return 0; 218 | } 219 | 220 | SEC("tracepoint/syscalls/sys_exit_clone") 221 | int netdata_clone_exit(struct trace_event_raw_sys_exit *ctx) 222 | { 223 | int ret = (int)ctx->ret; 224 | return netdata_common_fork_clone(ret); 225 | } 226 | 227 | SEC("tracepoint/syscalls/sys_exit_clone3") 228 | int netdata_clone3_exit(struct trace_event_raw_sys_exit *ctx) 229 | { 230 | int ret = (int)ctx->ret; 231 | return netdata_common_fork_clone(ret); 232 | } 233 | 234 | SEC("tracepoint/syscalls/sys_exit_fork") 235 | int netdata_fork_exit(struct trace_event_raw_sys_exit *ctx) 236 | { 237 | int ret = (int)ctx->ret; 238 | return netdata_common_fork_clone(ret); 239 | } 240 | 241 | SEC("tracepoint/syscalls/sys_exit_vfork") 242 | int netdata_vfork_exit(struct trace_event_raw_sys_exit *ctx) 243 | { 244 | int ret = (int)ctx->ret; 245 | return netdata_common_fork_clone(ret); 246 | } 247 | 248 | /************************************************************************************ 249 | * 250 | * PROCESS SECTION (kprobe) 251 | * 252 | ***********************************************************************************/ 253 | 254 | SEC("kprobe/release_task") 255 | int BPF_KPROBE(netdata_release_task_probe) 256 | { 257 | return netdata_common_release_task(); 258 | } 259 | 260 | // Must be disabled on user ring when kernel is newer than 5.9.16 261 | SEC("kretprobe/_do_fork") 262 | int BPF_KPROBE(netdata_do_fork_probe) 263 | { 264 | int ret = (int)PT_REGS_RC(ctx); 265 | return netdata_common_fork_clone(ret); 266 | } 267 | 268 | // Must be disabled on user ring when kernel is older than 5.10.0 269 | SEC("kretprobe/kernel_clone") 270 | int BPF_KPROBE(netdata_kernel_clone_probe) 271 | { 272 | int ret = (int)PT_REGS_RC(ctx); 273 | return 
netdata_common_fork_clone(ret); 274 | } 275 | 276 | /************************************************************************************ 277 | * 278 | * PROCESS SECTION (trampoline) 279 | * 280 | ***********************************************************************************/ 281 | 282 | SEC("fentry/release_task") 283 | int BPF_PROG(netdata_release_task_fentry) 284 | { 285 | return netdata_common_release_task(); 286 | } 287 | 288 | SEC("fexit/netdata_clone_fexit") 289 | int BPF_PROG(netdata_clone_fexit, const struct pt_regs *regs) 290 | { 291 | int ret = (int)PT_REGS_RC(regs); 292 | 293 | return netdata_common_fork_clone(ret); 294 | } 295 | 296 | SEC("fexit/netdata_clone3_fexit") 297 | int BPF_PROG(netdata_clone3_fexit, const struct pt_regs *regs) 298 | { 299 | int ret = (int)PT_REGS_RC(regs); 300 | 301 | return netdata_common_fork_clone(ret); 302 | } 303 | 304 | char _license[] SEC("license") = "GPL"; 305 | 306 | -------------------------------------------------------------------------------- /src/process.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 10 | #define __USE_GNU 11 | #include 12 | #include 13 | 14 | #include "netdata_defs.h" 15 | #include "netdata_tests.h" 16 | #include "netdata_core_common.h" 17 | #include "netdata_process.h" 18 | 19 | #include "process.skel.h" 20 | 21 | enum core_process { 22 | PROCESS_RELEASE_TASK_NAME, 23 | PROCESS_SYS_CLONE, 24 | PROCESS_SYS_CLONE3, 25 | PROCESS_SYS_FORK, 26 | PROCESS_KERNEL_CLONE, 27 | }; 28 | 29 | static char *names[] = { 30 | "release_task", 31 | "__x64_sys_clone", 32 | "__x64_sys_clone3", 33 | "_do_fork", 34 | "kernel_clone" 35 | }; 36 | 37 | static void ebpf_disable_probes(struct process_bpf *obj) 38 | { 39 | bpf_program__set_autoload(obj->progs.netdata_release_task_probe, false); 40 | 
bpf_program__set_autoload(obj->progs.netdata_do_fork_probe, false); 41 | bpf_program__set_autoload(obj->progs.netdata_kernel_clone_probe, false); 42 | } 43 | 44 | static void ebpf_disable_tracepoints(struct process_bpf *obj) 45 | { 46 | bpf_program__set_autoload(obj->progs.netdata_clone_exit, false); 47 | bpf_program__set_autoload(obj->progs.netdata_clone3_exit, false); 48 | bpf_program__set_autoload(obj->progs.netdata_fork_exit, false); 49 | bpf_program__set_autoload(obj->progs.netdata_vfork_exit, false); 50 | } 51 | 52 | static void ebpf_disable_trampoline(struct process_bpf *obj) 53 | { 54 | bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false); 55 | bpf_program__set_autoload(obj->progs.netdata_clone_fexit, false); 56 | bpf_program__set_autoload(obj->progs.netdata_clone3_fexit, false); 57 | } 58 | 59 | static void ebpf_set_trampoline_target(struct process_bpf *obj) 60 | { 61 | bpf_program__set_attach_target(obj->progs.netdata_release_task_fentry, 0, 62 | names[PROCESS_RELEASE_TASK_NAME]); 63 | 64 | bpf_program__set_attach_target(obj->progs.netdata_clone_fexit, 0, 65 | names[PROCESS_SYS_CLONE]); 66 | 67 | bpf_program__set_attach_target(obj->progs.netdata_clone3_fexit, 0, 68 | names[PROCESS_SYS_CLONE3]); 69 | } 70 | 71 | #if (MY_LINUX_VERSION_CODE <= KERNEL_VERSION(5,3,0)) 72 | // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8f6ccf6159aed1f04c6d179f61f6fb2691261e84 73 | static inline void ebpf_disable_clone3(struct process_bpf *obj) 74 | { 75 | bpf_program__set_autoload(obj->progs.netdata_clone3_exit, false); 76 | bpf_program__set_autoload(obj->progs.netdata_clone3_fexit, false); 77 | } 78 | #endif 79 | 80 | static inline int process_attach_kprobe_target(struct process_bpf *obj) 81 | { 82 | obj->links.netdata_release_task_probe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_probe, 83 | false, names[PROCESS_RELEASE_TASK_NAME]); 84 | int ret = libbpf_get_error(obj->links.netdata_release_task_probe); 
85 | if (ret) 86 | goto endakt; 87 | 88 | #if (MY_LINUX_VERSION_CODE <= KERNEL_VERSION(5,9,16)) 89 | obj->links.netdata_do_fork_probe = bpf_program__attach_kprobe(obj->progs.netdata_do_fork_probe, 90 | false, names[PROCESS_SYS_FORK]); 91 | ret = libbpf_get_error(obj->links.netdata_do_fork_probe); 92 | #else 93 | obj->links.netdata_kernel_clone_probe = bpf_program__attach_kprobe(obj->progs.netdata_kernel_clone_probe, 94 | false, names[PROCESS_KERNEL_CLONE]); 95 | ret = libbpf_get_error(obj->links.netdata_kernel_clone_probe); 96 | #endif 97 | endakt: 98 | return ret; 99 | } 100 | 101 | static inline int ebpf_load_and_attach(struct process_bpf *obj, int selector) 102 | { 103 | if (!selector) { // trampoline 104 | ebpf_disable_probes(obj); 105 | ebpf_disable_tracepoints(obj); 106 | 107 | ebpf_set_trampoline_target(obj); 108 | } else if (selector == NETDATA_MODE_PROBE) { // kprobe 109 | ebpf_disable_tracepoints(obj); 110 | ebpf_disable_trampoline(obj); 111 | 112 | #if (MY_LINUX_VERSION_CODE <= KERNEL_VERSION(5,9,16)) 113 | bpf_program__set_autoload(obj->progs.netdata_kernel_clone_probe, false); 114 | #else 115 | bpf_program__set_autoload(obj->progs.netdata_do_fork_probe, false); 116 | #endif 117 | } else { // tracepoint 118 | ebpf_disable_probes(obj); 119 | ebpf_disable_trampoline(obj); 120 | } 121 | 122 | #if (MY_LINUX_VERSION_CODE <= KERNEL_VERSION(5,3,0)) 123 | ebpf_disable_clone3(obj); 124 | #endif 125 | 126 | int ret = process_bpf__load(obj); 127 | if (ret) { 128 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 129 | return -1; 130 | } 131 | 132 | if (selector != NETDATA_MODE_PROBE) 133 | ret = process_bpf__attach(obj); 134 | else 135 | ret = process_attach_kprobe_target(obj); 136 | 137 | if (!ret) { 138 | fprintf(stdout, "Process loaded with success\n"); 139 | } 140 | 141 | return ret; 142 | } 143 | 144 | static pid_t ebpf_update_tables(int global, int apps) 145 | { 146 | pid_t pid = ebpf_fill_global(global); 147 | 148 | struct netdata_pid_stat_t stats = 
{ .pid = pid, .tgid = pid, .exit_call = 1, .release_call = 1, 149 | .create_process = 1, .create_thread = 1, .task_err = 1 }; 150 | 151 | uint32_t idx; 152 | for (idx = 0 ; idx < NETDATA_EBPF_CORE_MIN_STORE; idx++) { 153 | int ret = bpf_map_update_elem(apps, &idx, &stats, 0); 154 | if (ret) 155 | fprintf(stderr, "Cannot insert value to global table.\n"); 156 | } 157 | 158 | return pid; 159 | } 160 | 161 | static int process_read_apps_array(int fd, int ebpf_nprocs, uint32_t child) 162 | { 163 | struct netdata_pid_stat_t stored[ebpf_nprocs]; 164 | 165 | uint64_t counter = 0; 166 | int key, next_key; 167 | key = next_key = 0; 168 | while (!bpf_map_get_next_key(fd, &key, &next_key)) { 169 | if (!bpf_map_lookup_elem(fd, &key, stored)) { 170 | counter++; 171 | } 172 | memset(stored, 0, ebpf_nprocs*sizeof(struct netdata_pid_stat_t)); 173 | 174 | key = next_key; 175 | } 176 | 177 | if (counter) { 178 | fprintf(stdout, "Apps data stored with success. It collected %lu pids\n", counter); 179 | return 0; 180 | } 181 | 182 | return 2; 183 | } 184 | 185 | 186 | static int ebpf_process_tests(int selector, enum netdata_apps_level map_level) 187 | { 188 | struct process_bpf *obj = NULL; 189 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 190 | if (ebpf_nprocs < 0) 191 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 192 | 193 | obj = process_bpf__open(); 194 | if (!obj) { 195 | goto load_error; 196 | } 197 | 198 | int ret = ebpf_load_and_attach(obj, selector); 199 | if (ret && selector != NETDATA_MODE_PROBE) { 200 | process_bpf__destroy(obj); 201 | 202 | obj = process_bpf__open(); 203 | if (!obj) { 204 | goto load_error; 205 | } 206 | 207 | selector = NETDATA_MODE_PROBE; 208 | ret = ebpf_load_and_attach(obj, selector); 209 | } 210 | 211 | if (!ret) { 212 | int fd = bpf_map__fd(obj->maps.process_ctrl); 213 | ebpf_core_fill_ctrl(obj->maps.process_ctrl, map_level); 214 | 215 | fd = bpf_map__fd(obj->maps.tbl_total_stats); 216 | int fd2 = bpf_map__fd(obj->maps.tbl_pid_stats); 217 | 
pid_t my_pid = ebpf_update_tables(fd, fd2); 218 | // Wait data from more processes 219 | sleep(60); 220 | 221 | ret = ebpf_read_global_array(fd, ebpf_nprocs, NETDATA_GLOBAL_COUNTER); 222 | if (!ret) { 223 | ret = process_read_apps_array(fd2, ebpf_nprocs, (uint32_t)my_pid); 224 | if (ret) 225 | fprintf(stdout, "Empty apps table\n"); 226 | } else 227 | fprintf(stderr, "Cannot read global table\n"); 228 | } else { 229 | ret = 3; 230 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 231 | } 232 | 233 | process_bpf__destroy(obj); 234 | 235 | return ret; 236 | load_error: 237 | fprintf(stderr, "Cannot open or load BPF object\n"); 238 | return 2; 239 | } 240 | 241 | int main(int argc, char **argv) 242 | { 243 | static struct option long_options[] = { 244 | {"help", no_argument, 0, 0 }, 245 | {"probe", no_argument, 0, 0 }, 246 | {"tracepoint", no_argument, 0, 0 }, 247 | {"trampoline", no_argument, 0, 0 }, 248 | {"pid", required_argument, 0, 0 }, 249 | {0, no_argument, 0, 0} 250 | }; 251 | 252 | int selector = NETDATA_MODE_TRAMPOLINE; 253 | int option_index = 0; 254 | enum netdata_apps_level map_level = NETDATA_APPS_LEVEL_REAL_PARENT; 255 | while (1) { 256 | int c = getopt_long_only(argc, argv, "", long_options, &option_index); 257 | if (c == -1) 258 | break; 259 | 260 | switch (option_index) { 261 | case NETDATA_EBPF_CORE_IDX_HELP: { 262 | ebpf_core_print_help(argv[0], "process", 1, 1); 263 | exit(0); 264 | } 265 | case NETDATA_EBPF_CORE_IDX_PROBE: { 266 | selector = NETDATA_MODE_PROBE; 267 | break; 268 | } 269 | case NETDATA_EBPF_CORE_IDX_TRACEPOINT: { 270 | selector = NETDATA_MODE_TRACEPOINT; 271 | break; 272 | } 273 | case NETDATA_EBPF_CORE_IDX_TRAMPOLINE: { 274 | selector = NETDATA_MODE_TRAMPOLINE; 275 | break; 276 | } 277 | case NETDATA_EBPF_CORE_IDX_PID: { 278 | int user_input = (int)strtol(optarg, NULL, 10); 279 | map_level = ebpf_check_map_level(user_input); 280 | break; 281 | } 282 | default: { 283 | break; 284 | } 285 | } 286 | } 287 | 288 | // Adjust memory 289 
| int ret = netdata_ebf_memlock_limit(); 290 | if (ret) { 291 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 292 | return 1; 293 | } 294 | 295 | libbpf_set_print(netdata_libbpf_vfprintf); 296 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 297 | 298 | int stop_software = 0; 299 | while (stop_software < 2) { 300 | if (ebpf_process_tests(selector, map_level) && !stop_software) { 301 | selector = 1; 302 | stop_software++; 303 | } else 304 | stop_software = 2; 305 | } 306 | return 0; 307 | } 308 | 309 | -------------------------------------------------------------------------------- /src/rename_header.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then 4 | echo "Give kernel as parameter: kernel major version, kernel minor version, and function name" 5 | exit 1 6 | fi 7 | 8 | NAME="$3" 9 | 10 | parse_kernel_version() { 11 | R="${1}.${2}" 12 | 13 | read -r -a P <<< "$(echo "${R}" | tr '.' 
' ')" 14 | 15 | printf "%03d%03d" "${P[0]}" "${P[1]}" 16 | } 17 | 18 | select_kernel_version() { 19 | KVER=$(parse_kernel_version "${1}" "${2}") 20 | 21 | VER5_19_0="005019" 22 | 23 | if [ "${KVER}" -lt "${VER5_19_0}" ]; then 24 | KSELECTED="508"; 25 | else 26 | KSELECTED="519"; 27 | fi 28 | 29 | echo "${KSELECTED}" 30 | } 31 | 32 | if [ "socket.skel.h" != "${NAME}" ]; then 33 | exit 0; 34 | fi 35 | 36 | 37 | KNAME=$(select_kernel_version "${1}" "${2}") 38 | 39 | mv "../includes/$3" "../includes/socket_$KNAME.skel.h" 40 | -------------------------------------------------------------------------------- /src/shm.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_shm.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 16 | __type(key, __u32); 17 | __type(value, __u64); 18 | __uint(max_entries, NETDATA_SHM_END); 19 | } tbl_shm SEC(".maps"); 20 | 21 | struct { 22 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 23 | __type(key, __u32); 24 | __type(value, netdata_shm_t); 25 | __uint(max_entries, PID_MAX_DEFAULT); 26 | } tbl_pid_shm SEC(".maps"); 27 | 28 | struct { 29 | __uint(type, BPF_MAP_TYPE_ARRAY); 30 | __type(key, __u32); 31 | __type(value, __u64); 32 | __uint(max_entries, NETDATA_CONTROLLER_END); 33 | } shm_ctrl SEC(".maps"); 34 | 35 | /************************************************************************************ 36 | * 37 | * SHARED MEMORY (common) 38 | * 39 | ***********************************************************************************/ 40 | 41 | static __always_inline void netdata_update_stored_data(netdata_shm_t *data, __u32 selector) 42 | { 43 | // we are 
using if/else if instead switch to avoid warnings 44 | if (selector == NETDATA_KEY_SHMGET_CALL) 45 | libnetdata_update_u32(&data->get, 1); 46 | else if (selector == NETDATA_KEY_SHMAT_CALL) 47 | libnetdata_update_u32(&data->at, 1); 48 | else if (selector == NETDATA_KEY_SHMDT_CALL) 49 | libnetdata_update_u32(&data->dt, 1); 50 | else if (selector == NETDATA_KEY_SHMCTL_CALL) 51 | libnetdata_update_u32(&data->ctl, 1); 52 | } 53 | 54 | static __always_inline void netdata_set_structure_value(netdata_shm_t *data, __u32 selector) 55 | { 56 | // we are using if/else if instead switch to avoid warnings 57 | if (selector == NETDATA_KEY_SHMGET_CALL) 58 | data->get = 1; 59 | else if (selector == NETDATA_KEY_SHMAT_CALL) 60 | data->at = 1; 61 | else if (selector == NETDATA_KEY_SHMDT_CALL) 62 | data->dt = 1; 63 | else if (selector == NETDATA_KEY_SHMCTL_CALL) 64 | data->ctl = 1; 65 | } 66 | 67 | static __always_inline int netdata_update_apps(__u32 idx) 68 | { 69 | netdata_shm_t data = {}; 70 | 71 | __u32 key = 0; 72 | __u32 tgid = 0; 73 | netdata_shm_t *fill = netdata_get_pid_structure(&key, &tgid, &shm_ctrl, &tbl_pid_shm); 74 | if (fill) { 75 | netdata_update_stored_data(fill, idx); 76 | } else { 77 | data.ct = bpf_ktime_get_ns(); 78 | data.tgid = tgid; 79 | libnetdata_update_uid_gid(&data.uid, &data.gid); 80 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 81 | 82 | netdata_set_structure_value(&data, idx); 83 | bpf_map_update_elem(&tbl_pid_shm, &key, &data, BPF_ANY); 84 | 85 | libnetdata_update_global(&shm_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 86 | } 87 | 88 | return 0; 89 | } 90 | 91 | static __always_inline int netdata_global_apps_shm(__u32 idx) 92 | { 93 | libnetdata_update_global(&tbl_shm, idx, 1); 94 | 95 | // check if apps is enabled; if not, don't record apps data. 
96 | __u32 key = NETDATA_CONTROLLER_APPS_ENABLED; 97 | __u32 *apps = bpf_map_lookup_elem(&shm_ctrl, &key); 98 | if (apps) { 99 | if (*apps == 0) { 100 | return 0; 101 | } 102 | } 103 | 104 | return 1; 105 | } 106 | 107 | static __always_inline int netdata_ebpf_common_shmget() 108 | { 109 | int store_apps = netdata_global_apps_shm(NETDATA_KEY_SHMGET_CALL); 110 | if (!store_apps) 111 | return 0; 112 | 113 | return netdata_update_apps(NETDATA_KEY_SHMGET_CALL); 114 | } 115 | 116 | static __always_inline int netdata_ebpf_common_shmat() 117 | { 118 | int store_apps = netdata_global_apps_shm(NETDATA_KEY_SHMAT_CALL); 119 | if (!store_apps) 120 | return 0; 121 | 122 | return netdata_update_apps(NETDATA_KEY_SHMAT_CALL); 123 | } 124 | 125 | static __always_inline int netdata_ebpf_common_shmdt() 126 | { 127 | int store_apps = netdata_global_apps_shm(NETDATA_KEY_SHMDT_CALL); 128 | if (!store_apps) 129 | return 0; 130 | 131 | return netdata_update_apps(NETDATA_KEY_SHMDT_CALL); 132 | } 133 | 134 | static __always_inline int netdata_ebpf_common_shmctl() 135 | { 136 | int store_apps = netdata_global_apps_shm(NETDATA_KEY_SHMCTL_CALL); 137 | if (!store_apps) 138 | return 0; 139 | 140 | return netdata_update_apps(NETDATA_KEY_SHMCTL_CALL); 141 | } 142 | 143 | /************************************************************************************ 144 | * 145 | * SHARED MEMORY (tracepoint) 146 | * 147 | ***********************************************************************************/ 148 | 149 | SEC("tracepoint/syscalls/sys_enter_shmget") 150 | int netdata_syscall_shmget(struct trace_event_raw_sys_enter *arg) 151 | { 152 | return netdata_ebpf_common_shmget(); 153 | } 154 | 155 | SEC("tracepoint/syscalls/sys_enter_shmat") 156 | int netdata_syscall_shmat(struct trace_event_raw_sys_enter *arg) 157 | { 158 | return netdata_ebpf_common_shmat(); 159 | } 160 | 161 | SEC("tracepoint/syscalls/sys_enter_shmdt") 162 | int netdata_syscall_shmdt(struct trace_event_raw_sys_enter *arg) 163 | { 164 | 
return netdata_ebpf_common_shmdt(); 165 | } 166 | 167 | SEC("tracepoint/syscalls/sys_enter_shmctl") 168 | int netdata_syscall_shmctl(struct trace_event_raw_sys_enter *arg) 169 | { 170 | return netdata_ebpf_common_shmctl(); 171 | } 172 | 173 | /************************************************************************************ 174 | * 175 | * SHARED MEMORY (kprobe) 176 | * 177 | ***********************************************************************************/ 178 | 179 | SEC("kprobe/netdata_shmget_probe") 180 | int BPF_KPROBE(netdata_shmget_probe) 181 | { 182 | return netdata_ebpf_common_shmget(); 183 | } 184 | 185 | SEC("kprobe/netdata_shmat_probe") 186 | int BPF_KPROBE(netdata_shmat_probe) 187 | { 188 | return netdata_ebpf_common_shmat(); 189 | } 190 | 191 | SEC("kprobe/netdata_shmdt_probe") 192 | int BPF_KPROBE(netdata_shmdt_probe) 193 | { 194 | return netdata_ebpf_common_shmdt(); 195 | } 196 | 197 | SEC("kprobe/netdata_shmctl_probe") 198 | int BPF_KPROBE(netdata_shmctl_probe) 199 | { 200 | return netdata_ebpf_common_shmctl(); 201 | } 202 | 203 | /************************************************************************************ 204 | * 205 | * SHARED MEMORY (trampoline) 206 | * 207 | ***********************************************************************************/ 208 | 209 | SEC("fentry/netdata_shmget") 210 | int BPF_PROG(netdata_shmget_fentry) 211 | { 212 | return netdata_ebpf_common_shmget(); 213 | } 214 | 215 | SEC("fentry/netdata_shmat") 216 | int BPF_PROG(netdata_shmat_fentry) 217 | { 218 | return netdata_ebpf_common_shmat(); 219 | } 220 | 221 | SEC("fentry/netdata_shmdt") 222 | int BPF_PROG(netdata_shmdt_fentry) 223 | { 224 | return netdata_ebpf_common_shmdt(); 225 | } 226 | 227 | SEC("fentry/netdata_shmctl") 228 | int BPF_PROG(netdata_shmctl_fentry) 229 | { 230 | return netdata_ebpf_common_shmctl(); 231 | } 232 | 233 | char _license[] SEC("license") = "GPL"; 234 | 235 | 
-------------------------------------------------------------------------------- /src/shm.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | #include "netdata_defs.h" 15 | #include "netdata_tests.h" 16 | #include "netdata_core_common.h" 17 | #include "netdata_shm.h" 18 | 19 | #include "shm.skel.h" 20 | 21 | char *syscalls[] = { "__x64_sys_shmget", 22 | "__x64_sys_shmat", 23 | "__x64_sys_shmdt", 24 | "__x64_sys_shmctl" 25 | }; 26 | // This preprocessor is defined here, because it is not useful in kernel-colector 27 | #define NETDATA_SHM_RELEASE_TASK 4 28 | 29 | static void ebpf_disable_tracepoint(struct shm_bpf *obj) 30 | { 31 | bpf_program__set_autoload(obj->progs.netdata_syscall_shmget, false); 32 | bpf_program__set_autoload(obj->progs.netdata_syscall_shmat, false); 33 | bpf_program__set_autoload(obj->progs.netdata_syscall_shmdt, false); 34 | bpf_program__set_autoload(obj->progs.netdata_syscall_shmctl, false); 35 | } 36 | 37 | static void ebpf_disable_kprobe(struct shm_bpf *obj) 38 | { 39 | bpf_program__set_autoload(obj->progs.netdata_shmget_probe, false); 40 | bpf_program__set_autoload(obj->progs.netdata_shmat_probe, false); 41 | bpf_program__set_autoload(obj->progs.netdata_shmdt_probe, false); 42 | bpf_program__set_autoload(obj->progs.netdata_shmctl_probe, false); 43 | } 44 | 45 | static void ebpf_disable_trampoline(struct shm_bpf *obj) 46 | { 47 | bpf_program__set_autoload(obj->progs.netdata_shmget_fentry, false); 48 | bpf_program__set_autoload(obj->progs.netdata_shmat_fentry, false); 49 | bpf_program__set_autoload(obj->progs.netdata_shmdt_fentry, false); 50 | bpf_program__set_autoload(obj->progs.netdata_shmctl_fentry, false); 51 | } 52 | 53 | static int ebpf_attach_kprobe(struct shm_bpf *obj) 54 | { 55 | 
obj->links.netdata_shmget_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmget_probe, 56 | false, syscalls[NETDATA_KEY_SHMGET_CALL]); 57 | int ret = libbpf_get_error(obj->links.netdata_shmget_probe); 58 | if (ret) 59 | return -1; 60 | 61 | obj->links.netdata_shmat_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmat_probe, 62 | false, syscalls[NETDATA_KEY_SHMAT_CALL]); 63 | ret = libbpf_get_error(obj->links.netdata_shmat_probe); 64 | if (ret) 65 | return -1; 66 | 67 | obj->links.netdata_shmdt_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmdt_probe, 68 | false, syscalls[NETDATA_KEY_SHMDT_CALL]); 69 | ret = libbpf_get_error(obj->links.netdata_shmdt_probe); 70 | if (ret) 71 | return -1; 72 | 73 | obj->links.netdata_shmctl_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmctl_probe, 74 | false, syscalls[NETDATA_KEY_SHMCTL_CALL]); 75 | ret = libbpf_get_error(obj->links.netdata_shmctl_probe); 76 | if (ret) 77 | return -1; 78 | 79 | return 0; 80 | } 81 | 82 | static void ebpf_set_trampoline_target(struct shm_bpf *obj) 83 | { 84 | bpf_program__set_attach_target(obj->progs.netdata_shmget_fentry, 0, 85 | syscalls[NETDATA_KEY_SHMGET_CALL]); 86 | 87 | bpf_program__set_attach_target(obj->progs.netdata_shmat_fentry, 0, 88 | syscalls[NETDATA_KEY_SHMAT_CALL]); 89 | 90 | bpf_program__set_attach_target(obj->progs.netdata_shmdt_fentry, 0, 91 | syscalls[NETDATA_KEY_SHMDT_CALL]); 92 | 93 | bpf_program__set_attach_target(obj->progs.netdata_shmctl_fentry, 0, 94 | syscalls[NETDATA_KEY_SHMCTL_CALL]); 95 | } 96 | 97 | static inline int ebpf_load_and_attach(struct shm_bpf *obj, int selector) 98 | { 99 | if (!selector) { // trampoline 100 | ebpf_disable_tracepoint(obj); 101 | ebpf_disable_kprobe(obj); 102 | 103 | ebpf_set_trampoline_target(obj); 104 | } else if (selector == 1) { // kprobe 105 | ebpf_disable_tracepoint(obj); 106 | ebpf_disable_trampoline(obj); 107 | } else { // tracepoint 108 | ebpf_disable_kprobe(obj); 109 | ebpf_disable_trampoline(obj); 110 | 
} 111 | 112 | int ret = shm_bpf__load(obj); 113 | if (ret) { 114 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 115 | return -1; 116 | } 117 | 118 | if (selector != 1) // Not kprobe 119 | ret = shm_bpf__attach(obj); 120 | else 121 | ret = ebpf_attach_kprobe(obj); 122 | 123 | if (!ret) { 124 | char *method = ebpf_select_type(selector); 125 | fprintf(stdout, "%s loaded with success\n", method); 126 | } 127 | 128 | return ret; 129 | } 130 | 131 | /* This is kept to show how to use the syscalls 132 | int call_syscalls() 133 | { 134 | #define SHMSZ 27 135 | // Copied and adapt from https://github.com/netdata/netdata/pull/11560#issuecomment-927613811 136 | key_t name = 5678; 137 | 138 | int shmid = shmget(name, SHMSZ, IPC_CREAT | 0666); 139 | if (shmid < 0) 140 | return 2; 141 | 142 | sleep(1); 143 | 144 | char *shm = shmat(shmid, NULL, 0); 145 | if (shm == (char *) -1) { 146 | perror("shmat"); 147 | return 2; 148 | } 149 | 150 | char c, *s = shm; 151 | for (c = 'a'; c <= 'z'; c++) 152 | *s++ = c; 153 | *s = 0; 154 | 155 | sleep(1); 156 | 157 | struct shmid_ds dsbuf; 158 | if ((shmctl(shmid, IPC_STAT, &dsbuf)) == -1) { 159 | perror("shmctl"); 160 | return 2; 161 | } 162 | 163 | if ((shmdt(shm)) == -1) { 164 | perror("shmdt"); 165 | return 2; 166 | } 167 | 168 | return 0; 169 | } 170 | */ 171 | 172 | void shm_fill_tables(struct shm_bpf *obj) 173 | { 174 | int fd = bpf_map__fd(obj->maps.tbl_shm); 175 | uint32_t key; 176 | uint64_t global_data = 64; 177 | for (key = 0; key < NETDATA_SHM_END; key++) { 178 | if (bpf_map_update_elem(fd, &key, &global_data, BPF_ANY)) 179 | fprintf(stderr, "Cannot insert key %u\n", key); 180 | } 181 | 182 | fd = bpf_map__fd(obj->maps.tbl_pid_shm); 183 | netdata_shm_t apps_data = { .get = 1, .at = 1, .dt = 1, .ctl = 1}; 184 | for (key = 0; key < NETDATA_EBPF_CORE_MIN_STORE; key++) { 185 | if (bpf_map_update_elem(fd, &key, &apps_data, BPF_ANY)) 186 | fprintf(stderr, "Cannot insert key %u\n", key); 187 | } 188 | } 189 | 190 | static int 
shm_read_apps_array(int fd, int ebpf_nprocs) 191 | { 192 | netdata_shm_t stored[ebpf_nprocs]; 193 | 194 | int key, next_key; 195 | key = next_key = 0; 196 | uint64_t counter = 0; 197 | while (!bpf_map_get_next_key(fd, &key, &next_key)) { 198 | if (!bpf_map_lookup_elem(fd, &key, stored)) { 199 | counter++; 200 | } 201 | memset(stored, 0, ebpf_nprocs * sizeof(netdata_shm_t)); 202 | 203 | key = next_key; 204 | } 205 | 206 | if (counter) { 207 | fprintf(stdout, "Apps data stored with success. It collected %lu pids\n", counter); 208 | return 0; 209 | } 210 | 211 | return 2; 212 | } 213 | 214 | int ebpf_shm_tests(struct btf *bf, int selector, enum netdata_apps_level map_level) 215 | { 216 | struct shm_bpf *obj = NULL; 217 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 218 | if (ebpf_nprocs < 0) 219 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 220 | 221 | if (bf) 222 | selector = ebpf_find_functions(bf, selector, syscalls, NETDATA_SHM_END); 223 | 224 | obj = shm_bpf__open(); 225 | if (!obj) { 226 | goto load_error; 227 | } 228 | 229 | int ret = ebpf_load_and_attach(obj, selector); 230 | if (ret && selector != NETDATA_MODE_PROBE) { 231 | shm_bpf__destroy(obj); 232 | 233 | obj = shm_bpf__open(); 234 | if (!obj) { 235 | goto load_error; 236 | } 237 | 238 | selector = NETDATA_MODE_PROBE; 239 | ret = ebpf_load_and_attach(obj, selector); 240 | } 241 | 242 | if (!ret) { 243 | int fd = bpf_map__fd(obj->maps.shm_ctrl); 244 | ebpf_core_fill_ctrl(obj->maps.shm_ctrl, map_level); 245 | 246 | //ret = call_syscalls(); 247 | shm_fill_tables(obj); 248 | sleep(60); 249 | fd = bpf_map__fd(obj->maps.tbl_shm); 250 | ret = ebpf_read_global_array(fd, ebpf_nprocs, NETDATA_SHM_END); 251 | if (!ret) { 252 | fd = bpf_map__fd(obj->maps.tbl_pid_shm); 253 | ret = shm_read_apps_array(fd, ebpf_nprocs); 254 | } 255 | } else { 256 | ret = 3; 257 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 258 | } 259 | 260 | shm_bpf__destroy(obj); 261 | 262 | return ret; 263 | load_error: 264 | 
fprintf(stderr, "Cannot open or load BPF object\n"); 265 | return 2; 266 | } 267 | 268 | int main(int argc, char **argv) 269 | { 270 | static struct option long_options[] = { 271 | {"help", no_argument, 0, 'h' }, 272 | {"probe", no_argument, 0, 'p' }, 273 | {"tracepoint", no_argument, 0, 'r' }, 274 | {"trampoline", no_argument, 0, 't' }, 275 | {"pid", required_argument, 0, 0 }, 276 | {0, 0, 0, 0} 277 | }; 278 | 279 | // use trampoline as default 280 | int selector = NETDATA_MODE_TRAMPOLINE; 281 | int option_index = 0; 282 | enum netdata_apps_level map_level = NETDATA_APPS_LEVEL_REAL_PARENT; 283 | while (1) { 284 | int c = getopt_long(argc, argv, "", long_options, &option_index); 285 | if (c == -1) 286 | break; 287 | 288 | switch (option_index) { 289 | case NETDATA_EBPF_CORE_IDX_HELP: { 290 | ebpf_core_print_help(argv[0], "shared_memory", 1, 1); 291 | exit(0); 292 | } 293 | case NETDATA_EBPF_CORE_IDX_PROBE: { 294 | selector = NETDATA_MODE_PROBE; 295 | break; 296 | } 297 | case NETDATA_EBPF_CORE_IDX_TRACEPOINT: { 298 | selector = NETDATA_MODE_TRACEPOINT; 299 | break; 300 | } 301 | case NETDATA_EBPF_CORE_IDX_TRAMPOLINE: { 302 | selector = NETDATA_MODE_TRAMPOLINE; 303 | break; 304 | } 305 | case NETDATA_EBPF_CORE_IDX_PID: { 306 | int user_input = (int)strtol(optarg, NULL, 10); 307 | map_level = ebpf_check_map_level(user_input); 308 | break; 309 | } 310 | default: { 311 | break; 312 | } 313 | } 314 | } 315 | 316 | // Adjust memory 317 | int ret = netdata_ebf_memlock_limit(); 318 | if (ret) { 319 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 320 | return 1; 321 | } 322 | 323 | libbpf_set_print(netdata_libbpf_vfprintf); 324 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 325 | 326 | struct btf *bf = NULL; 327 | if (!selector) { 328 | bf = netdata_parse_btf_file((const char *)NETDATA_BTF_FILE); 329 | } 330 | 331 | ret = ebpf_shm_tests(bf, selector, map_level); 332 | 333 | if (bf) 334 | btf__free(bf); 335 | 336 | return 0; 337 | } 338 | 339 | 
-------------------------------------------------------------------------------- /src/softirq.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_softirq.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 16 | __type(key, __u32); 17 | __type(value, softirq_val_t); 18 | __uint(max_entries, NETDATA_SOFTIRQ_MAX_IRQS); 19 | } tbl_softirq SEC(".maps"); 20 | 21 | /*********************************************************************************** 22 | * 23 | * SOFTIRQ SECTION(tracepoint) 24 | * 25 | ***********************************************************************************/ 26 | 27 | SEC("tracepoint/irq/softirq_entry") 28 | int netdata_softirq_entry(struct netdata_softirq_entry *ptr) 29 | { 30 | softirq_val_t *valp, val = {}; 31 | u32 vec = ptr->vec; 32 | 33 | // out-of-range index. 34 | if (vec > NETDATA_SOFTIRQ_MAX_IRQS-1) { 35 | return 0; 36 | } 37 | 38 | valp = bpf_map_lookup_elem(&tbl_softirq, &vec); 39 | if (valp) { 40 | valp->ts = bpf_ktime_get_ns(); 41 | } else { 42 | val.latency = 0; 43 | val.ts = bpf_ktime_get_ns(); 44 | bpf_map_update_elem(&tbl_softirq, &vec, &val, BPF_ANY); 45 | } 46 | 47 | return 0; 48 | } 49 | 50 | SEC("tracepoint/irq/softirq_exit") 51 | int netdata_softirq_exit(struct netdata_softirq_exit *ptr) 52 | { 53 | softirq_val_t *valp; 54 | u32 vec = ptr->vec; 55 | 56 | // out-of-range index. 57 | if (vec > NETDATA_SOFTIRQ_MAX_IRQS-1) { 58 | return 0; 59 | } 60 | 61 | valp = bpf_map_lookup_elem(&tbl_softirq, &vec); 62 | if (!valp) { 63 | return 0; 64 | } 65 | 66 | // get time diff and convert to microseconds. 
67 | u64 latency = (bpf_ktime_get_ns() - valp->ts) / 1000; 68 | libnetdata_update_u64(&valp->latency, latency); 69 | 70 | return 0; 71 | } 72 | 73 | char _license[] SEC("license") = "GPL"; 74 | 75 | -------------------------------------------------------------------------------- /src/softirq.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include "netdata_defs.h" 12 | #include "netdata_tests.h" 13 | #include "netdata_core_common.h" 14 | 15 | #include "softirq.skel.h" 16 | 17 | // Copied and redefined from ../include/netdata_softirq.h 18 | typedef struct softirq_val { 19 | // incremental counter storing the total latency so far. 20 | uint64_t latency; 21 | 22 | // temporary timestamp stored at the entry handler, to be diff'd with a 23 | // timestamp at the exit handler, to get the latency to add to the 24 | // `latency` field. 
25 | uint64_t ts; 26 | } softirq_val_t; 27 | 28 | static inline int ebpf_load_and_attach(struct softirq_bpf *obj) 29 | { 30 | int ret = softirq_bpf__load(obj); 31 | if (ret) { 32 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 33 | return -1; 34 | } 35 | 36 | ret = softirq_bpf__attach(obj); 37 | if (!ret) { 38 | fprintf(stdout, "Softirq loaded with success\n"); 39 | } 40 | 41 | return ret; 42 | } 43 | 44 | static void ebpf_update_table(int global) 45 | { 46 | uint32_t idx = 0; 47 | softirq_val_t value = { .ts = 1, .latency = 1 }; 48 | int ret = bpf_map_update_elem(global, &idx, &value, 0); 49 | if (ret) 50 | fprintf(stderr, "Cannot insert value to global table."); 51 | } 52 | 53 | static int softirq_read_array(int fd, int ebpf_nprocs) 54 | { 55 | softirq_val_t stored[ebpf_nprocs]; 56 | 57 | uint64_t counter = 0; 58 | int idx = 0; 59 | if (!bpf_map_lookup_elem(fd, &idx, stored)) { 60 | int j; 61 | for (j = 0; j < ebpf_nprocs; j++) { 62 | counter += stored[j].ts + stored[j].latency; 63 | } 64 | } 65 | 66 | if (counter) { 67 | fprintf(stdout, "Data stored with success\n"); 68 | return 0; 69 | } 70 | 71 | return 2; 72 | } 73 | 74 | static int ebpf_softirq_tests() 75 | { 76 | struct softirq_bpf *obj = NULL; 77 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 78 | if (ebpf_nprocs < 0) 79 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 80 | 81 | obj = softirq_bpf__open(); 82 | if (!obj) { 83 | fprintf(stderr, "Cannot open or load BPF object\n"); 84 | 85 | return 2; 86 | } 87 | 88 | int ret = ebpf_load_and_attach(obj); 89 | if (!ret) { 90 | int fd = bpf_map__fd(obj->maps.tbl_softirq); 91 | ebpf_update_table(fd); 92 | 93 | ret = softirq_read_array(fd, ebpf_nprocs); 94 | if (ret) 95 | fprintf(stderr, "Cannot read global table\n"); 96 | } else { 97 | ret = 3; 98 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 99 | } 100 | 101 | softirq_bpf__destroy(obj); 102 | 103 | return ret; 104 | } 105 | 106 | int main(int argc, char **argv) 107 | { 108 | static struct 
option long_options[] = { 109 | {"help", no_argument, 0, 'h' }, 110 | {0, 0, 0, 0} 111 | }; 112 | 113 | int option_index = 0; 114 | while (1) { 115 | int c = getopt_long(argc, argv, "", long_options, &option_index); 116 | if (c == -1) 117 | break; 118 | 119 | switch (c) { 120 | case 'h': { 121 | ebpf_tracepoint_help("softirq"); 122 | exit(0); 123 | } 124 | default: { 125 | break; 126 | } 127 | } 128 | } 129 | 130 | int ret = netdata_ebf_memlock_limit(); 131 | if (ret) { 132 | fprintf(stderr, "Cannot increase memory: error = %d\n", ret); 133 | return 1; 134 | } 135 | 136 | libbpf_set_print(netdata_libbpf_vfprintf); 137 | libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 138 | 139 | return ebpf_softirq_tests(); 140 | } 141 | 142 | -------------------------------------------------------------------------------- /src/swap.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux_508.h" 2 | #include "bpf_tracing.h" 3 | #include "bpf_helpers.h" 4 | 5 | #include "netdata_core.h" 6 | #include "netdata_swap.h" 7 | 8 | /************************************************************************************ 9 | * 10 | * MAPS 11 | * 12 | ***********************************************************************************/ 13 | 14 | struct { 15 | __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 16 | __type(key, __u32); 17 | __type(value, __u64); 18 | __uint(max_entries, NETDATA_SWAP_END); 19 | } tbl_swap SEC(".maps"); 20 | 21 | struct { 22 | __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 23 | __type(key, __u32); 24 | __type(value, netdata_swap_access_t); 25 | __uint(max_entries, PID_MAX_DEFAULT); 26 | } tbl_pid_swap SEC(".maps"); 27 | 28 | struct { 29 | __uint(type, BPF_MAP_TYPE_ARRAY); 30 | __type(key, __u32); 31 | __type(value, __u64); 32 | __uint(max_entries, NETDATA_CONTROLLER_END); 33 | } swap_ctrl SEC(".maps"); 34 | 35 | /*********************************************************************************** 36 | * 37 | * SWAP COMMON 38 | * 39 | 
***********************************************************************************/ 40 | 41 | static __always_inline int netdata_swap_not_update_apps() 42 | { 43 | __u32 key = NETDATA_CONTROLLER_APPS_ENABLED; 44 | __u32 *apps = bpf_map_lookup_elem(&swap_ctrl ,&key); 45 | if (apps && *apps) 46 | return 0; 47 | 48 | return 1; 49 | } 50 | 51 | static __always_inline int common_readpage() 52 | { 53 | netdata_swap_access_t data = {}; 54 | 55 | libnetdata_update_global(&tbl_swap, NETDATA_KEY_SWAP_READPAGE_CALL, 1); 56 | 57 | __u32 key = 0; 58 | __u32 tgid = 0; 59 | if (netdata_swap_not_update_apps()) 60 | return 0; 61 | 62 | netdata_swap_access_t *fill = netdata_get_pid_structure(&key, &tgid, &swap_ctrl, &tbl_pid_swap); 63 | if (fill) { 64 | libnetdata_update_u32(&fill->read, 1); 65 | } else { 66 | data.ct = bpf_ktime_get_ns(); 67 | data.tgid = tgid; 68 | libnetdata_update_uid_gid(&data.uid, &data.gid); 69 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 70 | data.read = 1; 71 | 72 | bpf_map_update_elem(&tbl_pid_swap, &key, &data, BPF_ANY); 73 | 74 | libnetdata_update_global(&swap_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 75 | } 76 | 77 | return 0; 78 | } 79 | 80 | static __always_inline int common_writepage() 81 | { 82 | netdata_swap_access_t data = {}; 83 | 84 | libnetdata_update_global(&tbl_swap, NETDATA_KEY_SWAP_WRITEPAGE_CALL, 1); 85 | 86 | __u32 key = 0; 87 | __u32 tgid = 0; 88 | if (netdata_swap_not_update_apps()) 89 | return 0; 90 | 91 | netdata_swap_access_t *fill = netdata_get_pid_structure(&key, &tgid, &swap_ctrl, &tbl_pid_swap); 92 | if (fill) { 93 | libnetdata_update_u32(&fill->write, 1); 94 | } else { 95 | data.ct = bpf_ktime_get_ns(); 96 | data.tgid = tgid; 97 | libnetdata_update_uid_gid(&data.uid, &data.gid); 98 | bpf_get_current_comm(&data.name, TASK_COMM_LEN); 99 | data.write = 1; 100 | 101 | bpf_map_update_elem(&tbl_pid_swap, &key, &data, BPF_ANY); 102 | 103 | libnetdata_update_global(&swap_ctrl, NETDATA_CONTROLLER_PID_TABLE_ADD, 1); 104 | } 105 | 
106 | return 0; 107 | } 108 | 109 | /*********************************************************************************** 110 | * 111 | * SWAP SECTION(kprobe) 112 | * 113 | ***********************************************************************************/ 114 | 115 | SEC("kprobe/swap_read_folio") 116 | int BPF_KPROBE(netdata_swap_read_folio_probe) 117 | { 118 | return common_readpage(); 119 | } 120 | 121 | SEC("kprobe/swap_readpage") 122 | int BPF_KPROBE(netdata_swap_readpage_probe) 123 | { 124 | return common_readpage(); 125 | } 126 | 127 | SEC("kprobe/swap_writepage") 128 | int BPF_KPROBE(netdata_swap_writepage_probe) 129 | { 130 | return common_writepage(); 131 | } 132 | 133 | /*********************************************************************************** 134 | * 135 | * SWAP SECTION(trampoline) 136 | * 137 | ***********************************************************************************/ 138 | 139 | SEC("fentry/swap_read_folio") 140 | int BPF_PROG(netdata_swap_read_folio_fentry) 141 | { 142 | return common_readpage(); 143 | } 144 | 145 | SEC("fentry/swap_readpage") 146 | int BPF_PROG(netdata_swap_readpage_fentry) 147 | { 148 | return common_readpage(); 149 | } 150 | 151 | SEC("fentry/swap_writepage") 152 | int BPF_PROG(netdata_swap_writepage_fentry) 153 | { 154 | return common_writepage(); 155 | } 156 | 157 | char _license[] SEC("license") = "GPL"; 158 | 159 | -------------------------------------------------------------------------------- /src/swap.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #define _GNU_SOURCE /* See feature_test_macros(7) */ 7 | #define __USE_GNU 8 | #include 9 | #include 10 | 11 | #include "netdata_defs.h" 12 | #include "netdata_tests.h" 13 | #include "netdata_core_common.h" 14 | #include "netdata_swap.h" 15 | 16 | #include "swap.skel.h" 17 | 18 | // Alma Linux modified internal name, this structure was brought for it. 
19 | static ebpf_specify_name_t swap_names[] = { {.program_name = "netdata_swap_read_folio_probe", 20 | .function_to_attach = "swap_read_folio", 21 | .length = 15, 22 | .optional = NULL, 23 | .retprobe = 0}, 24 | {.program_name = "netdata_swap_readpage_probe", 25 | .function_to_attach = "swap_readpage", 26 | .length = 13, 27 | .optional = NULL, 28 | .retprobe = 0}, 29 | {.program_name = NULL}}; 30 | 31 | 32 | char *function_list[] = { NULL, // Filled after to discover available functions 33 | "swap_writepage" 34 | }; 35 | // This preprocessor is defined here, because it is not useful in kernel-colector 36 | #define NETDATA_SWAP_RELEASE_TASK 2 37 | 38 | static void netdata_ebpf_disable_probe(struct swap_bpf *obj) 39 | { 40 | bpf_program__set_autoload(obj->progs.netdata_swap_readpage_probe, false); 41 | bpf_program__set_autoload(obj->progs.netdata_swap_read_folio_probe, false); 42 | bpf_program__set_autoload(obj->progs.netdata_swap_writepage_probe, false); 43 | } 44 | 45 | static inline void netdata_ebpf_disable_specific_probe(struct swap_bpf *obj) 46 | { 47 | if (swap_names[0].optional) { 48 | bpf_program__set_autoload(obj->progs.netdata_swap_readpage_probe, false); 49 | } else { 50 | bpf_program__set_autoload(obj->progs.netdata_swap_read_folio_probe, false); 51 | } 52 | } 53 | 54 | static void netdata_ebpf_disable_trampoline(struct swap_bpf *obj) 55 | { 56 | bpf_program__set_autoload(obj->progs.netdata_swap_readpage_fentry, false); 57 | bpf_program__set_autoload(obj->progs.netdata_swap_read_folio_fentry, false); 58 | bpf_program__set_autoload(obj->progs.netdata_swap_writepage_fentry, false); 59 | } 60 | 61 | static inline void netdata_ebpf_disable_specific_trampoline(struct swap_bpf *obj) 62 | { 63 | if (swap_names[0].optional) { 64 | bpf_program__set_autoload(obj->progs.netdata_swap_readpage_fentry, false); 65 | } else { 66 | bpf_program__set_autoload(obj->progs.netdata_swap_read_folio_fentry, false); 67 | } 68 | } 69 | 70 | static void 
netdata_set_trampoline_target(struct swap_bpf *obj) 71 | { 72 | bpf_program__set_attach_target(obj->progs.netdata_swap_readpage_fentry, 0, 73 | function_list[NETDATA_KEY_SWAP_READPAGE_CALL]); 74 | 75 | bpf_program__set_attach_target(obj->progs.netdata_swap_writepage_fentry, 0, 76 | function_list[NETDATA_KEY_SWAP_WRITEPAGE_CALL]); 77 | } 78 | 79 | static int attach_kprobe(struct swap_bpf *obj) 80 | { 81 | int ret; 82 | if (swap_names[0].optional) { 83 | obj->links.netdata_swap_read_folio_probe = bpf_program__attach_kprobe(obj->progs.netdata_swap_read_folio_probe, 84 | false, function_list[NETDATA_KEY_SWAP_READPAGE_CALL]); 85 | ret = libbpf_get_error(obj->links.netdata_swap_read_folio_probe); 86 | } else { 87 | obj->links.netdata_swap_readpage_probe = bpf_program__attach_kprobe(obj->progs.netdata_swap_readpage_probe, 88 | false, function_list[NETDATA_KEY_SWAP_READPAGE_CALL]); 89 | ret = libbpf_get_error(obj->links.netdata_swap_readpage_probe); 90 | } 91 | 92 | if (ret) 93 | return -1; 94 | 95 | obj->links.netdata_swap_writepage_probe = bpf_program__attach_kprobe(obj->progs.netdata_swap_writepage_probe, 96 | false, function_list[NETDATA_KEY_SWAP_WRITEPAGE_CALL]); 97 | ret = libbpf_get_error(obj->links.netdata_swap_writepage_probe); 98 | if (ret) 99 | return -1; 100 | 101 | return 0; 102 | } 103 | 104 | static int ebpf_load_and_attach(struct swap_bpf *obj, int selector) 105 | { 106 | if (!selector) { //trampoline 107 | netdata_ebpf_disable_probe(obj); 108 | netdata_ebpf_disable_specific_trampoline(obj); 109 | 110 | netdata_set_trampoline_target(obj); 111 | } else if (selector) { // probe 112 | netdata_ebpf_disable_trampoline(obj); 113 | netdata_ebpf_disable_specific_probe(obj); 114 | } 115 | 116 | int ret = swap_bpf__load(obj); 117 | if (ret) { 118 | fprintf(stderr, "failed to load BPF object: %d\n", ret); 119 | return -1; 120 | } 121 | 122 | if (selector) // attach kprobe 123 | ret = attach_kprobe(obj); 124 | else { 125 | ret = swap_bpf__attach(obj); 126 | } 127 | 128 
| if (!ret) { 129 | fprintf(stdout, "%s loaded with success\n", (!selector) ? "trampoline" : "probe"); 130 | } 131 | 132 | return ret; 133 | } 134 | 135 | static void ebpf_fill_tables(int global, int apps) 136 | { 137 | (void)ebpf_fill_global(global); 138 | 139 | netdata_swap_access_t swap_data = { .read = 1, .write = 1 }; 140 | 141 | uint32_t idx; 142 | for (idx = 0; idx < NETDATA_EBPF_CORE_MIN_STORE; idx++) { 143 | int ret = bpf_map_update_elem(apps, &idx, &swap_data, 0); 144 | if (ret) 145 | fprintf(stderr, "Cannot insert value to apps table."); 146 | } 147 | } 148 | 149 | static int swap_read_apps_array(int fd, int ebpf_nprocs) 150 | { 151 | netdata_swap_access_t stored[ebpf_nprocs]; 152 | 153 | int key, next_key; 154 | key = next_key = 0; 155 | uint64_t counter = 0; 156 | while (!bpf_map_get_next_key(fd, &key, &next_key)) { 157 | if (!bpf_map_lookup_elem(fd, &key, stored)) { 158 | counter++; 159 | } 160 | memset(stored, 0, ebpf_nprocs * sizeof(netdata_swap_access_t)); 161 | 162 | key = next_key; 163 | } 164 | 165 | if (counter) { 166 | fprintf(stdout, "Apps data stored with success. 
It collected %lu pids\n", counter); 167 | return 0; 168 | } 169 | 170 | return 2; 171 | } 172 | 173 | int ebpf_load_swap(int selector, enum netdata_apps_level map_level) 174 | { 175 | int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN); 176 | if (ebpf_nprocs < 0) 177 | ebpf_nprocs = NETDATA_CORE_PROCESS_NUMBER; 178 | 179 | struct swap_bpf *obj = NULL; 180 | 181 | obj = swap_bpf__open(); 182 | if (!obj) { 183 | goto load_error; 184 | } 185 | 186 | int ret = ebpf_load_and_attach(obj, selector); 187 | if (ret && selector != NETDATA_MODE_PROBE) { 188 | swap_bpf__destroy(obj); 189 | 190 | obj = swap_bpf__open(); 191 | if (!obj) { 192 | goto load_error; 193 | } 194 | 195 | selector = NETDATA_MODE_PROBE; 196 | ret = ebpf_load_and_attach(obj, selector); 197 | } 198 | 199 | 200 | if (!ret) { 201 | int fd = bpf_map__fd(obj->maps.swap_ctrl); 202 | ebpf_core_fill_ctrl(obj->maps.swap_ctrl, map_level); 203 | 204 | fd = bpf_map__fd(obj->maps.tbl_swap); 205 | int fd2 = bpf_map__fd(obj->maps.tbl_pid_swap); 206 | ebpf_fill_tables(fd, fd2); 207 | sleep(60); 208 | ret = ebpf_read_global_array(fd, ebpf_nprocs, NETDATA_SWAP_END); 209 | if (!ret) { 210 | ret = swap_read_apps_array(fd2, ebpf_nprocs); 211 | if (ret) 212 | fprintf(stdout, "Empty apps table\n"); 213 | } else 214 | fprintf(stderr, "Cannot read global table\n"); 215 | } else { 216 | ret = 3; 217 | fprintf(stderr ,"%s", NETDATA_CORE_DEFAULT_ERROR); 218 | } 219 | 220 | swap_bpf__destroy(obj); 221 | 222 | return ret; 223 | load_error: 224 | fprintf(stderr, "Cannot open or load BPF object\n"); 225 | return 2; 226 | } 227 | 228 | static inline void fill_swap_fcnt() 229 | { 230 | ebpf_update_names(swap_names); 231 | int i; 232 | for (i = 0; swap_names[i].program_name ; i++) { 233 | if (swap_names[i].optional) { 234 | function_list[NETDATA_KEY_SWAP_READPAGE_CALL] = swap_names[i].optional; 235 | break; 236 | } 237 | } 238 | } 239 | 240 | int main(int argc, char **argv) 241 | { 242 | static struct option long_options[] = { 243 | 
{"help", no_argument, 0, 0 },
244 |         {"probe", no_argument, 0, 0 },
245 |         {"tracepoint", no_argument, 0, 0 },
246 |         {"trampoline", no_argument, 0, 0 },
247 |         {"pid", required_argument, 0, 0 },
248 |         {0, 0, 0, 0}
249 |     };
250 | 
251 |     int selector = NETDATA_MODE_TRAMPOLINE;
252 |     int option_index = 0;
253 |     enum netdata_apps_level map_level = NETDATA_APPS_LEVEL_REAL_PARENT;
254 |     while (1) {
255 |         int c = getopt_long_only(argc, argv, "", long_options, &option_index);
256 |         if (c == -1)
257 |             break;
258 | 
// NOTE(review): dispatches on option_index without checking c == 0 (i.e. that
// a long option actually matched); on unrecognized input getopt does not set
// option_index — confirm this matches the other testers in this repo.
259 |         switch (option_index) {
260 |             case NETDATA_EBPF_CORE_IDX_HELP: {
261 |                 ebpf_core_print_help(argv[0], "swap", 1, 1);
262 |                 exit(0);
263 |             }
264 |             case NETDATA_EBPF_CORE_IDX_PROBE: {
265 |                 selector = NETDATA_MODE_PROBE;
266 |                 break;
267 |             }
268 |             case NETDATA_EBPF_CORE_IDX_TRACEPOINT: {
// swap has no tracepoint programs, so --tracepoint degrades to kprobe mode.
269 |                 selector = NETDATA_MODE_PROBE;
270 |                 fprintf(stdout, "This specific software does not have tracepoint, using kprobe instead\n");
271 |                 break;
272 |             }
273 |             case NETDATA_EBPF_CORE_IDX_TRAMPOLINE: {
274 |                 selector = NETDATA_MODE_TRAMPOLINE;
275 |                 break;
276 |             }
277 |             case NETDATA_EBPF_CORE_IDX_PID: {
278 |                 int user_input = (int)strtol(optarg, NULL, 10);
279 |                 map_level = ebpf_check_map_level(user_input);
280 |                 break;
281 |             }
282 |             default: {
283 |                 break;
284 |             }
285 |         }
286 |     }
287 | 
288 |     // Adjust memory
289 |     int ret = netdata_ebf_memlock_limit();
290 |     if (ret) {
291 |         fprintf(stderr, "Cannot increase memory: error = %d\n", ret);
292 |         return 1;
293 |     }
294 | 
295 |     libbpf_set_print(netdata_libbpf_vfprintf);
296 |     libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
297 | 
298 |     fill_swap_fcnt();
299 |     if (!function_list[NETDATA_KEY_SWAP_READPAGE_CALL]) {
// NOTE(review): returns 0 (success) although a required function is missing —
// confirm this is intentional so CI does not fail on unsupported kernels.
300 |         fprintf(stderr, "Cannot find all necessary functions\n");
301 |         return 0;
302 |     }
303 | 
// When selector is 0 (presumably trampoline mode — confirm enum values), BTF
// data is needed to locate the target functions; ebpf_find_functions() may
// downgrade the selector if they are absent.
304 |     struct btf *bf = NULL;
305 |     if (!selector) {
306 |         bf = netdata_parse_btf_file((const char *)NETDATA_BTF_FILE);
307 |         if (bf) {
308 |             selector = ebpf_find_functions(bf, selector, function_list, NETDATA_SWAP_END);
309 |             btf__free(bf);
310 |         }
311 |     }
312 | 
// Run the loader once; on a first failure retry a single time with
// selector = 1 (presumably NETDATA_MODE_PROBE — confirm enum value),
// then stop regardless of the outcome.
313 |     int stop_software = 0;
314 |     while (stop_software < 2) {
315 |         if (ebpf_load_swap(selector, map_level) && !stop_software) {
316 |             selector = 1;
317 |             stop_software++;
318 |         } else
319 |             stop_software = 2;
320 |     }
321 | 
322 |     return 0;
323 | }
324 | 
325 | 
-------------------------------------------------------------------------------- /src/sync.bpf.c: --------------------------------------------------------------------------------
1 | #include "vmlinux_508.h"
2 | #include "bpf_tracing.h"
3 | #include "bpf_helpers.h"
4 | 
5 | #include "netdata_core.h"
6 | #include "netdata_sync.h"
7 | 
8 | /************************************************************************************
9 |  *
10 |  *                                  MAPS
11 |  *
12 |  ***********************************************************************************/
13 | 
// Single global array map: one 64-bit counter per NETDATA_SYNC_* key.
14 | struct {
15 |     __uint(type, BPF_MAP_TYPE_ARRAY);
16 |     __type(key, __u32);
17 |     __type(value, __u64);
18 |     __uint(max_entries, NETDATA_SYNC_END);
19 | } tbl_sync SEC(".maps");
20 | 
21 | /************************************************************************************
22 |  *
23 |  *                      SYNC SECTION (trampoline and kprobe)
24 |  *
25 |  ***********************************************************************************/
26 | 
// fentry and kprobe variants of the same counter bump; user space attaches
// only one of them depending on the selected mode.
27 | SEC("fentry/netdata_sync")
28 | int BPF_PROG(netdata_sync_fentry)
29 | {
30 |     libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
31 | 
32 |     return 0;
33 | }
34 | 
35 | SEC("kprobe/netdata_sync")
36 | int BPF_KPROBE(netdata_sync_kprobe)
37 | {
38 |     libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
39 | 
40 |     return 0;
41 | }
42 | 
// Tracepoint variants: each sync-family syscall entry increments the same
// NETDATA_KEY_SYNC_CALL slot of tbl_sync.
43 | SEC("tracepoint/syscalls/sys_enter_syncfs")
44 | int netdata_syncfs_entry(struct trace_event_raw_sys_enter *ctx)
45 | {
46 |     libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
47 | 
48 |     return 0;
49 | }
50 | 
51 | SEC("tracepoint/syscalls/sys_enter_msync")
52 | int netdata_msync_entry(struct trace_event_raw_sys_enter *ctx)
53 | {
54 | 
libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
55 | 
56 |     return 0;
57 | }
58 | 
59 | SEC("tracepoint/syscalls/sys_enter_sync_file_range")
60 | int netdata_sync_file_range_entry(struct trace_event_raw_sys_enter *ctx)
61 | {
62 |     libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
63 | 
64 |     return 0;
65 | }
66 | 
67 | SEC("tracepoint/syscalls/sys_enter_fsync")
68 | int netdata_fsync_entry(struct trace_event_raw_sys_enter *ctx)
69 | {
70 |     libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
71 | 
72 |     return 0;
73 | }
74 | 
75 | SEC("tracepoint/syscalls/sys_enter_fdatasync")
76 | int netdata_fdatasync_entry(struct trace_event_raw_sys_enter *ctx)
77 | {
78 |     libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
79 | 
80 |     return 0;
81 | }
82 | 
83 | SEC("tracepoint/syscalls/sys_enter_sync")
84 | int netdata_sync_entry(struct trace_event_raw_sys_enter *ctx)
85 | {
86 |     libnetdata_update_global(&tbl_sync, NETDATA_KEY_SYNC_CALL, 1);
87 | 
88 |     return 0;
89 | }
90 | 
91 | 
92 | char _license[] SEC("license") = "GPL";
93 | 
94 | 
-------------------------------------------------------------------------------- /src/tests/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netdata/ebpf-co-re/c74ab79e3150663b5ab65cc9f9f3ee727dc64857/src/tests/.gitkeep -------------------------------------------------------------------------------- /src/tests/run_tests.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
# Testers that accept --probe/--tracepoint/--trampoline (and optionally --pid).
3 | three_tests=( "cachestat" "dc" "fd" "mount" "networkviewer" "process" "shm" "socket" "swap" "sync" "vfs" )
# Testers that take no mode arguments and run a single way.
4 | one_test=( "disk" "hardirq" "oomkill" "softirq" )
5 | end_loop=
6 | ADDITIONAL_ARG=
7 | 
8 | echo "Running all tests with three options"
9 | for i in "${three_tests[@]}" ; do
10 |     {
# Probe the tester's --help text: if it mentions "pid", iterate PID grouping
# levels 0..3; otherwise run it once without --pid.
11 |         pid=$("./$i" --help | grep pid)
12 |         if [ -z "${pid}" ]; then
13 |             end_loop=0
14 |         else
15 |             end_loop=3
16 |         fi
17 | 
18 | 
for j in $(seq 0 $end_loop); do
19 |             if [ -z "${pid}" ]; then
20 |                 printf "================ Running %s without PID ================\n" "${i}"
21 |                 ADDITIONAL_ARG=""
22 |             else
23 |                 printf "================ Running %s with PID GROUP %s ================\n" "${i}" "${j}"
24 |                 ADDITIONAL_ARG="--pid $j"
25 |             fi
26 | 
# Each tester is run once per attach mode; stdout/stderr of the whole block
# are appended to success.log / error.log by the redirection on line 40.
27 |             echo "---> Probe: "
28 |             probe_cmd="./$i --probe ${ADDITIONAL_ARG}"
29 |             eval "$probe_cmd"
30 | 
31 |             echo "---> Tracepoint: "
32 |             tracepoint_cmd="./$i --tracepoint ${ADDITIONAL_ARG}"
33 |             eval "$tracepoint_cmd"
34 | 
35 |             echo "---> Trampoline: "
36 |             trampoline_cmd="./$i --trampoline ${ADDITIONAL_ARG}"
37 |             eval "$trampoline_cmd"
38 |             echo " "
39 |         done
40 |     } >> success.log 2>> error.log
41 | done
42 | 
43 | echo "Running all tests with single option"
44 | for i in "${one_test[@]}" ; do
45 |     {
46 |         echo "================ Running $i ================"
47 |         "./$i"
48 |         echo " "
# NOTE(review): unlike the loop above, this block is not redirected into
# success.log/error.log, so single-option output goes to the console —
# confirm whether that is intentional.
49 |     }
50 | done
51 | 
52 | echo "We are not running filesystem or mdflush, because they can generate error, please run them."
53 | 
54 | ls -lh ./*.log
55 | 
-------------------------------------------------------------------------------- /tools/check-kernel-core.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
# Print the running kernel version encoded as a zero-padded 9-digit integer
# (MMMmmmppp) suitable for plain numeric comparison.
3 | function get_kernel_version() {
4 |     r="$(uname -r | cut -f 1 -d '-')"
5 | 
6 |     read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
7 | 
8 |     printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
9 | }
10 | 
# Same encoding for the Red Hat release (4th word of /etc/redhat-release);
# prints 000000000 on non-RH systems.  Note the third component may exceed
# three digits (e.g. 7.6.1810 -> 0070061810).
11 | function get_rh_version() {
12 |     if [ ! -f /etc/redhat-release ] ; then
13 |         printf "000000000"
14 |         return
15 |     fi
16 | 
17 |     r="$(cut -f 4 -d ' ' < /etc/redhat-release)"
18 | 
19 |     read -r -a p <<< "$(echo "${r}" | tr '.' ' ')"
20 | 
21 |     printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}"
22 | }
23 | 
24 | if [ "$(uname -s)" != "Linux" ]; then
25 |     echo >&2 "This does not appear to be a Linux system." 
26 |     exit 1
27 | fi
28 | 
29 | KERNEL_VERSION="$(uname -r)"
30 | 
# NOTE(review): the warning text says "older than 4.11" but 004014000 encodes
# kernel 4.14.0 — one of the two should be corrected.
31 | if [ "$(get_kernel_version)" -lt 004014000 ] && [ "$(get_rh_version)" -lt 0070061810 ] ; then
32 |     echo >&2 "WARNING: Your kernel appears to be older than 4.11 or you are using RH version older than 7.6.1810. This may still work in some cases, but probably won't."
33 | fi
34 | 
35 | CONFIG_PATH=""
36 | MODULE_LOADED=""
37 | 
# Load the `configs` module (if available) so /proc/config.gz is exposed.
38 | if modprobe configs 2> /dev/null; then
39 |     MODULE_LOADED=1
40 | fi
41 | 
# Locate the kernel configuration in the usual places, preferring
# /proc/config.gz, then module source trees, then /boot.
42 | if [ -r /proc/config.gz ]; then
43 |     CONFIG_PATH="/proc/config.gz"
44 | elif [ -r "/lib/modules/${KERNEL_VERSION}/source/.config" ]; then
45 |     CONFIG_PATH="/lib/modules/${KERNEL_VERSION}/source/.config"
46 | elif [ -r "/lib/modules/${KERNEL_VERSION}.x86_64/source/.config" ]; then
47 |     CONFIG_PATH="/lib/modules/${KERNEL_VERSION}.x86_64/source/.config"
48 | elif [ -n "$(find /boot -name "config-${KERNEL_VERSION}*")" ]; then
49 |     CONFIG_PATH="$(find /boot -name "config-${KERNEL_VERSION}*" | head -n 1)"
50 | fi
51 | 
52 | if [ -n "${CONFIG_PATH}" ]; then
53 |     GREP='grep'
54 | 
# Use zgrep for the compressed /proc/config.gz.
55 |     if echo "${CONFIG_PATH}" | grep -q '.gz'; then
56 |         GREP='zgrep'
57 |     fi
58 | 
# CO-RE needs BTF type information compiled into the kernel.
59 |     REQUIRED_CONFIG="DEBUG_INFO_BTF DEBUG_INFO_BTF_MODULES"
60 | 
61 |     for required_config in ${REQUIRED_CONFIG}; do
62 |         if ! "${GREP}" -q "CONFIG_${required_config}=y" "${CONFIG_PATH}"; then
63 |             echo >&2 " Missing Kernel Config: ${required_config}"
64 |             exit 1
65 |         fi
66 |     done
67 | fi
68 | 
# Unload `configs` only if this script loaded it above.
69 | if [ -n "${MODULE_LOADED}" ]; then
70 |     modprobe -r configs 2> /dev/null || true # Ignore failures from CONFIGS being builtin
71 | fi
72 | 
73 | exit 0
74 | 
--------------------------------------------------------------------------------