├── .github
│   └── workflows
│       └── ci.yaml
├── .gitignore
├── CMakeLists.txt
├── LICENSE
├── Makefile.am
├── README.md
├── VERSION
├── autogen.sh
├── bin
│   └── README.md
├── config_m4
│   ├── debug.m4
│   ├── profiling.m4
│   └── versioning.m4
├── configure.ac
├── csv
│   └── README.md
├── doc
│   ├── CONFIG-KEYS
│   ├── Changelog
│   ├── Dependencies
│   ├── Deprecated
│   │   ├── README.md
│   │   ├── centos_INSTALL.md
│   │   └── debian_INSTALL.md
│   ├── integration-with-pmtelemetryd.md
│   └── network-devices-conf-snip.md
├── docker
│   ├── Dockerfile
│   └── scripts
│       ├── grpc.sh
│       ├── mdt_dialout_collector.conf
│       └── mdt_dialout_collector.sh
├── grpc-collector.pc.in
├── install.sh
├── proto
│   ├── Cisco
│   │   ├── cisco_dialout.proto
│   │   └── cisco_telemetry.proto
│   ├── Huawei
│   │   ├── huawei_dialout.proto
│   │   └── huawei_telemetry.proto
│   ├── Juniper
│   │   ├── juniper_dialout.proto
│   │   ├── juniper_gnmi.proto
│   │   ├── juniper_gnmi_ext.proto
│   │   ├── juniper_telemetry.proto
│   │   ├── juniper_telemetry_header.proto
│   │   └── juniper_telemetry_header_extension.proto
│   ├── Makefile.am
│   ├── Nokia
│   │   ├── nokia_dialout.proto
│   │   ├── nokia_gnmi.proto
│   │   └── nokia_gnmi_ext.proto
│   └── OpenConfig
│       └── openconfig_interfaces.proto
├── ptm
│   └── README.md
└── src
    ├── Makefile.am
    ├── bridge
    │   ├── Makefile.am
    │   ├── grpc_collector_bridge.cc
    │   └── grpc_collector_bridge.h
    ├── cfgWrapper
    │   ├── Makefile.am
    │   ├── cfg_wrapper.cc
    │   └── cfg_wrapper.h
    ├── core
    │   ├── Makefile.am
    │   ├── mdt_dialout_core.cc
    │   └── mdt_dialout_core.h
    ├── dataDelivery
    │   ├── Makefile.am
    │   ├── kafka_delivery.cc
    │   ├── kafka_delivery.h
    │   ├── zmq_delivery.cc
    │   └── zmq_delivery.h
    ├── dataManipulation
    │   ├── Makefile.am
    │   ├── data_manipulation.cc
    │   └── data_manipulation.h
    ├── dataWrapper
    │   ├── Makefile.am
    │   ├── data_wrapper.cc
    │   └── data_wrapper.h
    ├── include
    │   ├── csv
    │   │   └── rapidcsv.h
    │   ├── grpc
    │   │   └── socket_mutator.h
    │   └── kafka
    │       ├── AdminClient.h
    │       ├── AdminClientConfig.h
    │       ├── AdminCommon.h
    │       ├── BrokerMetadata.h
    │       ├── ConsumerCommon.h
    │       ├── ConsumerConfig.h
    │       ├── ConsumerRecord.h
    │       ├── Error.h
    │       ├── Header.h
    │       ├── Interceptors.h
    │       ├── KafkaClient.h
    │       ├── KafkaConsumer.h
    │       ├── KafkaException.h
    │       ├── KafkaProducer.h
    │       ├── Log.h
    │       ├── ProducerCommon.h
    │       ├── ProducerConfig.h
    │       ├── ProducerRecord.h
    │       ├── Project.h
    │       ├── Properties.h
    │       ├── RdKafkaHelper.h
    │       ├── Timestamp.h
    │       ├── Types.h
    │       ├── Utility.h
    │       └── addons
    │           ├── KafkaMetrics.h
    │           ├── KafkaRecoverableProducer.h
    │           └── UnorderedOffsetCommitQueue.h
    ├── mdt_dialout_collector.cc
    ├── pmtelemetryd.c
    ├── proto
    │   ├── Cisco
    │   │   └── README.md
    │   ├── Huawei
    │   │   └── README.md
    │   ├── Juniper
    │   │   └── README.md
    │   ├── Makefile.am
    │   ├── Nokia
    │   │   └── README.md
    │   └── OpenConfig
    │       └── README.md
    ├── tests
    │   ├── README.md
    │   └── zmq_pull.cc
    └── utils
        ├── Makefile.am
        ├── cfg_handler.cc
        ├── cfg_handler.h
        ├── logs_handler.cc
        └── logs_handler.h

--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
name: ci

on:
  push:
    branches:
      - main

    #tags:
    #  - v*

env:
  IMAGE_NAME: mdt-dialout-collector

jobs:
  ci:
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read

    steps:
      - uses: actions/checkout@v3

      - name: Build image
        run: docker build docker --tag $IMAGE_NAME --label "runnumber=${GITHUB_RUN_ID}"

      - name: Log into registry
        run: echo "${{ secrets.GHCR_PAT }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin

      - name: Push image
        run: |
          IMAGE_ID=ghcr.io/${{ github.repository_owner }}/$IMAGE_NAME

          # Change all uppercase to lowercase
          IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')
          # Strip git ref prefix from version
          VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
          # Strip "v" prefix from tag name
          #[[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//')
          # Use Docker `latest` tag convention
          [ "$VERSION" == "main" ] && VERSION=latest
          echo IMAGE_ID=$IMAGE_ID
          echo VERSION=$VERSION
          docker tag $IMAGE_NAME $IMAGE_ID:$VERSION
          docker push $IMAGE_ID:$VERSION

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Prerequisites
*.d

# Compiled Object files
*.slo
*.lo
*.o
*.obj

# Precompiled Headers
*.gch
*.pch

# Compiled Dynamic libraries
*.so
*.dylib
*.dll

# Fortran module files
*.mod
*.smod

# Compiled Static libraries
*.lai
*.la
*.a
*.lib

# Executables
*.exe
*.out
*.app

# VSCode
.vscode/*

# SWP
*.swp

# BUILD
build/*

# Protobuf & gRPC helpers
src/proto/Cisco/*.cc
src/proto/Cisco/*.h
src/proto/Huawei/*.cc
src/proto/Huawei/*.h
src/proto/Juniper/*.cc
src/proto/Juniper/*.h
src/proto/OpenConfig/*.cc
src/proto/OpenConfig/*.h

# BIN
bin/mdt_dialout_collector
bin/pmtelemetryd
src/mdt_dialout_collector
src/pmtelemetryd
bin/zmq_pull

# YouCompleteMe
.cache/*
compile_commands.json

# CSV
csv/*.csv

# Autotools
*~
*.dirstamp
libtool.m4
ltoptions.m4
ltsugar.m4
ltversion.m4
lt~obsolete.m4
*aclocal.m4

Makefile.in
Makefile
.deps
.libs
autom4te.cache
config.*
configure
install-sh
ltmain.sh
missing
depcomp
libtool
compile
m4
ar-lib
test-driver
grpc-collector.pc

/build-*
/build/*

proto/.proto_mark

!.gitignore
!.travis.yml

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2022 Salvatore Cuzzilla

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/Makefile.am:
--------------------------------------------------------------------------------
MAINTAINERCLEANFILES = \
    $(top_srcdir)/aclocal.m4 \
    $(top_srcdir)/configure \
    $(top_srcdir)/Makefile.in

# PKGCONFIG_USER comes from configure.ac
pkgconfigdir = $(PKGCONFIG_USER)
pkgconfig_DATA = \
    grpc-collector.pc

SUBDIRS = proto src

#Could be improved..
.PHONY: doc

install-exec-hook:

uninstall-hook:

maintainer-clean-local:
	rm -rf $(top_srcdir)/m4
	rm -rf $(top_srcdir)/build-aux

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
[![Build status](https://github.com/network-analytics/mdt-dialout-collector/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/network-analytics/mdt-dialout-collector/actions/workflows/ci.yaml)

## Table of Contents


* [Introduction](#introduction)
* [Deployment options](#deployment-options)
   * [Standalone binary with mdt-dialout-collector](#standalone-binary-with-mdt-dialout-collector)
   * [Library/Header integration with pmtelemetryd](#libraryheader-integration-with-pmtelemetryd)
* [Build/Install](#buildinstall)
* [References](#references)


## Introduction
**mdt-dialout-collector** & the **gRPC dial-out libraries** leverage the [**gRPC Framework**](https://grpc.io/) to implement a multi-vendor gRPC dial-out collector.
The [doc/Changelog](https://github.com/network-analytics/mdt-dialout-collector/blob/main/doc/Changelog) file includes additional details about the supported network devices.

The collector functionalities can be logically grouped into three categories:

1. **Data Collection** - steering the daemon(s) behavior.
2. **Data Manipulation** - transforming the in-transit data-stream.
3. **Data Delivery** - inter-connecting the collector with the next stage in the pipeline.

The [doc/CONFIG-KEYS](https://github.com/network-analytics/mdt-dialout-collector/blob/main/doc/CONFIG-KEYS) file describes each of the available options.
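As a quick orientation, here is a minimal, purely illustrative configuration sketch touching one option group at a time. The keys below are taken from the sample configurations elsewhere in this repository (docker/scripts/mdt_dialout_collector.conf and doc/integration-with-pmtelemetryd.md); the values are placeholders, and `data_delivery_method = "kafka"` is an assumption for this sketch - [doc/CONFIG-KEYS](https://github.com/network-analytics/mdt-dialout-collector/blob/main/doc/CONFIG-KEYS) remains the authoritative reference:
```TEXT
# Data Collection - where the daemon listens for gRPC dial-out sessions
iface = "eth0";
ipv4_socket_cisco = "0.0.0.0:10007";

# Data Manipulation - how the in-transit data-stream is transformed
enable_cisco_gpbkv2json = "false";
enable_cisco_message_to_json_string = "true";

# Data Delivery - where the data-stream is handed over next
data_delivery_method = "kafka";
bootstrap_servers = "localhost.fake:9093";
topic = "topic.fake";
```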

## Deployment options

The gRPC dial-out data-collection functionality can be deployed in two ways:

### Standalone binary with mdt-dialout-collector
```TEXT
              +------------------------------------------------------+
+---------+   | +------------+   +--------------+   +--------------+ |   +---------+
| network |-->| | collection |-->| manipulation |-->| kafka client | |-->|  kafka  |
+---------+   | +------------+   +--------------+   +--------------+ |   | cluster |
              |               [mdt-dialout-collector]                |   +---------+
              +------------------------------------------------------+
```
The build process generates a single binary:
```TEXT
/opt/mdt-dialout-collector/bin/mdt_dialout_collector
```
which, by default, reads its runtime options from:
```TEXT
/etc/opt/mdt-dialout-collector/mdt_dialout_collector.conf
```
Alternatively, a different configuration file can be specified via the `-f` command-line flag:
```TEXT
/opt/mdt-dialout-collector/bin/mdt_dialout_collector -f
```

### Library/Header integration with pmtelemetryd
```TEXT
              +---------------------------------------------------------+
+---------+   | +------------+   +--------------+   +-----------------+ |   +------------+
| network |-->| | collection |-->| manipulation |-->| ZMQ (PUSH/PULL) | |-->|  pipeline  |
+---------+   | +------------+   +--------------+   +-----------------+ |   | next stage |
              |                     [pmtelemetryd]                      |   +------------+
              +---------------------------------------------------------+
```
The build process generates both the library and the header file required to build [pmtelemetryd](https://github.com/pmacct/pmacct/blob/master/INSTALL) with gRPC dial-out support:
```
/usr/local/lib/libgrpc_collector.la

/usr/local/include/grpc_collector_bridge/grpc_collector_bridge.h
```
One pmtelemetryd configuration key (see [CONFIG-KEYS](https://github.com/pmacct/pmacct/blob/master/CONFIG-KEYS)) is mandatory in order to enable the embedded gRPC dial-out collector:
```TEXT
KEY: telemetry_daemon_grpc_collector_conf
DESC: Points to a file containing the configuration of the gRPC collector thread. An
      example of the configuration plus all available config keys is available here:
      https://github.com/network-analytics/mdt-dialout-collector
DEFAULT: none
```

## Build/Install

[install.sh](https://github.com/network-analytics/mdt-dialout-collector/blob/main/install.sh) automates the build/install process, taking care of all [dependencies](https://github.com/network-analytics/mdt-dialout-collector/blob/main/doc/Dependencies).

- The Standalone binary can be deployed using:
```SHELL
sudo /bin/sh -c "$(curl -fsSL https://github.com/network-analytics/mdt-dialout-collector/raw/main/install.sh)" -- -b -v current
```

- The Library/Header can be deployed using:
```SHELL
sudo /bin/sh -c "$(curl -fsSL https://github.com/network-analytics/mdt-dialout-collector/raw/main/install.sh)" -- -l -v current
```

## References

- [Integration with PMACCT/pmtelemetryd](https://github.com/network-analytics/mdt-dialout-collector/blob/main/doc/integration-with-pmtelemetryd.md)
- [Network devices configuration snippets](https://github.com/network-analytics/mdt-dialout-collector/blob/main/doc/network-devices-conf-snip.md)
- [Multivendor (async) gRPC dial-out collector - APNIC Blog](https://blog.apnic.net/2022/10/17/multivendor-async-grpc-dial-out-collector/)

--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
v1.9.9

--------------------------------------------------------------------------------
/autogen.sh:
--------------------------------------------------------------------------------
#!/bin/sh

export AUTOMAKE="automake --foreign -a"
autoreconf -f -i

--------------------------------------------------------------------------------
/bin/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/network-analytics/mdt-dialout-collector/7829ae694f8d44b5eab4770fc32c737a61e8d228/bin/README.md

--------------------------------------------------------------------------------
/config_m4/debug.m4:
--------------------------------------------------------------------------------
# Check for debug mode - MUST BE THE FIRST CHECK
AC_MSG_CHECKING(whether to enable debug mode)
debug_default="no"
AC_ARG_ENABLE(debug,
    AS_HELP_STRING([--enable-debug], [turn on debug mode [default=no]])
    , , enable_debug=$debug_default)
if test "$enable_debug" = "yes"; then
    CFLAGS="$CFLAGS -g -O0"
    CXXFLAGS="$CXXFLAGS -g -O0 -fno-inline"
    AC_DEFINE([DEBUG], [], [Description])
    AC_MSG_RESULT(yes)
else
    CFLAGS="$CFLAGS -O3" #--compiler-options -fno-strict-aliasing --compiler-options -fno-inline
    CXXFLAGS="$CXXFLAGS -O3" #-fomit-frame-pointer"
    AC_DEFINE([NDEBUG], [], [Description])
    AC_MSG_RESULT(no)
fi
AM_CONDITIONAL(DEBUG, test "$enable_debug" = yes)


--------------------------------------------------------------------------------
/config_m4/profiling.m4:
--------------------------------------------------------------------------------
# Check for profiling mode
AC_MSG_CHECKING(whether to enable profiling mode)
profile_default="no"
AC_ARG_ENABLE(profile,
    AS_HELP_STRING([--enable-profile], [turn on profile mode [default=no]])
    , , enable_profile=$profile_default)
if test "$enable_profile" = "yes"; then
    CFLAGS="$( echo $CFLAGS | sed s/-fomit-frame-pointer//g )"
    CXXFLAGS="$( echo $CXXFLAGS | sed s/-fomit-frame-pointer//g )"
    CFLAGS="$CFLAGS -pg"
    CXXFLAGS="$CXXFLAGS -pg"
    LDFLAGS="$LDFLAGS -pg"
    AC_MSG_RESULT(yes)
else
    AC_MSG_RESULT(no)
fi

--------------------------------------------------------------------------------
/config_m4/versioning.m4:
--------------------------------------------------------------------------------
# Set application version based on the git version

#Default
MY_PROJECT_VERSION="$PACKAGE_VERSION" #Unknown (no GIT repository detected)"
FILE_VERSION=`cat $srcdir/VERSION`

AC_CHECK_PROG(ff_git,git,yes,no)

#Normalize
MY_PROJECT_VERSION_NORMALIZED=`echo $MY_PROJECT_VERSION | sed s/dev//g | sed s/RC.*//g | tr -d v`

#Substs
AC_SUBST([MY_PROJECT_VERSION], ["$MY_PROJECT_VERSION"])
AC_SUBST([MY_PROJECT_VERSION_NORMALIZED], ["$MY_PROJECT_VERSION_NORMALIZED"])

AC_MSG_CHECKING([the build version])
AC_MSG_RESULT([$MY_PROJECT_VERSION ($MY_PROJECT_VERSION_NORMALIZED)])

AC_MSG_CHECKING([the build number])
if test $ff_git = no
then
    AC_MSG_RESULT([git not found!])
else

    if test -d $srcdir/.git ; then
        #Try to retrieve the build number
        _MY_PROJECT_GIT_BUILD=`git log -1 --pretty=%H`
        _MY_PROJECT_GIT_BRANCH=`git rev-parse --abbrev-ref HEAD`
        _MY_PROJECT_GIT_DESCRIBE=`git describe --abbrev=40`

        AC_SUBST([MY_PROJECT_BUILD], ["$_MY_PROJECT_GIT_BUILD"])
        AC_SUBST([MY_PROJECT_BRANCH], ["$_MY_PROJECT_GIT_BRANCH"])
        AC_SUBST([MY_PROJECT_DESCRIBE], ["$_MY_PROJECT_GIT_DESCRIBE"])

    fi

    AC_MSG_RESULT([$_MY_PROJECT_GIT_BUILD])
fi

--------------------------------------------------------------------------------
/configure.ac:
--------------------------------------------------------------------------------
m4_include([VERSION]) #Force reconf on VERSION change

AC_INIT(
    [mdt-dialout-collector],
    [m4_esyscmd_s(cat VERSION)],
    [salvatore@cuzzilla.org],
    [mdt-dialout-collector],
    [http://github.com/network-analytics/mdt-dialout-collector])

AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_MACRO_DIR([m4])
AC_PREFIX_DEFAULT([/usr/local])

AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects])

AC_USE_SYSTEM_EXTENSIONS

AC_PROG_INSTALL
AC_PROG_CC
AC_PROG_CXX
LT_PATH_LD
AM_PROG_CC_C_O
m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])

PKG_PROG_PKG_CONFIG

AUTOCONF_ENV="SHELL=/bin/sh"

LT_INIT
AC_ENABLE_STATIC

# Some useful default flags
CFLAGS="-Werror -Wall -std=gnu89 $CFLAGS"
CXXFLAGS="-Werror -Wall -std=c++17 $CXXFLAGS"

AC_ARG_WITH(
    [pkgconfigdir],
    [AS_HELP_STRING([--with-pkgconfigdir],
        [pkg-config directory to install the .pc file.])],
    [ with_pkgconfigdir=$with_pkgconfigdir ],
    [ with_pkgconfigdir=$libdir/pkgconfig ])

PKGCONFIG_USER=$with_pkgconfigdir
AC_SUBST([PKGCONFIG_USER])

# Debug
m4_include([config_m4/debug.m4])

# Check for profiling mode
m4_include([config_m4/profiling.m4])

# Versioning
m4_include([config_m4/versioning.m4])

# Checking libs
PKG_CHECK_MODULES([JSONCPP], [jsoncpp], [],
    [AC_MSG_ERROR([Please install libjsoncpp ])])

PKG_CHECK_MODULES([KAFKA], [rdkafka], [],
    [AC_MSG_ERROR([Please install librdkafka ])])
PKG_CHECK_MODULES([KAFKACPP], [rdkafka++], [],
    [AC_MSG_ERROR([Please install librdkafka++ ])])

PKG_CHECK_MODULES([SPDLOG], [spdlog], [],
    [AC_MSG_ERROR([Please install libspdlog ])])

PKG_CHECK_MODULES([PROTOBUF], [protobuf], [],
    [AC_MSG_ERROR([Please install libprotobuf ])])

PKG_CHECK_MODULES([GRPC], [grpc], [],
    [AC_MSG_ERROR([Please install libgrpc ])])

PKG_CHECK_MODULES([GRPCCPP], [grpc++], [],
    [AC_MSG_ERROR([Please install libgrpc++ ])])

# Output files
AC_CONFIG_FILES([
    Makefile
    proto/Makefile
    src/Makefile
    src/bridge/Makefile
    src/cfgWrapper/Makefile
    src/core/Makefile
    src/dataDelivery/Makefile
    src/dataManipulation/Makefile
    src/dataWrapper/Makefile
    src/proto/Makefile
    src/utils/Makefile
    grpc-collector.pc
])

AC_OUTPUT

--------------------------------------------------------------------------------
/csv/README.md:
--------------------------------------------------------------------------------
#### Data-stream enrichment - specifically: node_id / platform_id

##### Example of CSV file format
```TEXT
1.1.1.1,daisy-router-01,Cisco-XR
2.2.2.2,daisy-router-02,Cisco-XE
3.3.3.3,daisy-router-03,Huawei-VRP
4.4.4.4,daisy-router-04,JunOS
```

If not already existing, create a file "csv/label_map.csv" resembling the above format.
The client IP address (field1) is matched & the data-stream is enriched
with field2 & field3 according to this data format:

```JSON

"label": {
    "node_id": <field2>,
    "platform_id": <field3>
},
```

When new rows are added to the CSV, you can refresh the daemon seamlessly:

```SHELL
$ sudo kill -USR1 `cat /var/run/mdt_dialout_collector.pid`
```

--------------------------------------------------------------------------------
/doc/Changelog:
--------------------------------------------------------------------------------
Adopted versioning schema: vMAJOR.MINOR.PATCH:
*MAJOR version is incremented when incompatible API changes are committed.
*MINOR version is incremented when new functionalities are committed, however the new implementation is still backward-compatible.
*PATCH version is incremented when backward-compatible bug fixes are committed.

The keys used are:
!: fixed/modified feature, -: deleted feature, +: new feature


current (main branch) -- 06-07-2024
+ Adding the ability to disable the checks related to socket binding to a particular device
+ Adding the ability to configure the Kafka option "enable.ssl.certificate.verification". Reference: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+ Adding the ability to configure the Kafka option "ssl.key.password". Reference: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+ Adding support for Rocky Linux v8.9, install.sh script (@ustorbeck)
! Fixing Debian v12 detection, install.sh script (@ustorbeck)
! Fixing script path names in the Dockerfile so that no additional options are required to build the docker image (@ustorbeck)
! Fixing a crash on startup in zmq delivery mode (@ustorbeck)


v1.1.4 -- 05-12-2023
+ Adding automatic version number retrieval from the VERSION file
+ Adding the ability to specify the release version for installation via the `install.sh` parameter
! Fixing the Cflags path within the grpc-collector.pc.in file
! Fixing the location where the huawei_sstream pointer is checked and (deleted)

v1.1.3 -- 07-10-2023
+ Adding dedicated vector per vendor to handle threads, standalone collector
+ Adding support for RHEL & Rocky Linux, install.sh script
! Adding checks before every "delete" statement to avoid double deletion
! Fixing Segfault issue affecting the start_grpc_dialout_collector() function
! Fixing memory leak affecting the Srv::*Stream::Start() functions
! Making the free_grpc_payload() and the InitGrpcPayload() function safer (pointers handling)
! Making start_grpc_dialout_collector() safer (vector to store the workers threads)
+ Implemented log_socket_options() function to enable logging of the configured socket options for enhanced debugging and transparency

v1.1.2 -- 26-06-2023
+ JunOS "sensor_path" formatted as JSON
! Testing gRPC dial-out support for Cisco NX-OS (testing device: NX-OS 10.2(2)@Nexus9000)
! Documentation general review/update | adding integration-with-pmtelemetryd.md | adding network-devices-conf-snip.md
! Minor, multiple, fixes on install.sh (install vs clone dir)

v1.1.1 -- 21-01-2023
+ When running in "Library mode" the ZMQ uri is learned from pmtelemetryd. In "binary mode" the ZMQ uri is statically set to "ipc:///tmp/grpc.sock"
! Minor, multiple, fixes on install.sh

v1.1.0 -- 17-01-2023
+ Documentation general review/update | adding CONFIG-KEYS | adding structured Changelog
+ Introduced install.sh to automate the build/install process
+ Introducing data delivery via ZMQ (PUSH/PULL). This new data delivery option makes it possible to embed the gRPC dial-out collector
  into pmtelemetryd

v1.0.0 -- 29-09-2022
+ Introduced gRPC dial-out support for Cisco XR/XE (testing devices: Cisco-XR 7.4.1@NCS-540 | Cisco-XE 17.06.01prd7@C8000V)
  (https://github.com/ios-xr/model-driven-telemetry/blob/ebc059d77f813b63bb5a3139f5178ad11665d49f/protos/66x/mdt_grpc_dialout/mdt_grpc_dialout.proto)
+ Introduced GPB-KV encoding capabilities for Cisco XR/XE (testing devices: Cisco-XR 7.4.1@NCS-540 | Cisco-XE 17.06.01prd7@C8000V)
  (https://github.com/ios-xr/model-driven-telemetry/blob/ebc059d77f813b63bb5a3139f5178ad11665d49f/protos/66x/telemetry.proto)
+ Introduced gRPC dial-out support for Juniper JunOS (testing device: JunOS 20.4R3-S2.6@mx10003)
  (https://www.juniper.net/documentation/us/en/software/junos/interfaces-telemetry/topics/topic-map/telemetry-grpc-dialout-ta.html)
+ Introduced JSON/GPB-KV encoding capabilities for Juniper JunOS (testing device: JunOS 20.4R3-S2.6@mx10003)
  (https://www.juniper.net/documentation/us/en/software/junos/interfaces-telemetry/topics/topic-map/telemetry-grpc-dialout-ta.html)
+ Introduced gRPC dial-out support for Huawei VRP (testing device: VRP V800R021C10SPC300T@NE40E)
  (https://support.huawei.com/enterprise/en/doc/EDOC1100139549/40577baf/common-proto-files)
+ Introduced GPB (OpenConfig Interfaces) encoding capabilities for Huawei VRP (testing device: VRP V800R021C10SPC300T@NE40E)
  (https://support.huawei.com/enterprise/en/doc/EDOC1100139549/40577baf/common-proto-files)
+ Introduced data manipulation options for Cisco GPB/GPB-KV encoded messages | adding enable_cisco_gpbkv2json | adding enable_cisco_message_to_json_string
+ Introduced data enrichment options | adding enable_label_encode_as_map | adding enable_label_encode_as_map_ptm
+ Introducing data delivery via Kafka producer. The connection with Kafka can be secured with SSL or left insecure using PLAINTEXT
+ Introducing logging capabilities. Both console and syslog logging are supported

--------------------------------------------------------------------------------
/doc/Dependencies:
--------------------------------------------------------------------------------
# Dependencies

*gRPC                 | (https://github.com/grpc/grpc)                       | BSD 3-Clause License        |
*JsonCpp              | (https://github.com/open-source-parsers/jsoncpp)     | MIT License                 |
*librdkafka           | (https://github.com/edenhill/librdkafka)             | BSD 2-Clause License        |
*cppzmq               | (https://github.com/zeromq/cppzmq)                   | MIT License                 |
*Modern C++ Kafka API | (https://github.com/morganstanley/modern-cpp-kafka)  | Apache License Version 2.0  |
*libconfig            | (http://hyperrealm.github.io/libconfig/)             | LGPL v2.1                   |
*rapidcsv             | (https://github.com/d99kris/rapidcsv)                | BSD-3-Clause license        |
*spdlog               | (https://github.com/gabime/spdlog)                   | MIT License                 |

--------------------------------------------------------------------------------
/doc/Deprecated/centos_INSTALL.md:
--------------------------------------------------------------------------------
### Build@CentOS 8 Stream (CentOS-Stream-8-x86_64-20221125-boot)

- Install the necessary tools to build
```SHELL
#yum install vim epel-release tmux wget mc most mlocate (optional)
yum install bash git cmake autoconf libtool pkg-config gcc-toolset-11
```

- Build & install the gRPC framework
```SHELL
cd /root
scl enable gcc-toolset-11 bash

git clone --recurse-submodules -b v1.45.2 --depth 1 --shallow-submodules https://github.com/grpc/grpc

export MY_INSTALL_DIR=$HOME/.local
mkdir -p $MY_INSTALL_DIR
export PATH="$MY_INSTALL_DIR/bin:$PATH"

cd grpc
mkdir -p cmake/build
pushd cmake/build

cmake -DgRPC_INSTALL=ON \
      -DCMAKE_BUILD_TYPE=Release \
      -DgRPC_BUILD_TESTS=OFF \
      -DCMAKE_INSTALL_PREFIX=$MY_INSTALL_DIR \
      -DABSL_PROPAGATE_CXX_STD=ON \
      -DgRPC_ABSL_PROVIDER=module \
      -DgRPC_CARES_PROVIDER=module \
      -DgRPC_PROTOBUF_PROVIDER=module \
      -DgRPC_RE2_PROVIDER=module \
      -DgRPC_SSL_PROVIDER=module \
      -DgRPC_ZLIB_PROVIDER=module \
      ../..
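# Note on the next step: the subshell below counts "processor" entries in
# /proc/cpuinfo, so make runs with (number of CPUs - 1) parallel jobs.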

make -j`echo $(($(egrep 'processor' /proc/cpuinfo | wc -l) - 1))`

make install
popd
```

- Build & install the collector daemons (Run the collector natively)
```SHELL
cd /root
scl enable gcc-toolset-11 bash

export PATH="/root/.local/bin:$PATH"
export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig/

yum install -y jsoncpp-devel libconfig-devel spdlog-devel cppzmq-devel openssl-devel

cd /tmp
git clone https://github.com/edenhill/librdkafka.git
cd librdkafka
./configure
make
make install

sed -i '/SPDLOG_FMT_EXTERNAL/s/^\/\/ //g' /usr/include/spdlog/tweakme.h

cd /opt
git clone https://github.com/network-analytics/mdt-dialout-collector.git

cd mdt-dialout-collector
mkdir build
cd build
cmake ../

make -j`echo $(($(egrep 'processor' /proc/cpuinfo | wc -l) - 1))`
```

- Build & install the collector libraries (Integrate the collector, via ZMQ, with [pmacct](https://github.com/pmacct/pmacct))
```SHELL
cd /root
scl enable gcc-toolset-11 bash

export PATH="/root/.local/bin:$PATH"
export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/root/.local/lib/pkgconfig:/root/.local/lib64/pkgconfig:/usr/local/lib/pkgconfig/
ln -s /root/.local/bin/grpc_cpp_plugin /usr/local/bin/grpc_cpp_plugin

yum install -y jsoncpp-devel libconfig-devel spdlog-devel cppzmq-devel openssl-devel

cd /tmp
git clone https://github.com/edenhill/librdkafka.git
cd librdkafka
./configure
make
make install

sed -i '/SPDLOG_FMT_EXTERNAL/s/^\/\/ //g' /usr/include/spdlog/tweakme.h

cd /opt
git clone https://github.com/network-analytics/mdt-dialout-collector.git

cd /opt/mdt-dialout-collector
./autogen.sh
CPPFLAGS="-I/root/.local/include" ./configure

make -j`echo $(($(egrep 'processor' /proc/cpuinfo | wc -l) - 1))`
make install
```

--------------------------------------------------------------------------------
/doc/Deprecated/debian_INSTALL.md:
--------------------------------------------------------------------------------
### Build@Debian Stable (debian-11.5.0-amd64-netinst)

- Install the necessary tools to build
```SHELL
#apt install sudo tmux vim-nox wget mc most locate (optional)
apt install bash git cmake build-essential autoconf libtool pkg-config
```

- Build & install the gRPC framework
```SHELL
cd /root

git clone --recurse-submodules -b v1.45.2 --depth 1 --shallow-submodules https://github.com/grpc/grpc

export MY_INSTALL_DIR=$HOME/.local
mkdir -p $MY_INSTALL_DIR
export PATH="$MY_INSTALL_DIR/bin:$PATH"

cd grpc
mkdir -p cmake/build
pushd cmake/build

cmake -DgRPC_INSTALL=ON \
      -DCMAKE_BUILD_TYPE=Release \
      -DgRPC_BUILD_TESTS=OFF \
      -DCMAKE_INSTALL_PREFIX=$MY_INSTALL_DIR \
      -DABSL_PROPAGATE_CXX_STD=ON \
      -DgRPC_ABSL_PROVIDER=module \
      -DgRPC_CARES_PROVIDER=module \
      -DgRPC_PROTOBUF_PROVIDER=module \
      -DgRPC_RE2_PROVIDER=module \
      -DgRPC_SSL_PROVIDER=module \
      -DgRPC_ZLIB_PROVIDER=module \
      ../..
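# Note: as in the CentOS instructions, the make invocation below builds with
# (number of CPUs - 1) parallel jobs, computed from /proc/cpuinfo.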

make -j`echo $(($(egrep 'processor' /proc/cpuinfo | wc -l) - 1))`

make install
popd
```

- Build & install the collector daemons (Run the collector natively)
```SHELL
cd /root

export PATH="/root/.local/bin:$PATH"

apt install -y libjsoncpp-dev librdkafka-dev libconfig++-dev libspdlog-dev libzmq3-dev libssl-dev

cd /opt
git clone https://github.com/network-analytics/mdt-dialout-collector.git

cd mdt-dialout-collector
mkdir build
cd build
cmake ../

make -j`echo $(($(egrep 'processor' /proc/cpuinfo | wc -l) - 1))`
```

- Build & install the collector libraries (Integrate the collector, via ZMQ, with [pmacct](https://github.com/pmacct/pmacct))
```SHELL
cd /root

export PATH="/root/.local/bin:$PATH"
export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/root/.local/lib/pkgconfig
ln -s /root/.local/bin/grpc_cpp_plugin /usr/local/bin/grpc_cpp_plugin

apt install -y libjsoncpp-dev librdkafka-dev libconfig++-dev libspdlog-dev libzmq3-dev libssl-dev

cd /opt
git clone https://github.com/network-analytics/mdt-dialout-collector.git

cd mdt-dialout-collector
./autogen.sh
CPPFLAGS="-I/root/.local/include -I/usr/include/jsoncpp" ./configure

make -j`echo $(($(egrep 'processor' /proc/cpuinfo | wc -l) - 1))`
make install
```

--------------------------------------------------------------------------------
/doc/integration-with-pmtelemetryd.md:
--------------------------------------------------------------------------------
## Table of Contents


* [Introduction](#introduction)
* [Testing Environment](#testing-environment)
* [Compile/Install gRPC dial-out library/Header for pmtelemetryd](#compileinstall-gRPC-dial-out-libraryheader-for-pmtelemetryd)
* [Compile/Install pmtelemetryd with gRPC dial-out support enabled](#compileinstall-pmtelemetryd-with-gRPC-dial-out-support-enabled)
* [pmtelemetryd's minimal configuration snippet](#pmtelemetryds-minimal-configuration-snippet)
* [gRPC's dial-out minimal configuration snippet](#gRPCs-dial-out-minimal-configuration-snippet)


## Introduction

The following paragraphs outline the steps necessary to integrate the gRPC dial-out data collection functionality into pmacct/pmtelemetryd.
I've included a minimal set of configuration snippets that can serve to verify the installation as well as act as a starting point for more intricate scenarios.
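As a quick orientation: once the library/header pair is installed (see below), a pkg-config query can confirm that the compiler and linker flags resolve. This is a minimal sketch - the module name matches the grpc-collector.pc file this repository installs, and the exact paths depend on your prefix and --with-pkgconfigdir:

```SHELL
$ pkg-config --cflags --libs grpc-collector

# Expected shape of the output:
# -I/usr/local/include/grpc_collector_bridge -L/usr/local/lib -lgrpc_collector
```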

## Testing Environment

```SHELL
$ sudo cat /etc/os-release

PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"
NAME="Debian GNU/Linux"
VERSION_ID="11"
VERSION="11 (bullseye)"
VERSION_CODENAME=bullseye
ID=debian
HOME_URL="https://www.debian.org/"
SUPPORT_URL="https://www.debian.org/support"
BUG_REPORT_URL="https://bugs.debian.org/"
```

## Compile/Install gRPC dial-out library/Header for pmtelemetryd

```SHELL
sudo /bin/sh -c "$(curl -fsSL https://github.com/network-analytics/mdt-dialout-collector/raw/main/install.sh)" -- -l -v current
```

#### *(sh install.sh -l)* explained

- The gRPC framework source code is cloned by default under the "/root" folder.
- The gRPC framework is compiled and installed under the "/root/.local/{bin,include,lib,share}" folders.
- The gRPC dial-out source code is cloned/compiled under the "/opt/mdt-dialout-collector" folder.
- The build process generates both the library and the header file required to build pmtelemetryd with gRPC dial-out support:
```SHELL
/usr/local/lib/libgrpc_collector.la
/usr/local/include/grpc_collector_bridge/grpc_collector_bridge.h
```

## Compile/Install pmtelemetryd with gRPC dial-out support enabled

```SHELL
sudo apt install libzmq3-dev libjansson-dev librdkafka-dev

cd /opt
sudo git clone https://github.com/pmacct/pmacct.git
cd /opt/pmacct
sudo ./autogen.sh
sudo ./configure --enable-debug --enable-zmq --enable-jansson --enable-kafka --enable-grpc-collector
sudo make -j
sudo make install
```

## pmtelemetryd's minimal configuration snippet

```SHELL
$ sudo cat /root/etc/pmtelemetryd.conf

! ### Generic Settings
core_proc_name: pmtelemetryd-grpc
pidfile: /root/var/run/pmtelemetryd-grpc
logfile: /root/var/log/pmacct/pmtelemetryd.log
!
! ### gRPC dial-out Settings
telemetry_daemon_decoder: json
telemetry_daemon_grpc_collector_conf: /root/etc/pmtelemetryd-grpc-dialout.conf
!
! ### Kafka Settings
telemetry_daemon_msglog_output: json
telemetry_daemon_msglog_kafka_topic: kafka.topic
telemetry_daemon_msglog_kafka_config_file: /root/etc/kafka.conf


$ sudo cat /root/etc/kafka.conf

global, compression.type, snappy
global, queue.buffering.max.messages, 10000000
global, batch.size, 2147483647
global, batch.num.messages, 1000000
global, linger.ms, 200
global, client.id, debian
global, security.protocol, plaintext
global, metadata.broker.list, 192.168.100.1:9092
```

## gRPC's dial-out minimal configuration snippet

```SHELL
$ cat /root/etc/pmtelemetryd-grpc-dialout.conf

iface = "enp1s0";
ipv4_socket_cisco = "192.168.100.254:10001";
data_delivery_method = "zmq";

spdlog_level = "debug";

enable_cisco_gpbkv2json = "false";
enable_cisco_message_to_json_string = "true";
```

--------------------------------------------------------------------------------
/doc/network-devices-conf-snip.md:
--------------------------------------------------------------------------------
## Table of Contents


* [Cisco-XR 7.4.1@NCS-540](#cisco-xr-741ncs-540)
* [Cisco-XE 17.06.01prd7@C8000V](#cisco-xe-170601prd7c8000v)
* [Cisco-NX-OS 10.2(2)@Nexus9000](#cisco-nx-os-1022nexus9000)
* [JunOS 20.4R3-S2.6@mx10003](#junos-204r3-s26mx10003)
* [Huawei VRP V800R021C10SPC300T@NE40E](#huawei-vrp-v800r021c10spc300tne40e)


## Cisco-XR 7.4.1@NCS-540
```SHELL
# Reference documentation: https://www.cisco.com/c/en/us/td/docs/routers/asr9000/software/asr9k-r7-0/telemetry/configuration/guide/b-telemetry-cg-asr9000-70x/b-telemetry-cg-asr9000-70x_chapter_010.html

telemetry model-driven strict-timer
telemetry model-driven destination-group COLLECTOR
telemetry model-driven destination-group COLLECTOR address-family ipv4 192.168.100.254 port 10001
telemetry model-driven destination-group COLLECTOR address-family ipv4 192.168.100.254 port 10001 encoding json
telemetry model-driven destination-group COLLECTOR address-family ipv4 192.168.100.254 port 10001 protocol grpc no-tls
telemetry model-driven sensor-group SENSOR
telemetry model-driven sensor-group SENSOR sensor-path openconfig-interfaces:interfaces
telemetry model-driven
telemetry model-driven subscription SUBSCRIPTION
telemetry model-driven subscription SUBSCRIPTION sensor-group-id SENSOR sample-interval 60000
telemetry model-driven subscription SUBSCRIPTION destination-id COLLECTOR
telemetry model-driven subscription SUBSCRIPTION source-interface Loopback0

```

## Cisco-XE 17.06.01prd7@C8000V
```SHELL
# Reference documentation: https://www.cisco.com/c/en/us/td/docs/ios-xml/ios/prog/configuration/173/b_173_programmability_cg/model_driven_telemetry.html

telemetry ietf subscription 1
!
encoding encode-kvgpb
filter xpath /oc-if:interfaces/oc-if:interface
source-address 192.168.100.100
stream yang-push
update-policy periodic 6000
receiver ip address 192.168.100.254 10001 protocol grpc-tcp

```

## Cisco-NX-OS 10.2(2)@Nexus9000
```SHELL
# Reference documentation: https://www.cisco.com/c/en/us/td/docs/dcn/nx-os/nexus9000/101x/programmability/cisco-nexus-9000-series-nx-os-programmability-guide-release-101x/m-n9k-model-driven-telemetry-101x.html

feature telemetry
!
feature openconfig
!
telemetry
  !
  destination-profile
    use-vrf vrf100
    source-interface Vlan100
  !
  destination-group 1
    ! encoding GPB-KV
    host 192.168.100.254 port 10001 protocol gRPC encoding GPB
    ! enabling gRPC dial-out
    grpc-async
  !
  sensor-group 1
    data-source YANG
    path openconfig-interfaces:interfaces
  !
  subscription 1
    dst-grp 1
    snsr-grp 1 sample-interval 60000
!
```

## JunOS 20.4R3-S2.6@mx10003
```SHELL
# Reference documentation: https://www.juniper.net/documentation/us/en/software/junos/interfaces-telemetry/topics/topic-map/telemetry-grpc-dialout-ta.html

set groups TLM services analytics streaming-server GRPC_SERVER remote-address 192.168.100.254
set groups TLM services analytics streaming-server GRPC_SERVER remote-port 10001
!
set groups TLM services analytics export-profile GRPC_PROFILE local-address 192.168.100.100
set groups TLM services analytics export-profile GRPC_PROFILE reporting-rate 60
set groups TLM services analytics export-profile GRPC_PROFILE format json-gnmi
set groups TLM services analytics export-profile GRPC_PROFILE transport grpc
!
set groups TLM services analytics sensor OC_IF server-name GRPC_SERVER
set groups TLM services analytics sensor OC_IF export-name GRPC_PROFILE
set groups TLM services analytics sensor OC_IF resource /interfaces
```

## Huawei VRP V800R021C10SPC300T@NE40E
```SHELL
# Reference documentation: https://support.huawei.com/enterprise/en/doc/EDOC1100290800/862530fd/example-for-configuring-grpc-in-dial-out-mode

telemetry
#
sensor-group SENSOR
  sensor-path openconfig-interfaces:interfaces/interface/state
  sensor-path openconfig-interfaces:interfaces/interface/state/counters
#
destination-group TLM
  ipv4-address 192.168.100.254 port 10001 protocol grpc no-tls
#
subscription SUBSCRIPTION
  local-source-address ipv4 192.168.100.100
  sensor-group SENSOR
  destination-group TLM
#
```

--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
# Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
# Distributed under the MIT License (http://opensource.org/licenses/MIT)


ARG VERSION=stable-slim
ARG PLATFORM=linux/amd64

FROM --platform=${PLATFORM} debian:${VERSION}

LABEL Author="Salvatore Cuzzilla (Swisscom AG)"

RUN apt-get update && apt-get -y full-upgrade && \
    apt-get install -y \
    bash \
    git \
    cmake \
    build-essential \
    autoconf \
    libtool \
    pkg-config \
    libjsoncpp-dev \
    librdkafka-dev \
    libconfig++-dev \
    libspdlog-dev \
    libzmq3-dev

WORKDIR /tmp/mdt-dialout-collector
COPY scripts/grpc.sh scripts/
COPY scripts/mdt_dialout_collector.sh scripts/
COPY scripts/mdt_dialout_collector.conf /etc/opt/mdt-dialout-collector/

RUN ./scripts/grpc.sh
RUN rm -rf grpc

RUN ./scripts/mdt_dialout_collector.sh

ENTRYPOINT ["/opt/mdt-dialout-collector/bin/mdt_dialout_collector"]
CMD ["-V"]

--------------------------------------------------------------------------------
/docker/scripts/grpc.sh:
--------------------------------------------------------------------------------
#!/bin/bash


# Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
# Distributed under the MIT License (http://opensource.org/licenses/MIT)


set -o errexit
set -o nounset

git clone --recurse-submodules -b v1.45.2 --depth 1 --shallow-submodules https://github.com/grpc/grpc

export MY_INSTALL_DIR=$HOME/.local
mkdir -p $MY_INSTALL_DIR
export PATH="$MY_INSTALL_DIR/bin:$PATH"

cd grpc
mkdir -p cmake/build
pushd cmake/build

cmake -DgRPC_INSTALL=ON \
      -DCMAKE_BUILD_TYPE=Release \
      -DgRPC_BUILD_TESTS=OFF \
      -DCMAKE_INSTALL_PREFIX=$MY_INSTALL_DIR \
      -DABSL_PROPAGATE_CXX_STD=ON \
      -DgRPC_ABSL_PROVIDER=module \
      -DgRPC_CARES_PROVIDER=module \
      -DgRPC_PROTOBUF_PROVIDER=module \
      -DgRPC_RE2_PROVIDER=module \
      -DgRPC_SSL_PROVIDER=module \
      -DgRPC_ZLIB_PROVIDER=module \
      ../..

make -j`echo $(($(egrep 'processor' /proc/cpuinfo | wc -l) - 1))`

make install
popd

--------------------------------------------------------------------------------
/docker/scripts/mdt_dialout_collector.conf:
--------------------------------------------------------------------------------
iface = "eth0";
ipv4_socket_cisco = "0.0.0.0:10007";
bootstrap_servers = "localhost.fake:9093";
topic = "topic.fake";
security_protocol = "plaintext";
log_level = "0";

--------------------------------------------------------------------------------
/docker/scripts/mdt_dialout_collector.sh:
--------------------------------------------------------------------------------
#!/bin/bash


# Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
# Distributed under the MIT License (http://opensource.org/licenses/MIT)


set -o errexit
set -o nounset

cd /opt
git clone https://github.com/network-analytics/mdt-dialout-collector.git

export PATH="/root/.local/bin:$PATH"
cd mdt-dialout-collector;
mkdir build;
cd build;
cmake ../;
make -j

--------------------------------------------------------------------------------
/grpc-collector.pc.in:
--------------------------------------------------------------------------------
prefix=@prefix@
exec_prefix=@exec_prefix@
includedir=@includedir@
libdir=@libdir@

Name: mdt-dialout-collector
Description: Model-Driven Telemetry - Collecting metrics via gRPC dialout
URL: https://github.com/network-analytics/mdt-dialout-collector
Version: @VERSION@
Cflags: -I${includedir}/grpc_collector_bridge
Libs: -L${libdir} -lgrpc_collector

--------------------------------------------------------------------------------
/proto/Cisco/cisco_dialout.proto:
--------------------------------------------------------------------------------
syntax = "proto3";

package mdt_dialout;

service gRPCMdtDialout {
  rpc MdtDialout(stream MdtDialoutArgs) returns(stream MdtDialoutArgs) {};
}

message MdtDialoutArgs {
  int64 ReqId = 1;
  //string data = 2;
  bytes data = 2;
  string errors = 3;
}

--------------------------------------------------------------------------------
/proto/Cisco/cisco_telemetry.proto:
--------------------------------------------------------------------------------
/* ----------------------------------------------------------------------------
 * telemetry_bis.proto - Telemetry protobuf definitions
 *
 * August 2016
 *
 * Copyright (c) 2016 by Cisco Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ----------------------------------------------------------------------------
 */

syntax = "proto3";
package cisco_telemetry;

// Telemetry message is the outermost payload message used to stream
// telemetry in a Model Driven Telemetry (MDT) system. MDT provides a
// mechanism for an external entity to subscribe to a data set defined in
// a Yang model and receive periodic or event-based updates of the data
// set from an MDT-capable device.
message Telemetry {
  //
  // node_id_str is a string encoded unique node ID of the MDT-capable
  // device producing the message. (node_id_uuid alternative is not currently
  // produced in IOS-XR)
  oneof node_id {
    string node_id_str = 1;
    // bytes node_id_uuid = 2;
  }
  //
  // subscription_id_str is the name of the subscription against which
  // this content is being produced. (subscription_id alternative is not
  // currently produced in IOS-XR)
  oneof subscription {
    string subscription_id_str = 3;
    // uint32 subscription_id = 4;
  }
  //
  // sensor_path is not currently produced in IOS-XR
  // string sensor_path = 5;
  //
  // encoding_path is the Yang path leading to the content in this message.
  // The Yang tree encoded in the content section of this message is rooted
  // at the point described by the encoding_path.
  string encoding_path = 6;
  //
  // model_revision
  string model_version = 7;
  //
  // collection_id identifies messages belonging to a collection round.
  // Multiple message may be generated from a collection round.
  uint64 collection_id = 8;
  //
  // collection_start_time is the time when the collection identified by
  // the collection_id begins - encoded as milliseconds since the epoch.
  // If a single collection is spread over multiple Telemetry Messages,
  // collection_start_time may be encoded in the first Telemetry Message
  // for the collection only.
  uint64 collection_start_time = 9;
  //
  // msg_timestamp is the time when the data encoded in the Telemetry
  // message is generated - encoded as milliseconds since the epoch.
  uint64 msg_timestamp = 10;
  //
  // data_gpbkv contains the payload data if data is being encoded in the
  // self-describing GPB-KV format.
  repeated TelemetryField data_gpbkv = 11;
  //
  // data_gpb contains the payload data if data is being encoded as
  // serialised GPB messages.
  TelemetryGPBTable data_gpb = 12;
  //
  // collection_end_time is the timestamp when the last Telemetry message
  // for a collection has been encoded - encoded as milliseconds since the
  // epoch.  If a single collection is spread over multiple Telemetry
  // messages, collection_end_time is encoded in the last Telemetry Message
  // for the collection only.
  uint64 collection_end_time = 13;
  //
  // heartbeat_sequence_number is not currently produced in IOS-XR
  // uint64 heartbeat_sequence_number = 14; // not produced
}

//
// TelemetryField messages are used to export content in the self
// describing GPB KV form. The TelemetryField message is sufficient to
// decode telemetry messages for all models. KV-GPB encoding is very
// similar in concept, to JSON encoding
message TelemetryField {
  //
  // timestamp represents the starting time of the generation of data
  // starting from this key, value pair in this message - encoded as
  // milliseconds since the epoch. It is encoded when different from the
  // msg_timestamp in the containing Telemetry Message. This field can be
  // omitted if the value is the same as a TelemetryField message up the
  // hierarchy within the same Telemetry Message as well.
  uint64 timestamp = 1;
  //
  // name: string encoding of the name in the key, value pair. It is
  // the corresponding YANG element name.
  string name = 2;
  //
  // value_by_type, if present, for the corresponding YANG element
  // represented by the name field in the same TelemetryField message. The
  // value is encoded to the matching type as defined in the YANG model.
  // YANG models often define new types (derived types) using one or more
  // base types. The types included in the oneof grouping is sufficient to
  // represent such derived types. Derived types represented as a Yang
  // container are encoded using the nesting primitive defined in this
  // encoding proposal.
  oneof value_by_type {
    bytes bytes_value = 4;
    string string_value = 5;
    bool bool_value = 6;
    uint32 uint32_value = 7;
    uint64 uint64_value = 8;
    sint32 sint32_value = 9;
    sint64 sint64_value = 10;
    double double_value = 11;
    float float_value = 12;
  }
  //
  // The Yang model may include nesting (e.g hierarchy of containers). The
  // next level of nesting, if present, is encoded, starting from fields.
  repeated TelemetryField fields = 15;
  // set only for delete event
  bool delete = 16;
}

// TelemetryGPBTable contains a repeated number of TelemetryRowGPB,
// each of which represents content from a subtree instance in the
// the YANG model. For example; a TelemetryGPBTable might contain
// the interface statistics of a collection of interfaces.
message TelemetryGPBTable {
  repeated TelemetryRowGPB row = 1;
}

//
// TelemetryRowGPB, in conjunction with the Telemetry encoding_path and
// model_version, unambiguously represents the root of a subtree in
// the YANG model, and content from that subtree encoded in serialised
// GPB messages. For example; a TelemetryRowGPB might contain the
// interface statistics of one interface. Per encoding-path .proto
// messages are required to decode keys/content pairs below.
message TelemetryRowGPB {
  //
  // timestamp at which the data for this instance of the TelemetryRowGPB
  // message was generated by an MDT-capable device - encoded as
  // milliseconds since the epoch.  When included, this is typically
  // different from the msg_timestamp in the containing Telemetry message.
  uint64 timestamp = 1;
  //
  // keys: if the encoding-path includes one or more list elements, and/or
  // ends in a list element, the keys field is a GPB encoded message that
  // contains the sequence of key values for each such list element in the
  // encoding-path traversed starting from the root. The set of keys
  // unambiguously identifies the instance of data encoded in the
  // TelemetryRowGPB message. Corresponding protobuf message definition will
  // be required to decode the byte stream. The encoding_path field in
  // Telemetry message, together with model_version field should be
  // sufficient to identify the corresponding protobuf message.
  bytes keys = 10;
  //
  // content: the content field is a GPB encoded message that contains the
  // data for the corresponding encoding-path. A separate decoding pass
  // would be performed by consumer with the content field as a GPB message
  // and the matching .proto used to decode the message. Corresponding
  // protobuf message definition will be required to decode the byte
  // stream. The encoding_path field in Telemetry message, together with
  // model_version field should be sufficient to identify the corresponding
  // protobuf message. The decoded combination of keys (when present) and
  // content, unambiguously represents an instance of the data set, as
  // defined in the Yang model, identified by the encoding-path in the
  // containing Telemetry message.
  bytes content = 11;
}

--------------------------------------------------------------------------------
/proto/Huawei/huawei_dialout.proto:
--------------------------------------------------------------------------------
syntax = "proto3";

package huawei_dialout;

service gRPCDataservice {
  rpc dataPublish(stream serviceArgs) returns(stream serviceArgs) {};
}

message serviceArgs {
  int64 ReqId = 1;
  oneof MessageData {
    bytes data = 2;
    string data_json = 4;
  }
  string errors = 3;
}

--------------------------------------------------------------------------------
/proto/Huawei/huawei_telemetry.proto:
--------------------------------------------------------------------------------
syntax = "proto3"; //The .proto file version is defined as v3.

package huawei_telemetry; //The package name is telemetry.

message Telemetry { //Telemetry message structure definition.
  string node_id_str = 1; //Device name.
  string subscription_id_str = 2; //Subscription name during static subscription configuration.
  string sensor_path = 3; //Subscription path.
  string proto_path = 13; //Message path for the sampling path in the proto file.
  uint64 collection_id = 4; //Sampling round.
  uint64 collection_start_time = 5; //Start time of a sampling round.
  uint64 msg_timestamp = 6; //Timestamp when the current message is generated.
  TelemetryGPBTable data_gpb = 7; //Indicates that the data carried is defined in TelemetryGPBTable.
  uint64 collection_end_time = 8; //End time of a sampling round.
  uint32 current_period = 9; //Sampling precision, in milliseconds.
  string except_desc = 10; //Exception description. It is used to report exception information when a sampling exception occurs.
  string product_name = 11; //Product name.
--------------------------------------------------------------------------------
/proto/Huawei/huawei_dialout.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | package huawei_dialout;
4 | 
5 | service gRPCDataservice {
6 | rpc dataPublish(stream serviceArgs) returns(stream serviceArgs) {};
7 | }
8 | 
9 | message serviceArgs {
10 | int64 ReqId = 1;
11 | oneof MessageData {
12 | bytes data = 2;
13 | string data_json = 4;
14 | }
15 | string errors = 3;
16 | }
17 | 
--------------------------------------------------------------------------------
/proto/Huawei/huawei_telemetry.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3"; //The .proto file version is defined as v3.
2 | 
3 | package huawei_telemetry; //The package name is huawei_telemetry.
4 | 
5 | message Telemetry { //Telemetry message structure definition.
6 | string node_id_str = 1; //Device name.
7 | string subscription_id_str = 2; //Subscription name during static subscription configuration.
8 | string sensor_path = 3; //Subscription path.
9 | string proto_path = 13; //Message path for the sampling path in the proto file.
10 | uint64 collection_id = 4; //Sampling round.
11 | uint64 collection_start_time = 5; //Start time of a sampling round.
12 | uint64 msg_timestamp = 6; //Timestamp when the current message is generated.
13 | TelemetryGPBTable data_gpb = 7; //Indicates that the data carried is defined in TelemetryGPBTable.
14 | uint64 collection_end_time = 8; //End time of a sampling round.
15 | uint32 current_period = 9; //Sampling precision, in milliseconds.
16 | string except_desc = 10; //Exception description. It is used to report exception information when a sampling exception occurs.
17 | string product_name = 11; //Product name.
18 | enum Encoding {
19 | Encoding_GPB = 0; //GPB encoding format.
20 | Encoding_JSON = 1; //JSON encoding format.
21 | };
22 | Encoding encoding = 12; //Data encoding format. If the GPB encoding format is used, the data_gpb field is valid. Otherwise, the data_str field is valid.
23 | string data_str = 14; //This field is valid only when a non-GPB encoding format is used.
24 | string ne_id = 15; //Unique ID of an NE. In the gateway scenario, this parameter is used to identify the NE to which data belongs.
25 | string software_version = 16; //Software version number.
26 | }
27 | 
28 | message TelemetryGPBTable { //TelemetryGPBTable message structure definition.
29 | repeated TelemetryRowGPB row = 1; //Array definition. Its members are TelemetryRowGPB structures.
30 | repeated DataPath delete = 2; //Deleted data paths.
31 | Generator generator = 3; //Data source description. This applies to the OnChange+ service that requires high reliability.
32 | }
33 | 
34 | message Generator {
35 | uint64 generator_id = 1; //Data source ID. Multiple data sources can provide data concurrently and maintain their own reliability.
36 | uint32 generator_sn = 2; //Message sequence number. The sequence numbers of messages sent by each data source must be consecutive. If the sequence numbers are not consecutive, data out-of-synchronization occurs; in this case the collector should support automatic disconnection and reconnection. The value ranges from 0 to 0xFFFFFFFF and may wrap around.
37 | bool generator_sync = 3; //Data source synchronization: indicates whether to perform OnChange full data synchronization. In addition, if the value is true and no data is contained, the synchronization is complete.
38 | }
39 | 
40 | message TelemetryRowGPB {
41 | uint64 timestamp = 1; //Timestamp of the current sampling instance.
42 | bytes content = 11; //Sampling instance data carried. The sensor_path field must be considered to determine which .proto file is used for encoding.
43 | }
44 | 
45 | message DataPath {
46 | uint64 timestamp = 1; //Timestamp of the current sampling instance.
47 | Path path = 2; //Data tree node, which contains only the data path and key field information.
48 | }
49 | 
50 | message Path {
51 | repeated PathElem node = 1; //Data tree node, which contains only the data path and key field information.
52 | }
53 | 
54 | message PathElem {
55 | string name = 1; //Name of the data tree node.
56 | map<string, string> key = 2; //Key field name and value mapping table of the data tree node.
57 | }
58 | 
59 | message TelemetrySelfDefinedEvent {
60 | string path = 1; //Sampling path that triggers the customized event, which describes the method of parsing the content.
61 | string proto_path = 13; //Message path for the sampling path in the proto file.
62 | uint32 level = 2; //Level of the user-defined event.
63 | string description = 3; //Description of the user-defined event.
64 | string fieldName = 4; //Name of the field that triggers the customized event.
65 | uint32 fieldValue = 5; //Value of the field that triggers the customized event.
66 | TelemetrySelfDefineThresTable data_threshold = 6; //Threshold filter criteria when the customized event is triggered.
67 | enum ThresholdRelation {
68 | ThresholdRelation_INVALID = 0; //The relationship between thresholds is not configured.
69 | ThresholdRelation_AND = 1; //The relationship between thresholds is And.
70 | ThresholdRelation_OR = 2; //The relationship between thresholds is Or.
71 | }
72 | ThresholdRelation thresholdRelation = 7; //Relationship between threshold filter criteria when the customized event is triggered.
73 | bytes content = 8; //Sampled data that triggers the customized event.
74 | }
75 | 
76 | message TelemetrySelfDefineThresTable {
77 | repeated TelemetryThreshold row = 1; //Multiple thresholds are included.
78 | }
79 | 
80 | message TelemetryThreshold {
81 | uint32 thresholdValue = 1; //Delivered threshold.
82 | enum ThresholdOpType {
83 | ThresholdOpType_EQ = 0; //The actual value in the data sent is equal to the configured data threshold.
84 | ThresholdOpType_GT = 1; //The actual value in the data sent is greater than the configured data threshold.
85 | ThresholdOpType_GE = 2; //The actual value in the data sent is greater than or equal to the configured data threshold.
86 | ThresholdOpType_LT = 3; //The actual value in the data sent is less than the configured data threshold.
87 | ThresholdOpType_LE = 4; //The actual value in the data sent is less than or equal to the configured data threshold.
88 | }
89 | ThresholdOpType thresholdOpType = 2; //Threshold comparison operator on the device.
90 | }
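A hedged sketch of how a consumer might dispatch on the encoding field after unwrapping a Huawei dial-out serviceArgs payload. It assumes the serviceArgs data bytes carry a serialized huawei_telemetry::Telemetry message; the repository's real handling is in HuaweiStream (src/core/) and DataManipulation (src/dataManipulation/).

```CPP
// Hedged sketch - NOT the collector's implementation.
#include <iostream>
#include <string>
#include "proto/Huawei/huawei_telemetry.pb.h"

bool DecodeHuawei(const std::string &data)
{
    huawei_telemetry::Telemetry tlm;
    if (!tlm.ParseFromString(data))
        return false;
    if (tlm.encoding() == huawei_telemetry::Telemetry::Encoding_GPB) {
        // Each row's content needs the per-sensor-path .proto to decode fully.
        for (const auto &row : tlm.data_gpb().row())
            std::cout << tlm.sensor_path() << " @ " << row.timestamp() << "\n";
    } else {
        std::cout << tlm.data_str() << "\n";  // non-GPB (e.g. JSON) payload
    }
    return true;
}
```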
--------------------------------------------------------------------------------
/proto/Juniper/juniper_dialout.proto:
--------------------------------------------------------------------------------
1 | 
2 | syntax = "proto3";
3 | 
4 | import "juniper_gnmi.proto";
5 | 
6 | // New gRPC service definition, with DialOutSubscriber as the RPC name.
7 | service Subscriber {
8 | /*
9 | Phase 1: Subscriber allows the target to send telemetry updates (in the form of SubscribeResponse messages,
10 | which have the same semantics as in the gNMI Subscribe RPC) to a client. The target will stream the sensor
11 | configuration that is configured out of band for this client.
12 | 
13 | Phase 2: Optionally, the client may return a SubscribeRequest message in response to the dial-out connection from the target.
14 | In this case, the client may augment the set of subscriptions that are to be published by the target. This SubscribeRequest
15 | message has the same semantics as in the gNMI Subscribe RPC.
16 | 
17 | If the client specifies neither option, the target waits indefinitely until the sensor subscription is made.
18 | 
19 | The configuration of subscriptions associated with the Subscriber RPC may be through the OpenConfig telemetry configuration and operational state model:
20 | https://github.com/openconfig/public/blob/master/release/models/telemetry/openconfig-telemetry.yang
21 | */
22 | 
23 | rpc DialOutSubscriber(stream juniper_gnmi.SubscribeResponse) returns (stream juniper_gnmi.SubscribeRequest);
24 | 
25 | }
26 | 
--------------------------------------------------------------------------------
/proto/Juniper/juniper_gnmi_ext.proto:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2018 Google Inc. All Rights Reserved.
3 | //
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 | //
8 | //    http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 | //
16 | syntax = "proto3";
17 | 
18 | // Package gnmi_ext defines a set of extension messages which can be optionally
19 | // included with the request and response messages of gNMI RPCs. A set of
20 | // well-known extensions are defined within this file, along with a registry for
21 | // extensions defined outside of this package.
22 | package juniper_gnmi_ext;
23 | 
24 | // The Extension message contains a single gNMI extension.
25 | message Extension {
26 | oneof ext {
27 | RegisteredExtension registered_ext = 1; // A registered extension.
28 | // Well known extensions.
29 | MasterArbitration master_arbitration = 2; // Master arbitration extension.
30 | }
31 | }
32 | 
33 | // The RegisteredExtension message defines an extension which is defined outside
34 | // of this file.
35 | message RegisteredExtension {
36 | ExtensionID id = 1; // The unique ID assigned to this extension.
37 | bytes msg = 2; // The binary-marshalled protobuf extension payload.
38 | }
39 | 
40 | // ExtensionID is an enumeration acting as a registry for extensions
41 | // defined by external sources.
42 | enum ExtensionID {
43 | EID_UNSET = 0;
44 | // New extensions are to be defined within this enumeration - their definition
45 | // MUST link to a reference describing their implementation.
46 | 
47 | // Juniper Telemetry header
48 | EID_JUNIPER_TELEMETRY_HEADER = 1;
49 | 
50 | // An experimental extension that may be used during prototyping of a new
51 | // extension.
52 | EID_EXPERIMENTAL = 999;
53 | }
54 | 
55 | // MasterArbitration is used to select the master among multiple gNMI clients
56 | // with the same Roles. The client with the largest election_id is honored as
57 | // the master.
58 | // The document about gNMI master arbitration can be found at
59 | // https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-master-arbitration.md
60 | message MasterArbitration {
61 | Role role = 1;
62 | Uint128 election_id = 2;
63 | }
64 | 
65 | // Representation of an unsigned 128-bit integer.
66 | message Uint128 {
67 | uint64 high = 1;
68 | uint64 low = 2;
69 | }
70 | 
71 | // There can be one master for each role. The role is identified by its id.
72 | message Role {
73 | string id = 1;
74 | // More fields can be added if needed, for example, to specify what paths the
75 | // role can read/write.
76 | }
77 | 
--------------------------------------------------------------------------------
/proto/Juniper/juniper_telemetry.proto:
--------------------------------------------------------------------------------
1 | //
2 | // Copyrights (c) 2015, 2016, Juniper Networks, Inc.
3 | // All rights reserved.
4 | //
5 | 
6 | //
7 | // Licensed to the Apache Software Foundation (ASF) under one
8 | // or more contributor license agreements. See the NOTICE file
9 | // distributed with this work for additional information
10 | // regarding copyright ownership. The ASF licenses this file
11 | // to you under the Apache License, Version 2.0 (the
12 | // "License"); you may not use this file except in compliance
13 | // with the License. You may obtain a copy of the License at
14 | //
15 | //   http://www.apache.org/licenses/LICENSE-2.0
16 | //
17 | // Unless required by applicable law or agreed to in writing,
18 | // software distributed under the License is distributed on an
19 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
20 | // KIND, either express or implied. See the License for the
21 | // specific language governing permissions and limitations
22 | // under the License.
23 | //
24 | 
25 | //
26 | // This file defines the top level message used for all Juniper
27 | // Telemetry packets encoded to the protocol buffer format.
28 | // The top level message is TelemetryStream.
29 | //
30 | 
31 | syntax = "proto2";
32 | 
33 | import "google/protobuf/descriptor.proto";
34 | 
35 | extend google.protobuf.FieldOptions {
36 | optional TelemetryFieldOptions telemetry_options = 1024;
37 | }
38 | 
39 | message TelemetryFieldOptions {
40 | optional bool is_key = 1;
41 | optional bool is_timestamp = 2;
42 | optional bool is_counter = 3;
43 | optional bool is_gauge = 4;
44 | }
45 | 
46 | message TelemetryStream {
47 | // router hostname
48 | // (or, just in the case of legacy (microkernel) PFEs, the IP address)
49 | required string system_id = 1 [(telemetry_options).is_key = true];
50 | 
51 | // line card / RE (slot number). For RE, it will be 65535
52 | optional uint32 component_id = 2 [(telemetry_options).is_key = true];
53 | 
54 | // PFE (if applicable)
55 | optional uint32 sub_component_id = 3 [(telemetry_options).is_key = true];
56 | 
57 | // Overload sensor name with "sensor name, internal path, external path
58 | // and component" separated by ":". For RE sensors, component will be
59 | // daemon-name and for PFE sensors it will be "PFE".
60 | optional string sensor_name = 4 [(telemetry_options).is_key = true];
61 | 
62 | // sequence number, monotonically increasing for each
63 | // system_id, component_id, sub_component_id + sensor_name.
64 | optional uint32 sequence_number = 5;
65 | 
66 | // timestamp (milliseconds since 00:00:00 UTC 1/1/1970)
67 | optional uint64 timestamp = 6 [(telemetry_options).is_timestamp = true];
68 | 
69 | // major version
70 | optional uint32 version_major = 7;
71 | 
72 | // minor version
73 | optional uint32 version_minor = 8;
74 | 
75 | optional IETFSensors ietf = 100;
76 | 
77 | optional EnterpriseSensors enterprise = 101;
78 | }
79 | 
80 | message IETFSensors {
81 | extensions 1 to max;
82 | }
83 | 
84 | message EnterpriseSensors {
85 | extensions 1 to max;
86 | }
87 | 
88 | extend EnterpriseSensors {
89 | // re-use IANA assigned numbers
90 | optional JuniperNetworksSensors juniperNetworks = 2636;
91 | }
92 | 
93 | message JuniperNetworksSensors {
94 | extensions 1 to max;
95 | }
96 | 
--------------------------------------------------------------------------------
/proto/Juniper/juniper_telemetry_header.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | // Present as the first gNMI update in all packets
4 | message GnmiJuniperTelemetryHeader {
5 | // router name:export IP address
6 | string system_id = 1;
7 | 
8 | // line card / RE (slot number)
9 | uint32 component_id = 2;
10 | 
11 | // PFE (if applicable)
12 | uint32 sub_component_id = 3;
13 | 
14 | // Path contains useful information on identifying which sensor,
15 | // resource string and producer the data corresponds to.
16 | // "internal_sensor_name:internal_path:external_path:component"
17 | // e.g.:
18 | // "sensor_1006:/junos/system/linecard/cpu/memory/:/junos/system/linecard/cpu/memory/:PFE"
19 | string path = 4;
20 | 
21 | // Sequence number, monotonically increasing for each
22 | // system_id, component_id, sub_component_id + path.
23 | uint64 sequence_number = 5;
24 | }
25 | 
26 | 
27 | 
--------------------------------------------------------------------------------
/proto/Juniper/juniper_telemetry_header_extension.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | // Present as the first gNMI update in all packets
4 | message GnmiJuniperTelemetryHeaderExtension {
5 | // router name:export IP address
6 | string system_id = 1;
7 | 
8 | // line card / RE (slot number)
9 | uint32 component_id = 2;
10 | 
11 | // PFE (if applicable)
12 | uint32 sub_component_id = 3;
13 | 
14 | // Internal sensor name
15 | string sensor_name = 4;
16 | 
17 | // Sensor path in the subscribe request
18 | string subscribed_path = 5;
19 | // Internal sensor path in junos
20 | string streamed_path = 6;
21 | 
22 | string component = 7;
23 | 
24 | // Sequence number, monotonically increasing for each
25 | uint64 sequence_number = 8;
26 | 
27 | // Payload get timestamp in milliseconds
28 | int64 payload_get_timestamp = 9;
29 | 
30 | // Stream creation timestamp in milliseconds
31 | int64 stream_creation_timestamp = 10;
32 | 
33 | // Event timestamp in milliseconds
34 | int64 event_timestamp = 11;
35 | 
36 | // Export timestamp in milliseconds
37 | int64 export_timestamp = 12;
38 | 
39 | // Subsequence number
40 | uint64 sub_sequence_number = 13;
41 | 
42 | // End-of-message (EOM) marker
43 | bool eom = 14;
44 | }
--------------------------------------------------------------------------------
/proto/Makefile.am:
--------------------------------------------------------------------------------
1 | MAINTAINERCLEANFILES = Makefile.in
2 | 
3 | CLEANFILES = .proto_mark
4 | 
5 | .PHONY: dirs grpc_cisco grpc_huawei grpc_juniper grpc_nokia openconfig _all
6 | 
7 | #
8 | # GRPC code generation
9 | #
10 | 
11 | PROTOC=protoc
12 | 
13 | MD5SUM_HASH=`find ${top_builddir}/src/proto -type f | sort | xargs md5sum | md5sum`
14 | 
15 | all:
16 | 	@if [ !
-f .proto_mark ] || [ "`cat .proto_mark`" != "$(MD5SUM_HASH)" ]; then \ 17 | make _all; \ 18 | else \ 19 | echo "Autogenerated files up-to-date..."; \ 20 | fi 21 | 22 | _all: dirs grpc_cisco grpc_huawei grpc_juniper grpc_nokia openconfig 23 | echo "$(MD5SUM_HASH)" > .proto_mark 24 | dirs: 25 | mkdir -p ${top_builddir}/src/proto 26 | mkdir -p ${top_builddir}/src/proto/Cisco 27 | mkdir -p ${top_builddir}/src/proto/Huawei 28 | mkdir -p ${top_builddir}/src/proto/Juniper 29 | mkdir -p ${top_builddir}/src/proto/Nokia 30 | mkdir -p ${top_builddir}/src/proto/OpenConfig 31 | 32 | grpc_cisco: 33 | $(PROTOC) -I ${top_srcdir}/proto/Cisco/ \ 34 | --cpp_out ${top_builddir}/src/proto/Cisco \ 35 | --grpc_out ${top_builddir}/src/proto/Cisco \ 36 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" cisco_dialout.proto 37 | $(PROTOC) -I ${top_srcdir}/proto/Cisco/ \ 38 | --cpp_out ${top_builddir}/src/proto/Cisco \ 39 | --grpc_out ${top_builddir}/src/proto/Cisco \ 40 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" cisco_telemetry.proto 41 | grpc_huawei: 42 | $(PROTOC) -I ${top_srcdir}/proto/Huawei/ \ 43 | --cpp_out ${top_builddir}/src/proto/Huawei \ 44 | --grpc_out ${top_builddir}/src/proto/Huawei \ 45 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" huawei_dialout.proto 46 | $(PROTOC) -I ${top_srcdir}/proto/Huawei/ \ 47 | --cpp_out ${top_builddir}/src/proto/Huawei \ 48 | --grpc_out ${top_builddir}/src/proto/Huawei \ 49 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" huawei_telemetry.proto 50 | 51 | grpc_juniper: 52 | $(PROTOC) -I ${top_srcdir}/proto/Juniper/ \ 53 | --cpp_out ${top_builddir}/src/proto/Juniper \ 54 | --grpc_out ${top_builddir}/src/proto/Juniper \ 55 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" juniper_dialout.proto 56 | $(PROTOC) -I ${top_srcdir}/proto/Juniper/ \ 57 | --cpp_out ${top_builddir}/src/proto/Juniper \ 58 | --grpc_out ${top_builddir}/src/proto/Juniper \ 59 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" juniper_gnmi.proto 60 | $(PROTOC) -I ${top_srcdir}/proto/Juniper/ \ 61 | --cpp_out ${top_builddir}/src/proto/Juniper \ 62 | --grpc_out ${top_builddir}/src/proto/Juniper \ 63 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" juniper_telemetry.proto 64 | $(PROTOC) -I ${top_srcdir}/proto/Juniper/ \ 65 | --cpp_out ${top_builddir}/src/proto/Juniper \ 66 | --grpc_out ${top_builddir}/src/proto/Juniper \ 67 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" juniper_telemetry_header_extension.proto 68 | $(PROTOC) -I ${top_srcdir}/proto/Juniper/ \ 69 | --cpp_out ${top_builddir}/src/proto/Juniper \ 70 | --grpc_out ${top_builddir}/src/proto/Juniper \ 71 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" juniper_gnmi_ext.proto 72 | 73 | grpc_nokia: 74 | $(PROTOC) -I ${top_srcdir}/proto/Nokia/ \ 75 | --cpp_out ${top_builddir}/src/proto/Nokia \ 76 | --grpc_out ${top_builddir}/src/proto/Nokia \ 77 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" nokia_dialout.proto 78 | $(PROTOC) -I ${top_srcdir}/proto/Nokia/ \ 79 | --cpp_out ${top_builddir}/src/proto/Nokia \ 80 | --grpc_out ${top_builddir}/src/proto/Nokia \ 81 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" nokia_gnmi.proto 82 | $(PROTOC) -I ${top_srcdir}/proto/Nokia/ \ 83 | --cpp_out ${top_builddir}/src/proto/Nokia \ 84 | --grpc_out ${top_builddir}/src/proto/Nokia \ 85 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" nokia_gnmi_ext.proto 86 | 87 | openconfig: 88 | $(PROTOC) -I ${top_srcdir}/proto/OpenConfig/ \ 89 | 
--cpp_out ${top_builddir}/src/proto/OpenConfig \
90 | --grpc_out ${top_builddir}/src/proto/OpenConfig \
91 | --plugin=protoc-gen-grpc="/usr/local/bin/grpc_cpp_plugin" openconfig_interfaces.proto
--------------------------------------------------------------------------------
/proto/Nokia/nokia_dialout.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | import "nokia_gnmi.proto";
3 | 
4 | package Nokia.SROS;
5 | 
6 | service DialoutTelemetry {
7 | rpc Publish(stream nokia_gnmi.SubscribeResponse) returns (stream PublishResponse);
8 | }
9 | message PublishResponse {}
--------------------------------------------------------------------------------
/proto/Nokia/nokia_gnmi_ext.proto:
--------------------------------------------------------------------------------
1 | //
2 | // Copyright 2018 Google Inc. All Rights Reserved.
3 | //
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 | //
8 | //    http://www.apache.org/licenses/LICENSE-2.0
9 | //
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 | //
16 | syntax = "proto3";
17 | 
18 | // Package gnmi_ext defines a set of extension messages which can be optionally
19 | // included with the request and response messages of gNMI RPCs. A set of
20 | // well-known extensions are defined within this file, along with a registry for
21 | // extensions defined outside of this package.
22 | package nokia_gnmi_ext;
23 | 
24 | option go_package = "github.com/openconfig/gnmi/proto/gnmi_ext";
25 | 
26 | // The Extension message contains a single gNMI extension.
27 | message Extension {
28 | oneof ext {
29 | RegisteredExtension registered_ext = 1; // A registered extension.
30 | // Well known extensions.
31 | MasterArbitration master_arbitration = 2; // Master arbitration extension.
32 | History history = 3; // History extension.
33 | }
34 | }
35 | 
36 | // The RegisteredExtension message defines an extension which is defined outside
37 | // of this file.
38 | message RegisteredExtension {
39 | ExtensionID id = 1; // The unique ID assigned to this extension.
40 | bytes msg = 2; // The binary-marshalled protobuf extension payload.
41 | }
42 | 
43 | // ExtensionID is an enumeration acting as a registry for extensions
44 | // defined by external sources.
45 | enum ExtensionID {
46 | EID_UNSET = 0;
47 | // New extensions are to be defined within this enumeration - their definition
48 | // MUST link to a reference describing their implementation.
49 | 
50 | // An experimental extension that may be used during prototyping of a new
51 | // extension.
52 | EID_EXPERIMENTAL = 999;
53 | }
54 | 
55 | // MasterArbitration is used to select the master among multiple gNMI clients
56 | // with the same Roles. The client with the largest election_id is honored as
57 | // the master.
58 | // The document about gNMI master arbitration can be found at
59 | // https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-master-arbitration.md
60 | message MasterArbitration {
61 | Role role = 1;
62 | Uint128 election_id = 2;
63 | }
64 | 
65 | // Representation of an unsigned 128-bit integer.
66 | message Uint128 {
67 | uint64 high = 1;
68 | uint64 low = 2;
69 | }
70 | 
71 | // There can be one master for each role. The role is identified by its id.
72 | message Role {
73 | string id = 1;
74 | // More fields can be added if needed, for example, to specify what paths the
75 | // role can read/write.
76 | }
77 | 
78 | // The History extension allows clients to request historical data. Its
79 | // spec can be found at
80 | // https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md
81 | message History {
82 | oneof request {
83 | int64 snapshot_time = 1; // Nanoseconds since the epoch
84 | TimeRange range = 2;
85 | }
86 | }
87 | 
88 | message TimeRange {
89 | int64 start = 1; // Nanoseconds since the epoch
90 | int64 end = 2; // Nanoseconds since the epoch
91 | }
--------------------------------------------------------------------------------
/ptm/README.md:
--------------------------------------------------------------------------------
1 | #### Data-stream enrichment: node_id / platform_id
2 | 
3 | ##### Example of pmacct's pretag-map (PTM) file format
4 | ```TEXT
5 | set_label=nkey%daisy-router-01%pkey%Cisco-XR ip=1.1.1.1/32
6 | set_label=nkey%daisy-router-02%pkey%Cisco-XE ip=2.2.2.2/32
7 | set_label=nkey%daisy-router-03%pkey%Huawei-VRP ip=3.3.3.3/32
8 | set_label=nkey%daisy-router-04%pkey%JunOS ip=4.4.4.4/32
9 | ```
10 | 
11 | If it does not already exist, create a file "ptm/label_map.ptm" resembling the above format.
12 | The client IP address (ip field) is matched and the data-stream is enriched
13 | with the nkey value (node_id) and the pkey value (platform_id) from the set_label field, according to this data format:
14 | 
15 | ```JSON
16 | 
17 | "label": {
18 |   "node_id": "<nkey-value>",
19 |   "platform_id": "<pkey-value>"
20 | },
21 | ```
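For instance, given the first PTM row above, a record streamed from 1.1.1.1 would carry a label object like the following (illustrative sketch; the surrounding telemetry fields are omitted):

```JSON

"label": {
  "node_id": "daisy-router-01",
  "platform_id": "Cisco-XR"
},
```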
22 | 
23 | When new rows are added to the PTM, you can refresh the daemon seamlessly:
24 | 
25 | ```SHELL
26 | $ sudo kill -USR1 `cat /var/run/mdt_dialout_collector.pid`
27 | ```
28 | 
29 | 
--------------------------------------------------------------------------------
/src/Makefile.am:
--------------------------------------------------------------------------------
1 | MAINTAINERCLEANFILES = Makefile.in
2 | 
3 | SUBDIRS = \
4 | bridge \
5 | cfgWrapper \
6 | core \
7 | dataDelivery \
8 | dataManipulation \
9 | dataWrapper \
10 | proto \
11 | utils
12 | 
13 | #
14 | # Libs
15 | #
16 | lib_LTLIBRARIES = libgrpc_collector.la
17 | 
18 | libgrpc_collector_la_SOURCES =
19 | 
20 | libgrpc_collector_la_LIBADD = \
21 | bridge/libgrpc_collector_bridge.la \
22 | cfgWrapper/libgrpc_collector_cfg_wrapper.la \
23 | core/libgrpc_collector_core.la \
24 | dataDelivery/libgrpc_collector_data_delivery.la \
25 | dataManipulation/libgrpc_collector_data_manipulation.la \
26 | dataWrapper/libgrpc_collector_data_wrapper.la \
27 | proto/libgrpc_collector_proto.la \
28 | utils/libgrpc_collector_utils.la \
29 | $(DEPS)
30 | 
31 | # Distributable headers
32 | grpc_collector_includedir = $(includedir)/grpc_collector_bridge
33 | 
34 | grpc_collector_include_HEADERS = \
35 | bridge/grpc_collector_bridge.h
36 | 
37 | #
38 | # bin program
39 | #
40 | AM_CPPFLAGS = -I$(top_srcdir)/src/include/
41 | AM_LDFLAGS = "-Wl,--copy-dt-needed-entries"
42 | 
43 | DEPS = \
44 | $(GRPC_LIBS) \
45 | $(GRPCCPP_LIBS) \
46 | $(PROTOBUF_LIBS) \
47 | $(JSONCPP_LIBS) \
48 | $(KAFKA_LIBS) \
49 | $(KAFKACPP_LIBS) \
50 | $(SPDLOG_LIBS) \
51 | -lzmq \
52 | -lconfig++ \
53 | -lfmt \
54 | -lpthread
55 | 
56 | # Program name
57 | #bin_PROGRAMS = mdt_dialout_collector pmtelemetryd
58 | #
59 | #mdt_dialout_collector_SOURCES = mdt_dialout_collector.cc
60 | #mdt_dialout_collector_LDADD = libgrpc_collector.la
61 | #
62 | #pmtelemetryd_SOURCES = pmtelemetryd.c
63 | #pmtelemetryd_LDADD = libgrpc_collector.la
64 | 
--------------------------------------------------------------------------------
/src/bridge/Makefile.am:
--------------------------------------------------------------------------------
1 | MAINTAINERCLEANFILES = Makefile.in
2 | 
3 | noinst_LTLIBRARIES = libgrpc_collector_bridge.la
4 | 
5 | libgrpc_collector_bridge_la_SOURCES = grpc_collector_bridge.cc
6 | libgrpc_collector_bridge_la_CPPFLAGS = -I$(top_srcdir)/src/include/ -I$(top_builddir)/src/
7 | 
--------------------------------------------------------------------------------
/src/bridge/grpc_collector_bridge.h:
--------------------------------------------------------------------------------
1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT)
3 | 
4 | 
5 | #ifndef _GRPC_COLLECTOR_BRIDGE_H_
6 | #define _GRPC_COLLECTOR_BRIDGE_H_
7 | 
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | 
15 | 
16 | #ifdef __cplusplus
17 | extern "C" {
18 | #endif
19 | #define MAX_WORKERS 5
20 | 
21 | typedef struct {
22 | /* main */
23 | char *writer_id;
24 | char *iface;
25 | char *ipv4_socket_cisco;
26 | char *ipv4_socket_juniper;
27 | char *ipv4_socket_nokia;
28 | char *ipv4_socket_huawei;
29 | /* char *core_pid_folder; */
30 | /*
workers */
31 | char *cisco_workers;
32 | char *juniper_workers;
33 | char *nokia_workers;
34 | char *huawei_workers;
35 | /* replies */
36 | char *replies_cisco;
37 | char *replies_juniper;
38 | char *replies_nokia;
39 | char *replies_huawei;
40 | /* logging */
41 | char *syslog;
42 | char *syslog_facility;
43 | char *syslog_ident;
44 | char *console_log;
45 | char *spdlog_level;
46 | /* data-flow manipulation */
47 | char *enable_cisco_gpbkv2json;
48 | char *enable_cisco_message_to_json_string;
49 | char *enable_label_encode_as_map;
50 | char *enable_label_encode_as_map_ptm;
51 | } __attribute__ ((packed)) Options;
52 | 
53 | typedef struct {
54 | char *event_type;
55 | char *serialization;
56 | char *writer_id;
57 | char *telemetry_node;
58 | char *telemetry_port;
59 | char *telemetry_data;
60 | } __attribute__ ((packed)) grpc_payload;
61 | 
62 | extern Options *InitOptions();
63 | extern void InitGrpcPayload(grpc_payload **pload_, const char *event_type,
64 | const char *serialization, const char *writer_id,
65 | const char *telemetry_node, const char *telemetry_port,
66 | const char *telemetry_data);
67 | 
68 | extern void FreeOptions(Options *opts);
69 | extern void free_grpc_payload(grpc_payload *pload);
70 | 
71 | extern void start_grpc_dialout_collector(const char *cfg_path,
72 | const char *zmq_uri);
73 | extern void LoadOptions(const char *cfg_path,
74 | const char *zmq_uri);
75 | extern void *VendorThread(void *vendor);
76 | extern void LoadThreads(pthread_t *workers_vec, const char *ipv4_socket_str,
77 | const char *replies_str, const char *workers_str);
78 | 
79 | #ifdef __cplusplus
80 | }
81 | #endif
82 | 
83 | #endif
84 | 
85 | 
--------------------------------------------------------------------------------
/src/cfgWrapper/Makefile.am:
--------------------------------------------------------------------------------
1 | MAINTAINERCLEANFILES = Makefile.in
2 | 
3 | noinst_LTLIBRARIES = libgrpc_collector_cfg_wrapper.la
4 | 
5 | libgrpc_collector_cfg_wrapper_la_SOURCES = cfg_wrapper.cc
6 | libgrpc_collector_cfg_wrapper_la_CPPFLAGS = -I$(top_srcdir)/src/include/ -I$(top_builddir)/src/
7 | 
--------------------------------------------------------------------------------
/src/cfgWrapper/cfg_wrapper.cc:
--------------------------------------------------------------------------------
1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT)
3 | 
4 | 
5 | // mdt-dialout-collector Library headers
6 | #include "cfg_wrapper.h"
7 | 
8 | 
9 | bool CfgWrapper::BuildCfgWrapper(
10 | // main
11 | const std::string &writer_id,
12 | const std::string &iface,
13 | const std::string &ipv4_socket_cisco,
14 | const std::string &ipv4_socket_juniper,
15 | const std::string &ipv4_socket_nokia,
16 | const std::string &ipv4_socket_huawei,
17 | //const std::string &core_pid_folder
18 | const std::string &cisco_workers,
19 | const std::string &juniper_workers,
20 | const std::string &nokia_workers,
21 | const std::string &huawei_workers,
22 | const std::string &replies_cisco,
23 | const std::string &replies_juniper,
24 | const std::string &replies_nokia,
25 | const std::string &replies_huawei,
26 | // logging
27 | const std::string &syslog,
28 | const std::string &syslog_facility,
29 | const std::string &syslog_ident,
30 | const std::string &console_log,
31 | const std::string &spdlog_level,
32 | // data-manipulation
33 | const std::string &enable_cisco_gpbkv2json,
34 | const std::string &enable_cisco_message_to_json_string,
35 | const std::string &enable_label_encode_as_map,
36 | const std::string &enable_label_encode_as_map_ptm)
37 | {
38 | set_writer_id(writer_id);
39 | set_iface(iface);
40 | set_ipv4_socket_cisco(ipv4_socket_cisco);
41 | set_ipv4_socket_juniper(ipv4_socket_juniper);
42 | set_ipv4_socket_nokia(ipv4_socket_nokia);
43 | set_ipv4_socket_huawei(ipv4_socket_huawei);
44 | //set_core_pid_folder(core_pid_folder);
45 | set_cisco_workers(cisco_workers);
46 | set_juniper_workers(juniper_workers);
47 | set_nokia_workers(nokia_workers);
48 | set_huawei_workers(huawei_workers);
49 | set_replies_cisco(replies_cisco);
50 | set_replies_juniper(replies_juniper);
51 | set_replies_nokia(replies_nokia);
52 | set_replies_huawei(replies_huawei);
53 | set_syslog(syslog);
54 | set_syslog_facility(syslog_facility);
55 | set_syslog_ident(syslog_ident);
56 | set_console_log(console_log);
57 | set_spdlog_level(spdlog_level);
58 | set_enable_cisco_gpbkv2json(enable_cisco_gpbkv2json);
59 | set_enable_cisco_message_to_json_string(
60 | enable_cisco_message_to_json_string);
61 | set_enable_label_encode_as_map(enable_label_encode_as_map);
62 | set_enable_label_encode_as_map_ptm(enable_label_encode_as_map_ptm);
63 | 
64 | return true;
65 | }
66 | 
67 | 
--------------------------------------------------------------------------------
/src/cfgWrapper/cfg_wrapper.h:
--------------------------------------------------------------------------------
1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT)
3 | 
4 | 
5 | #ifndef _CFG_WRAPPER_H_
6 | #define _CFG_WRAPPER_H_
7 | 
8 | // C++ Standard Library headers
9 | #include
10 | #include
11 | // External Library headers
12 | 
13 | // mdt-dialout-collector Library headers
14 | #include "../utils/logs_handler.h"
15 | #include "../utils/cfg_handler.h"
16 | 
17 | 
18 | // C++ Class
19 | class CfgWrapper {
20 | public:
21 | CfgWrapper() {
22 | spdlog::get("multi-logger")->
23 | debug("constructor: CfgWrapper()"); };
24 | ~CfgWrapper() {
25 | spdlog::get("multi-logger")->
26 | debug("destructor: ~CfgWrapper()"); };
27 | 
28 | bool BuildCfgWrapper(
29 | // main
30 | const std::string &writer_id,
31 | const std::string &iface,
32 | const std::string &ipv4_socket_cisco,
33 | const std::string &ipv4_socket_juniper,
34 | const std::string &ipv4_socket_nokia,
35 | const std::string &ipv4_socket_huawei,
36 | //const std::string &core_pid_folder
37 | const std::string &cisco_workers,
38 | const std::string &juniper_workers,
39 | const std::string &nokia_workers,
40 | const std::string &huawei_workers,
41 | const std::string &replies_cisco,
42 | const std::string &replies_juniper,
43 | const std::string &replies_nokia,
44 | const std::string &replies_huawei,
45 | // logging
46 | const std::string &syslog,
47 | const std::string &syslog_facility,
48 | const std::string &syslog_ident,
49 | const std::string &console_log,
50 | const std::string &spdlog_level,
51 | // data-manipulation
52 | const std::string &enable_cisco_gpbkv2json,
53 | const std::string &enable_cisco_message_to_json_string,
54 | const std::string &enable_label_encode_as_map,
55 | const std::string &enable_label_encode_as_map_ptm
56 | );
57 | 
58 | // Setters
59 | void set_writer_id(const std::string &writer_id) {
60 | this->writer_id = writer_id;
61 | };
62 | void set_iface(const std::string &iface) {
63 | this->iface = iface;
64 | };
65 | void set_ipv4_socket_cisco(const std::string &ipv4_socket_cisco) {
66 | this->ipv4_socket_cisco = ipv4_socket_cisco;
67 | 
}; 68 | void set_ipv4_socket_juniper(const std::string &ipv4_socket_juniper) { 69 | this->ipv4_socket_juniper = ipv4_socket_juniper; 70 | }; 71 | void set_ipv4_socket_nokia(const std::string &ipv4_socket_nokia) { 72 | this->ipv4_socket_nokia = ipv4_socket_nokia; 73 | }; 74 | void set_ipv4_socket_huawei(const std::string &ipv4_socket_huawei) { 75 | this->ipv4_socket_huawei = ipv4_socket_huawei; 76 | }; 77 | //void set_core_pid_folder(const std::string &core_pid_folder) { 78 | // this->core_pid_folder = core_pid_folder; 79 | //}; 80 | void set_cisco_workers(const std::string &cisco_workers) { 81 | this->cisco_workers = cisco_workers; 82 | }; 83 | void set_juniper_workers(const std::string &juniper_workers) { 84 | this->juniper_workers = juniper_workers; 85 | }; 86 | void set_nokia_workers(const std::string &nokia_workers) { 87 | this->nokia_workers = nokia_workers; 88 | }; 89 | void set_huawei_workers(const std::string &huawei_workers) { 90 | this->huawei_workers = huawei_workers; 91 | }; 92 | void set_replies_cisco(const std::string &replies_cisco) { 93 | this->replies_cisco = replies_cisco; 94 | }; 95 | void set_replies_juniper(const std::string &replies_juniper) { 96 | this->replies_juniper = replies_juniper; 97 | }; 98 | void set_replies_nokia(const std::string &replies_nokia) { 99 | this->replies_nokia = replies_nokia; 100 | }; 101 | void set_replies_huawei(const std::string &replies_huawei) { 102 | this->replies_huawei = replies_huawei; 103 | }; 104 | void set_syslog(const std::string &syslog) { 105 | this->syslog = syslog; 106 | }; 107 | void set_syslog_facility(const std::string &syslog_facility) { 108 | this->syslog_facility = syslog_facility; 109 | }; 110 | void set_syslog_ident(const std::string &syslog_ident) { 111 | this->syslog_ident = syslog_ident; 112 | }; 113 | void set_console_log(const std::string &console_log) { 114 | this->console_log = console_log; 115 | }; 116 | void set_spdlog_level(const std::string &spdlog_level) { 117 | this->spdlog_level = spdlog_level; 118 | }; 119 | void set_enable_cisco_gpbkv2json( 120 | const std::string &enable_cisco_gpbkv2json) { 121 | this->enable_cisco_gpbkv2json = enable_cisco_gpbkv2json; 122 | }; 123 | void set_enable_cisco_message_to_json_string( 124 | const std::string &enable_cisco_message_to_json_string) { 125 | this->enable_cisco_message_to_json_string = 126 | enable_cisco_message_to_json_string; 127 | }; 128 | void set_enable_label_encode_as_map( 129 | const std::string &enable_label_encode_as_map) { 130 | this->enable_label_encode_as_map = enable_label_encode_as_map; 131 | }; 132 | void set_enable_label_encode_as_map_ptm( 133 | const std::string &enable_label_encode_as_map_ptm) { 134 | this->enable_label_encode_as_map_ptm = enable_label_encode_as_map_ptm; 135 | }; 136 | 137 | // Getters 138 | std::string &get_writer_id() { return this->writer_id; }; 139 | std::string &get_iface() { return this->iface; }; 140 | std::string &get_ipv4_socket_cisco() { return this->ipv4_socket_cisco; }; 141 | std::string &get_ipv4_socket_juniper() { 142 | return this->ipv4_socket_juniper; }; 143 | std::string &get_ipv4_socket_nokia() { return this->ipv4_socket_nokia; }; 144 | std::string &get_ipv4_socket_huawei() { return this->ipv4_socket_huawei; }; 145 | //std::string &get_core_pid_folder() { return this->core_pid_folder; }; 146 | std::string &get_cisco_workers() { return this->cisco_workers; }; 147 | std::string &get_juniper_workers() { return this->juniper_workers; }; 148 | std::string &get_nokia_workers() { return this->nokia_workers; }; 149 | 
std::string &get_huawei_workers() { return this->huawei_workers; };
150 | std::string &get_replies_cisco() { return this->replies_cisco; };
151 | std::string &get_replies_juniper() { return this->replies_juniper; };
152 | std::string &get_replies_nokia() { return this->replies_nokia; };
153 | std::string &get_replies_huawei() { return this->replies_huawei; };
154 | std::string &get_syslog() { return this->syslog; };
155 | std::string &get_syslog_facility() { return this->syslog_facility; };
156 | std::string &get_syslog_ident() { return this->syslog_ident; };
157 | std::string &get_console_log() { return this->console_log; };
158 | std::string &get_spdlog_level() { return this->spdlog_level; };
159 | std::string &get_enable_cisco_gpbkv2json() {
160 | return this->enable_cisco_gpbkv2json; };
161 | std::string &get_enable_cisco_message_to_json_string() {
162 | return this->enable_cisco_message_to_json_string; };
163 | std::string &get_enable_label_encode_as_map() {
164 | return this->enable_label_encode_as_map; };
165 | std::string &get_enable_label_encode_as_map_ptm() {
166 | return this->enable_label_encode_as_map_ptm; };
167 | private:
168 | // main
169 | std::string writer_id;
170 | std::string iface;
171 | std::string ipv4_socket_cisco;
172 | std::string ipv4_socket_juniper;
173 | std::string ipv4_socket_nokia;
174 | std::string ipv4_socket_huawei;
175 | //std::string core_pid_folder;
176 | std::string cisco_workers;
177 | std::string juniper_workers;
178 | std::string nokia_workers;
179 | std::string huawei_workers;
180 | std::string replies_cisco;
181 | std::string replies_juniper;
182 | std::string replies_nokia;
183 | std::string replies_huawei;
184 | // logging
185 | std::string syslog;
186 | std::string syslog_facility;
187 | std::string syslog_ident;
188 | std::string console_log;
189 | std::string spdlog_level;
190 | // data-manipulation
191 | std::string enable_cisco_gpbkv2json;
192 | std::string enable_cisco_message_to_json_string;
193 | std::string enable_label_encode_as_map;
194 | std::string enable_label_encode_as_map_ptm;
195 | };
196 | 
197 | #endif
198 | 
199 | 
--------------------------------------------------------------------------------
/src/core/Makefile.am:
--------------------------------------------------------------------------------
1 | MAINTAINERCLEANFILES = Makefile.in
2 | 
3 | noinst_LTLIBRARIES = libgrpc_collector_core.la
4 | 
5 | libgrpc_collector_core_la_SOURCES = mdt_dialout_core.cc
6 | libgrpc_collector_core_la_CPPFLAGS = -I$(top_srcdir)/src/include/ -I$(top_builddir)/src/
7 | 
--------------------------------------------------------------------------------
/src/core/mdt_dialout_core.h:
--------------------------------------------------------------------------------
1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT)
3 | 
4 | 
5 | #ifndef _SRV_H_
6 | #define _SRV_H_
7 | 
8 | // C++ Standard Library headers
9 | #include
10 | // External Library headers & System headers
11 | #include
12 | #include
13 | #include
14 | 
15 | #include "grpc/socket_mutator.h"
16 | // mdt-dialout-collector Library headers
17 | #include "proto/Cisco/cisco_dialout.grpc.pb.h"
18 | #include "proto/Huawei/huawei_dialout.grpc.pb.h"
19 | #include "proto/Juniper/juniper_dialout.grpc.pb.h"
20 | #include "proto/Juniper/juniper_gnmi.pb.h"
21 | #include "proto/Nokia/nokia_dialout.grpc.pb.h"
22 | #include "proto/Nokia/nokia_gnmi.pb.h"
23 | #include "../dataManipulation/data_manipulation.h"
24 | #include
"../dataWrapper/data_wrapper.h" 25 | #include "../dataDelivery/kafka_delivery.h" 26 | #include "../dataDelivery/zmq_delivery.h" 27 | #include "../utils/logs_handler.h" 28 | 29 | 30 | // Global visibility to be able to signal the refresh --> CSV/PTM from main 31 | extern std::unordered_map> label_map; 32 | 33 | class ServerBuilderOptionImpl: public grpc::ServerBuilderOption { 34 | public: 35 | ServerBuilderOptionImpl() { 36 | spdlog::get("multi-logger")-> 37 | debug("constructor: ServerBuilderOptionImpl()"); }; 38 | ~ServerBuilderOptionImpl() { 39 | spdlog::get("multi-logger")-> 40 | debug("destructor: ~ServerBuilderOptionImpl()"); }; 41 | virtual void UpdateArguments(grpc::ChannelArguments *args); 42 | virtual void UpdatePlugins( 43 | std::vector> *plugins) {} 44 | }; 45 | 46 | class CustomSocketMutator: public grpc_socket_mutator { 47 | public: 48 | CustomSocketMutator(); 49 | ~CustomSocketMutator() { 50 | spdlog::get("multi-logger")-> 51 | debug("destructor: ~CustomSocketMutator()"); }; 52 | bool bindtodevice_socket_mutator(int fd); 53 | void log_socket_options(int fd); 54 | }; 55 | 56 | class Srv final { 57 | public: 58 | ~Srv() 59 | { 60 | spdlog::get("multi-logger")->debug("destructor: ~Srv()"); 61 | cisco_server_->grpc::ServerInterface::Shutdown(); 62 | juniper_server_->grpc::ServerInterface::Shutdown(); 63 | nokia_server_->grpc::ServerInterface::Shutdown(); 64 | huawei_server_->grpc::ServerInterface::Shutdown(); 65 | cisco_cq_->grpc::ServerCompletionQueue::Shutdown(); 66 | juniper_cq_->grpc::ServerCompletionQueue::Shutdown(); 67 | nokia_cq_->grpc::ServerCompletionQueue::Shutdown(); 68 | huawei_cq_->grpc::ServerCompletionQueue::Shutdown(); 69 | } 70 | Srv() { spdlog::get("multi-logger")->debug("constructor: Srv()"); }; 71 | void CiscoBind(std::string cisco_srv_socket); 72 | void JuniperBind(std::string juniper_srv_socket); 73 | void NokiaBind(std::string nokia_srv_socket); 74 | void HuaweiBind(std::string huawei_srv_socket); 75 | 76 | private: 77 | mdt_dialout::gRPCMdtDialout::AsyncService cisco_service_; 78 | Subscriber::AsyncService juniper_service_; 79 | Nokia::SROS::DialoutTelemetry::AsyncService nokia_service_; 80 | huawei_dialout::gRPCDataservice::AsyncService huawei_service_; 81 | std::unique_ptr cisco_cq_; 82 | std::unique_ptr juniper_cq_; 83 | std::unique_ptr nokia_cq_; 84 | std::unique_ptr huawei_cq_; 85 | std::unique_ptr cisco_server_; 86 | std::unique_ptr juniper_server_; 87 | std::unique_ptr nokia_server_; 88 | std::unique_ptr huawei_server_; 89 | void CiscoFsmCtrl(); 90 | void JuniperFsmCtrl(); 91 | void NokiaFsmCtrl(); 92 | void HuaweiFsmCtrl(); 93 | 94 | class CiscoStream { 95 | public: 96 | ~CiscoStream() { spdlog::get("multi-logger")-> 97 | debug("destructor: ~CiscoStream()"); }; 98 | CiscoStream( 99 | mdt_dialout::gRPCMdtDialout::AsyncService *cisco_service, 100 | grpc::ServerCompletionQueue *cisco_cq); 101 | void Start( 102 | std::unordered_map> 103 | &label_map, 104 | DataManipulation &data_manipulation, 105 | DataWrapper &data_wrapper, 106 | KafkaDelivery &kafka_delivery, 107 | kafka::clients::KafkaProducer &kafka_producer, 108 | ZmqPush &zmq_pusher, 109 | zmq::socket_t &zmq_sock_ptr, 110 | const std::string &zmq_uri, 111 | cisco_telemetry::Telemetry &cisco_tlm 112 | ); 113 | private: 114 | mdt_dialout::gRPCMdtDialout::AsyncService *cisco_service_; 115 | grpc::ServerCompletionQueue *cisco_cq_; 116 | grpc::ServerContext cisco_server_ctx; 117 | mdt_dialout::MdtDialoutArgs cisco_stream; 118 | grpc::ServerAsyncReaderWriter cisco_resp; 120 | int cisco_replies_sent; 
121 | const int kCiscoMaxReplies; 122 | enum StreamStatus { START, FLOW, PROCESSING, END }; 123 | StreamStatus cisco_stream_status; 124 | }; 125 | 126 | class JuniperStream { 127 | public: 128 | ~JuniperStream() { 129 | spdlog::get("multi-logger")-> 130 | debug("destructor: ~JuniperStream()"); }; 131 | JuniperStream( 132 | Subscriber::AsyncService *juniper_service, 133 | grpc::ServerCompletionQueue *juniper_cq); 134 | void Start( 135 | std::unordered_map> 136 | &label_map, 137 | DataManipulation &data_manipulation, 138 | DataWrapper &data_wrapper, 139 | KafkaDelivery &kafka_delivery, 140 | kafka::clients::KafkaProducer &kafka_producer, 141 | ZmqPush &zmq_pusher, 142 | zmq::socket_t &zmq_sock, 143 | const std::string &zmq_uri, 144 | GnmiJuniperTelemetryHeaderExtension &juniper_tlm_hdr_ext 145 | ); 146 | private: 147 | Subscriber::AsyncService *juniper_service_; 148 | grpc::ServerCompletionQueue *juniper_cq_; 149 | grpc::ServerContext juniper_server_ctx; 150 | juniper_gnmi::SubscribeResponse juniper_stream; 151 | grpc::ServerAsyncReaderWriter juniper_resp; 153 | int juniper_replies_sent; 154 | const int kJuniperMaxReplies; 155 | enum StreamStatus { START, FLOW, PROCESSING, END }; 156 | StreamStatus juniper_stream_status; 157 | }; 158 | 159 | class NokiaStream { 160 | public: 161 | ~NokiaStream() { 162 | spdlog::get("multi-logger")-> 163 | debug("destructor: ~NokiaStream()"); }; 164 | NokiaStream( 165 | Nokia::SROS::DialoutTelemetry::AsyncService *nokia_service, 166 | grpc::ServerCompletionQueue *nokia_cq); 167 | void Start( 168 | std::unordered_map> 169 | &label_map, 170 | DataManipulation &data_manipulation, 171 | DataWrapper &data_wrapper, 172 | KafkaDelivery &kafka_delivery, 173 | kafka::clients::KafkaProducer &kafka_producer, 174 | ZmqPush &zmq_pusher, 175 | zmq::socket_t &zmq_sock, 176 | const std::string &zmq_uri 177 | ); 178 | private: 179 | Nokia::SROS::DialoutTelemetry::AsyncService *nokia_service_; 180 | grpc::ServerCompletionQueue *nokia_cq_; 181 | grpc::ServerContext nokia_server_ctx; 182 | nokia_gnmi::SubscribeResponse nokia_stream; 183 | grpc::ServerAsyncReaderWriter nokia_resp; 185 | int nokia_replies_sent; 186 | const int kNokiaMaxReplies; 187 | enum StreamStatus { START, FLOW, PROCESSING, END }; 188 | StreamStatus nokia_stream_status; 189 | }; 190 | 191 | class HuaweiStream { 192 | public: 193 | ~HuaweiStream() { 194 | spdlog::get("multi-logger")-> 195 | debug("destructor: ~HuaweiStream()"); }; 196 | HuaweiStream( 197 | huawei_dialout::gRPCDataservice::AsyncService *huawei_service, 198 | grpc::ServerCompletionQueue *huawei_cq); 199 | void Start( 200 | std::unordered_map> 201 | &label_map, 202 | DataManipulation &data_manipulation, 203 | DataWrapper &data_wrapper, 204 | KafkaDelivery &kafka_delivery, 205 | kafka::clients::KafkaProducer &kafka_producer, 206 | ZmqPush &zmq_pusher, 207 | zmq::socket_t &zmq_sock, 208 | const std::string &zmq_uri, 209 | huawei_telemetry::Telemetry &huawei_tlm, 210 | openconfig_interfaces::Interfaces &oc_if 211 | ); 212 | private: 213 | huawei_dialout::gRPCDataservice::AsyncService *huawei_service_; 214 | grpc::ServerCompletionQueue *huawei_cq_; 215 | grpc::ServerContext huawei_server_ctx; 216 | huawei_dialout::serviceArgs huawei_stream; 217 | grpc::ServerAsyncReaderWriter huawei_resp; 219 | int huawei_replies_sent; 220 | const int kHuaweiMaxReplies; 221 | enum StreamStatus { START, FLOW, PROCESSING, END }; 222 | StreamStatus huawei_stream_status; 223 | }; 224 | }; 225 | 226 | #endif 227 | 228 | 
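Each of the four vendor Stream classes above drives the same StreamStatus machine off its completion queue: START (request a new dial-out RPC), FLOW (read an incoming message), PROCESSING (decode, enrich, deliver), END (finish the RPC and recycle the handler). The snippet below is a deliberately simplified, hedged illustration of that cycle, with a toy event loop standing in for the real grpc::ServerCompletionQueue:

```CPP
// Hedged illustration - NOT the collector's code.
#include <iostream>

enum class StreamStatus { START, FLOW, PROCESSING, END };

int main()
{
    StreamStatus status = StreamStatus::START;
    for (int event = 0; event < 8; ++event) {   // pretend CQ wake-ups
        switch (status) {
        case StreamStatus::START:       // ask gRPC for the next dial-out RPC
            std::cout << "START: request RPC\n";
            status = StreamStatus::FLOW;
            break;
        case StreamStatus::FLOW:        // a client connected; read a message
            std::cout << "FLOW: Read(&stream)\n";
            status = StreamStatus::PROCESSING;
            break;
        case StreamStatus::PROCESSING:  // decode, enrich, hand off to delivery
            std::cout << "PROCESSING: decode + deliver\n";
            status = StreamStatus::END;
            break;
        case StreamStatus::END:         // close the RPC, recycle the handler
            std::cout << "END: Finish()\n";
            status = StreamStatus::START;
            break;
        }
    }
    return 0;
}
```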
-------------------------------------------------------------------------------- /src/dataDelivery/Makefile.am: -------------------------------------------------------------------------------- 1 | MAINTAINERCLEANFILES = Makefile.in 2 | 3 | noinst_LTLIBRARIES = libgrpc_collector_data_delivery.la 4 | 5 | libgrpc_collector_data_delivery_la_SOURCES = kafka_delivery.cc zmq_delivery.cc 6 | libgrpc_collector_data_delivery_la_CPPFLAGS = -I$(top_srcdir)/src/include/ -I$(top_builddir)/src/ 7 | -------------------------------------------------------------------------------- /src/dataDelivery/kafka_delivery.cc: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG) 2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT) 3 | 4 | 5 | // mdt-dialout-collector Library headers 6 | #include "kafka_delivery.h" 7 | 8 | 9 | KafkaDelivery::KafkaDelivery() 10 | { 11 | spdlog::get("multi-logger")->debug("constructor: KafkaDelivery()"); 12 | this->topic = 13 | kafka_delivery_cfg_parameters.at("topic"); 14 | this->bootstrap_servers = 15 | kafka_delivery_cfg_parameters.at("bootstrap_servers"); 16 | this->enable_idempotence = 17 | kafka_delivery_cfg_parameters.at("enable_idempotence"); 18 | this->client_id = 19 | kafka_delivery_cfg_parameters.at("client_id"); 20 | this->security_protocol = 21 | kafka_delivery_cfg_parameters.at("security_protocol"); 22 | this->ssl_key_location = 23 | kafka_delivery_cfg_parameters.at("ssl_key_location"); 24 | this->ssl_key_password = 25 | kafka_delivery_cfg_parameters.at("ssl_key_password"); 26 | this->ssl_certificate_location = 27 | kafka_delivery_cfg_parameters.at("ssl_certificate_location"); 28 | this->ssl_ca_location = 29 | kafka_delivery_cfg_parameters.at("ssl_ca_location"); 30 | this->enable_ssl_certificate_verification = 31 | kafka_delivery_cfg_parameters.at("enable_ssl_certificate_verification"); 32 | this->log_level = 33 | kafka_delivery_cfg_parameters.at("log_level"); 34 | 35 | set_kafka_properties(this->properties); 36 | } 37 | 38 | void KafkaDelivery::set_kafka_properties(kafka::Properties &properties) 39 | { 40 | properties.put("bootstrap.servers", get_bootstrap_servers()); 41 | properties.put("enable.idempotence", get_enable_idempotence()); 42 | properties.put("client.id", get_client_id()); 43 | properties.put("security.protocol", get_security_protocol()); 44 | properties.put("ssl.key.location", get_ssl_key_location()); 45 | properties.put("ssl.key.password", get_ssl_key_password()); 46 | properties.put("ssl.certificate.location", get_ssl_certificate_location()); 47 | properties.put("ssl.ca.location", get_ssl_ca_location()); 48 | properties.put("enable.ssl.certificate.verification", get_enable_ssl_certificate_verification()); 49 | properties.put("log_level", get_log_level()); 50 | } 51 | 52 | bool KafkaDelivery::AsyncKafkaProducer( 53 | kafka::clients::KafkaProducer &producer, 54 | const std::string &peer, 55 | const std::string &json_str) 56 | { 57 | if (json_str.empty()) { 58 | spdlog::get("multi-logger")-> 59 | error("[AsyncKafkaProducer] data-delivery issue: " 60 | "empty JSON received"); 61 | return false; 62 | } 63 | 64 | try { 65 | kafka::Topic topic = get_topic(); 66 | kafka::Properties properties = get_properties(); 67 | 68 | auto record = kafka::clients::producer::ProducerRecord( 69 | topic, 70 | kafka::Key(peer.c_str()), 71 | kafka::Value(json_str.c_str(), json_str.size()) 72 | ); 73 | 74 | producer.send( 75 | record, 76 | [](const 
kafka::clients::producer::RecordMetadata &mdata, 77 | const kafka::Error &err) { 78 | if (!err) { 79 | spdlog::get("multi-logger")-> 80 | info("[AsyncKafkaProducer] data-delivery: " 81 | "message successfully delivered"); 82 | } else { 83 | spdlog::get("multi-logger")-> 84 | error("[AsyncKafkaProducer] data-delivery " 85 | "issue: message delivery failure, {}", err.message()); 86 | } 87 | }, kafka::clients::KafkaProducer::SendOption::ToCopyRecordValue); 88 | } catch (const kafka::KafkaException &kex) { 89 | spdlog::get("multi-logger")-> 90 | error("[AsyncKafkaProducer] data-delivery issue: " 91 | "{}", kex.what()); 92 | return false; 93 | } 94 | 95 | return true; 96 | } 97 | 98 | -------------------------------------------------------------------------------- /src/dataDelivery/kafka_delivery.h: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG) 2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT) 3 | 4 | 5 | #ifndef _KAFKA_DELIVERY_H_ 6 | #define _KAFKA_DELIVERY_H_ 7 | 8 | // C++ Standard Library headers 9 | 10 | // External Library headers 11 | #include "kafka/KafkaProducer.h" 12 | // mdt-dialout-collector Library headers 13 | #include "../utils/cfg_handler.h" 14 | #include "kafka/Properties.h" 15 | #include "kafka/Types.h" 16 | #include "../utils/logs_handler.h" 17 | 18 | 19 | class KafkaDelivery { 20 | public: 21 | KafkaDelivery(); 22 | ~KafkaDelivery() { spdlog::get("multi-logger")-> 23 | debug("destructor: ~KafkaDelivery()"); }; 24 | bool AsyncKafkaProducer( 25 | kafka::clients::KafkaProducer &producer, 26 | const std::string &peer, 27 | const std::string &json_str); 28 | void set_kafka_properties(kafka::Properties &properties); 29 | kafka::Properties get_properties() { 30 | return properties; }; 31 | kafka::Topic get_topic() { 32 | return topic; } 33 | std::string get_bootstrap_servers() { 34 | return bootstrap_servers; }; 35 | std::string get_enable_idempotence() { 36 | return enable_idempotence; }; 37 | std::string get_client_id() { 38 | return client_id; }; 39 | std::string get_security_protocol() { 40 | return security_protocol; }; 41 | std::string get_ssl_key_location() { 42 | return ssl_key_location; }; 43 | std::string get_ssl_key_password() { 44 | return ssl_key_password; }; 45 | std::string get_ssl_certificate_location() { 46 | return ssl_certificate_location; }; 47 | std::string get_ssl_ca_location() { 48 | return ssl_ca_location; }; 49 | std::string get_enable_ssl_certificate_verification() { 50 | return enable_ssl_certificate_verification; }; 51 | std::string get_log_level() { 52 | return log_level; }; 53 | private: 54 | kafka::Properties properties; 55 | kafka::Topic topic; 56 | std::string bootstrap_servers; 57 | std::string enable_idempotence; 58 | std::string client_id; 59 | std::string security_protocol; 60 | std::string ssl_key_location; 61 | std::string ssl_key_password; 62 | std::string ssl_certificate_location; 63 | std::string ssl_ca_location; 64 | std::string enable_ssl_certificate_verification; 65 | std::string log_level; 66 | }; 67 | 68 | #endif 69 | 70 | -------------------------------------------------------------------------------- /src/dataDelivery/zmq_delivery.cc: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG) 2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT) 3 | 4 | 5 | // mdt-dialout-collector Library 
headers
6 | #include "zmq_delivery.h"
7 | #include "../bridge/grpc_collector_bridge.h"
8 | #include
9 | 
10 | 
11 | ZmqDelivery::ZmqDelivery()
12 | {
13 | spdlog::get("multi-logger")->debug("constructor: ZmqDelivery()");
14 | this->set_zmq_transport_uri();
15 | }
16 | 
17 | bool ZmqPush::ZmqPusher(
18 | DataWrapper &data_wrapper,
19 | zmq::socket_t &zmq_sock,
20 | const std::string &zmq_transport_uri)
21 | {
22 | grpc_payload *pload;
23 | 
24 | InitGrpcPayload(
25 | &pload,
26 | data_wrapper.get_event_type().c_str(),
27 | data_wrapper.get_serialization().c_str(),
28 | data_wrapper.get_writer_id().c_str(),
29 | data_wrapper.get_telemetry_node().c_str(),
30 | data_wrapper.get_telemetry_port().c_str(),
31 | data_wrapper.get_telemetry_data().c_str());
32 | 
33 | // Message buffer preparation
34 | // PUSH-ing only the pointer to the data-struct
35 | const size_t size = sizeof(grpc_payload *);
36 | zmq::message_t message(&pload, size);
37 | 
38 | try {
39 | zmq_sock.send(message, zmq::send_flags::dontwait);
40 | spdlog::get("multi-logger")->
41 | info("[ZmqPusher] data-delivery: "
42 | "message successfully sent");
43 | //std::this_thread::sleep_for(std::chrono::milliseconds(300));
44 | } catch(const zmq::error_t &zex) {
45 | spdlog::get("multi-logger")->
46 | error("[ZmqPusher] data-delivery issue: "
47 | "{}", zex.what());
48 | return false;
49 | }
50 | 
51 | return true;
52 | }
53 | 
54 | void ZmqPull::ZmqPoller(
55 | zmq::socket_t &zmq_sock,
56 | const std::string &zmq_transport_uri)
57 | {
58 | // Message buffer preparation
59 | // POLL-ing only the pointer to the data-struct
60 | const size_t size = sizeof(grpc_payload *);
61 | zmq::message_t message(size);
62 | 
63 | try {
64 | auto res = zmq_sock.recv(message, zmq::recv_flags::none);
65 | if (res.value() != 0) {
66 | spdlog::get("multi-logger")->
67 | info("[ZmqPoller] data-delivery: "
68 | "message successfully received");
69 | grpc_payload *pload = *(grpc_payload **) message.data();
70 | std::cout << "PULL-ing from " << zmq_transport_uri << ": "
71 | << pload->event_type
72 | << " "
73 | << pload->serialization
74 | << " "
75 | << pload->writer_id
76 | << " "
77 | << pload->telemetry_node
78 | << " "
79 | << pload->telemetry_port
80 | << " "
81 | << pload->telemetry_data
82 | << "\n";
83 | free_grpc_payload(pload);
84 | //std::this_thread::sleep_for(std::chrono::milliseconds(300));
85 | }
86 | } catch(const zmq::error_t &zex) {
87 | spdlog::get("multi-logger")->
88 | error("[ZmqPoller] data-delivery issue: "
89 | "{}", zex.what());
90 | std::exit(EXIT_FAILURE);
91 | }
92 | }
93 | 
94 | 
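A hedged sketch of a standalone consumer loop in the spirit of src/tests/zmq_pull.cc, using the ZmqPull class declared in zmq_delivery.h below. It assumes the configuration and the "multi-logger" spdlog logger have already been initialised, and that the PULL side binds (whether it binds or connects is deployment-specific). Note that PUSH-ing raw grpc_payload pointers, as ZmqPusher does, is only safe when pusher and poller share one process, i.e. over an inproc:// transport.

```CPP
// Hedged sketch - assumes cfg_handler/logs_handler are already initialised.
#include <zmq.hpp>
#include "dataDelivery/zmq_delivery.h"

int main()
{
    ZmqPull puller;
    zmq::socket_t sock(puller.get_zmq_ctx(), zmq::socket_type::pull);
    sock.bind(puller.get_zmq_transport_uri());  // e.g. "inproc://..."
    while (true)  // each iteration PULLs one pointer and frees the payload
        puller.ZmqPoller(sock, puller.get_zmq_transport_uri());
}
```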
-------------------------------------------------------------------------------- /src/dataDelivery/zmq_delivery.h: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG) 2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT) 3 | 4 | 5 | #ifndef _ZMQ_DELIVERY_H_ 6 | #define _ZMQ_DELIVERY_H_ 7 | 8 | // C++ Standard Library headers 9 | 10 | // External Library headers 11 | #include <zmq.hpp> 12 | #include <zmq_addon.hpp> 13 | // mdt-dialout-collector Library headers 14 | #include "../dataWrapper/data_wrapper.h" 15 | #include "../utils/cfg_handler.h" 16 | #include "../utils/logs_handler.h" 17 | 18 | 19 | class ZmqDelivery { 20 | public: 21 | ZmqDelivery(); 22 | ~ZmqDelivery() { spdlog::get("multi-logger")-> 23 | debug("destructor: ~ZmqDelivery()"); }; 24 | void set_zmq_transport_uri() { 25 | this->zmq_transport_uri = zmq_delivery_cfg_parameters.at("zmq_uri"); }; 26 | const std::string &get_zmq_transport_uri() { 27 | return this->zmq_transport_uri; }; 28 | private: 29 | std::string zmq_transport_uri; 30 | }; 31 | 32 | class ZmqPush : public ZmqDelivery { 33 | public: 34 | ZmqPush() { spdlog::get("multi-logger")-> 35 | debug("constructor: ZmqPush()"); }; 36 | ~ZmqPush() { spdlog::get("multi-logger")-> 37 | debug("destructor: ~ZmqPush()"); }; 38 | bool ZmqPusher( 39 | DataWrapper &data_wrapper, 40 | zmq::socket_t &zmq_sock, 41 | const std::string &zmq_transport_uri); 42 | zmq::context_t &get_zmq_ctx() { 43 | return this->zmq_ctx; }; 44 | private: 45 | zmq::context_t zmq_ctx; 46 | }; 47 | 48 | class ZmqPull : public ZmqDelivery { 49 | public: 50 | ZmqPull() { spdlog::get("multi-logger")-> 51 | debug("constructor: ZmqPull()"); }; 52 | ~ZmqPull() { spdlog::get("multi-logger")-> 53 | debug("destructor: ~ZmqPull()"); }; 54 | void ZmqPoller( 55 | zmq::socket_t &zmq_sock, 56 | const std::string &zmq_transport_uri); 57 | zmq::context_t &get_zmq_ctx() { 58 | return this->zmq_ctx; }; 59 | private: 60 | zmq::context_t zmq_ctx; 61 | }; 62 | 63 | #endif 64 | 65 | -------------------------------------------------------------------------------- /src/dataManipulation/Makefile.am: -------------------------------------------------------------------------------- 1 | MAINTAINERCLEANFILES = Makefile.in 2 | 3 | noinst_LTLIBRARIES = libgrpc_collector_data_manipulation.la 4 | 5 | libgrpc_collector_data_manipulation_la_SOURCES = data_manipulation.cc 6 | libgrpc_collector_data_manipulation_la_CPPFLAGS = -I$(top_srcdir)/src/include/ -I$(top_builddir)/src/ 7 | -------------------------------------------------------------------------------- /src/dataManipulation/data_manipulation.h: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG) 2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT) 3 | 4 | 5 | #ifndef _DATA_MANIPULATION_H_ 6 | #define _DATA_MANIPULATION_H_ 7 | 8 | // C++ Standard Library headers 9 | #include <string> 10 | #include <unordered_map> 11 | // External Library headers 12 | #include <jsoncpp/json/json.h> 13 | // mdt-dialout-collector Library headers 14 | #include "proto/Cisco/cisco_telemetry.pb.h" 15 | #include "proto/Juniper/juniper_gnmi.pb.h" 16 | #include "proto/Juniper/juniper_telemetry_header_extension.pb.h" 17 | #include "proto/Huawei/huawei_telemetry.pb.h" 18 | #include "proto/Nokia/nokia_gnmi.pb.h" 19 | #include "proto/OpenConfig/openconfig_interfaces.pb.h" 20 | #include <vector> 21 | #include "../utils/logs_handler.h" 22 | #include "../utils/cfg_handler.h" 23 | 24 | 25 | class DataManipulation { 26 | public: 27 | DataManipulation() { 28 | spdlog::get("multi-logger")-> 29 | debug("constructor: DataManipulation()"); }; 30 | ~DataManipulation() { 31 | spdlog::get("multi-logger")-> 32 | debug("destructor: ~DataManipulation()"); }; 33 | bool MetaData( 34 | std::string &json_str, 35 | const std::string &peer_ip, 36 | const std::string &peer_port, 37 | std::string &json_str_out); 38 | void set_sequence_number() { sequence_number++; }; 39 | uint64_t get_sequence_number() { return sequence_number; }; 40 | bool AppendLabelMap( 41 | std::unordered_map<std::string, std::vector<std::string>> &label_map, 42 | const std::string &peer_ip, 43 | const std::string &json_str, 44 | std::string &json_str_out); 45 | bool CiscoGpbkv2Json( 46 | const cisco_telemetry::Telemetry &cisco_tlm, 47 | std::string &json_str_out); 48 | Json::Value CiscoGpbkvField2Json( 49 | const cisco_telemetry::TelemetryField &field); 50 | bool
JuniperExtension(juniper_gnmi::SubscribeResponse &juniper_stream, 51 | GnmiJuniperTelemetryHeaderExtension &juniper_tlm_header_ext, 52 | Json::Value &root); 53 | bool JuniperUpdate(juniper_gnmi::SubscribeResponse &juniper_stream, 54 | std::string &json_str_out, 55 | Json::Value &root); 56 | bool NokiaUpdate(nokia_gnmi::SubscribeResponse &nokia_stream, 57 | std::string &json_str_out, 58 | Json::Value &root); 59 | bool HuaweiGpbOpenconfigInterface( 60 | const huawei_telemetry::Telemetry &huawei_tlm, 61 | openconfig_interfaces::Interfaces &oc_if, 62 | std::string &json_str_out); 63 | private: 64 | uint64_t sequence_number = 0; 65 | }; 66 | 67 | #endif 68 | 69 | -------------------------------------------------------------------------------- /src/dataWrapper/Makefile.am: -------------------------------------------------------------------------------- 1 | MAINTAINERCLEANFILES = Makefile.in 2 | 3 | noinst_LTLIBRARIES = libgrpc_collector_data_wrapper.la 4 | 5 | libgrpc_collector_data_wrapper_la_SOURCES = data_wrapper.cc 6 | libgrpc_collector_data_wrapper_la_CPPFLAGS = -I$(top_srcdir)/src/include/ 7 | -------------------------------------------------------------------------------- /src/dataWrapper/data_wrapper.cc: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG) 2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT) 3 | 4 | 5 | // mdt-dialout-collector Library headers 6 | #include "data_wrapper.h" 7 | 8 | 9 | bool DataWrapper::BuildDataWrapper( 10 | const std::string &event_type, 11 | const std::string &serialization, 12 | const std::string &writer_id, 13 | const std::string &telemetry_node, 14 | const std::string &telemetry_port, 15 | const std::string &telemetry_data) 16 | { 17 | set_sequence_number(); 18 | set_event_type(event_type); 19 | set_serialization(serialization); 20 | set_timestamp(); 21 | set_writer_id(writer_id); 22 | set_telemetry_node(telemetry_node); 23 | set_telemetry_port(telemetry_port); 24 | set_telemetry_data(telemetry_data); 25 | 26 | return true; 27 | } 28 | 29 | void DataWrapper::DisplayDataWrapper() 30 | { 31 | uint64_t sequence_number = get_sequence_number(); 32 | const std::string event_type = get_event_type(); 33 | const std::string serialization = get_serialization(); 34 | std::time_t timestamp = get_timestamp(); 35 | const std::string writer_id = get_writer_id(); 36 | const std::string telemetry_node = get_telemetry_node(); 37 | uint16_t telemetry_port = static_cast<uint16_t>( 38 | std::stoi(get_telemetry_port())); 39 | const std::string telemetry_data = get_telemetry_data(); 40 | 41 | std::cout << sequence_number << "\n"; 42 | std::cout << event_type << "\n"; 43 | std::cout << serialization << "\n"; 44 | std::cout << timestamp << "\n"; 45 | std::cout << writer_id << "\n"; 46 | std::cout << telemetry_node << "\n"; 47 | std::cout << telemetry_port << "\n"; 48 | std::cout << telemetry_data << "\n"; 49 | } 50 | 51 |
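// Usage sketch (field values are illustrative): BuildDataWrapper() stamps
// the sequence number and timestamp itself, so callers only supply the rest.
//
//   DataWrapper wrapper;
//   wrapper.BuildDataWrapper("gRPC", "json_string", "mdt-dialout-collector",
//                            "10.0.0.1", "57400", "{\"ifName\":\"eth0\"}");
//   wrapper.DisplayDataWrapper();   // prints one field per line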
-------------------------------------------------------------------------------- /src/dataWrapper/data_wrapper.h: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG) 2 | // Distributed under the MIT License (http://opensource.org/licenses/MIT) 3 | 4 | 5 | #ifndef _DATA_WRAPPER_H_ 6 | #define _DATA_WRAPPER_H_ 7 | 8 | // C++ Standard Library headers 9 | #include <string> 10 | #include <ctime> 11 | // External Library headers 12 | 13 | // mdt-dialout-collector Library headers 14 | #include "../utils/logs_handler.h" 15 | 16 | 17 | // C++ Class 18 | class DataWrapper { 19 | public: 20 | DataWrapper() { 21 | spdlog::get("multi-logger")-> 22 | debug("constructor: DataWrapper()"); }; 23 | ~DataWrapper() { 24 | spdlog::get("multi-logger")-> 25 | debug("destructor: ~DataWrapper()"); }; 26 | 27 | bool BuildDataWrapper( 28 | const std::string &event_type, 29 | const std::string &serialization, 30 | const std::string &writer_id, 31 | const std::string &telemetry_node, 32 | const std::string &telemetry_port, 33 | const std::string &telemetry_data 34 | ); 35 | 36 | void DisplayDataWrapper(); 37 | 38 | // Setters 39 | void set_sequence_number() { 40 | this->sequence_number++; 41 | }; 42 | void set_event_type(const std::string &event_type) { 43 | this->event_type = event_type; 44 | }; 45 | void set_serialization(const std::string &serialization) { 46 | this->serialization = serialization; 47 | }; 48 | void set_timestamp() { 49 | this->timestamp = std::time(nullptr); 50 | }; 51 | void set_writer_id(const std::string &writer_id) { 52 | this->writer_id = writer_id; 53 | }; 54 | void set_telemetry_node(const std::string &telemetry_node) { 55 | this->telemetry_node = telemetry_node; 56 | }; 57 | void set_telemetry_port(const std::string &telemetry_port) { 58 | this->telemetry_port = telemetry_port; 59 | }; 60 | void set_telemetry_data(const std::string &telemetry_data) { 61 | this->telemetry_data = telemetry_data; 62 | } 63 | 64 | // Getters 65 | uint64_t &get_sequence_number() { return this->sequence_number; }; 66 | std::string &get_event_type() { return this->event_type; }; 67 | std::string &get_serialization() { return this->serialization; }; 68 | std::time_t &get_timestamp() { return this->timestamp; }; 69 | std::string &get_writer_id() { return this->writer_id; }; 70 | std::string &get_telemetry_node() { return this->telemetry_node; }; 71 | std::string &get_telemetry_port() { return this->telemetry_port; }; 72 | std::string &get_telemetry_data() { return this->telemetry_data; }; 73 | private: 74 | uint64_t sequence_number = 0; 75 | std::string event_type = "gRPC"; 76 | std::string serialization = "json_string"; 77 | std::time_t timestamp = 0; 78 | std::string writer_id = "mdt-dialout-collector"; 79 | std::string telemetry_node = "none"; 80 | std::string telemetry_port = "none"; 81 | std::string telemetry_data = "none"; 82 | }; 83 | 84 | #endif 85 | 86 | -------------------------------------------------------------------------------- /src/include/grpc/socket_mutator.h: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Copyright 2015 gRPC authors. 4 | * 5 | * Licensed under the Apache License, Version 2.0 (the "License"); 6 | * you may not use this file except in compliance with the License. 7 | * You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | * 17 | */ 18 | 19 | #ifndef GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H 20 | #define GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H 21 | 22 | #include <grpc/support/port_platform.h> 23 | 24 | #include <stdbool.h> 25 | 26 | #include <grpc/impl/codegen/grpc_types.h> 27 | #include <grpc/support/sync.h> 28 |
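/* A minimal sketch (not part of this header) of a concrete mutator that
 * turns on TCP keepalive; the function names are illustrative, and gpr_free
 * assumes <grpc/support/alloc.h>:
 *
 *   static bool keepalive_mutate_fd(int fd, grpc_socket_mutator* m) {
 *     int on = 1;
 *     return setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) == 0;
 *   }
 *   static int keepalive_compare(grpc_socket_mutator* a, grpc_socket_mutator* b) {
 *     return (a < b) ? -1 : (a > b ? 1 : 0);
 *   }
 *   static void keepalive_destroy(grpc_socket_mutator* m) { gpr_free(m); }
 *
 *   static const grpc_socket_mutator_vtable keepalive_vtable = {
 *       keepalive_mutate_fd, keepalive_compare, keepalive_destroy, NULL};
 */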
29 | /** How is an fd to be used? */ 30 | typedef enum { 31 | /** Used for client connection */ 32 | GRPC_FD_CLIENT_CONNECTION_USAGE, 33 | /** Used for server listening */ 34 | GRPC_FD_SERVER_LISTENER_USAGE, 35 | /** Used for server connection */ 36 | GRPC_FD_SERVER_CONNECTION_USAGE, 37 | } grpc_fd_usage; 38 | 39 | /** Information about an fd to mutate */ 40 | typedef struct { 41 | /** File descriptor to mutate */ 42 | int fd; 43 | /** How the fd will be used */ 44 | grpc_fd_usage usage; 45 | } grpc_mutate_socket_info; 46 | 47 | /** The virtual table of grpc_socket_mutator */ 48 | struct grpc_socket_mutator_vtable { 49 | /** Mutates the socket options of \a fd -- deprecated, prefer mutate_fd_2 */ 50 | bool (*mutate_fd)(int fd, grpc_socket_mutator* mutator); 51 | /** Compare socket mutator \a a and \a b */ 52 | int (*compare)(grpc_socket_mutator* a, grpc_socket_mutator* b); 53 | /** Destroys the socket mutator instance */ 54 | void (*destroy)(grpc_socket_mutator* mutator); 55 | /** Mutates the socket options of the fd in \a info - if set takes preference 56 | * to mutate_fd */ 57 | bool (*mutate_fd_2)(const grpc_mutate_socket_info* info, 58 | grpc_socket_mutator* mutator); 59 | }; 60 | 61 | /** The Socket Mutator interface allows changes on socket options */ 62 | struct grpc_socket_mutator { 63 | const grpc_socket_mutator_vtable* vtable; 64 | gpr_refcount refcount; 65 | }; 66 | 67 | /** called by concrete implementations to initialize the base struct */ 68 | void grpc_socket_mutator_init(grpc_socket_mutator* mutator, 69 | const grpc_socket_mutator_vtable* vtable); 70 | 71 | /** Wrap \a mutator as a grpc_arg */ 72 | grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator* mutator); 73 | 74 | /** Perform the file descriptor mutation operation of \a mutator on \a fd */ 75 | bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator* mutator, int fd, 76 | grpc_fd_usage usage); 77 | 78 | /** Compare if \a a and \a b are the same mutator or have same settings */ 79 | int grpc_socket_mutator_compare(grpc_socket_mutator* a, grpc_socket_mutator* b); 80 | 81 | grpc_socket_mutator* grpc_socket_mutator_ref(grpc_socket_mutator* mutator); 82 | void grpc_socket_mutator_unref(grpc_socket_mutator* mutator); 83 | 84 | #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H */ 85 | -------------------------------------------------------------------------------- /src/include/kafka/AdminClientConfig.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Properties.h> 6 | 7 | 8 | namespace KAFKA_API { namespace clients { namespace admin { 9 | 10 | /** 11 | * Configuration for the Kafka admin client. 12 | */ 13 | class Config: public Properties 14 | { 15 | public: 16 | Config() = default; 17 | Config(const Config&) = default; 18 | explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} 19 | 20 | /** 21 | * The string contains host:port pairs of brokers (split by ",") that the administrative client will use to establish initial connection to the Kafka cluster. 22 | * Note: It's mandatory. 23 | */ 24 | static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; 25 | 26 | /** 27 | * Protocol used to communicate with brokers. 28 | * Default value: plaintext 29 | */ 30 | static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; 31 | 32 | /** 33 | * SASL mechanism to use for authentication. 34 | * Default value: GSSAPI 35 | */ 36 | static const constexpr char* SASL_MECHANISM = "sasl.mechanisms"; 37 |
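    /* Usage sketch (broker endpoints and mechanism values are illustrative):
     *
     *   Config cfg;
     *   cfg.put(Config::BOOTSTRAP_SERVERS, "broker1:9092,broker2:9092");
     *   cfg.put(Config::SECURITY_PROTOCOL, "sasl_ssl");
     *   cfg.put(Config::SASL_MECHANISM,    "SCRAM-SHA-256");
     */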
38 | /** 39 | * SASL username for use with the PLAIN and SASL-SCRAM-.. mechanism. 40 | */ 41 | static const constexpr char* SASL_USERNAME = "sasl.username"; 42 | 43 | /** 44 | * SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism. 45 | */ 46 | static const constexpr char* SASL_PASSWORD = "sasl.password"; 47 | 48 | /** 49 | * Shell command to refresh or acquire the client's Kerberos ticket. 50 | */ 51 | static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; 52 | 53 | /** 54 | * The client's Kerberos principal name. 55 | */ 56 | static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; 57 | 58 | /** 59 | * Set to "default" or "oidc" to control which login method is to be used. 60 | * If set to "oidc", the following properties must also be specified: 61 | * sasl.oauthbearer.client.id 62 | * sasl.oauthbearer.client.secret 63 | * sasl.oauthbearer.token.endpoint.url 64 | * Default value: default 65 | */ 66 | static const constexpr char* SASL_OAUTHBEARER_METHOD = "sasl.oauthbearer.method"; 67 | 68 | /** 69 | * Public identifier for the application. 70 | * Only used with "sasl.oauthbearer.method=oidc". 71 | */ 72 | static const constexpr char* SASL_OAUTHBEARER_CLIENT_ID = "sasl.oauthbearer.client.id"; 73 | 74 | /** 75 | * Client secret only known to the application and the authorization server. 76 | * Only used with "sasl.oauthbearer.method=oidc". 77 | */ 78 | static const constexpr char* SASL_OAUTHBEARER_CLIENT_SECRET = "sasl.oauthbearer.client.secret"; 79 | 80 | /** 81 | * Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. 82 | * Only used with "sasl.oauthbearer.method=oidc". 83 | */ 84 | static const constexpr char* SASL_OAUTHBEARER_EXTENSIONS = "sasl.oauthbearer.extensions"; 85 | 86 | /** 87 | * Clients use this to specify the scope of the access request to the broker. 88 | * Only used with "sasl.oauthbearer.method=oidc". 89 | */ 90 | static const constexpr char* SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope"; 91 | 92 | /** 93 | * OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve the token. 94 | * Only used with "sasl.oauthbearer.method=oidc". 95 | */ 96 | static const constexpr char* SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; 97 | 98 | /** 99 | * SASL/OAUTHBEARER configuration. 100 | * The format is implementation-dependent and must be parsed accordingly. 101 | */ 102 | static const constexpr char* SASL_OAUTHBEARER_CONFIG = "sasl.oauthbearer.config"; 103 | 104 | /** 105 | * Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. 106 | * Should only be used for development or testing, and not in production. 107 | * Default value: false 108 | */ 109 | static const constexpr char* ENABLE_SASL_OAUTHBEARER_UNSECURE_JWT = "enable.sasl.oauthbearer.unsecure.jwt"; 110 | }; 111 | 112 | } } } // end of KAFKA_API::clients::admin 113 | 114 | -------------------------------------------------------------------------------- /src/include/kafka/AdminCommon.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Error.h> 6 | #include <kafka/Types.h> 7 | 8 | 9 | namespace KAFKA_API { namespace clients { namespace admin { 10 | 11 | /** 12 | * The result of AdminClient::createTopics(). 13 | */ 14 | struct CreateTopicsResult 15 | { 16 | explicit CreateTopicsResult(const Error& err): error(err) {} 17 | 18 | /** 19 | * The result error. 20 | */ 21 | Error error; 22 | }; 23 | 24 | /** 25 | * The result of AdminClient::deleteTopics().
26 | */ 27 | struct DeleteTopicsResult 28 | { 29 | explicit DeleteTopicsResult(const Error& err): error(err) {} 30 | 31 | /** 32 | * The result error. 33 | */ 34 | Error error; 35 | }; 36 | 37 | /** 38 | * The result of AdminClient::deleteRecords(). 39 | */ 40 | struct DeleteRecordsResult 41 | { 42 | explicit DeleteRecordsResult(const Error& err): error(err) {} 43 | 44 | /** 45 | * The result error. 46 | */ 47 | Error error; 48 | }; 49 | 50 | /** 51 | * The result of AdminClient::listTopics(). 52 | */ 53 | struct ListTopicsResult 54 | { 55 | explicit ListTopicsResult(const Error& err): error(err) {} 56 | explicit ListTopicsResult(Topics names): topics(std::move(names)) {} 57 | 58 | /** 59 | * The result error. 60 | */ 61 | Error error; 62 | 63 | /** 64 | * The topics fetched. 65 | */ 66 | Topics topics; 67 | }; 68 | 69 | } } } // end of KAFKA_API::clients::admin 70 | 71 | -------------------------------------------------------------------------------- /src/include/kafka/BrokerMetadata.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Error.h> 6 | #include <kafka/Types.h> 7 | 8 | #include <librdkafka/rdkafka.h> 9 | 10 | #include <map> 11 | #include <memory> 12 | 13 | 14 | namespace KAFKA_API { 15 | 16 | /** 17 | * The metadata info for a topic. 18 | */ 19 | struct BrokerMetadata { 20 | /** 21 | * Information for a Kafka node. 22 | */ 23 | struct Node 24 | { 25 | public: 26 | using Id = int; 27 | using Host = std::string; 28 | using Port = int; 29 | 30 | Node(Id i, Host h, Port p): id(i), host(std::move(h)), port(p) {} 31 | 32 | /** 33 | * The node id. 34 | */ 35 | Node::Id id; 36 | 37 | /** 38 | * The host name. 39 | */ 40 | Node::Host host; 41 | 42 | /** 43 | * The port. 44 | */ 45 | Node::Port port; 46 | 47 | /** 48 | * Obtains explanatory string. 49 | */ 50 | std::string toString() const { return host + ":" + std::to_string(port) + "/" + std::to_string(id); } 51 | }; 52 | 53 | /** 54 | * It is used to describe per-partition state in the MetadataResponse. 55 | */ 56 | struct PartitionInfo 57 | { 58 | explicit PartitionInfo(Node::Id leaderId): leader(leaderId) {} 59 | 60 | void addReplica(Node::Id id) { replicas.emplace_back(id); } 61 | void addInSyncReplica(Node::Id id) { inSyncReplicas.emplace_back(id); } 62 | 63 | /** 64 | * The node id currently acting as a leader for this partition or null if there is no leader. 65 | */ 66 | Node::Id leader; 67 | 68 | /** 69 | * The complete set of replicas id for this partition regardless of whether they are alive or up-to-date. 70 | */ 71 | std::vector<Node::Id> replicas; 72 | 73 | /** 74 | * The subset of the replicas id that are in sync, that is caught-up to the leader and ready to take over as leader if the leader should fail. 75 | */ 76 | std::vector<Node::Id> inSyncReplicas; 77 | 78 | }; 79 | 80 | /** 81 | * Obtains explanatory string from Node::Id. 82 | */ 83 | std::string getNodeDescription(Node::Id id) const; 84 | 85 | /** 86 | * Obtains explanatory string for PartitionInfo. 87 | */ 88 | std::string toString(const PartitionInfo& partitionInfo) const; 89 | 90 | /** 91 | * The BrokerMetadata is per-topic constructed. 92 | */ 93 | explicit BrokerMetadata(Topic topic): _topic(std::move(topic)) {} 94 | 95 | /** 96 | * The topic name. 97 | */ 98 | const std::string& topic() const { return _topic; } 99 | 100 | /** 101 | * The nodes info in the MetadataResponse. 102 | */ 103 | std::vector<std::shared_ptr<Node>> nodes() const; 104 |
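    /* Inspection sketch ("demo-topic" is illustrative; fetchBrokerMetadata()
     * is the KafkaClient accessor that typically builds this structure):
     *
     *   auto md = client.fetchBrokerMetadata("demo-topic");
     *   if (md) {
     *       for (const auto& node: md->nodes())
     *           std::cout << node->toString() << std::endl;
     *       for (const auto& p: md->partitions())
     *           std::cout << p.first << ": " << md->toString(p.second) << std::endl;
     *   }
     */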
105 | /** 106 | * The partitions' state in the MetadataResponse. 107 | */ 108 | const std::map<Partition, PartitionInfo>& partitions() const { return _partitions; } 109 | 110 | /** 111 | * Obtains explanatory string. 112 | */ 113 | std::string toString() const; 114 | 115 | void setOrigNodeName(const std::string& origNodeName) { _origNodeName = origNodeName; } 116 | void addNode(Node::Id nodeId, const Node::Host& host, Node::Port port) { _nodes[nodeId] = std::make_shared<Node>(nodeId, host, port); } 117 | void addPartitionInfo(Partition partition, const PartitionInfo& partitionInfo) { _partitions.emplace(partition, partitionInfo); } 118 | 119 | private: 120 | Topic _topic; 121 | std::string _origNodeName; 122 | std::map<Node::Id, std::shared_ptr<Node>> _nodes; 123 | std::map<Partition, PartitionInfo> _partitions; 124 | }; 125 | 126 | inline std::vector<std::shared_ptr<BrokerMetadata::Node>> 127 | BrokerMetadata::nodes() const 128 | { 129 | std::vector<std::shared_ptr<Node>> ret; 130 | ret.reserve(_nodes.size()); 131 | for (const auto& nodeInfo: _nodes) 132 | { 133 | ret.emplace_back(nodeInfo.second); 134 | } 135 | return ret; 136 | } 137 | 138 | inline std::string 139 | BrokerMetadata::getNodeDescription(Node::Id id) const 140 | { 141 | const auto& found = _nodes.find(id); 142 | if (found == _nodes.cend()) return "-:-/" + std::to_string(id); 143 | 144 | auto node = found->second; 145 | return node->host + ":" + std::to_string(node->port) + "/" + std::to_string(id); 146 | } 147 | 148 | inline std::string 149 | BrokerMetadata::toString(const PartitionInfo& partitionInfo) const 150 | { 151 | std::ostringstream oss; 152 | 153 | auto streamNodes = [this](std::ostringstream& ss, const std::vector<Node::Id>& nodeIds) -> std::ostringstream& { 154 | bool isTheFirst = true; 155 | for (const auto id: nodeIds) 156 | { 157 | ss << (isTheFirst ? (isTheFirst = false, "") : ", ") << getNodeDescription(id); 158 | } 159 | return ss; 160 | }; 161 | 162 | oss << "leader[" << getNodeDescription(partitionInfo.leader) << "], replicas["; 163 | streamNodes(oss, partitionInfo.replicas) << "], inSyncReplicas["; 164 | streamNodes(oss, partitionInfo.inSyncReplicas) << "]"; 165 | 166 | return oss.str(); 167 | } 168 | 169 | inline std::string 170 | BrokerMetadata::toString() const 171 | { 172 | std::ostringstream oss; 173 | 174 | oss << "originatingNode[" << _origNodeName << "], topic[" << _topic << "], partitions{"; 175 | bool isTheFirst = true; 176 | for (const auto& partitionInfoPair: _partitions) 177 | { 178 | const Partition partition = partitionInfoPair.first; 179 | const PartitionInfo& partitionInfo = partitionInfoPair.second; 180 | oss << (isTheFirst ? (isTheFirst = false, "") : "; ") << partition << ": " << toString(partitionInfo); 181 | } 182 | oss << "}"; 183 | 184 | return oss.str(); 185 | } 186 | 187 | } // end of KAFKA_API 188 | 189 | -------------------------------------------------------------------------------- /src/include/kafka/ConsumerCommon.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Error.h> 6 | #include <kafka/RdKafkaHelper.h> 7 | #include <kafka/Types.h> 8 | 9 | #include <librdkafka/rdkafka.h> 10 | 11 | #include <functional> 12 | 13 | 14 | namespace KAFKA_API { namespace clients { namespace consumer { 15 | 16 | /** 17 | * To identify which kind of re-balance event is being handled, when the set of partitions assigned to the consumer changes. 18 | * It's guaranteed that the rebalance callback will be called twice (first with PartitionsRevoked, and then with PartitionsAssigned). 19 | */ 20 | enum class RebalanceEventType { PartitionsAssigned, PartitionsRevoked }; 21 | 22 | /** 23 | * A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the consumer changes. 24 | */ 25 | using RebalanceCallback = std::function<void(RebalanceEventType eventType, const TopicPartitions& topicPartitions)>;
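/* A sketch of a matching callback (illustrative; `std::cout` assumes
 * <iostream>, and toString(TopicPartitions) is assumed from Types.h):
 *
 *   RebalanceCallback logRebalance =
 *       [](RebalanceEventType et, const TopicPartitions& tps) {
 *           std::cout << (et == RebalanceEventType::PartitionsAssigned ?
 *                         "assigned: " : "revoked: ")
 *                     << toString(tps) << std::endl;
 *       };
 */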
26 | 27 | /** 28 | * Null RebalanceCallback 29 | */ 30 | #if COMPILER_SUPPORTS_CPP_17 31 | const inline RebalanceCallback NullRebalanceCallback = RebalanceCallback{}; 32 | #else 33 | const static RebalanceCallback NullRebalanceCallback = RebalanceCallback{}; 34 | #endif 35 | 36 | /** 37 | * A callback interface that the user can implement to trigger custom actions when a commit request completes. 38 | */ 39 | using OffsetCommitCallback = std::function<void(const TopicPartitionOffsets& topicPartitionOffsets, const Error& error)>; 40 | 41 | /** 42 | * Null OffsetCommitCallback 43 | */ 44 | #if COMPILER_SUPPORTS_CPP_17 45 | const inline OffsetCommitCallback NullOffsetCommitCallback = OffsetCommitCallback{}; 46 | #else 47 | const static OffsetCommitCallback NullOffsetCommitCallback = OffsetCommitCallback{}; 48 | #endif 49 | 50 | /** 51 | * A metadata struct containing the consumer group information. 52 | */ 53 | class ConsumerGroupMetadata 54 | { 55 | public: 56 | explicit ConsumerGroupMetadata(rd_kafka_consumer_group_metadata_t* p): _rkConsumerGroupMetadata(p) {} 57 | 58 | const rd_kafka_consumer_group_metadata_t* rawHandle() const { return _rkConsumerGroupMetadata.get(); } 59 | 60 | private: 61 | rd_kafka_consumer_group_metadata_unique_ptr _rkConsumerGroupMetadata; 62 | }; 63 | 64 | } } } // end of KAFKA_API::clients::consumer 65 | 66 | -------------------------------------------------------------------------------- /src/include/kafka/ConsumerConfig.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Properties.h> 6 | 7 | 8 | namespace KAFKA_API { namespace clients { namespace consumer { 9 | 10 | /** 11 | * Configuration for the Kafka Consumer. 12 | */ 13 | class Config: public Properties 14 | { 15 | public: 16 | Config() = default; 17 | Config(const Config&) = default; 18 | explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} 19 | 20 | /** 21 | * The string contains host:port pairs of brokers (split by ",") that the consumer will use to establish initial connection to the Kafka cluster. 22 | * Note: It's mandatory. 23 | */ 24 | static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; 25 | 26 | /** 27 | * Group identifier. 28 | * Note: It's better to configure it manually, otherwise a random one would be used instead. 29 | * 30 | */ 31 | static const constexpr char* GROUP_ID = "group.id"; 32 | 33 | /** 34 | * Client identifier. 35 | */ 36 | static const constexpr char* CLIENT_ID = "client.id"; 37 | 38 | /** 39 | * Automatically commits previously polled offsets on each `poll` operation. 40 | */ 41 | static const constexpr char* ENABLE_AUTO_COMMIT = "enable.auto.commit"; 42 | 43 | /** 44 | * This property controls the behavior of the consumer when it starts reading a partition for which it doesn't have a valid committed offset. 45 | * The "latest" means the consumer will begin reading the newest records written after the consumer started. While "earliest" means that the consumer will read from the very beginning. 46 | * Available options: latest, earliest 47 | * Default value: latest 48 | */ 49 | static const constexpr char* AUTO_OFFSET_RESET = "auto.offset.reset"; 50 | 51 | /** 52 | * Emit RD_KAFKA_RESP_ERR_PARTITION_EOF event whenever the consumer reaches the end of a partition. 53 | * Default value: false 54 | */ 55 | static const constexpr char* ENABLE_PARTITION_EOF = "enable.partition.eof"; 56 | 57 | /** 58 | * This controls the maximum number of records that a single call to poll() will return. 59 | * Default value: 500 60 | */ 61 | static const constexpr char* MAX_POLL_RECORDS = "max.poll.records"; 62 |
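    /* Poll-loop sketch (broker, group and topic names are illustrative;
     * KafkaConsumer is assumed from <kafka/KafkaConsumer.h>, and keepRunning
     * is an assumed application flag):
     *
     *   Config cfg;
     *   cfg.put(Config::BOOTSTRAP_SERVERS, "localhost:9092");
     *   cfg.put(Config::GROUP_ID,          "demo-group");
     *   cfg.put(Config::MAX_POLL_RECORDS,  "100");
     *   KafkaConsumer consumer(cfg);
     *   consumer.subscribe({"demo-topic"});
     *   while (keepRunning) {
     *       auto records = consumer.poll(std::chrono::milliseconds(100));
     *       for (const auto& record: records)
     *           std::cout << record.toString() << std::endl;
     *   }
     */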
63 | /** 64 | * Minimum number of messages per topic/partition that librdkafka tries to maintain in the local consumer queue. 65 | * Note: With a larger value configured, the consumer would send FetchRequest towards brokers more frequently. 66 | * Default value: 100000 67 | */ 68 | static const constexpr char* QUEUED_MIN_MESSAGES = "queued.min.messages"; 69 | 70 | /** 71 | * Client group session and failure detection timeout. 72 | * If no heartbeat received by the broker within this timeout, the broker will remove the consumer and trigger a rebalance. 73 | * Default value: 10000 74 | */ 75 | static const constexpr char* SESSION_TIMEOUT_MS = "session.timeout.ms"; 76 | 77 | /** 78 | * Timeout for network requests. 79 | * Default value: 60000 80 | */ 81 | static const constexpr char* SOCKET_TIMEOUT_MS = "socket.timeout.ms"; 82 | 83 | /** 84 | * Control how to read messages written transactionally. 85 | * Available options: read_uncommitted, read_committed 86 | * Default value: read_committed 87 | */ 88 | static const constexpr char* ISOLATION_LEVEL = "isolation.level"; 89 | 90 | /* 91 | * The name of one or more partition assignment strategies. 92 | * The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. 93 | * Available options: range, roundrobin, cooperative-sticky 94 | * Default value: range,roundrobin 95 | */ 96 | static const constexpr char* PARTITION_ASSIGNMENT_STRATEGY = "partition.assignment.strategy"; 97 | 98 | /** 99 | * Protocol used to communicate with brokers. 100 | * Default value: plaintext 101 | */ 102 | static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; 103 | 104 | /** 105 | * SASL mechanism to use for authentication. 106 | * Default value: GSSAPI 107 | */ 108 | static const constexpr char* SASL_MECHANISM = "sasl.mechanisms"; 109 | 110 | /** 111 | * SASL username for use with the PLAIN and SASL-SCRAM-.. mechanism. 112 | */ 113 | static const constexpr char* SASL_USERNAME = "sasl.username"; 114 | 115 | /** 116 | * SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism. 117 | */ 118 | static const constexpr char* SASL_PASSWORD = "sasl.password"; 119 | 120 | /** 121 | * Shell command to refresh or acquire the client's Kerberos ticket. 122 | */ 123 | static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; 124 | 125 | /** 126 | * The client's Kerberos principal name. 127 | */ 128 | static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; 129 | 130 | /** 131 | * Set to "default" or "oidc" to control which login method is to be used. 132 | * If set to "oidc", the following properties must also be specified: 133 | * sasl.oauthbearer.client.id 134 | * sasl.oauthbearer.client.secret 135 | * sasl.oauthbearer.token.endpoint.url 136 | * Default value: default 137 | */ 138 | static const constexpr char* SASL_OAUTHBEARER_METHOD = "sasl.oauthbearer.method"; 139 | 140 | /** 141 | * Public identifier for the application. 142 | * Only used with "sasl.oauthbearer.method=oidc". 143 | */ 144 | static const constexpr char* SASL_OAUTHBEARER_CLIENT_ID = "sasl.oauthbearer.client.id"; 145 | 146 | /** 147 | * Client secret only known to the application and the authorization server. 148 | * Only used with "sasl.oauthbearer.method=oidc".
149 | */ 150 | static const constexpr char* SASL_OAUTHBEARER_CLIENT_SECRET = "sasl.oauthbearer.client.secret"; 151 | 152 | /** 153 | * Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. 154 | * Only used with "sasl.oauthbearer.method=oidc". 155 | */ 156 | static const constexpr char* SASL_OAUTHBEARER_EXTENSIONS = "sasl.oauthbearer.extensions"; 157 | 158 | /** 159 | * Clients use this to specify the scope of the access request to the broker. 160 | * Only used with "sasl.oauthbearer.method=oidc". 161 | */ 162 | static const constexpr char* SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope"; 163 | 164 | /** 165 | * OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve the token. 166 | * Only used with "sasl.oauthbearer.method=oidc". 167 | */ 168 | static const constexpr char* SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; 169 | 170 | /** 171 | * SASL/OAUTHBEARER configuration. 172 | * The format is implementation-dependent and must be parsed accordingly. 173 | */ 174 | static const constexpr char* SASL_OAUTHBEARER_CONFIG = "sasl.oauthbearer.config"; 175 | 176 | /** 177 | * Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. 178 | * Should only be used for development or testing, and not in production. 179 | * Default value: false 180 | */ 181 | static const constexpr char* ENABLE_SASL_OAUTHBEARER_UNSECURE_JWT = "enable.sasl.oauthbearer.unsecure.jwt"; 182 | 183 | }; 184 | 185 | } } } // end of KAFKA_API::clients::consumer 186 | 187 | -------------------------------------------------------------------------------- /src/include/kafka/ConsumerRecord.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Error.h> 6 | #include <kafka/Header.h> 7 | #include <kafka/Timestamp.h> 8 | #include <kafka/Types.h> 9 | 10 | #include <librdkafka/rdkafka.h> 11 | 12 | #include <sstream> 13 | 14 | 15 | namespace KAFKA_API { namespace clients { namespace consumer { 16 | 17 | /** 18 | * A key/value pair to be received from Kafka. 19 | * This also consists of the topic name and the partition number from which the record is being received, and an offset that points to the record in a Kafka partition. 20 | */ 21 | class ConsumerRecord 22 | { 23 | public: 24 | // ConsumerRecord will take the ownership of msg (rd_kafka_message_t*) 25 | explicit ConsumerRecord(rd_kafka_message_t* msg): _rk_msg(msg, rd_kafka_message_destroy) {} 26 | 27 | /** 28 | * The topic this record is received from. 29 | */ 30 | Topic topic() const { return _rk_msg->rkt ? rd_kafka_topic_name(_rk_msg->rkt): ""; } 31 | 32 | /** 33 | * The partition from which this record is received. 34 | */ 35 | Partition partition() const { return _rk_msg->partition; } 36 | 37 | /** 38 | * The position of this record in the corresponding Kafka partition. 39 | */ 40 | Offset offset() const { return _rk_msg->offset; } 41 | 42 | /** 43 | * The key (or null if no key is specified). 44 | */ 45 | Key key() const { return Key(_rk_msg->key, _rk_msg->key_len); } 46 | 47 | /** 48 | * The value. 49 | */ 50 | Value value() const { return Value(_rk_msg->payload, _rk_msg->len); } 51 | 52 | /** 53 | * The timestamp of the record. 54 | */ 55 | Timestamp timestamp() const 56 | { 57 | rd_kafka_timestamp_type_t tstype{}; 58 | const Timestamp::Value tsValue = rd_kafka_message_timestamp(_rk_msg.get(), &tstype); 59 | return {tsValue, tstype}; 60 | } 61 | 62 | /** 63 | * The headers of the record. 64 | */ 65 | Headers headers() const;
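    /* Header-reading sketch ("trace-id" is an illustrative key; `record` is
     * assumed to be a ConsumerRecord returned by KafkaConsumer::poll()):
     *
     *   for (const auto& hdr: record.headers())
     *       std::cout << hdr.toString() << std::endl;
     *   auto last = record.lastHeaderValue("trace-id");
     *   if (last.size()) std::cout << last.toString() << std::endl;
     */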
66 | 67 | /** 68 | * Return just one (the very last) header's value for the given key. 69 | */ 70 | Header::Value lastHeaderValue(const Header::Key& key); 71 | 72 | /** 73 | * The error. 74 | * 75 | * Possible cases: 76 | * 1. Success 77 | * - RD_KAFKA_RESP_ERR_NO_ERROR (0), -- got a message successfully 78 | * - RD_KAFKA_RESP_ERR__PARTITION_EOF, -- reached the end of a partition (got no message) 79 | * 2. Failure 80 | * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) 81 | */ 82 | Error error() const { return Error{_rk_msg->err}; } 83 | 84 | /** 85 | * Obtains explanatory string. 86 | */ 87 | std::string toString() const; 88 | 89 | private: 90 | using rd_kafka_message_shared_ptr = std::shared_ptr<rd_kafka_message_t>; 91 | rd_kafka_message_shared_ptr _rk_msg; 92 | }; 93 | 94 | inline Headers 95 | ConsumerRecord::headers() const 96 | { 97 | Headers headers; 98 | 99 | rd_kafka_headers_t* hdrs = nullptr; 100 | if (rd_kafka_message_headers(_rk_msg.get(), &hdrs) != RD_KAFKA_RESP_ERR_NO_ERROR) 101 | { 102 | return headers; 103 | } 104 | 105 | headers.reserve(rd_kafka_header_cnt(hdrs)); 106 | 107 | const char* name = nullptr; 108 | const void* valuePtr = nullptr; 109 | std::size_t valueSize = 0; 110 | for (std::size_t i = 0; !rd_kafka_header_get_all(hdrs, i, &name, &valuePtr, &valueSize); i++) 111 | { 112 | headers.emplace_back(name, Header::Value(valuePtr, valueSize)); 113 | } 114 | 115 | return headers; 116 | } 117 | 118 | inline Header::Value 119 | ConsumerRecord::lastHeaderValue(const Header::Key& key) 120 | { 121 | rd_kafka_headers_t* hdrs = nullptr; 122 | if (rd_kafka_message_headers(_rk_msg.get(), &hdrs) != RD_KAFKA_RESP_ERR_NO_ERROR) 123 | { 124 | return Header::Value(); 125 | } 126 | 127 | const void* valuePtr = nullptr; 128 | std::size_t valueSize = 0; 129 | return (rd_kafka_header_get_last(hdrs, key.c_str(), &valuePtr, &valueSize) == RD_KAFKA_RESP_ERR_NO_ERROR) ? 130 | Header::Value(valuePtr, valueSize) : Header::Value(); 131 | } 132 | 133 | inline std::string 134 | ConsumerRecord::toString() const 135 | { 136 | std::ostringstream oss; 137 | if (!error()) 138 | { 139 | oss << topic() << "-" << partition() << ":" << offset() << ", " << timestamp().toString() << ", " 140 | << (key().size() ?
(key().toString() + "/") : "") << value().toString(); 141 | } 142 | else if (error().value() == RD_KAFKA_RESP_ERR__PARTITION_EOF) 143 | { 144 | oss << "EOF[" << topic() << "-" << partition() << ":" << offset() << "]"; 145 | } 146 | else 147 | { 148 | oss << "ERROR[" << error().message() << ", " << topic() << "-" << partition() << ":" << offset() << "]"; 149 | } 150 | return oss.str(); 151 | } 152 | 153 | } } } // end of KAFKA_API::clients::consumer 154 | 155 | -------------------------------------------------------------------------------- /src/include/kafka/Error.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/RdKafkaHelper.h> 6 | 7 | #include <librdkafka/rdkafka.h> 8 | 9 | #include <string> 10 | #include <system_error> 11 | 12 | 13 | namespace KAFKA_API { 14 | 15 | struct ErrorCategory: public std::error_category 16 | { 17 | const char* name() const noexcept override { return "KafkaError"; } 18 | std::string message(int ev) const override { return rd_kafka_err2str(static_cast<rd_kafka_resp_err_t>(ev)); } 19 | 20 | template <typename T = void> 21 | struct Global { static ErrorCategory category; }; 22 | }; 23 | 24 | template <typename T> 25 | ErrorCategory ErrorCategory::Global<T>::category; 26 | 27 | 28 | /** 29 | * Unified error type. 30 | */ 31 | class Error 32 | { 33 | public: 34 | // The error with rich info 35 | explicit Error(rd_kafka_error_t* error = nullptr): _rkError(error, RkErrorDeleter) {} 36 | // The error with brief info 37 | explicit Error(rd_kafka_resp_err_t respErr): _respErr(respErr) {} 38 | // The error with detailed message 39 | Error(rd_kafka_resp_err_t respErr, std::string message, bool fatal = false) 40 | : _respErr(respErr), _message(std::move(message)), _isFatal(fatal) {} 41 | // Copy constructor 42 | Error(const Error& error) { *this = error; } 43 | 44 | // Assignment operator 45 | Error& operator=(const Error& error) 46 | { 47 | if (this == &error) return *this; 48 | 49 | _rkError.reset(); 50 | 51 | _respErr = static_cast<rd_kafka_resp_err_t>(error.value()); 52 | _message = error._message; 53 | _isFatal = error.isFatal(); 54 | _txnRequiresAbort = error.transactionRequiresAbort(); 55 | _isRetriable = error.isRetriable(); 56 | 57 | return *this; 58 | } 59 | 60 | /** 61 | * Check if the error is valid. 62 | */ 63 | explicit operator bool() const { return static_cast<bool>(value()); } 64 | 65 | /** 66 | * Conversion to `std::error_code` 67 | */ 68 | explicit operator std::error_code() const 69 | { 70 | return {value(), ErrorCategory::Global<>::category}; 71 | } 72 | 73 | /** 74 | * Obtains the underlying error code value. 75 | * 76 | * Actually, it's the same as 'rd_kafka_resp_err_t', which is defined by librdkafka. 77 | * 1. The negative values are for internal errors. 78 | * 2. Non-negative values are for external errors. See the definition at, 79 | * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) 80 | */ 81 | int value() const 82 | { 83 | return static_cast<int>(_rkError ? rd_kafka_error_code(_rkError.get()) : _respErr); 84 | } 85 | 86 | /** 87 | * Readable error string. 88 | */ 89 | std::string message() const 90 | { 91 | return _message ? *_message : 92 | (_rkError ? rd_kafka_error_string(_rkError.get()) : rd_kafka_err2str(_respErr)); 93 | } 94 | 95 | /** 96 | * Detailed error string. 97 | */ 98 | std::string toString() const 99 | { 100 | std::ostringstream oss; 101 | 102 | oss << rd_kafka_err2str(static_cast<rd_kafka_resp_err_t>(value())) << " [" << value() << "]" << (isFatal() ?
" fatal" : ""); 103 | if (transactionRequiresAbort()) oss << " | transaction-requires-abort"; 104 | if (auto retriable = isRetriable()) oss << " | " << (*retriable ? "retriable" : "non-retriable"); 105 | if (_message) oss << " | " << *_message; 106 | 107 | return oss.str(); 108 | } 109 | 110 | /** 111 | * Fatal error indicates that the client instance is no longer usable. 112 | */ 113 | bool isFatal() const 114 | { 115 | return _rkError ? rd_kafka_error_is_fatal(_rkError.get()) : _isFatal; 116 | } 117 | 118 | /** 119 | * Show whether the operation may be retried. 120 | */ 121 | Optional isRetriable() const 122 | { 123 | return _rkError ? rd_kafka_error_is_retriable(_rkError.get()) : _isRetriable; 124 | } 125 | 126 | /** 127 | * Show whether the error is an abortable transaction error. 128 | * 129 | * Note: 130 | * 1. Only valid for transactional API. 131 | * 2. If `true`, the producer must call `abortTransaction` and start a new transaction with `beginTransaction` to proceed with transactions. 132 | */ 133 | bool transactionRequiresAbort() const 134 | { 135 | return _rkError ? rd_kafka_error_txn_requires_abort(_rkError.get()) : false; 136 | } 137 | 138 | private: 139 | rd_kafka_error_shared_ptr _rkError; // For error with rich info 140 | rd_kafka_resp_err_t _respErr{}; // For error with a simple response code 141 | Optional _message; // Additional detailed message (if any) 142 | bool _isFatal = false; 143 | bool _txnRequiresAbort = false; 144 | Optional _isRetriable; // Retriable flag (if any) 145 | }; 146 | 147 | } // end of KAFKA_API 148 | 149 | -------------------------------------------------------------------------------- /src/include/kafka/Header.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | 12 | namespace KAFKA_API { 13 | 14 | /** 15 | * Message Header (with a key value pair) 16 | */ 17 | struct Header 18 | { 19 | using Key = std::string; 20 | using Value = ConstBuffer; 21 | 22 | Header() = default; 23 | Header(Key k, Value v): key(std::move(k)), value(v) {} 24 | 25 | /** 26 | * Obtains explanatory string. 27 | */ 28 | std::string toString() const 29 | { 30 | return (key.empty() ? "[null]" : key) + ":" + value.toString(); 31 | } 32 | 33 | Key key; 34 | Value value; 35 | }; 36 | 37 | /** 38 | * Message Headers. 39 | */ 40 | using Headers = std::vector
41 | 42 | /** 43 | * Null Headers. 44 | */ 45 | #if COMPILER_SUPPORTS_CPP_17 46 | const inline Headers NullHeaders = Headers{}; 47 | #else 48 | const static Headers NullHeaders = Headers{}; 49 | #endif 50 | 51 | /** 52 | * Obtains explanatory string for Headers. 53 | */ 54 | inline std::string toString(const Headers& headers) 55 | { 56 | std::string ret; 57 | std::for_each(headers.cbegin(), headers.cend(), 58 | [&ret](const auto& header) { 59 | ret.append(ret.empty() ? "" : ",").append(header.toString()); 60 | }); 61 | return ret; 62 | } 63 | 64 | } // end of KAFKA_API 65 | 66 | -------------------------------------------------------------------------------- /src/include/kafka/Interceptors.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <functional> 6 | 7 | 8 | namespace KAFKA_API { namespace clients { 9 | 10 | class Interceptors 11 | { 12 | public: 13 | using ThreadStartCallback = std::function<void(const std::string&, const std::string&)>; 14 | using ThreadExitCallback = std::function<void(const std::string&, const std::string&)>; 15 | 16 | Interceptors& onThreadStart(ThreadStartCallback cb) { _valid = true; _threadStartCb = std::move(cb); return *this; } 17 | Interceptors& onThreadExit(ThreadExitCallback cb) { _valid = true; _threadExitCb = std::move(cb); return *this; } 18 | 19 | ThreadStartCallback onThreadStart() const { return _threadStartCb; } 20 | ThreadExitCallback onThreadExit() const { return _threadExitCb; } 21 | 22 | bool empty() const { return !_valid; } 23 | 24 | private: 25 | ThreadStartCallback _threadStartCb; 26 | ThreadExitCallback _threadExitCb; 27 | bool _valid = false; 28 | }; 29 | 30 | } } // end of KAFKA_API::clients 31 | 32 | -------------------------------------------------------------------------------- /src/include/kafka/KafkaException.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Error.h> 6 | #include <kafka/RdKafkaHelper.h> 7 | #include <kafka/Utility.h> 8 | 9 | #include <librdkafka/rdkafka.h> 10 | 11 | #include <chrono> 12 | #include <memory> 13 | #include <string> 14 | 15 | 16 | namespace KAFKA_API { 17 | 18 | /** 19 | * Specific exception for Kafka clients. 20 | */ 21 | class KafkaException: public std::exception 22 | { 23 | public: 24 | KafkaException(const char* filename, std::size_t lineno, const Error& error) 25 | : _when(std::chrono::system_clock::now()), 26 | _filename(filename), 27 | _lineno(lineno), 28 | _error(std::make_shared<Error>(error)) 29 | {} 30 | 31 | /** 32 | * Obtains the underlying error. 33 | */ 34 | const Error& error() const { return *_error; } 35 |
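    /* Catch sketch (`producer` and `record` are illustrative): code built on
     * the KAFKA_THROW_* macros defined below surfaces failures this way:
     *
     *   try {
     *       producer.syncSend(record);
     *   } catch (const KafkaException& e) {
     *       std::cerr << e.what() << std::endl;  // local time, error detail, file:line
     *       if (e.error().isFatal()) std::abort();
     *   }
     */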
36 | /** 37 | * Obtains explanatory string. 38 | */ 39 | const char* what() const noexcept override 40 | { 41 | _what = utility::getLocalTimeString(_when) + ": " + _error->toString() + " (" + std::string(_filename) + ":" + std::to_string(_lineno) + ")"; 42 | return _what.c_str(); 43 | } 44 | 45 | private: 46 | using TimePoint = std::chrono::system_clock::time_point; 47 | 48 | const TimePoint _when; 49 | const std::string _filename; 50 | const std::size_t _lineno; 51 | const std::shared_ptr<Error> _error; 52 | mutable std::string _what; 53 | }; 54 | 55 | 56 | #define KAFKA_THROW_ERROR(error) throw KafkaException(__FILE__, __LINE__, error) 57 | #define KAFKA_THROW_IF_WITH_ERROR(error) if (error) KAFKA_THROW_ERROR(error) 58 | 59 | } // end of KAFKA_API 60 | 61 | -------------------------------------------------------------------------------- /src/include/kafka/Log.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Utility.h> 6 | 7 | #include <array> 8 | #include <cstdio> 9 | #include <functional> 10 | #include <iostream> 11 | #include <vector> 12 | 13 | 14 | namespace KAFKA_API { 15 | 16 | struct Log 17 | { 18 | enum Level 19 | { 20 | Emerg = 0, 21 | Alert = 1, 22 | Crit = 2, 23 | Err = 3, 24 | Warning = 4, 25 | Notice = 5, 26 | Info = 6, 27 | Debug = 7 28 | }; 29 | 30 | static const std::string& levelString(std::size_t level) 31 | { 32 | static const std::vector<std::string> levelNames = {"EMERG", "ALERT", "CRIT", "ERR", "WARNING", "NOTICE", "INFO", "DEBUG", "INVALID"}; 33 | static const std::size_t maxIndex = levelNames.size() - 1; 34 | 35 | return levelNames[(std::min)(level, maxIndex)]; 36 | } 37 | }; 38 | 39 | template <std::size_t MAX_CAPACITY> 40 | class LogBuffer 41 | { 42 | public: 43 | LogBuffer():_wptr(_buf.data()) { _buf[0] = 0; } // NOLINT 44 | 45 | LogBuffer& clear() 46 | { 47 | _wptr = _buf.data(); 48 | _buf[0] = 0; 49 | return *this; 50 | } 51 | 52 | template <class ...Args> 53 | LogBuffer& print(const char* format, Args...
args) 54 | { 55 | assert(!(_buf[0] != 0 && _wptr == _buf.data())); // means it has already been used as a plain buffer (with `str()`) 56 | 57 | auto cnt = std::snprintf(_wptr, capacity(), format, args...); // returns number of characters written if successful (not including '\0') 58 | if (cnt > 0) 59 | { 60 | _wptr = (std::min)(_wptr + cnt, _buf.data() + MAX_CAPACITY - 1); 61 | } 62 | return *this; 63 | } 64 | LogBuffer& print(const char* format) { return print("%s", format); } 65 | 66 | std::size_t capacity() const { return static_cast<std::size_t>(_buf.data() + MAX_CAPACITY - _wptr); } 67 | char* str() { return _buf.data(); } 68 | const char* c_str() const { return _buf.data(); } 69 | 70 | private: 71 | std::array<char, MAX_CAPACITY> _buf; 72 | char* _wptr; 73 | }; 74 | 75 | using Logger = std::function<void(int level, const char* filename, int lineno, const char* msg)>; 76 | 77 | inline void DefaultLogger(int level, const char* /*filename*/, int /*lineno*/, const char* msg) 78 | { 79 | std::cout << "[" << utility::getCurrentTime() << "]" << Log::levelString(static_cast<std::size_t>(level)) << " " << msg; 80 | std::cout << std::endl; 81 | } 82 | 83 | inline void NullLogger(int /*level*/, const char* /*filename*/, int /*lineno*/, const char* /*msg*/) 84 | { 85 | } 86 | 87 | } // end of KAFKA_API 88 | 89 | -------------------------------------------------------------------------------- /src/include/kafka/ProducerCommon.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Error.h> 6 | #include <kafka/ProducerRecord.h> 7 | #include <kafka/Timestamp.h> 8 | #include <kafka/Types.h> 9 | 10 | #include <librdkafka/rdkafka.h> 11 | 12 | #include <functional> 13 | #include <memory> 14 | 15 | 16 | namespace KAFKA_API { namespace clients { namespace producer { 17 | 18 | /** 19 | * The metadata for a record that has been acknowledged by the server. 20 | */ 21 | class RecordMetadata 22 | { 23 | public: 24 | enum class PersistedStatus { Not, Possibly, Done }; 25 | 26 | RecordMetadata() = default; 27 | 28 | RecordMetadata(const RecordMetadata& another) { *this = another; } 29 | 30 | // This is only called by the KafkaProducer::deliveryCallback (with a valid rkmsg pointer) 31 | RecordMetadata(const rd_kafka_message_t* rkmsg, Optional<ProducerRecord::Id> recordId) 32 | : _rkmsg(rkmsg), _recordId(recordId) {} 33 | 34 | RecordMetadata& operator=(const RecordMetadata& another) 35 | { 36 | if (this != &another) 37 | { 38 | const auto offsetOption = another.offset(); 39 | _cachedInfo = std::make_unique<CachedInfo>(another.topic(), 40 | another.partition(), 41 | offsetOption ? *offsetOption : RD_KAFKA_OFFSET_INVALID, 42 | another.keySize(), 43 | another.valueSize(), 44 | another.timestamp(), 45 | another.persistedStatus()); 46 | _recordId = another._recordId; 47 | _rkmsg = nullptr; 48 | } 49 | 50 | return *this; 51 | } 52 | 53 | /** 54 | * The topic the record was appended to. 55 | */ 56 | std::string topic() const 57 | { 58 | return _rkmsg ? (_rkmsg->rkt ? rd_kafka_topic_name(_rkmsg->rkt) : "") : _cachedInfo->topic; 59 | } 60 | 61 | /** 62 | * The partition the record was sent to. 63 | */ 64 | Partition partition() const 65 | { 66 | return _rkmsg ? _rkmsg->partition : _cachedInfo->partition; 67 | } 68 | 69 | /** 70 | * The offset of the record in the topic/partition. 71 | */ 72 | Optional<Offset> offset() const 73 | { 74 | auto offset = _rkmsg ? _rkmsg->offset : _cachedInfo->offset; 75 | return (offset != RD_KAFKA_OFFSET_INVALID) ? Optional<Offset>(offset) : Optional<Offset>(); 76 | } 77 | 78 | /** 79 | * The recordId could be used to identify the acknowledged message. 80 | */ 81 | Optional<ProducerRecord::Id> recordId() const 82 | { 83 | return _recordId; 84 | } 85 |
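    /* Correlation sketch: a record sent with id 42 can be matched to its
     * acknowledgement inside the delivery callback (values are illustrative):
     *
     *   producer::Callback onDelivery =
     *       [](const RecordMetadata& md, const Error& err) {
     *           if (!err && md.recordId() && *md.recordId() == 42)
     *               std::cout << md.toString() << std::endl;
     *       };
     */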
86 | /** 87 | * The size of the key in bytes. 88 | */ 89 | KeySize keySize() const 90 | { 91 | return _rkmsg ? _rkmsg->key_len : _cachedInfo->keySize; 92 | } 93 | 94 | /** 95 | * The size of the value in bytes. 96 | */ 97 | ValueSize valueSize() const 98 | { 99 | return _rkmsg ? _rkmsg->len : _cachedInfo->valueSize; 100 | } 101 | 102 | /** 103 | * The timestamp of the record in the topic/partition. 104 | */ 105 | Timestamp timestamp() const 106 | { 107 | return _rkmsg ? getMsgTimestamp(_rkmsg) : _cachedInfo->timestamp; 108 | } 109 | 110 | /** 111 | * The persisted status of the record. 112 | */ 113 | PersistedStatus persistedStatus() const 114 | { 115 | return _rkmsg ? getMsgPersistedStatus(_rkmsg) : _cachedInfo->persistedStatus; 116 | } 117 | 118 | std::string persistedStatusString() const 119 | { 120 | return getPersistedStatusString(persistedStatus()); 121 | } 122 | 123 | std::string toString() const 124 | { 125 | const auto offsetOption = offset(); 126 | const auto recordIdOption = recordId(); 127 | 128 | return topic() + "-" + std::to_string(partition()) + "@" + (offsetOption ? std::to_string(*offsetOption) : "NA") 129 | + (recordIdOption ? (":id[" + std::to_string(*recordIdOption) + "],") : ",") 130 | + timestamp().toString() + "," + persistedStatusString(); 131 | } 132 | 133 | private: 134 | static Timestamp getMsgTimestamp(const rd_kafka_message_t* rkmsg) 135 | { 136 | rd_kafka_timestamp_type_t tstype{}; 137 | const Timestamp::Value tsValue = rd_kafka_message_timestamp(rkmsg, &tstype); 138 | return {tsValue, tstype}; 139 | } 140 | 141 | static PersistedStatus getMsgPersistedStatus(const rd_kafka_message_t* rkmsg) 142 | { 143 | const rd_kafka_msg_status_t status = rd_kafka_message_status(rkmsg); 144 | return status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED ? PersistedStatus::Not : (status == RD_KAFKA_MSG_STATUS_PERSISTED ? PersistedStatus::Done : PersistedStatus::Possibly); 145 | } 146 | 147 | static std::string getPersistedStatusString(PersistedStatus status) 148 | { 149 | return status == PersistedStatus::Not ? "NotPersisted" : 150 | (status == PersistedStatus::Done ? "Persisted" : "PossiblyPersisted"); 151 | } 152 | 153 | struct CachedInfo 154 | { 155 | CachedInfo(Topic t, Partition p, Offset o, KeySize ks, ValueSize vs, Timestamp ts, PersistedStatus pst) 156 | : topic(std::move(t)), 157 | partition(p), 158 | offset(o), 159 | keySize(ks), 160 | valueSize(vs), 161 | timestamp(ts), 162 | persistedStatus(pst) 163 | { 164 | } 165 | 166 | CachedInfo(const CachedInfo&) = default; 167 | 168 | std::string topic; 169 | Partition partition; 170 | Offset offset; 171 | KeySize keySize; 172 | ValueSize valueSize; 173 | Timestamp timestamp; 174 | PersistedStatus persistedStatus; 175 | }; 176 | 177 | std::unique_ptr<CachedInfo> _cachedInfo; 178 | const rd_kafka_message_t* _rkmsg = nullptr; 179 | Optional<ProducerRecord::Id> _recordId; 180 | }; 181 | 182 | /** 183 | * A callback method could be used to provide asynchronous handling of request completion. 184 | * This method will be called when the record sent (by KafkaAsyncProducer) to the server has been acknowledged.
185 | */ 186 | using Callback = std::function<void(const RecordMetadata& metadata, const Error& error)>; 187 | 188 | } } } // end of KAFKA_API::clients::producer 189 | 190 | -------------------------------------------------------------------------------- /src/include/kafka/ProducerRecord.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <kafka/Project.h> 4 | 5 | #include <kafka/Header.h> 6 | #include <kafka/Types.h> 7 | 8 | #include <librdkafka/rdkafka.h> 9 | 10 | 11 | namespace KAFKA_API { namespace clients { namespace producer { 12 | 13 | /** 14 | * A key/value pair to be sent to Kafka. 15 | * This consists of a topic name to which the record is being sent, an optional partition number, and an optional key and value. 16 | * Note: `ProducerRecord` would not take the ownership from the memory block of `Value`. 17 | */ 18 | class ProducerRecord 19 | { 20 | public: 21 | using Id = std::uint64_t; 22 | 23 | ProducerRecord(Topic topic, Partition partition, const Key& key, const Value& value) 24 | : _topic(std::move(topic)), _partition(partition), _key(key), _value(value) {} 25 | 26 | ProducerRecord(const Topic& topic, Partition partition, const Key& key, const Value& value, Id id) 27 | : ProducerRecord(topic, partition, key, value) { _id = id; } 28 | 29 | ProducerRecord(const Topic& topic, const Key& key, const Value& value) 30 | : ProducerRecord(topic, RD_KAFKA_PARTITION_UA, key, value) {} 31 | 32 | ProducerRecord(const Topic& topic, const Key& key, const Value& value, Id id) 33 | : ProducerRecord(topic, key, value) { _id = id; } 34 | 35 | /** 36 | * The topic this record is being sent to. 37 | */ 38 | const Topic& topic() const { return _topic; } 39 | 40 | /** 41 | * The partition to which the record will be sent (or UNKNOWN_PARTITION if no partition was specified). 42 | */ 43 | Partition partition() const { return _partition; } 44 | 45 | /** 46 | * The key (or null if no key is specified). 47 | */ 48 | Key key() const { return _key; } 49 | 50 | /** 51 | * The value. 52 | */ 53 | Value value() const { return _value; } 54 | 55 | /** 56 | * The id to identify the message (consistent with `Producer::Metadata::recordId()`). 57 | */ 58 | Optional<Id> id() const { return _id; } 59 | 60 | /** 61 | * The headers. 62 | */ 63 | const Headers& headers() const { return _headers; } 64 | 65 | /** 66 | * The headers. 67 | * Note: Users could set headers with the reference. 68 | */ 69 | Headers& headers() { return _headers; } 70 | 71 | /** 72 | * Set the partition. 73 | */ 74 | void setPartition(Partition partition) { _partition = partition; } 75 | 76 | /** 77 | * Set the key. 78 | */ 79 | void setKey(const Key& key) { _key = key; } 80 | 81 | /** 82 | * Set the value. 83 | */ 84 | void setValue(const Value& value) { _value = value; } 85 | 86 | /** 87 | * Set the record id. 88 | */ 89 | void setId(Id id) { _id = id; } 90 | 91 | std::string toString() const 92 | { 93 | return _topic + "-" + (_partition == RD_KAFKA_PARTITION_UA ? "NA" : std::to_string(_partition)) + std::string(":") 94 | + (_id ? (std::to_string(*_id) + std::string(", ")) : " ") 95 | + (_headers.empty() ?
"" : ("headers[" + KAFKA_API::toString(_headers) + "], ")) 96 | + _key.toString() + std::string("/") + _value.toString(); 97 | } 98 | 99 | private: 100 | Topic _topic; 101 | Partition _partition; 102 | Key _key; 103 | Value _value; 104 | Headers _headers; 105 | Optional _id; 106 | }; 107 | 108 | } } } // end of KAFKA_API::clients::producer 109 | 110 | -------------------------------------------------------------------------------- /src/include/kafka/Project.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // Customize the namespace (default is `kafka`) if necessary 4 | #ifndef KAFKA_API 5 | #define KAFKA_API kafka 6 | #endif 7 | 8 | // Here is the MACRO to enable internal stubs for UT 9 | // #ifndef KAFKA_API_ENABLE_UNIT_TEST_STUBS 10 | // #define KAFKA_API_ENABLE_UNIT_TEST_STUBS 11 | // #endif 12 | 13 | 14 | #if ((__cplusplus >= 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)) 15 | #define COMPILER_SUPPORTS_CPP_17 1 // NOLINT 16 | #else 17 | #define COMPILER_SUPPORTS_CPP_17 0 // NOLINT 18 | #endif 19 | 20 | -------------------------------------------------------------------------------- /src/include/kafka/Properties.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | 13 | namespace KAFKA_API { 14 | 15 | /** 16 | * The properties for Kafka clients. 17 | */ 18 | class Properties 19 | { 20 | public: 21 | // Just make sure key will printed in order 22 | using PropertiesMap = std::map; 23 | 24 | Properties() = default; 25 | Properties(const Properties&) = default; 26 | explicit Properties(PropertiesMap kvMap): _kvMap(std::move(kvMap)) {} 27 | 28 | virtual ~Properties() = default; 29 | 30 | bool operator==(const Properties& rhs) const { return map() == rhs.map(); } 31 | 32 | /** 33 | * Set a property. 34 | * If the map previously contained a mapping for the key, the old value is replaced by the specified value. 35 | */ 36 | Properties& put(const std::string& key, const std::string& value) 37 | { 38 | _kvMap[key] = value; 39 | return *this; 40 | } 41 | 42 | /** 43 | * Remove the property (if one exists). 44 | */ 45 | void remove(const std::string& key) 46 | { 47 | _kvMap.erase(key); 48 | } 49 | 50 | /** 51 | * Get a property. 52 | * If the map previously contained a mapping for the key, the old value is replaced by the specified value. 53 | */ 54 | Optional getProperty(const std::string& key) const 55 | { 56 | Optional ret; 57 | auto search = _kvMap.find(key); 58 | if (search != _kvMap.end()) 59 | { 60 | ret = search->second; 61 | } 62 | return ret; 63 | } 64 | 65 | /** 66 | * Remove a property. 67 | */ 68 | void eraseProperty(const std::string& key) 69 | { 70 | _kvMap.erase(key); 71 | } 72 | 73 | std::string toString() const 74 | { 75 | 76 | std::string ret; 77 | std::for_each(_kvMap.cbegin(), _kvMap.cend(), 78 | [&ret](const auto& kv) { 79 | const std::string& key = kv.first; 80 | const std::string& value = kv.second; 81 | 82 | static const std::regex reSensitiveKey(R"(.+\.password|.+\.username|.+secret)"); 83 | const bool isSensitive = std::regex_match(key, reSensitiveKey); 84 | 85 | ret.append(ret.empty() ? "" : "|").append(key).append("=").append(isSensitive ? "*" : value); 86 | }); 87 | return ret; 88 | } 89 | 90 | /** 91 | * Get all properties with a map. 
--------------------------------------------------------------------------------
/src/include/kafka/RdKafkaHelper.h:
--------------------------------------------------------------------------------
#pragma once

#include <kafka/Project.h>

#include <kafka/Types.h>

#include <librdkafka/rdkafka.h>

#include <cassert>
#include <memory>

namespace KAFKA_API {

// Define smart pointers for rd_kafka_xxx datatypes

struct RkQueueDeleter { void operator()(rd_kafka_queue_t* p) { rd_kafka_queue_destroy(p); } };
using rd_kafka_queue_unique_ptr = std::unique_ptr<rd_kafka_queue_t, RkQueueDeleter>;

struct RkEventDeleter { void operator()(rd_kafka_event_t* p) { rd_kafka_event_destroy(p); } };
using rd_kafka_event_unique_ptr = std::unique_ptr<rd_kafka_event_t, RkEventDeleter>;

struct RkTopicDeleter { void operator()(rd_kafka_topic_t* p) { rd_kafka_topic_destroy(p); } };
using rd_kafka_topic_unique_ptr = std::unique_ptr<rd_kafka_topic_t, RkTopicDeleter>;

struct RkTopicPartitionListDeleter { void operator()(rd_kafka_topic_partition_list_t* p) { rd_kafka_topic_partition_list_destroy(p); } };
using rd_kafka_topic_partition_list_unique_ptr = std::unique_ptr<rd_kafka_topic_partition_list_t, RkTopicPartitionListDeleter>;

struct RkConfDeleter { void operator()(rd_kafka_conf_t* p) { rd_kafka_conf_destroy(p); } };
using rd_kafka_conf_unique_ptr = std::unique_ptr<rd_kafka_conf_t, RkConfDeleter>;

struct RkMetadataDeleter { void operator()(const rd_kafka_metadata_t* p) { rd_kafka_metadata_destroy(p); } };
using rd_kafka_metadata_unique_ptr = std::unique_ptr<const rd_kafka_metadata_t, RkMetadataDeleter>;

struct RkDeleter { void operator()(rd_kafka_t* p) { rd_kafka_destroy(p); } };
using rd_kafka_unique_ptr = std::unique_ptr<rd_kafka_t, RkDeleter>;

struct RkNewTopicDeleter { void operator()(rd_kafka_NewTopic_t* p) { rd_kafka_NewTopic_destroy(p); } };
using rd_kafka_NewTopic_unique_ptr = std::unique_ptr<rd_kafka_NewTopic_t, RkNewTopicDeleter>;

struct RkDeleteTopicDeleter { void operator()(rd_kafka_DeleteTopic_t* p) { rd_kafka_DeleteTopic_destroy(p); } };
using rd_kafka_DeleteTopic_unique_ptr = std::unique_ptr<rd_kafka_DeleteTopic_t, RkDeleteTopicDeleter>;

struct RkDeleteRecordsDeleter { void operator()(rd_kafka_DeleteRecords_t* p) { rd_kafka_DeleteRecords_destroy(p); } };
using rd_kafka_DeleteRecords_unique_ptr = std::unique_ptr<rd_kafka_DeleteRecords_t, RkDeleteRecordsDeleter>;

struct RkConsumerGroupMetadataDeleter { void operator()(rd_kafka_consumer_group_metadata_t* p) { rd_kafka_consumer_group_metadata_destroy(p); } };
using rd_kafka_consumer_group_metadata_unique_ptr = std::unique_ptr<rd_kafka_consumer_group_metadata_t, RkConsumerGroupMetadataDeleter>;

inline void RkErrorDeleter(rd_kafka_error_t* p) { rd_kafka_error_destroy(p); }
using rd_kafka_error_shared_ptr = std::shared_ptr<rd_kafka_error_t>;


inline std::string toString(rd_kafka_thread_type_t threadType)
{
    switch (threadType)
    {
        case RD_KAFKA_THREAD_MAIN:
            return "main";
        case RD_KAFKA_THREAD_BACKGROUND:
            return "background";
        case RD_KAFKA_THREAD_BROKER:
            return "broker";
        default:
            assert(false);
            return "NA";
    }
}
// Convert from rd_kafka_xxx datatypes
inline TopicPartitionOffsets getTopicPartitionOffsets(const rd_kafka_topic_partition_list_t* rk_tpos)
{
    TopicPartitionOffsets ret;
    const int count = rk_tpos ? rk_tpos->cnt : 0;
    for (int i = 0; i < count; ++i)
    {
        const Topic     t = rk_tpos->elems[i].topic;
        const Partition p = rk_tpos->elems[i].partition;
        const Offset    o = rk_tpos->elems[i].offset;

        ret[TopicPartition(t, p)] = o;
    }
    return ret;
}

inline Topics getTopics(const rd_kafka_topic_partition_list_t* rk_topics)
{
    Topics result;
    for (int i = 0; i < (rk_topics ? rk_topics->cnt : 0); ++i)
    {
        result.insert(rk_topics->elems[i].topic);
    }
    return result;
}

inline TopicPartitions getTopicPartitions(const rd_kafka_topic_partition_list_t* rk_tpos)
{
    TopicPartitions result;
    for (int i = 0; i < (rk_tpos ? rk_tpos->cnt : 0); ++i)
    {
        result.insert(TopicPartition{rk_tpos->elems[i].topic, rk_tpos->elems[i].partition});
    }
    return result;
}

// Convert to rd_kafka_xxx datatypes
inline rd_kafka_topic_partition_list_t* createRkTopicPartitionList(const TopicPartitionOffsets& tpos)
{
    rd_kafka_topic_partition_list_t* rk_tpos = rd_kafka_topic_partition_list_new(static_cast<int>(tpos.size()));
    for (const auto& tp_o: tpos)
    {
        const auto& tp = tp_o.first;
        const auto& o  = tp_o.second;
        rd_kafka_topic_partition_t* rk_tp = rd_kafka_topic_partition_list_add(rk_tpos, tp.first.c_str(), tp.second);
        rk_tp->offset = o;
    }
    return rk_tpos;
}

inline rd_kafka_topic_partition_list_t* createRkTopicPartitionList(const TopicPartitions& tps)
{
    TopicPartitionOffsets tpos;
    for (const auto& tp: tps)
    {
        tpos[TopicPartition(tp.first, tp.second)] = RD_KAFKA_OFFSET_INVALID;
    }
    return createRkTopicPartitionList(tpos);
}

inline rd_kafka_topic_partition_list_t* createRkTopicPartitionList(const Topics& topics)
{
    TopicPartitionOffsets tpos;
    for (const auto& topic: topics)
    {
        tpos[TopicPartition(topic, RD_KAFKA_PARTITION_UA)] = RD_KAFKA_OFFSET_INVALID;
    }
    return createRkTopicPartitionList(tpos);
}

} // end of KAFKA_API
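A brief sketch of the helpers above in combination: the deleter-equipped alias ties librdkafka cleanup to scope exit, and the conversion functions round-trip between C and C++ representations:

```cpp
// Hedged sketch: scope-bound cleanup plus a round trip through the converters.
#include <kafka/RdKafkaHelper.h>

void inspectAssignment()
{
    kafka::TopicPartitions tps{{"telemetry-cisco", 0}, {"telemetry-cisco", 1}};

    // createRkTopicPartitionList() returns a raw list; the unique_ptr alias
    // guarantees rd_kafka_topic_partition_list_destroy() on scope exit.
    kafka::rd_kafka_topic_partition_list_unique_ptr rk_tpos(kafka::createRkTopicPartitionList(tps));

    const kafka::TopicPartitions roundTrip = kafka::getTopicPartitions(rk_tpos.get());
    // roundTrip == tps
}
```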
--------------------------------------------------------------------------------
/src/include/kafka/Timestamp.h:
--------------------------------------------------------------------------------
#pragma once

#include <kafka/Project.h>

#include <librdkafka/rdkafka.h>

#include <chrono>
#include <cstdint>
#include <ctime>
#include <iomanip>
#include <sstream>
#include <string>


namespace KAFKA_API {

/**
 * The time point together with the type.
 */
struct Timestamp
{
    using Value = std::int64_t;

    enum class Type { NotAvailable, CreateTime, LogAppendTime };

    /**
     * The milliseconds since epoch.
     */
    Value msSinceEpoch;

    /**
     * The type shows what the `msSinceEpoch` means (CreateTime or LogAppendTime).
     */
    Type type;

    explicit Timestamp(Value v = 0, Type t = Type::NotAvailable): msSinceEpoch(v), type(t) {}
    Timestamp(Value v, rd_kafka_timestamp_type_t t): Timestamp(v, convertType(t)) {}

    static Type convertType(rd_kafka_timestamp_type_t tstype)
    {
        return (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME) ? Type::CreateTime :
               (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME ? Type::LogAppendTime : Type::NotAvailable);
    }

    operator std::chrono::time_point<std::chrono::system_clock>() const // NOLINT
    {
        return std::chrono::time_point<std::chrono::system_clock>(std::chrono::milliseconds(msSinceEpoch));
    }

    static std::string toString(Type t)
    {
        switch (t)
        {
            case Type::CreateTime:
                return "CreateTime";
            case Type::LogAppendTime:
                return "LogAppendTime";
            default:
                assert(t == Type::NotAvailable);
                return "";
        }
    }

    static std::string toString(Value v)
    {
        auto ms = std::chrono::milliseconds(v);
        auto timepoint = std::chrono::time_point<std::chrono::system_clock>(ms);
        const std::time_t time = std::chrono::system_clock::to_time_t(timepoint);
        std::ostringstream oss;
        std::tm tmBuf = {};
#if !defined(WIN32)
        oss << std::put_time(localtime_r(&time, &tmBuf), "%F %T") << "." << std::setfill('0') << std::setw(3) << (v % 1000);
#else
        localtime_s(&tmBuf, &time);
        oss << std::put_time(&tmBuf, "%F %T") << "." << std::setfill('0') << std::setw(3) << (v % 1000);
#endif
        return oss.str();
    }

    /**
     * Obtains explanatory string.
     */
    std::string toString() const
    {
        auto typeString = toString(type);
        auto timeString = toString(msSinceEpoch);
        return typeString.empty() ? timeString : (typeString + "[" + timeString + "]");
    }
};

} // end of KAFKA_API
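A quick sketch of the struct above: wrapping a raw millisecond timestamp and printing it, plus the implicit conversion to a `std::chrono` time point:

```cpp
// Hedged sketch: uses only what Timestamp.h itself declares.
#include <kafka/Timestamp.h>
#include <iostream>

int main()
{
    const kafka::Timestamp ts(1640995200000, kafka::Timestamp::Type::CreateTime);
    std::cout << ts.toString() << "\n"; // e.g. "CreateTime[2022-01-01 00:00:00.000]" (local time)

    const std::chrono::time_point<std::chrono::system_clock> tp = ts; // implicit conversion
    (void)tp;
}
```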
--------------------------------------------------------------------------------
/src/include/kafka/Types.h:
--------------------------------------------------------------------------------
#pragma once

#include <kafka/Project.h>

#include <algorithm>
#include <cctype>
#include <chrono>
#include <cstdint>
#include <iomanip>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <tuple>


// Use `boost::optional` for C++14, which doesn't support `std::optional`
#if COMPILER_SUPPORTS_CPP_17
#include <optional>
template <typename T>
using Optional = std::optional<T>;
#else
#include <boost/optional.hpp>
#include <boost/optional/optional_io.hpp>
template <typename T>
using Optional = boost::optional<T>;
#endif


namespace KAFKA_API {

// Similar to `boost::const_buffer` (thus avoiding a dependency on `boost`)
class ConstBuffer
{
public:
    explicit ConstBuffer(const void* data = nullptr, std::size_t size = 0): _data(data), _size(size) {}
    const void* data() const { return _data; }
    std::size_t size() const { return _size; }
    std::string toString() const
    {
        if (_size == 0) return _data ? "[empty]" : "[null]";

        std::ostringstream oss;

        auto printChar = [&oss](const unsigned char c) {
            if (std::isprint(c)) {
                oss << c;
            } else {
                oss << "[0x" << std::hex << std::setfill('0') << std::setw(2) << static_cast<int>(c) << "]";
            }
        };
        const auto* beg = static_cast<const unsigned char*>(_data);
        std::for_each(beg, beg + _size, printChar);

        return oss.str();
    }
private:
    const void* _data;
    std::size_t _size;
};


/**
 * Infinite timeout.
 */
#if COMPILER_SUPPORTS_CPP_17
const inline std::chrono::milliseconds InfiniteTimeout = (std::chrono::milliseconds::max)();
#else
const static std::chrono::milliseconds InfiniteTimeout = (std::chrono::milliseconds::max)();
#endif


/**
 * Topic name.
 */
using Topic = std::string;

/**
 * Partition number.
 */
using Partition = std::int32_t;

/**
 * Record offset.
 */
using Offset = std::int64_t;

/**
 * Record key.
 */
using Key     = ConstBuffer;
using KeySize = std::size_t;

/**
 * Null Key.
 */
#if COMPILER_SUPPORTS_CPP_17
const inline Key NullKey = Key{};
#else
const static Key NullKey = Key{};
#endif

/**
 * Record value.
 */
using Value     = ConstBuffer;
using ValueSize = std::size_t;

/**
 * Null Value.
 */
#if COMPILER_SUPPORTS_CPP_17
const inline Value NullValue = Value{};
#else
const static Value NullValue = Value{};
#endif

/**
 * Topic set.
 */
using Topics = std::set<Topic>;

/**
 * Topic Partition pair.
 */
using TopicPartition = std::pair<Topic, Partition>;

/**
 * TopicPartition set.
 */
using TopicPartitions = std::set<TopicPartition>;

/**
 * Topic/Partition/Offset tuple.
 */
using TopicPartitionOffset = std::tuple<Topic, Partition, Offset>;

/**
 * TopicPartition to Offset map.
 */
using TopicPartitionOffsets = std::map<TopicPartition, Offset>;


/**
 * Obtains explanatory string for Topics.
 */
inline std::string toString(const Topics& topics)
{
    std::string ret;
    std::for_each(topics.cbegin(), topics.cend(),
                  [&ret](const auto& topic) {
                      ret.append(ret.empty() ? "" : ",").append(topic);
                  });
    return ret;
}

/**
 * Obtains explanatory string for TopicPartition.
 */
inline std::string toString(const TopicPartition& tp)
{
    return tp.first + std::string("-") + std::to_string(tp.second);
}

/**
 * Obtains explanatory string for TopicPartitions.
 */
inline std::string toString(const TopicPartitions& tps)
{
    std::string ret;
    std::for_each(tps.cbegin(), tps.cend(),
                  [&ret](const auto& tp) {
                      ret.append((ret.empty() ? "" : ",") + tp.first + "-" + std::to_string(tp.second));
                  });
    return ret;
}

/**
 * Obtains explanatory string for TopicPartitionOffset.
 */
inline std::string toString(const TopicPartitionOffset& tpo)
{
    return std::get<0>(tpo) + "-" + std::to_string(std::get<1>(tpo)) + ":" + std::to_string(std::get<2>(tpo));
}

/**
 * Obtains explanatory string for TopicPartitionOffsets.
 */
inline std::string toString(const TopicPartitionOffsets& tpos)
{
    std::string ret;
    std::for_each(tpos.cbegin(), tpos.cend(),
                  [&ret](const auto& tp_o) {
                      const TopicPartition& tp = tp_o.first;
                      const Offset&         o  = tp_o.second;
                      ret.append((ret.empty() ? "" : ",") + tp.first + "-" + std::to_string(tp.second) + ":" + std::to_string(o));
                  });
    return ret;
}


/**
 * SASL OAUTHBEARER token info.
 */
struct SaslOauthbearerToken
{
    using KeyValuePairs = std::map<std::string, std::string>;

    std::string value;
    std::chrono::microseconds mdLifetime{};
    std::string mdPrincipalName;
    KeyValuePairs extensions;
};


} // end of KAFKA_API
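The aliases and `toString()` helpers above in action:

```cpp
// Hedged sketch: uses only the aliases and free functions defined in Types.h.
#include <kafka/Types.h>
#include <iostream>

int main()
{
    kafka::Topics topics{"telemetry-cisco", "telemetry-juniper"};
    kafka::TopicPartitionOffsets committed{
        {{"telemetry-cisco", 0}, 42},
        {{"telemetry-cisco", 1}, 7},
    };

    std::cout << kafka::toString(topics)    << "\n"; // telemetry-cisco,telemetry-juniper
    std::cout << kafka::toString(committed) << "\n"; // telemetry-cisco-0:42,telemetry-cisco-1:7
}
```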
--------------------------------------------------------------------------------
/src/include/kafka/Utility.h:
--------------------------------------------------------------------------------
#pragma once

#include <kafka/Project.h>

#include <librdkafka/rdkafka.h>

#include <chrono>
#include <ctime>
#include <iomanip>
#include <random>
#include <sstream>
#include <string>


namespace KAFKA_API { namespace utility {

/**
 * Get local time as string.
 */
inline std::string getLocalTimeString(const std::chrono::system_clock::time_point& timePoint)
{
    auto time = std::chrono::system_clock::to_time_t(timePoint);
    std::tm tmBuf = {};

#if !defined(WIN32)
    localtime_r(&time, &tmBuf);
#else
    localtime_s(&tmBuf, &time);
#endif

    std::ostringstream oss;
    oss << std::put_time(&tmBuf, "%F %T") << "." << std::setfill('0') << std::setw(6)
        << std::chrono::duration_cast<std::chrono::microseconds>(timePoint.time_since_epoch()).count() % 1000000;

    return oss.str();
}

/**
 * Get current local time as string.
 */
inline std::string getCurrentTime()
{
    return getLocalTimeString(std::chrono::system_clock::now());
}

/**
 * Get a random string.
 */
inline std::string getRandomString()
{
    using namespace std::chrono;

    const std::uint32_t timestamp = static_cast<std::uint32_t>(duration_cast<seconds>(system_clock::now().time_since_epoch()).count());

    std::random_device r;
    std::default_random_engine e(r());
    std::uniform_int_distribution<std::uint64_t> uniform_dist(0, 0xFFFFFFFF);
    const std::uint64_t rand = uniform_dist(e);

    std::ostringstream oss;
    oss << std::setfill('0') << std::setw(sizeof(std::uint32_t) * 2) << std::hex << timestamp << "-" << rand;
    return oss.str();
}

/**
 * Get librdkafka version string.
 */
inline std::string getLibRdKafkaVersion()
{
    return rd_kafka_version_str();
}

/**
 * Current number of threads created by rdkafka.
 */
inline int getLibRdKafkaThreadCount()
{
    return rd_kafka_thread_cnt();
}

} } // end of KAFKA_API::utility
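These helpers are handy for log prefixes and unique client ids; a minimal sketch:

```cpp
// Hedged sketch: calls only the utility functions shown above.
#include <kafka/Utility.h>
#include <iostream>

int main()
{
    std::cout << "now:        " << kafka::utility::getCurrentTime()        << "\n";
    std::cout << "client id:  " << kafka::utility::getRandomString()       << "\n";
    std::cout << "librdkafka: " << kafka::utility::getLibRdKafkaVersion()  << "\n";
}
```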
--------------------------------------------------------------------------------
/src/include/kafka/addons/KafkaMetrics.h:
--------------------------------------------------------------------------------
#pragma once

#include <kafka/Project.h>

// https://github.com/Tencent/rapidjson/releases/tag/v1.1.0
#include <rapidjson/document.h>
#include <rapidjson/stringbuffer.h>
#include <rapidjson/writer.h>

#include <algorithm>
#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>


namespace KAFKA_API {

/**
 * \brief Helps to parse the metrics string with JSON format.
 */
class KafkaMetrics
{
public:
    /**
     * \brief Initialize with the metrics string.
     */
    explicit KafkaMetrics(std::string jsonMetrics);

    static const constexpr char* WILDCARD = "*";

    using KeysType = std::vector<std::string>;

    /**
     * \brief The matched keys (for wildcards) and the value.
     */
    template <typename ValueType>
    using ResultsType = std::vector<std::pair<KeysType, ValueType>>;

    /**
     * \brief Get integer value(s) for the specified metrics.
     * Note: the wildcard ("*") is supported.
     */
    ResultsType<std::int64_t> getInt(const KeysType& keys) const { return get<std::int64_t>(keys); }

    /**
     * \brief Get string value(s) for the specified metrics.
     * Note: the wildcard ("*") is supported.
     */
    ResultsType<std::string> getString(const KeysType& keys) const { return get<std::string>(keys); }

    static std::string toString(const KafkaMetrics::KeysType& keys);

    template <typename ValueType>
    static std::string toString(const KafkaMetrics::ResultsType<ValueType>& results);

private:
    template <typename ValueType>
    ResultsType<ValueType> get(const KeysType& keys) const;

    template <typename ValueType>
    static void getResults(ResultsType<ValueType>&               results,
                           KeysType&                             keysForWildcards,
                           rapidjson::Value::ConstMemberIterator iter,
                           KeysType::const_iterator              keysToParse,
                           KeysType::const_iterator              keysEnd);

    template <typename ValueType>
    static ValueType getValue(rapidjson::Value::ConstMemberIterator iter);

#if COMPILER_SUPPORTS_CPP_17
    std::string       _decodeBuf;
#else
    std::vector<char> _decodeBuf;
#endif
    rapidjson::Document _jsonDoc;
};

inline
KafkaMetrics::KafkaMetrics(std::string jsonMetrics)
#if COMPILER_SUPPORTS_CPP_17
    : _decodeBuf(std::move(jsonMetrics))
#else
    : _decodeBuf(jsonMetrics.cbegin(), jsonMetrics.cend() + 1)
#endif
{
    if (_jsonDoc.ParseInsitu(_decodeBuf.data()).HasParseError())
    {
        throw std::runtime_error("Failed to parse string with JSON format!");
    }
}

template<>
inline std::int64_t
KafkaMetrics::getValue<std::int64_t>(rapidjson::Value::ConstMemberIterator iter)
{
    return iter->value.GetInt();
}

template<>
inline std::string
KafkaMetrics::getValue<std::string>(rapidjson::Value::ConstMemberIterator iter)
{
    return iter->value.GetString();
}

template <typename ValueType>
inline KafkaMetrics::ResultsType<ValueType>
KafkaMetrics::get(const KeysType& keys) const
{
    if (keys.empty())             throw std::invalid_argument("Input keys cannot be empty!");
    if (keys.front() == WILDCARD) throw std::invalid_argument("The first key cannot be wildcard!");
    if (keys.back() == WILDCARD)  throw std::invalid_argument("The last key cannot be wildcard!");

    ResultsType<ValueType> results;

    const rapidjson::Value::ConstMemberIterator iter = _jsonDoc.FindMember(keys.front().c_str());
    if (iter == _jsonDoc.MemberEnd()) return results;

    if (keys.size() == 1)
    {
        if (std::is_same<ValueType, std::string>::value ? iter->value.IsString() : iter->value.IsInt())
        {
            results.emplace_back(KeysType{}, getValue<ValueType>(iter));
        }

        return results;
    }

    KeysType keysForWildcards;

    getResults(results, keysForWildcards, iter, keys.cbegin() + 1, keys.cend());
    return results;
}
template <typename ValueType>
inline void
KafkaMetrics::getResults(KafkaMetrics::ResultsType<ValueType>& results,
                         KeysType&                             keysForWildcards,
                         rapidjson::Value::ConstMemberIterator iter,
                         KeysType::const_iterator              keysToParse,
                         KeysType::const_iterator              keysEnd)
{
    if (!iter->value.IsObject()) return;

    const auto& key     = *(keysToParse++);
    const bool isTheEnd = (keysToParse == keysEnd);

    if (key == WILDCARD)
    {
        for (rapidjson::Value::ConstMemberIterator subIter = iter->value.MemberBegin(); subIter != iter->value.MemberEnd(); ++subIter)
        {
            KeysType newKeysForWildcards = keysForWildcards;
            newKeysForWildcards.emplace_back(subIter->name.GetString());

            getResults(results, newKeysForWildcards, subIter, keysToParse, keysEnd);
        }
    }
    else
    {
        const rapidjson::Value::ConstMemberIterator subIter = iter->value.FindMember(key.c_str());
        if (subIter == iter->value.MemberEnd()) return;

        if (!isTheEnd)
        {
            getResults(results, keysForWildcards, subIter, keysToParse, keysEnd);
        }
        else if (std::is_same<ValueType, std::string>::value ? subIter->value.IsString() : subIter->value.IsInt())
        {
            results.emplace_back(keysForWildcards, getValue<ValueType>(subIter));
        }
    }
}

inline std::string
KafkaMetrics::toString(const KafkaMetrics::KeysType& keys)
{
    std::string ret;

    std::for_each(keys.cbegin(), keys.cend(),
                  [&ret](const auto& key){ ret.append((ret.empty() ? std::string() : std::string(", ")) + "\"" + key + "\""); });

    return ret;
}

template <typename ValueType>
inline std::string
KafkaMetrics::toString(const KafkaMetrics::ResultsType<ValueType>& results)
{
    std::ostringstream oss;
    bool isTheFirstOne = true;

    std::for_each(results.cbegin(), results.cend(),
                  [&oss, &isTheFirstOne](const auto& result) {
                      const auto keysString = toString(result.first);

                      oss << (isTheFirstOne ? (isTheFirstOne = false, "") : ", ")
                          << (keysString.empty() ? "" : (std::string("[") + keysString + "]:"));
                      oss << (std::is_same<ValueType, std::string>::value ? "\"" : "") << result.second << (std::is_same<ValueType, std::string>::value ? "\"" : "");
                  });

    return oss.str();
}

} // end of KAFKA_API
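A sketch of the wildcard lookup above, run against a truncated, hypothetical librdkafka statistics payload (the real payload has many more fields); the `*` expands over broker names:

```cpp
// Hedged sketch: the JSON shape is a made-up fragment of librdkafka stats.
#include <kafka/addons/KafkaMetrics.h>
#include <iostream>

int main()
{
    const std::string stats = R"({"brokers":{"kafka1:9092/1":{"rtt":{"avg":250}}}})";

    kafka::KafkaMetrics metrics(stats);
    for (const auto& entry : metrics.getInt({"brokers", "*", "rtt", "avg"})) {
        std::cout << kafka::KafkaMetrics::toString(entry.first) // matched wildcard keys
                  << " -> " << entry.second << "\n";            // e.g. "kafka1:9092/1" -> 250
    }
}
```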
"\"" : ""); 202 | }); 203 | 204 | return oss.str(); 205 | } 206 | 207 | } // end of KAFKA_API 208 | 209 | -------------------------------------------------------------------------------- /src/include/kafka/addons/UnorderedOffsetCommitQueue.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | namespace KAFKA_API { namespace clients { namespace consumer { 12 | 13 | template 14 | class Heap 15 | { 16 | public: 17 | bool empty() const { return data.empty(); } 18 | std::size_t size() const { return data.size(); } 19 | 20 | const T& front() const { return data[0]; } 21 | 22 | void push(const T& t) 23 | { 24 | data.emplace_back(t); 25 | 26 | for (std::size_t indexCurrent = data.size() - 1; indexCurrent > 0;) 27 | { 28 | const std::size_t indexParent = (indexCurrent + 1) / 2 - 1; 29 | 30 | if (!(data[indexCurrent] < data[indexParent])) return; 31 | 32 | std::swap(data[indexCurrent], data[indexParent]); 33 | indexCurrent = indexParent; 34 | } 35 | } 36 | 37 | void pop_front() 38 | { 39 | data[0] = data.back(); 40 | data.pop_back(); 41 | 42 | if (data.empty()) return; 43 | 44 | for (std::size_t indexCurrent = 0;;) 45 | { 46 | const std::size_t indexRightChild = (indexCurrent + 1) * 2; 47 | const std::size_t indexLeftChild = indexRightChild - 1; 48 | 49 | if (indexLeftChild >= data.size()) return; 50 | 51 | const std::size_t indexMinChild = (indexRightChild >= data.size() || data[indexLeftChild] < data[indexRightChild]) ? indexLeftChild : indexRightChild; 52 | 53 | if (!(data[indexMinChild] < data[indexCurrent])) return; 54 | 55 | std::swap(data[indexCurrent], data[indexMinChild]); 56 | indexCurrent = indexMinChild; 57 | } 58 | } 59 | 60 | private: 61 | std::vector data; 62 | }; 63 | 64 | 65 | /** 66 | * \brief The queue can be used to determine the right offset to commit. 67 | * A `KafkaManuallyCommitConsumer` might forward the received records to different handlers, while these handlers could not ack the records in order. 68 | * Then, the `UnorderedOffsetCommitQueue` would help, 69 | * 1. Prepare an `UnorderedOffsetCommitQueue` for each topic-partition. 70 | * 2. Make sure call `waitOffset()` for each record received. 71 | * 3. Make sure call `ackOffset()` while a handler acks for an record. 72 | * 4. Figure out whether there's offset to commit with `popOffsetToCommit()` and commit the offset then. 73 | */ 74 | class UnorderedOffsetCommitQueue 75 | { 76 | public: 77 | UnorderedOffsetCommitQueue(const Topic& topic, Partition partition) 78 | : _partitionInfo(std::string("topic[").append(topic).append("], paritition[").append(std::to_string(partition)).append("]")) 79 | { 80 | } 81 | UnorderedOffsetCommitQueue() = default; 82 | 83 | /** 84 | * \brief Return how many received offsets have not been popped to commit (with `popOffsetToCommit()`). 85 | */ 86 | std::size_t size() const { return _offsetsReceived.size(); } 87 | 88 | /** 89 | * \brief Add an offset (for a ConsumerRecord) to the waiting list, until it being acked (with `ackOffset`). 90 | * Note: Make sure the offset would be `ack` later with `ackOffset()`. 91 | */ 92 | void waitOffset(Offset offset) 93 | { 94 | if (offset < 0 || (!_offsetsReceived.empty() && offset <= _offsetsReceived.back())) 95 | { 96 | // Invalid offset (might be fetched from the record which had no valid offset) 97 | KAFKA_API_LOG(Log::Level::Err, "Got invalid offset to wait[%lld]! %s", offset, (_partitionInfo.empty() ? 
"" : _partitionInfo.c_str())); 98 | return; 99 | } 100 | 101 | _offsetsReceived.emplace_back(offset); 102 | } 103 | 104 | /** 105 | * \brief Ack the record has been handled and ready to be committed. 106 | * Note: If all offsets ahead has been acked, then with `popOffsetToCommit()`, we'd get `offset + 1`, which is ready to be committed for the consumer. 107 | */ 108 | void ackOffset(Offset offset) 109 | { 110 | const Offset maxOffsetReceived = _offsetsReceived.back(); 111 | if (offset > maxOffsetReceived) 112 | { 113 | // Runtime error 114 | KAFKA_API_LOG(Log::Level::Err, "Got invalid ack offset[%lld]! Even larger than all offsets received[%lld]! %s", offset, maxOffsetReceived, (_partitionInfo.empty() ? "" : _partitionInfo.c_str())); 115 | } 116 | 117 | _offsetsToCommit.push(offset); 118 | do 119 | { 120 | const Offset minOffsetToCommit = _offsetsToCommit.front(); 121 | const Offset expectedOffset = _offsetsReceived.front(); 122 | if (minOffsetToCommit == expectedOffset) 123 | { 124 | _toCommit = expectedOffset + 1; 125 | _offsetsToCommit.pop_front(); 126 | _offsetsReceived.pop_front(); 127 | } 128 | else if (minOffsetToCommit < expectedOffset) 129 | { 130 | // Inconsist error (might be caused by duplicated ack) 131 | KAFKA_API_LOG(Log::Level::Err, "Got invalid ack offset[%lld]! Even smaller than expected[%lld]! %s", minOffsetToCommit, expectedOffset, (_partitionInfo.empty() ? "" : _partitionInfo.c_str())); 132 | _offsetsToCommit.pop_front(); 133 | } 134 | else 135 | { 136 | break; 137 | } 138 | } while (!_offsetsToCommit.empty()); 139 | } 140 | 141 | /** 142 | * \brief Pop the offset which is ready for the consumer (if any). 143 | */ 144 | Optional popOffsetToCommit() 145 | { 146 | Optional ret; 147 | if (_committed != _toCommit) 148 | { 149 | ret = _committed = _toCommit; 150 | } 151 | return ret; 152 | } 153 | 154 | /** 155 | * \brief Return the offset last popped. 
--------------------------------------------------------------------------------
/src/pmtelemetryd.c:
--------------------------------------------------------------------------------
// Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
// Distributed under the MIT License (http://opensource.org/licenses/MIT)


// C Standard Library / ZeroMQ headers
#include <stdio.h>
#include <stdlib.h>
#include <zmq.h>

#include "bridge/grpc_collector_bridge.h"


int main(int argc, char *argv[])
{
    const char *zmq_uri = "ipc:///tmp/grpc.sock";
    grpc_payload *pload = NULL;
    start_grpc_dialout_collector(
        "/etc/opt/mdt-dialout-collector/mdt_dialout_collector.conf",
        zmq_uri);

    void *ctx = zmq_ctx_new();
    void *zmq_pull = zmq_socket(ctx, ZMQ_PULL);
    zmq_bind(zmq_pull, "ipc:///tmp/grpc.sock");
    //zmq_bind(zmq_pull, "inproc://grpc");

    while(1) {
        // The collector hands over a pointer to a heap-allocated payload
        zmq_recv(zmq_pull, &pload, sizeof(grpc_payload), 0);
        printf("%s\n", pload->writer_id);
        printf("%s\n", pload->event_type);
        printf("%s\n", pload->serialization);
        printf("%s\n", pload->telemetry_node);
        printf("%s\n", pload->telemetry_port);
        printf("%s\n", pload->telemetry_data);
        free_grpc_payload(pload);
    }

    return EXIT_SUCCESS;
}

--------------------------------------------------------------------------------
/src/proto/Cisco/README.md:
--------------------------------------------------------------------------------
#### Cisco's protobuf & gRPC dialout helper classes
--------------------------------------------------------------------------------
/src/proto/Huawei/README.md:
--------------------------------------------------------------------------------
#### Huawei's protobuf & gRPC dialout helper classes
--------------------------------------------------------------------------------
/src/proto/Juniper/README.md:
--------------------------------------------------------------------------------
#### Juniper's protobuf & gRPC dialout helper classes
--------------------------------------------------------------------------------
/src/proto/Makefile.am:
--------------------------------------------------------------------------------
MAINTAINERCLEANFILES = Makefile.in

noinst_LTLIBRARIES = libgrpc_collector_proto.la

libgrpc_collector_proto_la_SOURCES = \
    Huawei/huawei_dialout.pb.cc \
    Huawei/huawei_telemetry.pb.cc \
    Huawei/huawei_telemetry.grpc.pb.cc \
    Huawei/huawei_dialout.grpc.pb.cc \
    Cisco/cisco_telemetry.pb.cc \
    Cisco/cisco_dialout.grpc.pb.cc \
    Cisco/cisco_dialout.pb.cc \
    Cisco/cisco_telemetry.grpc.pb.cc \
    Juniper/juniper_dialout.pb.cc \
    Juniper/juniper_gnmi.pb.cc \
    Juniper/juniper_gnmi_ext.pb.cc \
    Juniper/juniper_gnmi_ext.grpc.pb.cc \
    Juniper/juniper_telemetry_header_extension.pb.cc \
    Juniper/juniper_telemetry.pb.cc \
    Juniper/juniper_telemetry.grpc.pb.cc \
    Juniper/juniper_telemetry_header_extension.grpc.pb.cc \
    Juniper/juniper_gnmi.grpc.pb.cc \
    Juniper/juniper_dialout.grpc.pb.cc \
    Nokia/nokia_dialout.pb.cc \
    Nokia/nokia_gnmi.pb.cc \
    Nokia/nokia_gnmi_ext.pb.cc \
    Nokia/nokia_gnmi_ext.grpc.pb.cc \
    Nokia/nokia_gnmi.grpc.pb.cc \
    Nokia/nokia_dialout.grpc.pb.cc \
    OpenConfig/openconfig_interfaces.pb.cc \
    OpenConfig/openconfig_interfaces.grpc.pb.cc

libgrpc_collector_proto_la_CPPFLAGS = -I$(top_builddir)/src/include/ -I$(top_srcdir)/src/include/
--------------------------------------------------------------------------------
/src/proto/Nokia/README.md:
--------------------------------------------------------------------------------
#### Nokia's protobuf & gRPC dialout helper classes
--------------------------------------------------------------------------------
/src/proto/OpenConfig/README.md:
--------------------------------------------------------------------------------
#### OpenConfig's protobuf helper classes
--------------------------------------------------------------------------------
/src/tests/README.md:
--------------------------------------------------------------------------------
# (External) ZMQ Poller

```SHELL
cd /src/tests
g++ -O2 -Wall -pedantic zmq_pull.cc -o ../../bin/zmq_pull -lzmq
```

--------------------------------------------------------------------------------
/src/tests/zmq_pull.cc:
--------------------------------------------------------------------------------
// Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
// Distributed under the MIT License (http://opensource.org/licenses/MIT)


#include <iostream>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include <zmq.hpp>


void *zmq_pull(zmq::context_t &ctx);

// CONNECT/RECEIVE - PULL
int main(void)
{
    // ZMQ Context
    zmq::context_t ctx;

    // Actual receiving
    std::vector<std::thread> th_fire;
    size_t th = 1;
    std::cout << "Firing " << th << " threads, Reading & PULL-ing\n";
    for (size_t t = 0; t < th; ++t) {
        th_fire.push_back(std::thread (
            &zmq_pull,
            std::ref(ctx)));
    }

    for (std::thread &t : th_fire) {
        if (t.joinable()) {
            t.join();
        }
    }

    return EXIT_SUCCESS;
}

// Read from socks - multiple threads
void *zmq_pull(zmq::context_t &ctx)
{
    zmq::socket_t sock(ctx, zmq::socket_type::pull);

    // Message Buff preparation
    const size_t size = 4096;
    zmq::message_t message(size);

    // --- Convert the thread ID into string --- //
    auto t_id = std::this_thread::get_id();
    std::stringstream ss;
    ss << t_id;
    std::string thread_id = ss.str();
    // --- Convert the thread ID into string --- //

    std::string sok = "ipc:///tmp/grpc.sock";
    std::cout << "PULL-ing from " << sok << "\n";
    sock.bind(sok);
    while(true) {
        auto res = sock.recv(message, zmq::recv_flags::none);
        if (res.value() != 0) {
            std::cout << thread_id << " PULL-ing from " << sok << ": "
                      << message.to_string() << "\n";
        }
        //std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }

    return (0);
}

--------------------------------------------------------------------------------
/src/utils/Makefile.am:
--------------------------------------------------------------------------------
MAINTAINERCLEANFILES = Makefile.in

noinst_LTLIBRARIES = libgrpc_collector_utils.la

libgrpc_collector_utils_la_SOURCES = cfg_handler.cc logs_handler.cc

libgrpc_collector_utils_la_CPPFLAGS = -I$(top_srcdir)/src/include/
--------------------------------------------------------------------------------
/src/utils/logs_handler.cc:
--------------------------------------------------------------------------------
// Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
// Distributed under the MIT License (http://opensource.org/licenses/MIT)


// mdt-dialout-collector Library headers
#include "logs_handler.h"
#include "cfg_handler.h"
#include <iostream>


LogsHandler::LogsHandler()
{
    if (set_boot_spdlog_sinks() == false) {
        std::cout << "Unable to LogsHandler::set_boot_spdlog_sinks(...)\n";
        std::exit(EXIT_FAILURE);
    } else {
        spdlog::get("multi-logger-boot")->debug("constructor: LogsHandler()");
    }
}

bool LogsHandler::set_boot_spdlog_sinks()
{
    std::vector<spdlog::sink_ptr> spdlog_sinks;
    std::string spdlog_level = "debug";

    // Syslog
    const std::string ident = "mdt-dialout-collector";
    try {
        auto spdlog_syslog =
            std::make_shared<spdlog::sinks::syslog_sink_mt>(
                ident, 0, LOG_USER, true);
        spdlog_sinks.push_back(spdlog_syslog);
    } catch (const spdlog::spdlog_ex &sex) {
        std::cout << "spdlog, syslog: " << sex.what() << "\n";
        return false;
    }

    // ConsoleLog
    try {
        auto spdlog_console =
            std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
        spdlog_sinks.push_back(spdlog_console);
    } catch (const spdlog::spdlog_ex &sex) {
        std::cout << "spdlog, console: " << sex.what() << "\n";
        return false;
    }

    this->multi_logger_boot = std::make_shared<spdlog::logger>
        ("multi-logger-boot", begin(spdlog_sinks), end(spdlog_sinks));
    this->multi_logger_boot->set_level(spdlog::level::from_str(spdlog_level));
    spdlog::register_logger(this->multi_logger_boot);

    return true;
}

bool LogsHandler::set_spdlog_sinks()
{
    std::vector<spdlog::sink_ptr> spdlog_sinks;
    std::string spdlog_level = logs_cfg_parameters.at("spdlog_level");

    // Mapping syslog facility strings to codified integers.
    // https://www.rfc-editor.org/rfc/rfc5424
    std::map<std::string, int> syslog_facility {
        {"LOG_DAEMON", 3},
        {"LOG_USER",   1},
        {"LOG_LOCAL0", 16},
        {"LOG_LOCAL1", 17},
        {"LOG_LOCAL2", 18},
        {"LOG_LOCAL3", 19},
        {"LOG_LOCAL4", 20},
        {"LOG_LOCAL5", 21},
        {"LOG_LOCAL6", 22},
        {"LOG_LOCAL7", 23},
    };

    // Syslog
    if (logs_cfg_parameters.at("syslog").compare("true") == 0) {
        const std::string ident = logs_cfg_parameters.at("syslog_ident");
        try {
            auto spdlog_syslog =
                std::make_shared<spdlog::sinks::syslog_sink_mt>(
                    ident, 0,
                    // syslog facility codified integers are multiplied by 8
                    // (RFC 5424: PRI = facility * 8 + severity)
                    syslog_facility[
                        logs_cfg_parameters.at("syslog_facility")] * 8,
                    true);
            spdlog_sinks.push_back(spdlog_syslog);
        } catch (const spdlog::spdlog_ex &sex) {
            std::cout << "spdlog, syslog: " << sex.what() << "\n";
            return false;
        }
    }

    // ConsoleLog
    if (logs_cfg_parameters.at("console_log").compare("true") == 0){
        try {
            auto spdlog_console =
                std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
            spdlog_sinks.push_back(spdlog_console);
        } catch (const spdlog::spdlog_ex &sex) {
            std::cout << "spdlog, console: " << sex.what() << "\n";
            return false;
        }
    }

    this->multi_logger = std::make_shared<spdlog::logger>
        ("multi-logger", begin(spdlog_sinks), end(spdlog_sinks));
    this->multi_logger->set_level(spdlog::level::from_str(spdlog_level));
    spdlog::register_logger(this->multi_logger);

    return true;
}

--------------------------------------------------------------------------------
/src/utils/logs_handler.h:
--------------------------------------------------------------------------------
// Copyright(c) 2022-present, Salvatore Cuzzilla (Swisscom AG)
// Distributed under the MIT License (http://opensource.org/licenses/MIT)


#ifndef _LOGS_HANDLER_H_
#define _LOGS_HANDLER_H_

// C++ Standard Library headers
#include <iostream>
#include <memory>
// External Library headers
#include <spdlog/spdlog.h>
#include <spdlog/logger.h>
#include <spdlog/common.h>
#include <spdlog/sinks/stdout_color_sinks.h>
#include <spdlog/sinks/syslog_sink.h>


class LogsHandler {
public:
    LogsHandler();
    ~LogsHandler() {
        spdlog::get("multi-logger")->debug("destructor: ~LogsHandler()"); };
    bool set_boot_spdlog_sinks();
    bool set_spdlog_sinks();
private:
    std::shared_ptr<spdlog::logger> multi_logger_boot;
    std::shared_ptr<spdlog::logger> multi_logger;
};

#endif

--------------------------------------------------------------------------------
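To close, a minimal sketch of how `LogsHandler` is meant to be driven. Note that `set_spdlog_sinks()` expects `logs_cfg_parameters` (declared in cfg_handler) to be populated first; how that happens is outside this file, so the flow below is an assumption:

```cpp
// Hedged sketch: boot-time logging is available right after construction.
#include "logs_handler.h"

int main()
{
    LogsHandler logs;                  // registers the "multi-logger-boot" logger
    spdlog::get("multi-logger-boot")->info("collector starting");

    // ... load configuration so logs_cfg_parameters is populated ...

    if (logs.set_spdlog_sinks()) {     // registers "multi-logger" from config
        spdlog::get("multi-logger")->info("config-driven sinks active");
    }
    return 0;
}
```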