├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ └── feature_request.md ├── commitlint.config.js └── workflows │ ├── ci.yml │ └── release.yaml ├── .gitignore ├── .goreleaser.yaml ├── LICENSE ├── Makefile ├── README.md ├── README_en.md ├── agent ├── agent.go ├── ibex_agent.go ├── ibex_agent_none.go ├── install │ ├── service_darwin.go │ ├── service_freebsd.go │ ├── service_linux.go │ └── service_windows.go ├── logs_agent.go ├── logs_agent_none.go ├── logs_endpoints.go ├── metrics_agent.go ├── metrics_reader.go ├── prometheus_agent.go ├── promethues_agent_none.go └── update │ ├── update_darwin.go │ ├── update_freebsd.go │ ├── update_linux.go │ └── update_windows.go ├── api ├── router_falcon.go ├── router_func.go ├── router_opentsdb.go ├── router_pushgateway.go ├── router_remotewrite.go └── server.go ├── conf ├── config.toml ├── input.aliyun │ └── cloud.toml ├── input.amd_rocm_smi │ └── rocm.toml ├── input.apache │ └── apache.toml ├── input.appdynamics │ └── app.toml ├── input.arp_packet │ └── arp_packet.toml ├── input.bind │ └── bind.toml ├── input.cadvisor │ └── cadvisor.toml ├── input.chrony │ └── chrony.toml ├── input.clickhouse │ └── clickhouse.toml ├── input.cloudwatch │ └── cloud.toml ├── input.conntrack │ └── conntrack.toml ├── input.consul │ └── consul.toml ├── input.cpu │ └── cpu.toml ├── input.dcgm │ ├── 1.x-compatibility-metrics.csv │ ├── dcp-metrics-included.csv │ ├── default-counters.csv │ └── exporter.toml ├── input.disk │ └── disk.toml ├── input.diskio │ └── diskio.toml ├── input.dns_query │ └── dns_query.toml ├── input.docker │ └── docker.toml ├── input.elasticsearch │ └── elasticsearch.toml ├── input.emc_unity │ └── emc_unity.toml ├── input.ethtool │ └── ethtool.toml ├── input.exec │ └── exec.toml ├── input.filecount │ └── filecount.toml ├── input.gnmi │ ├── gnmi.toml │ └── interface.toml.example ├── input.googlecloud │ └── gcp.toml ├── input.greenplum │ └── greenplum.toml ├── input.hadoop │ └── hadoop.toml ├── input.haproxy │ └── haproxy.toml ├── input.http_response │ └── http_response.toml ├── input.influxdb │ └── influxdb.toml ├── input.ipmi │ └── conf.toml ├── input.iptables │ └── iptables.toml ├── input.ipvs │ └── ipvs.toml ├── input.jenkins │ └── jenkins.toml ├── input.jolokia_agent_kafka │ └── kafka.toml ├── input.jolokia_agent_misc │ ├── activemq.toml │ ├── bitbucket.toml │ ├── cassandra.toml │ ├── hadoop-hdfs.toml │ ├── java.toml │ ├── jboss.toml │ ├── kafka-connect.toml │ ├── kafka.toml │ ├── tomcat.toml │ ├── weblogic.toml │ └── zookeeper.toml ├── input.kafka │ └── kafka.toml ├── input.kernel │ └── kernel.toml ├── input.kernel_vmstat │ └── kernel_vmstat.toml ├── input.kubernetes │ └── kubernetes.toml ├── input.ldap │ └── ldap.toml ├── input.linux_sysctl_fs │ └── linux_sysctl_fs.toml ├── input.logstash │ └── logstash.toml ├── input.mem │ └── mem.toml ├── input.mongodb │ └── mongodb.toml ├── input.mtail │ └── mtail.toml ├── input.mysql │ └── mysql.toml ├── input.nats │ └── nats.toml ├── input.net │ └── net.toml ├── input.net_response │ └── net_response.toml ├── input.netstat │ └── netstat.toml ├── input.netstat_filter │ └── netstat_filter.toml ├── input.nfsclient │ └── nfsclient.toml ├── input.nginx │ └── nginx.toml ├── input.nginx_upstream_check │ └── nginx_upstream_check.toml ├── input.node_exporter │ └── exporter.toml ├── input.nsq │ └── nsq.toml ├── input.ntp │ └── ntp.toml ├── input.nvidia_smi │ └── nvidia_smi.toml ├── input.oracle │ ├── grafana.json │ ├── metric.toml │ └── oracle.toml ├── input.phpfpm │ └── phpfpm.toml ├── input.ping │ └── ping.toml ├── 
input.postgresql │ └── postgresql.toml ├── input.processes │ └── processes.toml ├── input.procstat │ └── procstat.toml ├── input.prometheus │ └── prometheus.toml ├── input.rabbitmq │ └── rabbitmq.toml ├── input.redfish │ └── redfish.toml ├── input.redis │ └── redis.toml ├── input.redis_sentinel │ └── redis_sentinel.toml ├── input.rocketmq_offset │ └── rocketmq_offset.toml ├── input.self_metrics │ └── metrics.toml ├── input.smart │ └── smart.toml ├── input.snmp │ ├── snmp.toml │ └── snmp.toml.example ├── input.snmp_trap │ └── trap.toml ├── input.sockstat │ └── sockstat.toml ├── input.sqlserver │ └── sqlserver.toml ├── input.supervisor │ └── supervisor.toml ├── input.switch_legacy │ └── switch_legacy.toml ├── input.system │ └── system.toml ├── input.systemd │ └── systemd.toml ├── input.tengine │ └── tengine.toml ├── input.tomcat │ └── tomcat.toml ├── input.traffic_server │ └── traffic_server.toml ├── input.vsphere │ └── vsphere.toml ├── input.whois │ └── whois.toml ├── input.x509_cert │ └── x509_cert.toml ├── input.xskyapi │ └── xskyapi.toml ├── input.zookeeper │ └── zookeeper.toml └── logs.toml ├── config ├── config.go ├── duration.go ├── hostname.go ├── http.go ├── inline.go ├── logs.go ├── logs │ ├── channel_message.go │ ├── config.go │ ├── constants.go │ ├── endpoints.go │ ├── info.go │ ├── integration_config.go │ ├── messages.go │ ├── processing_rules.go │ ├── source.go │ ├── sources.go │ ├── stats_tracker.go │ └── status.go ├── logs_none.go ├── prometheus.go ├── provider.go ├── proxy.go ├── secret.go ├── secret_protected.go ├── urllabel.go └── version.go ├── doc ├── categraf-usage.png ├── categraf.png ├── clickhouse.toml ├── flashduty.png ├── img │ └── nightingale-template-center.png ├── laqun.jpeg ├── provider.toml └── why-choose-categraf.png ├── docker ├── Dockerfile ├── Dockerfile.goreleaser ├── Dockerfile.goreleaser.arm64 ├── entrypoint.sh └── nsswitch.conf ├── go.mod ├── go.sum ├── heartbeat ├── cpu │ ├── cpu.go │ ├── cpu_darwin.go │ ├── cpu_linux.go │ ├── cpu_windows.go │ ├── cpu_windows_386.go │ ├── cpu_windows_amd64.go │ └── cpu_windows_arm64.go ├── filesystem │ ├── filesystem.go │ ├── filesystem_common.go │ ├── filesystem_darwin.go │ ├── filesystem_linux.go │ └── filesystem_windows.go ├── heartbeat.go ├── memory │ ├── memory.go │ ├── memory_darwin.go │ ├── memory_linux.go │ └── memory_windows.go ├── meta.go ├── network │ ├── ipconfig_test_sample.txt │ ├── network.go │ ├── network_common.go │ └── network_windows.go └── platform │ ├── platform.go │ ├── platform_android.go │ ├── platform_common.go │ ├── platform_darwin.go │ ├── platform_linux.go │ ├── platform_windows.go │ ├── platform_windows_386.go │ ├── platform_windows_amd64.go │ └── platform_windows_arm64.go ├── ibex ├── client │ └── cli.go ├── cmd_nix.go ├── cmd_windows.go ├── heartbeat.go ├── task.go ├── tasks.go └── types │ └── types.go ├── inputs ├── README.md ├── activemq │ └── README.md ├── aliyun │ ├── README.md │ ├── aliyun-cdn.json │ ├── aliyun-dcdn.json │ ├── cloud.go │ ├── dashboard_for_polardb_mysql.json │ ├── dashboard_for_redis_kvstore_standard.json │ └── internal │ │ ├── manager │ │ ├── cms.go │ │ ├── ecs.go │ │ └── manager.go │ │ └── types │ │ └── types.go ├── amd_rocm_smi │ ├── README.md │ └── amd_rocm_smi.go ├── apache │ ├── LICENSE │ ├── README.md │ ├── apache.go │ └── exporter │ │ └── collector.go ├── appdynamics │ ├── appdynamics.go │ └── instances.go ├── arp_packet │ ├── README.md │ ├── arp_packet.go │ └── arp_packet_none.go ├── bind │ ├── README.md │ ├── bind.go │ ├── json_stats.go │ ├── 
xml_stats_v2.go │ └── xml_stats_v3.go ├── bitbucket │ └── README.md ├── cadvisor │ ├── README.md │ ├── cadvisor.go │ └── instances.go ├── cassandra │ └── README.md ├── chrony │ ├── README.md │ └── chrony.go ├── clickhouse │ ├── README.md │ └── clickhouse.go ├── cloudwatch │ ├── README.md │ ├── cloudwatch.go │ └── sample.conf ├── collector.go ├── conntrack │ ├── README.md │ ├── conntrack.go │ └── conntrack_nolinux.go ├── consul │ ├── README.md │ └── consul.go ├── cpu │ ├── README.md │ └── cpu.go ├── dcgm │ ├── dcgmexporter │ │ ├── clock_events_collector.go │ │ ├── config.go │ │ ├── const.go │ │ ├── dcgm.go │ │ ├── expcollector.go │ │ ├── exporter_metrics.go │ │ ├── field_entity_group_system_info.go │ │ ├── gpu_collector.go │ │ ├── kubernetes.go │ │ ├── parser.go │ │ ├── pipeline.go │ │ ├── registry.go │ │ ├── server.go │ │ ├── system_info.go │ │ ├── types.go │ │ ├── utils.go │ │ └── xid_collector.go │ ├── exporter.go │ └── exporter_none.go ├── disk │ ├── README.md │ └── disk.go ├── diskio │ ├── README.md │ └── diskio.go ├── dns_query │ ├── README.md │ └── dns_query.go ├── docker │ ├── README.md │ ├── client.go │ ├── docker.go │ ├── errors.go │ └── stats_helper.go ├── elasticsearch │ ├── README.md │ ├── README_en.md │ ├── alerts.json │ ├── collector │ │ ├── cluster_health.go │ │ ├── cluster_health_indices.go │ │ ├── cluster_health_response.go │ │ ├── cluster_health_test.go │ │ ├── cluster_info.go │ │ ├── cluster_info_test.go │ │ ├── cluster_settings.go │ │ ├── cluster_settings_reponse.go │ │ ├── cluster_settings_test.go │ │ ├── cluster_stats.go │ │ ├── cluster_stats_response.go │ │ ├── collector.go │ │ ├── collector_test.go │ │ ├── data_stream.go │ │ ├── data_stream_response.go │ │ ├── data_stream_test.go │ │ ├── ilm_indices.go │ │ ├── ilm_indices_test.go │ │ ├── ilm_status.go │ │ ├── ilm_status_test.go │ │ ├── indices.go │ │ ├── indices_mappings.go │ │ ├── indices_mappings_response.go │ │ ├── indices_mappings_test.go │ │ ├── indices_response.go │ │ ├── indices_settings.go │ │ ├── indices_settings_response.go │ │ ├── indices_settings_test.go │ │ ├── indices_test.go │ │ ├── nodes.go │ │ ├── nodes_response.go │ │ ├── nodes_test.go │ │ ├── shards.go │ │ ├── slm.go │ │ ├── slm_response.go │ │ ├── slm_test.go │ │ ├── snapshots.go │ │ ├── snapshots_reponse.go │ │ ├── snapshots_test.go │ │ ├── tasks.go │ │ ├── tasks_test.go │ │ └── versions_test.go │ ├── dashboard.json │ ├── elasticsearch.go │ ├── fixtures │ │ ├── clusterhealth │ │ │ ├── 1.7.6.json │ │ │ ├── 2.4.5.json │ │ │ └── 5.4.2.json │ │ ├── clusterinfo │ │ │ ├── 2.4.5.json │ │ │ ├── 5.4.2.json │ │ │ └── 7.13.1.json │ │ ├── datastream │ │ │ └── 7.15.0.json │ │ ├── ilm_indices │ │ │ └── 6.6.0.json │ │ ├── ilm_status │ │ │ └── 6.6.0.json │ │ ├── indices_mappings │ │ │ ├── 7.8.0.json │ │ │ └── counts.json │ │ ├── nodestats │ │ │ ├── 5.4.2.json │ │ │ ├── 5.6.16.json │ │ │ ├── 6.5.4.json │ │ │ ├── 6.8.8.json │ │ │ ├── 7.13.1.json │ │ │ ├── 7.3.0.json │ │ │ ├── 7.6.1.json │ │ │ └── 7.6.2.json │ │ ├── settings-5.4.2.json │ │ ├── settings-7.3.0.json │ │ ├── settings-merge-5.4.2.json │ │ └── snapshots │ │ │ ├── 1.7.6.json │ │ │ ├── 2.4.5.json │ │ │ ├── 5.4.2-failed.json │ │ │ └── 5.4.2.json │ └── pkg │ │ ├── clusterinfo │ │ ├── clusterinfo.go │ │ ├── clusterinfo_response.go │ │ └── clusterinfo_test.go │ │ └── roundtripper │ │ └── roundtripper.go ├── emc_unity │ └── emc_unity.go ├── ethtool │ ├── README.md │ ├── command_linux.go │ ├── ethtool_linux.go │ ├── ethtool_notlinux.go │ └── namespace_linux.go ├── exec │ ├── README.md │ ├── exec.go │ ├── 
scripts │ │ ├── cert │ │ │ └── collect_cert_expiretime.sh │ │ ├── nginx │ │ │ └── collect_nginx_conf_status.sh │ │ └── ssh │ │ │ └── collect_ssh_conn_count.sh │ └── shellquote.go ├── filecount │ ├── README.md │ ├── filecount.go │ ├── filesystem_helpers.go │ ├── filesystem_helpers_notwindows.go │ └── types.go ├── gnmi │ ├── README.md │ ├── extensions │ │ └── jnpr_gnmi_extention │ │ │ └── GnmiJuniperTelemetryHeaderExtension.pb.go │ ├── gnmi.go │ ├── handler.go │ ├── ieeefloat32.go │ ├── path.go │ ├── tag_store.go │ └── update_fields.go ├── googlecloud │ ├── README.md │ ├── connection.go │ ├── gcp.go │ ├── instances.go │ └── internal │ │ ├── distribution.go │ │ └── metrics.go ├── greenplum │ └── greenplum.go ├── hadoop │ ├── README.md │ ├── hadoop.go │ └── hadoop_collector.go ├── hadoop_hdfs │ └── README.md ├── haproxy │ ├── README.md │ ├── dashboard.json │ ├── exporter.go │ └── haproxy.go ├── http_provider.go ├── http_response │ ├── README.md │ ├── alerts.json │ ├── dashboard.json │ ├── http_response.go │ └── tls.go ├── influxdb │ ├── README.md │ └── influxdb.go ├── inputs.go ├── ipmi │ ├── README.md │ ├── exporter │ │ ├── collector_bmc.go │ │ ├── collector_bmc_watchdog.go │ │ ├── collector_chassis.go │ │ ├── collector_dcmi.go │ │ ├── collector_ipmi.go │ │ ├── collector_notwindows.go │ │ ├── collector_sel.go │ │ ├── collector_sm_lan_mode.go │ │ ├── collector_windows.go │ │ ├── config.go │ │ └── freeipmi │ │ │ └── freeipmi.go │ ├── instances.go │ ├── ipmi.go │ ├── ipmi_dash.json │ ├── ipmi_dash2.json │ └── ipmi_exporter_alert_rule.json ├── iptables │ ├── README.md │ ├── iptables.go │ └── iptables_notlinux.go ├── ipvs │ ├── README.md │ ├── ipvs.go │ └── ipvs_linux_amd64.go ├── jboss │ └── README.md ├── jenkins │ └── jenkins.go ├── jolokia │ ├── client.go │ ├── gatherer.go │ ├── metric.go │ └── point_builder.go ├── jolokia_agent │ └── jolokia_agent.go ├── jolokia_proxy │ └── jolokia_proxy.go ├── kafka │ ├── README.md │ ├── alerts.json │ ├── dashboard-by-topic.json │ ├── dashboard-key-metrics.json │ ├── dashboard.json │ ├── exporter │ │ ├── exporter.go │ │ ├── interpolation_map.go │ │ └── scram_client.go │ └── kafka.go ├── kafka_connect │ └── README.md ├── kernel │ ├── README.md │ ├── kernel.go │ └── kernel_notlinux.go ├── kernel_vmstat │ ├── README.md │ ├── kernel_vmstat.go │ └── kernel_vmstat_notlinux.go ├── kube_proxy │ └── dashboard-by-ident.json ├── kube_state_metrics │ ├── README.md │ ├── dashboard.json │ ├── ksm-cluster.json │ ├── ksm-namespace-pods.json │ ├── ksm-namespace-workloads.json │ ├── ksm-node-pods.json │ ├── ksm-pods.json │ ├── ksm-record-rules.json │ ├── ksm-workloads.json │ ├── kube-state-metrics-deploy.yaml │ ├── kube-state-metrics-rbac.yaml │ ├── kube-state-metrics-svc.yaml │ └── pod-restart-alert.json ├── kubelet │ └── dashboard-by-ident.json ├── kubernetes │ ├── README.md │ ├── kube_resources_dash.json │ ├── kubelet-metrics-dash.json │ ├── kubernetes.go │ ├── kubernetes_metrics.go │ └── kubernetes_pods.go ├── ldap │ ├── 389ds.go │ ├── README.md │ ├── ldap.go │ └── openldap.go ├── linux_sysctl_fs │ ├── README.md │ ├── linux_sysctl_fs_linux.go │ └── linuxsysctlfsnotlinux.go ├── local_provider.go ├── logstash │ ├── README.md │ ├── logstash-dash.json │ └── logstash.go ├── mem │ ├── README.md │ └── mem.go ├── mongodb │ ├── README.md │ ├── alerts.json │ ├── dashboard.json │ ├── dashboard2.json │ ├── exporter │ │ ├── base_collector.go │ │ ├── collstats_collector.go │ │ ├── common.go │ │ ├── dbstats_collector.go │ │ ├── debug.go │ │ ├── diagnostic_data_collector.go │ │ ├── 
exporter.go │ │ ├── general_collector.go │ │ ├── indexstats_collector.go │ │ ├── metrics.go │ │ ├── replset_status_collector.go │ │ ├── serverstatus_collector.go │ │ ├── top_collector.go │ │ ├── topology_info.go │ │ └── v1_compatibility.go │ ├── mongodb.go │ ├── mongodb_data.go │ ├── mongodb_server.go │ └── mongostat.go ├── mtail │ ├── Readme.md │ ├── internal │ │ ├── exporter │ │ │ ├── collectd.go │ │ │ ├── export.go │ │ │ ├── graphite.go │ │ │ ├── json.go │ │ │ ├── prometheus.go │ │ │ ├── statsd.go │ │ │ └── varz.go │ │ ├── logline │ │ │ └── logline.go │ │ ├── metrics │ │ │ ├── datum │ │ │ │ ├── buckets.go │ │ │ │ ├── datum.go │ │ │ │ ├── float.go │ │ │ │ ├── int.go │ │ │ │ └── string.go │ │ │ ├── metric.go │ │ │ ├── store.go │ │ │ ├── testing.go │ │ │ └── type.go │ │ ├── mtail │ │ │ ├── buildinfo.go │ │ │ ├── golden │ │ │ │ ├── reader.go │ │ │ │ └── reader_test.golden │ │ │ ├── httpstatus.go │ │ │ ├── logo.ico │ │ │ ├── logo.ico.go │ │ │ ├── mtail.go │ │ │ ├── options.go │ │ │ └── testdata │ │ │ │ ├── README │ │ │ │ ├── anonymised_dhcpd_log │ │ │ │ ├── anonymised_dhcpd_log.golden │ │ │ │ ├── apache-combined.golden │ │ │ │ ├── apache-common.golden │ │ │ │ ├── lighttpd_accesslog.golden │ │ │ │ ├── mysql_slowqueries.golden │ │ │ │ ├── ntp4 │ │ │ │ ├── ntp4.golden │ │ │ │ ├── prometheus.yml │ │ │ │ ├── rsyncd.golden │ │ │ │ ├── sftp_chroot.golden │ │ │ │ ├── vsftpd_log │ │ │ │ ├── vsftpd_log.golden │ │ │ │ ├── vsftpd_xferlog │ │ │ │ ├── vsftpd_xferlog.golden │ │ │ │ ├── xntp3_peerstats │ │ │ │ └── xntp3_peerstats.golden │ │ ├── runtime │ │ │ ├── code │ │ │ │ ├── instr.go │ │ │ │ ├── object.go │ │ │ │ └── opcodes.go │ │ │ ├── compiler │ │ │ │ ├── ast │ │ │ │ │ ├── ast.go │ │ │ │ │ └── walk.go │ │ │ │ ├── checker │ │ │ │ │ └── checker.go │ │ │ │ ├── codegen │ │ │ │ │ └── codegen.go │ │ │ │ ├── compiler.go │ │ │ │ ├── errors │ │ │ │ │ └── errors.go │ │ │ │ ├── fuzz │ │ │ │ │ └── const-as-cond.mtail │ │ │ │ ├── opt │ │ │ │ │ └── opt.go │ │ │ │ ├── parser │ │ │ │ │ ├── driver.go │ │ │ │ │ ├── lexer.go │ │ │ │ │ ├── parser.go │ │ │ │ │ ├── parser.y │ │ │ │ │ ├── sexp.go │ │ │ │ │ ├── tokens.go │ │ │ │ │ ├── unparser.go │ │ │ │ │ └── y.output │ │ │ │ ├── position │ │ │ │ │ └── position.go │ │ │ │ ├── symbol │ │ │ │ │ └── symtab.go │ │ │ │ └── types │ │ │ │ │ ├── regexp.go │ │ │ │ │ └── types.go │ │ │ ├── fuzz.go │ │ │ ├── fuzz │ │ │ │ ├── 1.mtail │ │ │ │ ├── 284.mtail │ │ │ │ ├── capref-double-regexp-in-cond.mtail │ │ │ │ ├── cmp-to-none.mtail │ │ │ │ ├── const-a.mtail │ │ │ │ ├── const-as-cond.mtail │ │ │ │ ├── const-unused.mtail │ │ │ │ ├── datum-string-concat.mtail │ │ │ │ ├── len.mtail │ │ │ │ ├── match-01e1.mtail │ │ │ │ ├── match-str.mtail │ │ │ │ ├── match-to-int.mtail │ │ │ │ ├── negate-none.mtail │ │ │ │ ├── recursion-depth.mtail │ │ │ │ ├── retval-from-dec.mtail │ │ │ │ └── uninitialised.mtail │ │ │ ├── httpstatus.go │ │ │ ├── options.go │ │ │ ├── runtime.go │ │ │ └── vm │ │ │ │ └── vm.go │ │ ├── tailer │ │ │ ├── httpstatus.go │ │ │ ├── logstream │ │ │ │ ├── base.go │ │ │ │ ├── cancel.go │ │ │ │ ├── dgramstream.go │ │ │ │ ├── fifostream.go │ │ │ │ ├── filestream.go │ │ │ │ ├── logstream.go │ │ │ │ ├── reader.go │ │ │ │ └── socketstream.go │ │ │ └── tail.go │ │ └── waker │ │ │ ├── testwaker.go │ │ │ ├── timedwaker.go │ │ │ └── waker.go │ ├── mtail.go │ ├── timestamp.png │ └── timezone.png ├── mysql │ ├── README.md │ ├── alerts.json │ ├── binlog.go │ ├── custom_queries.go │ ├── dashboard-by-aws-rds.json │ ├── dashboard-by-ident.json │ ├── dashboard-by-instance.json │ ├── engine_innodb.go │ 
├── engine_innodb_compute.go │ ├── global_status.go │ ├── global_variables.go │ ├── metrics.go │ ├── mysql.go │ ├── processlist.go │ ├── processlist_by_user.go │ ├── queries.go │ ├── schema_size.go │ ├── slave_status.go │ └── table_size.go ├── nats │ └── nats.go ├── net │ ├── README.md │ ├── net.go │ ├── speed_linux.go │ └── speed_nolinux.go ├── net_response │ ├── README.md │ ├── alerts.json │ ├── dashboard-by-ziv.json │ ├── dashboard.json │ └── net_response.go ├── netstat │ ├── README.md │ ├── ext.go │ ├── ext_linux.go │ ├── ext_notlinux.go │ └── netstat.go ├── netstat_filter │ ├── README.md │ ├── entry.go │ ├── netstat_filter.go │ ├── netstat_tcp.go │ ├── netstat_tcp_filter.go │ ├── netstat_tcp_filter_nolinux.go │ └── netstat_tcp_nolinux.go ├── nfsclient │ └── nfsclient.go ├── nginx │ ├── README.md │ ├── dashbaords.json │ └── nginx.go ├── nginx_upstream_check │ ├── README.md │ ├── dashboards.json │ └── nginx_upstream_check.go ├── nginx_vts │ ├── README.md │ └── dashboards.json ├── node_exporter │ ├── collector │ │ ├── arp_linux.go │ │ ├── bcache_linux.go │ │ ├── bonding_linux.go │ │ ├── btrfs_linux.go │ │ ├── buddyinfo.go │ │ ├── cgroups_linux.go │ │ ├── collector.go │ │ ├── collector_darwin.go │ │ ├── collector_linux.go │ │ ├── collector_windows.go │ │ ├── conntrack_linux.go │ │ ├── cpu_common.go │ │ ├── cpu_darwin.go │ │ ├── cpu_linux.go │ │ ├── cpu_vulnerabilities_linux.go │ │ ├── cpufreq_common.go │ │ ├── cpufreq_linux.go │ │ ├── crontab.go │ │ ├── device_filter.go │ │ ├── diskstats_common.go │ │ ├── diskstats_darwin.go │ │ ├── diskstats_linux.go │ │ ├── dmi.go │ │ ├── drbd_linux.go │ │ ├── drm_linux.go │ │ ├── edac_linux.go │ │ ├── entropy_linux.go │ │ ├── ethtool_linux.go │ │ ├── fibrechannel_linux.go │ │ ├── file.go │ │ ├── filefd_linux.go │ │ ├── filesystem_bsd.go │ │ ├── filesystem_common.go │ │ ├── filesystem_linux.go │ │ ├── fixtures │ │ │ ├── e2e-64k-page-output.txt │ │ │ ├── e2e-output.txt │ │ │ ├── ethtool │ │ │ │ ├── bond0 │ │ │ │ │ └── statistics │ │ │ │ └── eth0 │ │ │ │ │ ├── driver │ │ │ │ │ ├── settings │ │ │ │ │ └── statistics │ │ │ ├── ip_vs_result.txt │ │ │ ├── ip_vs_result_lbs_local_address_local_port.txt │ │ │ ├── ip_vs_result_lbs_local_port.txt │ │ │ ├── ip_vs_result_lbs_none.txt │ │ │ ├── proc │ │ │ │ ├── 1 │ │ │ │ │ ├── mounts │ │ │ │ │ └── stat │ │ │ │ ├── 10 │ │ │ │ │ ├── mountinfo │ │ │ │ │ ├── mountstats │ │ │ │ │ └── stat │ │ │ │ ├── 11 │ │ │ │ │ ├── .missing_stat │ │ │ │ │ └── stat │ │ │ │ ├── buddyinfo │ │ │ │ ├── cgroups │ │ │ │ ├── cpuinfo │ │ │ │ ├── diskstats │ │ │ │ ├── drbd │ │ │ │ ├── interrupts │ │ │ │ ├── interrupts_aarch64 │ │ │ │ ├── loadavg │ │ │ │ ├── mdstat │ │ │ │ ├── meminfo │ │ │ │ ├── net │ │ │ │ │ ├── arp │ │ │ │ │ ├── ip_vs │ │ │ │ │ ├── ip_vs_stats │ │ │ │ │ ├── netstat │ │ │ │ │ ├── rpc │ │ │ │ │ │ ├── nfs │ │ │ │ │ │ └── nfsd │ │ │ │ │ ├── snmp │ │ │ │ │ ├── snmp6 │ │ │ │ │ ├── sockstat │ │ │ │ │ ├── sockstat6 │ │ │ │ │ ├── softnet_stat │ │ │ │ │ ├── stat │ │ │ │ │ │ ├── arp_cache │ │ │ │ │ │ ├── ndisc_cache │ │ │ │ │ │ └── nf_conntrack │ │ │ │ │ ├── udp │ │ │ │ │ └── xfrm_stat │ │ │ │ ├── pressure │ │ │ │ │ ├── cpu │ │ │ │ │ ├── io │ │ │ │ │ └── memory │ │ │ │ ├── schedstat │ │ │ │ ├── self │ │ │ │ │ ├── mountinfo │ │ │ │ │ ├── mountstats │ │ │ │ │ └── stat │ │ │ │ ├── slabinfo │ │ │ │ ├── softirqs │ │ │ │ ├── spl │ │ │ │ │ └── kstat │ │ │ │ │ │ └── zfs │ │ │ │ │ │ ├── abdstats │ │ │ │ │ │ ├── arcstats │ │ │ │ │ │ ├── dbufstats │ │ │ │ │ │ ├── dmu_tx │ │ │ │ │ │ ├── dnodestats │ │ │ │ │ │ ├── fm │ │ │ │ │ │ ├── pool1 │ │ │ │ │ │ 
├── io │ │ │ │ │ │ ├── objset-1 │ │ │ │ │ │ ├── objset-2 │ │ │ │ │ │ └── state │ │ │ │ │ │ ├── pool2 │ │ │ │ │ │ └── state │ │ │ │ │ │ ├── poolz1 │ │ │ │ │ │ ├── io │ │ │ │ │ │ ├── objset-1 │ │ │ │ │ │ ├── objset-2 │ │ │ │ │ │ └── state │ │ │ │ │ │ ├── vdev_cache_stats │ │ │ │ │ │ ├── vdev_mirror_stats │ │ │ │ │ │ ├── xuio_stats │ │ │ │ │ │ ├── zfetchstats │ │ │ │ │ │ └── zil │ │ │ │ ├── stat │ │ │ │ ├── sys │ │ │ │ │ ├── fs │ │ │ │ │ │ └── file-nr │ │ │ │ │ ├── kernel │ │ │ │ │ │ ├── pid_max │ │ │ │ │ │ ├── random │ │ │ │ │ │ │ ├── entropy_avail │ │ │ │ │ │ │ └── poolsize │ │ │ │ │ │ ├── seccomp │ │ │ │ │ │ │ └── actions_avail │ │ │ │ │ │ └── threads-max │ │ │ │ │ ├── net │ │ │ │ │ │ └── netfilter │ │ │ │ │ │ │ ├── nf_conntrack_count │ │ │ │ │ │ │ └── nf_conntrack_max │ │ │ │ │ ├── pid_max │ │ │ │ │ └── threads-max │ │ │ │ ├── vmstat │ │ │ │ └── zoneinfo │ │ │ ├── qdisc │ │ │ │ └── results.json │ │ │ ├── sys.ttar │ │ │ ├── textfile │ │ │ │ ├── client_side_timestamp.out │ │ │ │ ├── client_side_timestamp │ │ │ │ │ └── metrics.prom │ │ │ │ ├── different_metric_types.out │ │ │ │ ├── different_metric_types │ │ │ │ │ └── metrics.prom │ │ │ │ ├── glob_extra_dimension.out │ │ │ │ ├── histogram.out │ │ │ │ ├── histogram │ │ │ │ │ └── metrics.prom │ │ │ │ ├── histogram_extra_dimension.out │ │ │ │ ├── histogram_extra_dimension │ │ │ │ │ └── metrics.prom │ │ │ │ ├── inconsistent_metrics.out │ │ │ │ ├── inconsistent_metrics │ │ │ │ │ └── metrics.prom │ │ │ │ ├── metrics_merge_different_help.out │ │ │ │ ├── metrics_merge_different_help │ │ │ │ │ ├── a.prom │ │ │ │ │ └── b.prom │ │ │ │ ├── metrics_merge_empty_help.out │ │ │ │ ├── metrics_merge_empty_help │ │ │ │ │ ├── a.prom │ │ │ │ │ └── b.prom │ │ │ │ ├── metrics_merge_no_help.out │ │ │ │ ├── metrics_merge_no_help │ │ │ │ │ ├── a.prom │ │ │ │ │ └── b.prom │ │ │ │ ├── metrics_merge_same_help.out │ │ │ │ ├── metrics_merge_same_help │ │ │ │ │ ├── a.prom │ │ │ │ │ └── b.prom │ │ │ │ ├── no_metric_files.out │ │ │ │ ├── no_metric_files │ │ │ │ │ └── non_matching_file.txt │ │ │ │ ├── nonexistent_path.out │ │ │ │ ├── summary.out │ │ │ │ ├── summary │ │ │ │ │ └── metrics.prom │ │ │ │ ├── summary_extra_dimension.out │ │ │ │ ├── summary_extra_dimension │ │ │ │ │ └── metrics.prom │ │ │ │ ├── two_metric_files.out │ │ │ │ └── two_metric_files │ │ │ │ │ ├── metrics1.prom │ │ │ │ │ ├── metrics2.prom │ │ │ │ │ └── non_matching_file.txt │ │ │ ├── udev.ttar │ │ │ ├── usr │ │ │ │ └── lib │ │ │ │ │ └── os-release │ │ │ └── wifi │ │ │ │ ├── interfaces.json │ │ │ │ └── wlan0 │ │ │ │ ├── bss.json │ │ │ │ └── stationinfo.json │ │ ├── fixtures_bindmount │ │ │ └── proc │ │ │ │ └── mounts │ │ ├── fixtures_hidepid │ │ │ └── proc │ │ │ │ └── mounts │ │ ├── helper.go │ │ ├── hwmon_linux.go │ │ ├── infiniband_linux.go │ │ ├── interrupts_common.go │ │ ├── interrupts_linux.go │ │ ├── ipvs_linux.go │ │ ├── ksmd_linux.go │ │ ├── lnstat_linux.go │ │ ├── loadavg.go │ │ ├── loadavg_bsd.go │ │ ├── loadavg_linux.go │ │ ├── logind_linux.go │ │ ├── mdadm_linux.go │ │ ├── meminfo.go │ │ ├── meminfo_darwin.go │ │ ├── meminfo_linux.go │ │ ├── meminfo_numa_linux.go │ │ ├── mountstats_linux.go │ │ ├── netclass_linux.go │ │ ├── netclass_rtnl_linux.go │ │ ├── netdev_common.go │ │ ├── netdev_darwin.go │ │ ├── netdev_linux.go │ │ ├── netstat_linux.go │ │ ├── network_route_linux.go │ │ ├── nfs_linux.go │ │ ├── nfsd_linux.go │ │ ├── ntp.go │ │ ├── nvme_linux.go │ │ ├── os_release.go │ │ ├── other_os │ │ │ ├── boot_time_bsd.go │ │ │ ├── boot_time_solaris.go │ │ │ ├── cpu_dragonfly.go │ │ │ ├── 
cpu_freebsd.go │ │ │ ├── cpu_netbsd.go │ │ │ ├── cpu_openbsd.go │ │ │ ├── cpu_solaris.go │ │ │ ├── cpufreq_solaris.go │ │ │ ├── devstat_dragonfly.go │ │ │ ├── devstat_freebsd.c │ │ │ ├── devstat_freebsd.go │ │ │ ├── devstat_freebsd.h │ │ │ ├── diskstats_openbsd.go │ │ │ ├── diskstats_openbsd_amd64.go │ │ │ ├── exec_bsd.go │ │ │ ├── filesystem_freebsd.go │ │ │ ├── filesystem_openbsd.go │ │ │ ├── interrupts_openbsd.go │ │ │ ├── interrupts_openbsd_amd64.go │ │ │ ├── kvm_bsd.c │ │ │ ├── kvm_bsd.go │ │ │ ├── kvm_bsd.h │ │ │ ├── loadavg_solaris.go │ │ │ ├── meminfo_netbsd.go │ │ │ ├── meminfo_openbsd.go │ │ │ ├── meminfo_openbsd_amd64.go │ │ │ ├── memory_bsd.go │ │ │ ├── netdev_bsd.go │ │ │ ├── netdev_openbsd.go │ │ │ ├── netdev_openbsd_amd64.go │ │ │ ├── netisr_freebsd.go │ │ │ ├── sysctl_bsd.go │ │ │ ├── sysctl_openbsd_amd64.go │ │ │ ├── zfs_freebsd.go │ │ │ └── zfs_solaris.go │ │ ├── paths.go │ │ ├── perf_linux.go │ │ ├── powersupplyclass.go │ │ ├── powersupplyclass_darwin.go │ │ ├── powersupplyclass_linux.go │ │ ├── pressure_linux.go │ │ ├── processes_linux.go │ │ ├── qdisc_linux.go │ │ ├── rapl_linux.go │ │ ├── runit.go │ │ ├── schedstat_linux.go │ │ ├── selinux_linux.go │ │ ├── slabinfo_linux.go │ │ ├── sockstat_linux.go │ │ ├── softirq_linux.go │ │ ├── softirqs_common.go │ │ ├── softnet_linux.go │ │ ├── stat_linux.go │ │ ├── supervisord.go │ │ ├── sysctl_linux.go │ │ ├── systemd_linux.go │ │ ├── tapestats_linux.go │ │ ├── tcpstat_linux.go │ │ ├── textfile.go │ │ ├── thermal_darwin.go │ │ ├── thermal_zone_linux.go │ │ ├── time.go │ │ ├── time_linux.go │ │ ├── time_other.go │ │ ├── timex.go │ │ ├── udp_queues_linux.go │ │ ├── uname.go │ │ ├── uname_bsd.go │ │ ├── uname_linux.go │ │ ├── vmstat_linux.go │ │ ├── wifi_linux.go │ │ ├── xfrm.go │ │ ├── xfs_linux.go │ │ ├── zfs.go │ │ ├── zfs_linux.go │ │ └── zoneinfo_linux.go │ └── exporter.go ├── nsq │ ├── README.md │ └── nsq.go ├── ntp │ ├── README.md │ ├── alerts.json │ ├── ntp.go │ └── ntp_test.go ├── nvidia_smi │ ├── README.md │ ├── builder.go │ ├── csv.go │ ├── fields.go │ ├── nvidia_smi.go │ ├── parser.go │ ├── scrape.go │ ├── types.go │ └── util.go ├── oracle │ ├── README.md │ ├── dashboard.json │ └── oracle.go ├── phpfpm │ ├── README.md │ └── phpfpm.go ├── ping │ ├── README.md │ ├── alerts.json │ ├── dashboard-2.0.json │ ├── dashboard-2.0.png │ ├── dashboard.json │ ├── ping.go │ ├── ping_notwindows.go │ └── ping_windows.go ├── postgresql │ ├── README.md │ ├── alerts.json │ ├── dashboard.json │ ├── postgresql.go │ └── postgresql.png ├── processes │ ├── README.md │ ├── processes_notwindows.go │ └── processes_windows.go ├── procstat │ ├── README.md │ ├── alerts.json │ ├── native_finder.go │ ├── native_finder_notwindows.go │ ├── native_finder_windows.go │ ├── process.go │ ├── procstat.go │ ├── title_capture_notwindows.go │ ├── title_capture_windows.go │ ├── win_service_notwindows.go │ └── win_service_windows.go ├── prometheus │ ├── README.md │ ├── consul.go │ ├── prometheus.go │ └── seata-server-kanban.json ├── provider_manager.go ├── rabbitmq │ ├── README.md │ ├── dashboard-3.8-.json │ ├── dashboard.json │ └── rabbitmq.go ├── redfish │ └── redfish.go ├── redis │ ├── README.md │ ├── alerts.json │ ├── dashboard.json │ ├── dashboard_for_redis6.2.x_exporter1.43.x.json │ ├── draft.md │ └── redis.go ├── redis_sentinel │ ├── README.md │ ├── redis_sentinel.go │ └── redis_sentinel_types.go ├── rocketmq_offset │ ├── model.go │ └── rocketmq.go ├── self_metrics │ └── metrics.go ├── smart │ ├── README.md │ ├── instances.go │ ├── smart.go │ └── var.go 
├── snmp │ ├── README.md │ ├── gosmi.go │ ├── health_check.go │ ├── instances.go │ ├── netsnmp.go │ ├── snmp.go │ ├── table.go │ └── wrapper.go ├── snmp_trap │ ├── README.md │ ├── gosmi.go │ ├── netsnmp.go │ └── snmp_trap.go ├── sockstat │ ├── README.md │ ├── sockstat.go │ ├── sockstat_linux.go │ └── sockstat_notlinux.go ├── sqlserver │ ├── README.md │ ├── connectionstring.go │ ├── sqlqueriesV1.go │ ├── sqlqueriesV2.go │ ├── sqlserver.go │ ├── sqlserver_dash.json │ └── sqlserverqueries.go ├── supervisor │ ├── README.md │ ├── README_en.md │ └── supervisor.go ├── switch_legacy │ ├── README.md │ ├── dashboard.json │ ├── lastcache.go │ └── switch_legacy.go ├── system │ ├── README.md │ ├── alerts-linux.json │ ├── base_monitor_grafana10.json │ ├── dashboard.json │ ├── ps.go │ └── system.go ├── systemd │ ├── README.md │ ├── systemd.go │ ├── systemd_linux.go │ └── systemd_nonlinux.go ├── tengine │ ├── README.md │ └── tengine.go ├── tomcat │ ├── README.md │ ├── dashboard.json │ └── tomcat.go ├── tpl │ ├── README.md │ └── tpl.go ├── traffic_server │ └── traffic_server.go ├── vsphere │ ├── client.go │ ├── dashboards.json │ ├── endpoint.go │ ├── finder.go │ ├── throttled_exec.go │ ├── tscache.go │ └── vsphere.go ├── weblogic │ └── README.md ├── whois │ ├── README.md │ └── whois.go ├── x509_cert │ ├── README.md │ └── x509_cert.go ├── xskyapi │ └── xskyapi.go └── zookeeper │ ├── README.md │ ├── alerts.json │ ├── dashboard.json │ └── zookeeper.go ├── k8s ├── README.md ├── apiserver-dash.json ├── categraf.tpl ├── cm-dash.json ├── controller-service.yaml ├── coredns-dash.json ├── daemonset.yaml ├── deployment-etcd-http.yaml ├── deployment.yaml ├── etcd-dash.json ├── etcd-service-http.yaml ├── etcd-service.yaml ├── gen_ds.sh ├── images │ ├── apiserver-dash.jpg │ ├── cm-dash.jpg │ ├── coredns-dash.jpg │ ├── etcd-dash.jpg │ └── scheduler-dash.jpg ├── in_cluster_scrape.yaml ├── pod-dash.json ├── scheduler-dash.json ├── scheduler-service.yaml ├── scrape_with_cafile.yaml ├── scrape_with_kubeconfig.yaml ├── scrape_with_token.yaml ├── secret.yaml └── sidecar.yaml ├── logs ├── README.md ├── auditor │ ├── api.go │ ├── auditor.go │ └── null_auditor.go ├── client │ ├── destination.go │ ├── destinations.go │ ├── destinations_context.go │ ├── errors.go │ ├── http │ │ ├── content_encoding.go │ │ └── destination.go │ ├── kafka │ │ ├── content_encoding.go │ │ ├── destination.go │ │ ├── kafka.go │ │ ├── producer.go │ │ ├── scram_client.go │ │ ├── topic_json.go │ │ └── topic_json_easyjson.go │ └── tcp │ │ ├── connection_manager.go │ │ ├── delimiter.go │ │ ├── destination.go │ │ └── prefixer.go ├── decoder │ ├── auto_multiline_handler.go │ ├── decoder.go │ ├── line_handler.go │ ├── line_parser.go │ ├── matcher.go │ ├── multiline_handler.go │ └── single_line_handler.go ├── diagnostic │ ├── message_receiver.go │ └── noop_message_receiver.go ├── errors │ └── errors.go ├── input │ ├── channel │ │ ├── launcher.go │ │ └── tailer.go │ ├── container │ │ ├── launcher.go │ │ └── noop.go │ ├── file │ │ ├── file_provider.go │ │ ├── open_file_nix.go │ │ ├── open_file_windows.go │ │ ├── position.go │ │ ├── rotate_nix.go │ │ ├── rotate_windows.go │ │ ├── scanner.go │ │ ├── tailer.go │ │ ├── tailer_nix.go │ │ └── tailer_windows.go │ ├── journald │ │ ├── launcher.go │ │ ├── launcher_nosystemd.go │ │ ├── tailer.go │ │ └── tailer_util.go │ ├── kubernetes │ │ ├── json_parser.go │ │ ├── launcher.go │ │ ├── parser.go │ │ └── scanner.go │ └── listener │ │ ├── errors.go │ │ ├── launcher.go │ │ ├── tailer.go │ │ ├── tcp.go │ │ └── udp.go ├── message 
│ ├── message.go │ ├── origin.go │ └── status.go ├── parser │ └── parser.go ├── pb │ └── agent_logs_payload.pb.go ├── pipeline │ ├── pipeline.go │ └── provider.go ├── processor │ ├── encoder.go │ ├── json.go │ ├── json_serverless.go │ ├── processor.go │ ├── proto.go │ └── raw.go ├── restart │ ├── parallel_stop.go │ ├── restart.go │ ├── serial_stop.go │ ├── start.go │ ├── starter.go │ └── stop.go ├── sender │ ├── batch_strategy.go │ ├── message_buffer.go │ ├── sender.go │ ├── serializer.go │ └── stream_strategy.go ├── service │ ├── service.go │ └── services.go ├── status │ ├── builder.go │ └── status.go ├── tag │ ├── local_provider.go │ └── provider.go └── util │ ├── containers │ ├── entity.go │ ├── filter.go │ ├── image.go │ ├── pause.go │ ├── providers │ │ └── provider.go │ └── types.go │ ├── debug.go │ ├── docker │ ├── common.go │ ├── containers.go │ ├── docker.go │ ├── event_pull.go │ ├── event_stream.go │ ├── event_types.go │ ├── global.go │ ├── network.go │ ├── rancher.go │ ├── storage.go │ └── util_docker.go │ └── kubernetes │ ├── auth.go │ ├── const.go │ ├── helpers.go │ ├── kubelet │ ├── containers.go │ ├── file_stat.go │ ├── json.go │ ├── kubelet.go │ ├── kubelet_client.go │ ├── kubelet_common.go │ ├── kubelet_hosts.go │ ├── kubelet_interface.go │ └── podwatcher.go │ ├── tags │ ├── builder.go │ └── tags.go │ └── time.go ├── main.go ├── main_posix.go ├── main_windows.go ├── parser ├── falcon │ └── parser.go ├── influx │ └── parser.go ├── parser.go └── prometheus │ └── parser.go ├── pkg ├── aop │ ├── logger.go │ └── recovery.go ├── aws │ └── credentials.go ├── backoff │ └── backoff.go ├── cache │ ├── basic_cache.go │ └── cache.go ├── cfg │ ├── cfg.go │ └── scan.go ├── checksum │ └── checksum.go ├── choice │ └── choice.go ├── cmdx │ ├── cmd_notwindows.go │ ├── cmd_windows.go │ └── cmdx.go ├── conv │ └── conv.go ├── dock │ └── docker.go ├── filter │ └── filter.go ├── globpath │ ├── globpath.go │ ├── globpath_test.go │ └── testdata │ │ ├── nested1 │ │ └── nested2 │ │ │ └── nested.txt │ │ └── test.conf ├── hash │ └── hash.go ├── httpx │ ├── client.go │ ├── proxy.go │ └── transport.go ├── jsonx │ └── jsonflattener.go ├── kubernetes │ ├── pod.go │ └── types_kubelet.go ├── limiter │ └── limiter.go ├── metrics │ └── metrics.go ├── netx │ └── netx.go ├── nvmlprovider │ ├── provider.go │ └── provider_test.go ├── osx │ ├── osx.go │ └── proc.go ├── pprof │ └── profile.go ├── prom │ ├── labels │ │ └── labels.go │ └── prom.go ├── proxy │ ├── connect.go │ ├── dialer.go │ ├── proxy.go │ └── socks5.go ├── relabel │ └── relabel.go ├── retry │ ├── README.md │ ├── error.go │ ├── retrier.go │ ├── retrier_test.go │ └── types.go ├── runtimex │ └── stack.go ├── set │ └── set.go ├── snmp │ ├── translate.go │ └── translator.go ├── stringx │ └── strx.go ├── tagx │ └── tagx.go └── tls │ ├── common.go │ └── config.go ├── prometheus ├── README.md └── prometheus.go ├── scripts ├── ci │ ├── go_version_check.sh │ ├── go_vet.sh │ └── static_check.sh └── win_run.bat ├── types ├── error.go ├── metric.go ├── metric │ ├── metric.go │ └── series_grouper.go ├── safe_list.go ├── sample.go └── sample_list.go └── writer ├── writer.go └── writers.go /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest a new feature 4 | title: '' 5 | labels: kind/feature 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **What would you like to be added**: 13 | 14 | **Why is this needed**: 15 | 16 | **Describe the 
solution you'd like** 17 | 18 | **Additional context** 19 | -------------------------------------------------------------------------------- /.github/commitlint.config.js: -------------------------------------------------------------------------------- 1 | /* eslint-disable import/no-extraneous-dependencies */ 2 | const { maxLineLength } = require('@commitlint/ensure') 3 | 4 | const bodyMaxLineLength = 1000 5 | 6 | const validateBodyMaxLengthIgnoringDeps = (parsedCommit) => { 7 | const { type, scope, body } = parsedCommit 8 | const isDepsCommit = 9 | type === 'chore' && (scope === 'deps' || scope === 'deps-dev') 10 | 11 | return [ 12 | isDepsCommit || !body || maxLineLength(body, bodyMaxLineLength), 13 | `body's lines must not be longer than ${bodyMaxLineLength}`, 14 | ] 15 | } 16 | 17 | module.exports = { 18 | extends: ['@commitlint/config-conventional'], 19 | plugins: ['commitlint-plugin-function-rules'], 20 | rules: { 21 | 'body-max-line-length': [0], 22 | 'function-rules/body-max-line-length': [ 23 | 2, 24 | 'always', 25 | validateBodyMaxLengthIgnoringDeps, 26 | ], 27 | }, 28 | } -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | env: 8 | GO_VERSION: 1.21 9 | 10 | jobs: 11 | goreleaser: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Install libpcap 15 | run: sudo apt-get install -y libpcap-dev 16 | - name: Checkout Source Code 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | - name: Setup Go Environment 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version: ${{ env.GO_VERSION }} 24 | - uses: docker/login-action@v3 25 | with: 26 | username: ${{ secrets.DOCKERHUB_USERNAME }} 27 | password: ${{ secrets.DOCKERHUB_TOKEN }} 28 | - name: Run GoReleaser 29 | uses: goreleaser/goreleaser-action@v5 30 | with: 31 | version: latest 32 | args: release --clean --timeout 60m 33 | env: 34 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .DS_Store 3 | .vscode 4 | /categraf* 5 | *.log 6 | /vendor 7 | docker/conf 8 | docker/categraf 9 | /build 10 | /meta 11 | /nohup.out 12 | 13 | conf_local/ 14 | -------------------------------------------------------------------------------- /agent/ibex_agent.go: -------------------------------------------------------------------------------- 1 | //go:build !no_ibex 2 | 3 | package agent 4 | 5 | import ( 6 | "log" 7 | 8 | coreconfig "flashcat.cloud/categraf/config" 9 | "flashcat.cloud/categraf/ibex" 10 | ) 11 | 12 | type IbexAgent struct { 13 | } 14 | 15 | func NewIbexAgent() AgentModule { 16 | if coreconfig.Config == nil || 17 | coreconfig.Config.Ibex == nil || 18 | !coreconfig.Config.Ibex.Enable { 19 | log.Println("I! 
ibex agent disabled!") 20 | return nil 21 | } 22 | if coreconfig.Config.Ibex.MetaDir == "" { 23 | coreconfig.Config.Ibex.MetaDir = "tasks.d" 24 | } 25 | 26 | return &IbexAgent{} 27 | } 28 | 29 | func (ia *IbexAgent) Start() error { 30 | go ibex.Start() 31 | return nil 32 | } 33 | 34 | func (ia *IbexAgent) Stop() error { 35 | if coreconfig.Config == nil || 36 | coreconfig.Config.Ibex == nil || 37 | !coreconfig.Config.Ibex.Enable { 38 | return nil 39 | } 40 | ibex.Stop() 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /agent/ibex_agent_none.go: -------------------------------------------------------------------------------- 1 | //go:build no_ibex 2 | 3 | package agent 4 | 5 | type IbexAgent struct{} 6 | 7 | func NewIbexAgent() AgentModule { 8 | return nil 9 | } 10 | 11 | func (a *IbexAgent) Start() error { 12 | return nil 13 | } 14 | 15 | func (a *IbexAgent) Stop() error { 16 | return nil 17 | } 18 | -------------------------------------------------------------------------------- /agent/install/service_darwin.go: -------------------------------------------------------------------------------- 1 | package install 2 | 3 | import ( 4 | "github.com/kardianos/service" 5 | ) 6 | 7 | const ( 8 | ServiceName = "categraf" 9 | ) 10 | 11 | var ( 12 | serviceConfig = &service.Config{ 13 | // 服务显示名称 14 | Name: ServiceName, 15 | // 服务名称 16 | DisplayName: "categraf", 17 | // 服务描述 18 | Description: "Opensource telemetry collector", 19 | } 20 | ) 21 | 22 | func ServiceConfig(userMode bool) *service.Config { 23 | return serviceConfig 24 | } 25 | -------------------------------------------------------------------------------- /agent/install/service_freebsd.go: -------------------------------------------------------------------------------- 1 | package install 2 | 3 | import ( 4 | "github.com/kardianos/service" 5 | ) 6 | 7 | const ( 8 | // freebsd的服务名中间不能有"-" 9 | ServiceName = "categraf" 10 | 11 | SysvScript = `#!/bin/sh 12 | # 13 | # PROVIDE: {{.Name}} 14 | # REQUIRE: networking syslog 15 | # KEYWORD: 16 | # Add the following lines to /etc/rc.conf to enable the {{.Name}}: 17 | # 18 | # {{.Name}}_enable="YES" 19 | # 20 | . 
/etc/rc.subr 21 | name="{{.Name}}" 22 | rcvar="{{.Name}}_enable" 23 | command="{{.Path}}" 24 | pidfile="/var/run/$name.pid" 25 | start_cmd="/opt/categraf/categraf -configs /opt/categraf/conf" 26 | load_rc_config $name 27 | run_rc_command "$1" 28 | ` 29 | ) 30 | 31 | var ( 32 | serviceConfig = &service.Config{ 33 | // 服务显示名称 34 | Name: ServiceName, 35 | // 服务名称 36 | DisplayName: "categraf", 37 | // 服务描述 38 | Description: "Opensource telemetry collector", 39 | Option: service.KeyValue{ 40 | "SysvScript": SysvScript, 41 | }, 42 | } 43 | ) 44 | 45 | func ServiceConfig(userMode bool) *service.Config { 46 | return serviceConfig 47 | } 48 | -------------------------------------------------------------------------------- /agent/install/service_windows.go: -------------------------------------------------------------------------------- 1 | package install 2 | 3 | import ( 4 | "github.com/kardianos/service" 5 | ) 6 | 7 | const ( 8 | ServiceName = "categraf" 9 | ) 10 | 11 | var ( 12 | serviceConfig = &service.Config{ 13 | // 服务显示名称 14 | Name: ServiceName, 15 | // 服务名称 16 | DisplayName: "categraf", 17 | // 服务描述 18 | Description: "Opensource telemetry collector", 19 | } 20 | ) 21 | 22 | func ServiceConfig(userMode bool) *service.Config { 23 | return serviceConfig 24 | } 25 | -------------------------------------------------------------------------------- /agent/logs_agent_none.go: -------------------------------------------------------------------------------- 1 | //go:build no_logs 2 | 3 | package agent 4 | 5 | type LogsAgent struct { 6 | } 7 | 8 | func NewLogsAgent() AgentModule { 9 | return nil 10 | } 11 | 12 | func (la *LogsAgent) Start() error { 13 | return nil 14 | } 15 | 16 | func (la *LogsAgent) Stop() error { 17 | return nil 18 | } 19 | -------------------------------------------------------------------------------- /agent/prometheus_agent.go: -------------------------------------------------------------------------------- 1 | //go:build !no_prometheus 2 | 3 | package agent 4 | 5 | import ( 6 | "log" 7 | 8 | coreconfig "flashcat.cloud/categraf/config" 9 | "flashcat.cloud/categraf/prometheus" 10 | ) 11 | 12 | type PrometheusAgent struct { 13 | } 14 | 15 | func NewPrometheusAgent() AgentModule { 16 | if coreconfig.Config == nil || 17 | coreconfig.Config.Prometheus == nil || 18 | !coreconfig.Config.Prometheus.Enable { 19 | log.Println("I! prometheus scraping disabled!") 20 | return nil 21 | } 22 | return &PrometheusAgent{} 23 | } 24 | 25 | func (pa *PrometheusAgent) Start() error { 26 | go prometheus.Start() 27 | log.Println("I! prometheus scraping started!") 28 | return nil 29 | } 30 | 31 | func (pa *PrometheusAgent) Stop() error { 32 | prometheus.Stop() 33 | log.Println("I! 
prometheus scraping stopped!") 34 | return nil 35 | } 36 | -------------------------------------------------------------------------------- /agent/promethues_agent_none.go: -------------------------------------------------------------------------------- 1 | //go:build no_prometheus 2 | 3 | package agent 4 | 5 | type PrometheusAgent struct { 6 | } 7 | 8 | func NewPrometheusAgent() AgentModule { 9 | return nil 10 | } 11 | 12 | func (pa *PrometheusAgent) Start() error { 13 | return nil 14 | } 15 | 16 | func (pa *PrometheusAgent) Stop() error { 17 | return nil 18 | } 19 | -------------------------------------------------------------------------------- /agent/update/update_darwin.go: -------------------------------------------------------------------------------- 1 | package update 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | func Update(tar string) error { 8 | // binary 9 | return fmt.Errorf("linux support only") 10 | } 11 | -------------------------------------------------------------------------------- /agent/update/update_freebsd.go: -------------------------------------------------------------------------------- 1 | package update 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | func Update(tar string) error { 8 | return fmt.Errorf("linux support only") 9 | } 10 | -------------------------------------------------------------------------------- /conf/input.aliyun/cloud.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 60 3 | [[instances]] 4 | # # endpoint region 参考 https://help.aliyun.com/document_detail/28616.html#section-72p-xhs-6qt 5 | # region="cn-beijing" 6 | # endpoint="metrics.cn-hangzhou.aliyuncs.com" 7 | # access_key_id="your-access-key-id" 8 | # access_key_secret="your-access-key-secret" 9 | # interval_times=4 10 | # delay="10m" 11 | # period="60s" 12 | # 13 | # ratelimit=25 14 | # catch_ttl="1h" 15 | # timeout="5s" 16 | # 17 | # # namespace 参考 https://help.aliyun.com/document_detail/163515.htm?spm=a2c4g.11186623.0.0.44d65c58mhgNw3 18 | # namespaces=["acs_ecs_dashboard"] 19 | # [[instances.metric_filters]] 20 | # # metric name 参考 https://help.aliyun.com/document_detail/163515.htm?spm=a2c4g.11186623.0.0.401d15c73Z0dZh 21 | # # 参考页面中的Metric Id 填入下面的metricName ,页面中包含中文的Metric Name对应接口中的Description 22 | # namespace="" 23 | # metric_names=["cpu_cores","vm.TcpCount"] 24 | -------------------------------------------------------------------------------- /conf/input.amd_rocm_smi/rocm.toml: -------------------------------------------------------------------------------- 1 | # Query statistics from AMD Graphics cards using rocm-smi binary 2 | # bin_path = "/opt/rocm/bin/rocm-smi" 3 | 4 | ## Optional: timeout for GPU polling 5 | # timeout = "5s" -------------------------------------------------------------------------------- /conf/input.apache/apache.toml: -------------------------------------------------------------------------------- 1 | [[instances]] 2 | 3 | # https://statuslist.app/apache/apache-status-page-simple-setup-guide/ 4 | # scrape_uri = "http://localhost/server-status/?auto" 5 | # host_override = "" 6 | # insecure = false 7 | # custom_headers = {} 8 | # level: debug,info,warn,error 9 | # log_level = "info" 10 | -------------------------------------------------------------------------------- /conf/input.arp_packet/arp_packet.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | #eth_device="ens192" 
-------------------------------------------------------------------------------- /conf/input.bind/bind.toml: -------------------------------------------------------------------------------- 1 | [[instances]] 2 | urls = [ 3 | # "http://localhost:8053/xml/v3", 4 | ] 5 | gather_memory_contexts = true 6 | gather_views = true 7 | timeout = "5s" 8 | # labels={app="bind"} 9 | -------------------------------------------------------------------------------- /conf/input.cadvisor/cadvisor.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | # url = "https://1.2.3.4:10250" 6 | # type = "kubelet" 7 | ## url = "http://1.2.3.4:8080/metrics" 8 | ## type = "cadvisor" 9 | 10 | # url_label_key = "instance" 11 | # url_label_value = "{{.Host}}" 12 | # bearer_token_string = "eyJlonglongxxxx.eyJlonglongyyyy.oQsXlonglongZZZ" 13 | ## bearer_token_file = "/path/to/token/file" 14 | 15 | # ignore_label_keys = ["id","name", "container_label*"] 16 | ## choose_label_keys = ["id"] 17 | 18 | # timeout = "3s" 19 | 20 | # use_tls = true 21 | ## tls_min_version = "1.2" 22 | ## tls_ca = "/etc/categraf/ca.pem" 23 | ## tls_cert = "/etc/categraf/cert.pem" 24 | ## tls_key = "/etc/categraf/key.pem" 25 | ## Use TLS but skip chain & host verification 26 | ## insecure_skip_verify = true -------------------------------------------------------------------------------- /conf/input.chrony/chrony.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | ## If true, chronyc tries to perform a DNS lookup for the time server. 5 | #dns_lookup = false 6 | 7 | # Get standard chrony metrics, requires chronyc executable. 8 | #chronyc_command = "/usr/bin/chronyc" 9 | chronyc_command = "" -------------------------------------------------------------------------------- /conf/input.conntrack/conntrack.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | files = [ 5 | "ip_conntrack_count", 6 | "ip_conntrack_max", 7 | "nf_conntrack_count", 8 | "nf_conntrack_max" 9 | ] 10 | 11 | dirs = [ 12 | "/proc/sys/net/ipv4/netfilter", 13 | "/proc/sys/net/netfilter" 14 | ] 15 | 16 | # ignore errors 17 | quiet = true -------------------------------------------------------------------------------- /conf/input.cpu/cpu.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # whether collect per cpu 5 | # collect_per_cpu = false 6 | -------------------------------------------------------------------------------- /conf/input.dcgm/exporter.toml: -------------------------------------------------------------------------------- 1 | #[[instances]] 2 | # path to the file, that contains the DCGM fields to collect 3 | # collectors = "conf/input.dcgm/default-counters.csv" 4 | 5 | # Enable kubernetes mapping metrics to kubernetes pods 6 | # kubernetes=false 7 | 8 | # Choose Type of GPU ID to use to map kubernetes resources to pods. 
Possible values: "uid", "device-name" 9 | # kubernetes-gpu-id-type = "uid" 10 | 11 | # Use old 1.x namespace 12 | # use-old-namespace = false 13 | 14 | cpu-devices = "f" 15 | 16 | # gpu devices 17 | devices = "f" 18 | 19 | switch-devices = "f" 20 | 21 | # ConfigMap : for metric data 22 | configmap-data = "none" 23 | 24 | # Connect to remote hostengine at : 25 | # remote-hostengine-info = "localhost:5555" 26 | 27 | # Accept GPUs that are fake, for testing purposes only 28 | # fake-gpus = false 29 | 30 | # Replaces every blank space in the GPU model name with a dash, ensuring a continuous, space-free identifier. 31 | # replace-blanks-in-model-name = false 32 | -------------------------------------------------------------------------------- /conf/input.disk/disk.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # By default stats will be gathered for all mount points. 5 | # # Set mount_points will restrict the stats to only the specified mount points. 6 | # mount_points = ["/"] 7 | 8 | # Ignore mount points by filesystem type. 9 | ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs", "nsfs", "CDFS", "fuse.juicefs"] 10 | 11 | ignore_mount_points = ["/boot", "/var/lib/kubelet/pods"] 12 | -------------------------------------------------------------------------------- /conf/input.diskio/diskio.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # By default, categraf will gather stats for all devices including disk partitions. 5 | # # Setting devices will restrict the stats to the specified devices. 6 | # devices = ["sda", "sdb", "vd*"] -------------------------------------------------------------------------------- /conf/input.dns_query/dns_query.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | # # append some labels for series 6 | # labels = { region="cloud", product="n9e" } 7 | 8 | # # interval = global.interval * interval_times 9 | # interval_times = 1 10 | 11 | # # 12 | auto_detect_local_dns_server = false 13 | 14 | ## servers to query 15 | # servers = ["8.8.8.8"] 16 | servers = [] 17 | 18 | ## Network is the network protocol name. 19 | # network = "udp" 20 | 21 | ## Domains or subdomains to query. 22 | # domains = ["."] 23 | 24 | ## Query record type. 25 | ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. 26 | # record_type = "A" 27 | 28 | ## Dns server port. 29 | # port = 53 30 | 31 | ## Query timeout in seconds. 
32 | # timeout = 2
--------------------------------------------------------------------------------
/conf/input.emc_unity/emc_unity.toml:
--------------------------------------------------------------------------------
1 | [[instances]]
2 | agent_host_tag = "ident"
3 | 
4 | [[instances.addresses]]
5 | # example: https://192.168.1.1
6 | url = ""
7 | username = ""
8 | password = ""
9 | timeout = ""
--------------------------------------------------------------------------------
/conf/input.exec/exec.toml:
--------------------------------------------------------------------------------
1 | # # collect interval
2 | # interval = 15
3 | 
4 | [[instances]]
5 | # # commands, support glob
6 | commands = [
7 |     # "/opt/categraf/scripts/*.sh"
8 | ]
9 | 
10 | # # timeout for each command to complete
11 | # timeout = 5
12 | 
13 | # # interval = global.interval * interval_times
14 | # interval_times = 1
15 | 
16 | # # choices: influx prometheus falcon
17 | # # influx stdout example: measurement,labelkey1=labelval1,labelkey2=labelval2 field1=1.2,field2=2.3
18 | # data_format = "influx"
19 | 
--------------------------------------------------------------------------------
/conf/input.googlecloud/gcp.toml:
--------------------------------------------------------------------------------
1 | #interval=60
2 | #[[instances]]
3 | #project_id="your-project-id"
4 | #credentials_file="/path/to/your/key.json"
5 | #delay="2m"
6 | #period="1m"
7 | #filter="metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.labels.zone=\"asia-northeast1-a\""
8 | #timeout="5s"
9 | #cache_ttl="1h"
10 | #gce_host_tag="xxx"
11 | #request_inflight=30
12 | 
--------------------------------------------------------------------------------
/conf/input.greenplum/greenplum.toml:
--------------------------------------------------------------------------------
1 | # # collect interval
2 | # interval = 15
3 | 
4 | 
--------------------------------------------------------------------------------
/conf/input.haproxy/haproxy.toml:
--------------------------------------------------------------------------------
1 | [[instances]]
2 | # URI on which to scrape HAProxy.
3 | # e.g.
4 | # uri = "http://localhost:5000/baz?stats;csv"
5 | # uri = "http://user:pass@haproxy.example.com/haproxy?stats;csv"
6 | # uri = "unix:/run/haproxy/admin.sock"
7 | uri = ""
8 | 
9 | # Flag that enables SSL certificate verification for the scrape URI
10 | ssl_verify = false
11 | 
12 | # Comma-separated list of exported server metrics. See http://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1
13 | server_metric_fields = ""
14 | 
15 | # Comma-separated list of exported server states to exclude. See https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1, field 17 status
16 | server_exclude_states = ""
17 | 
18 | # Timeout for trying to get stats from HAProxy.
19 | timeout = "5s"
20 | 
21 | # Flag that enables using HTTP proxy settings from environment variables ($http_proxy, $https_proxy, $no_proxy)
22 | proxy_from_env = false
23 | 
--------------------------------------------------------------------------------
/conf/input.influxdb/influxdb.toml:
--------------------------------------------------------------------------------
1 | # # collect interval
2 | # interval = 15
3 | 
4 | [[instances]]
5 | 
6 | urls = [
7 |     # "http://localhost:8086/debug/vars"
8 | ]
9 | ## Username and password to send using HTTP Basic Authentication.
10 | # username = "" 11 | # password = "" 12 | 13 | ## Optional TLS Config 14 | # tls_ca = "/etc/categraf/ca.pem" 15 | # tls_cert = "/etc/categraf/cert.pem" 16 | # tls_key = "/etc/categraf/key.pem" 17 | ## Use TLS but skip chain & host verification 18 | # insecure_skip_verify = false 19 | 20 | ## http request & header timeout 21 | 22 | 23 | # # interval = global.interval * interval_times 24 | # interval_times = 1 25 | 26 | # important! use global unique string to specify instance 27 | # labels = { instance="n9e-10.2.3.4:6379" } 28 | 29 | ## Optional TLS Config 30 | # use_tls = false 31 | # tls_min_version = "1.2" 32 | # tls_ca = "/etc/categraf/ca.pem" 33 | # tls_cert = "/etc/categraf/cert.pem" 34 | # tls_key = "/etc/categraf/key.pem" 35 | ## Use TLS but skip chain & host verification 36 | # insecure_skip_verify = true 37 | -------------------------------------------------------------------------------- /conf/input.ipmi/conf.toml: -------------------------------------------------------------------------------- 1 | # Read metrics from the bare metal servers via freeipmi 2 | [[instances]] 3 | #target="localhost" 4 | #user = "user" 5 | #pass = "1234" 6 | #driver = "LAN_2_0" 7 | #privilege = "user" 8 | ## session-timeout, ms 9 | #timeout = 100000 10 | #collectors = [ "bmc", "ipmi", "chassis", "sel" ] 11 | #exclude_sensor_ids = [ 2, 29, 32, 50, 52, 55 ] 12 | #[instances.collector_cmd] 13 | #ipmi = "sudo" 14 | #sel = "sudo" 15 | 16 | #[instances.default_args] 17 | #ipmi = [ "--bridge-sensors" ] 18 | 19 | #[instances.custom_args] 20 | #ipmi = [ "--bridge-sensors" ] 21 | #sel = [ "ipmi-sel" ] 22 | 23 | -------------------------------------------------------------------------------- /conf/input.ipvs/ipvs.toml: -------------------------------------------------------------------------------- 1 | # Collect virtual and real server stats from Linux IPVS 2 | # no configuration 3 | -------------------------------------------------------------------------------- /conf/input.jenkins/jenkins.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | # Address (host:port) of jenkins server. 
6 | # jenkins_url = "http://my-jenkins-instance:8080" 7 | 8 | #jenkins_username = "admin" 9 | #jenkins_password = "" 10 | 11 | #response_timeout = "5s" 12 | 13 | -------------------------------------------------------------------------------- /conf/input.jolokia_agent_misc/zookeeper.toml: -------------------------------------------------------------------------------- 1 | [[instances]] 2 | urls = ["http://localhost:8080/jolokia"] 3 | name_prefix = "zk_" 4 | 5 | [[instances.metric]] 6 | name = "quorum" 7 | mbean = "org.apache.ZooKeeperService:name0=*" 8 | tag_keys = ["name0"] 9 | 10 | [[instances.metric]] 11 | name = "leader" 12 | mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Leader" 13 | tag_keys = ["name1"] 14 | 15 | [[instances.metric]] 16 | name = "follower" 17 | mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Follower" 18 | tag_keys = ["name1"] 19 | -------------------------------------------------------------------------------- /conf/input.kernel/kernel.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | -------------------------------------------------------------------------------- /conf/input.linux_sysctl_fs/linux_sysctl_fs.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | -------------------------------------------------------------------------------- /conf/input.mem/mem.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # whether collect platform specified metrics 5 | collect_platform_fields = true 6 | -------------------------------------------------------------------------------- /conf/input.mtail/mtail.toml: -------------------------------------------------------------------------------- 1 | [[instances]] 2 | # progs = "/path/to/prog1" # prog dir1 3 | # logs = ["/path/to/a.log", "path/to/b.log"] 4 | # override_timezone = "Asia/Shanghai" 5 | # emit_metric_timestamp = "true" #string type 6 | 7 | # [[instances]] 8 | # progs = "/path/to/prog2" # prog dir2 9 | # logs = ["/path/to/logdir/"] 10 | # override_timezone = "Asia/Shanghai" 11 | # emit_metric_timestamp = "true" # string type 12 | -------------------------------------------------------------------------------- /conf/input.nats/nats.toml: -------------------------------------------------------------------------------- 1 | # Provides metrics about the state of a NATS server 2 | # This plugin does NOT support FreeBSD 3 | # # collect interval 4 | # interval = 15 5 | 6 | [[instances]] 7 | ## The address of the monitoring endpoint of the NATS server 8 | # server = "http://localhost:8222" 9 | server = "" 10 | 11 | ## Set response_timeout (default 5 seconds) 12 | response_timeout = "5s" 13 | 14 | ## interval = global.interval * interval_times 15 | # interval_times = 1 -------------------------------------------------------------------------------- /conf/input.net/net.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # whether collect protocol stats on Linux 5 | # collect_protocol_stats = false 6 | 7 | # # setting interfaces will tell categraf to gather these explicit interfaces 8 | # interfaces = ["eth0"] 9 | 10 | # enable_loopback_stats=true 11 | # enable_link_down_stats=true 12 | -------------------------------------------------------------------------------- 
/conf/input.netstat/netstat.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | disable_summary_stats = false 5 | ## if the machine has many network connections, this plugin may exhaust your CPU; disable connection stats to avoid this 6 | disable_connection_stats = true 7 | 8 | tcp_ext = false 9 | ip_ext = false 10 | -------------------------------------------------------------------------------- /conf/input.netstat_filter/netstat_filter.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | [[instances]] 4 | # laddr_ip = "" 5 | # laddr_port = 0 6 | # raddr_ip = "" 7 | # raddr_port = 0 8 | -------------------------------------------------------------------------------- /conf/input.node_exporter/exporter.toml: -------------------------------------------------------------------------------- 1 | #collectors=["--path.procfs=/host/proc", "--collector.cpu", "collector.ntp"] 2 | collectors=[] 3 | -------------------------------------------------------------------------------- /conf/input.nsq/nsq.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # [[instances]] 5 | ## The NSQ API URI used to collect statistical information. 6 | # targets = ["http://localhost:4151"] 7 | 8 | # headers={Authorization="", X-Forwarded-For="", Host=""} 9 | 10 | # timeout="5s" 11 | 12 | # # basic auth 13 | # username="" 14 | # password="" 15 | 16 | ## append some labels for series 17 | # labels = { product="nsq" } 18 | 19 | ## interval = global.interval * interval_times 20 | # interval_times = 1 21 | -------------------------------------------------------------------------------- /conf/input.ntp/ntp.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # ntp servers 5 | # ntp_servers = ["ntp.aliyun.com"] 6 | 7 | # # response timeout in seconds 8 | # timeout = 5 9 | -------------------------------------------------------------------------------- /conf/input.nvidia_smi/nvidia_smi.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # exec local command 5 | # e.g. nvidia_smi_command = "nvidia-smi" 6 | nvidia_smi_command = "" 7 | 8 | # exec remote command 9 | # nvidia_smi_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null SSH_USER@SSH_HOST nvidia-smi" 10 | 11 | # Comma-separated list of the query fields. 12 | # You can find out possible fields by running `nvidia-smi --help-query-gpus`. 13 | # The value `AUTO` will automatically detect the fields to query. 14 | query_field_names = "AUTO" 15 | 16 | # query_timeout is used to set the query timeout to avoid delaying data collection.
17 | query_timeout = "5s" -------------------------------------------------------------------------------- /conf/input.oracle/oracle.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # [[instances]] 5 | # address = "10.1.2.3:1521/orcl" 6 | # username = "monitor" 7 | # password = "123456" 8 | # is_sys_dba = false 9 | # is_sys_oper = false 10 | # disable_connection_pool = false 11 | # max_open_connections = 5 12 | # # interval = global.interval * interval_times 13 | # interval_times = 1 14 | # labels = { region="cloud" } 15 | 16 | # [[instances.metrics]] 17 | # mesurement = "sessions" 18 | # label_fields = [ "status", "type" ] 19 | # metric_fields = [ "value" ] 20 | # timeout = "3s" 21 | # request = ''' 22 | # SELECT status, type, COUNT(*) as value FROM v$session GROUP BY status, type 23 | # ''' 24 | 25 | # [[instances]] 26 | # address = "192.168.10.10:1521/orcl" 27 | # username = "monitor" 28 | # password = "123456" 29 | # is_sys_dba = false 30 | # is_sys_oper = false 31 | # disable_connection_pool = false 32 | # max_open_connections = 5 33 | # # labels = { region="local" } 34 | -------------------------------------------------------------------------------- /conf/input.processes/processes.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # force use ps command to gather 5 | # force_ps = false 6 | 7 | # # force use /proc to gather 8 | # force_proc = false -------------------------------------------------------------------------------- /conf/input.redis_sentinel/redis_sentinel.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | # [protocol://][:password]@address[:port] 6 | # e.g. 
servers = ["tcp://localhost:26379"] 7 | servers = [] 8 | 9 | # # interval = global.interval * interval_times 10 | # interval_times = 1 11 | # add some dimension data by labels 12 | # labels = {} 13 | 14 | ## Optional TLS Config 15 | # use_tls = false 16 | # tls_min_version = "1.2" 17 | # tls_ca = "/etc/categraf/ca.pem" 18 | # tls_cert = "/etc/categraf/cert.pem" 19 | # tls_key = "/etc/categraf/key.pem" 20 | ## Use TLS but skip chain & host verification 21 | # insecure_skip_verify = true 22 | -------------------------------------------------------------------------------- /conf/input.rocketmq_offset/rocketmq_offset.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | # rocketmq_console_ip_port= 6 | # ignored_topics=[] 7 | # enable authentication; no need to configure these if the console has no auth 8 | # username = "" 9 | # password = "" 10 | 11 | -------------------------------------------------------------------------------- /conf/input.self_metrics/metrics.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | -------------------------------------------------------------------------------- /conf/input.sockstat/sockstat.toml: -------------------------------------------------------------------------------- 1 | 2 | # sockstat 3 | ## protocols to collect, valid values are "tcp", "udp", "tcp6", "udp6", "udplite", "raw", "frag", "udplite6", "raw6", "frag6" 4 | protocols = ["tcp", "udp", "tcp6", "udp6"] -------------------------------------------------------------------------------- /conf/input.supervisor/supervisor.toml: -------------------------------------------------------------------------------- 1 | # Gathers information about processes running under supervisor via its XML-RPC API 2 | [[instances]] 3 | ## URL of supervisor's XML-RPC endpoint. If basic auth is enabled in the supervisor http server, 4 | ## then you have to add credentials to the url (ex. http://login:pass@localhost:9001/RPC2) 5 | # url = "http://login:pass@localhost:9001/RPC2", e.g. url = "http://localhost:9001/RPC2" 6 | url ="" 7 | ## With the settings below you can manage gathering additional information about processes 8 | ## If both of them are empty, then all additional information will be collected.
9 | ## Currently supported additional metrics are: pid, rc 10 | # metrics_include = [] 11 | # metrics_exclude = ["pid", "rc"] -------------------------------------------------------------------------------- /conf/input.system/system.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # # whether to collect metric: system_n_users 5 | # collect_user_number = false -------------------------------------------------------------------------------- /conf/input.systemd/systemd.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | enable=false # set to true to enable collection 5 | #unit_include=".+" 6 | #unit_exclude="" 7 | enable_start_time_metrics=true # whether to collect service unit start time info, in seconds 8 | enable_task_metrics=true # whether to collect service unit task metrics 9 | enable_restarts_metrics=true # whether to collect service unit restart counts 10 | -------------------------------------------------------------------------------- /conf/input.tomcat/tomcat.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | # Gather metrics from the Tomcat server status page. 5 | [[instances]] 6 | ## URL of the Tomcat server status 7 | # url = "http://127.0.0.1:8080/manager/status/all?XML=true" 8 | url = "" 9 | 10 | ## HTTP Basic Auth Credentials 11 | # username = "tomcat" 12 | # password = "s3cret" 13 | 14 | ## Request timeout 15 | # timeout = "5s" 16 | 17 | # # interval = global.interval * interval_times 18 | # interval_times = 1 19 | 20 | # important! use global unique string to specify instance 21 | # labels = { instance="192.168.1.2:8080", url="-" } 22 | 23 | ## Optional TLS Config 24 | # use_tls = false 25 | # tls_min_version = "1.2" 26 | # tls_ca = "/etc/categraf/ca.pem" 27 | # tls_cert = "/etc/categraf/cert.pem" 28 | # tls_key = "/etc/categraf/key.pem" 29 | ## Use TLS but skip chain & host verification 30 | # insecure_skip_verify = true 31 | -------------------------------------------------------------------------------- /conf/input.traffic_server/traffic_server.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | targets = [ 6 | # "http://127.0.0.1:9999/_stats", 7 | ] 8 | 9 | 10 | ## HTTP Request Method 11 | # method = "GET" 12 | 13 | ## Set timeout (default 5 seconds) 14 | # timeout = "5s" 15 | -------------------------------------------------------------------------------- /conf/input.whois/whois.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | #interval = 3600 3 | # 4 | 5 | #[mappings] 6 | #"baidu.com" = { "app" = "baidu" } 7 | #"google.com" = { "app" = "google" } 8 | 9 | #[[instances]] 10 | ## [deprecated] Used to collect domain name information. 11 | #domain = "baidu.com" 12 | 13 | # domains 14 | #domains = ["baidu.com","google.com","aliyun.com"] 15 | 16 | ## timeout in seconds 17 | # timeout = 30 18 | 19 | ## whois server 20 | # server = "whois.iana.org" 21 | 22 | ## append some labels for series 23 | #labels = { region="n9e", product="test1" } 24 | 25 | ## interval = global.interval * interval_times 26 | #interval_times = 1 27 | 28 | ## concurrent 29 | # concurrent = 1 30 | 31 | 32 | #[[instances]] 33 | ## Used to collect domain name information.
34 | #domain = "google.com" 35 | 36 | ## append some labels for series 37 | #labels = { region="n9e", product="test2" } 38 | 39 | ## interval = global.interval * interval_times 40 | #interval_times = 1 41 | 42 | -------------------------------------------------------------------------------- /conf/input.xskyapi/xskyapi.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | # 4 | [[instances]] 5 | # # append some labels for series 6 | # labels = { region="cloud", product="n9e" } 7 | 8 | # # interval = global.interval * interval_times 9 | # interval_times = 1 10 | 11 | ## must be one of oss/gfs/eus 12 | dss_type = "oss" 13 | 14 | ## URL of each server in the service's cluster 15 | servers = [ 16 | #"http://x.x.x.x:xx" 17 | ] 18 | 19 | ## Set response_timeout (default 5 seconds) 20 | response_timeout = "5s" 21 | 22 | xms_auth_tokens = [ 23 | #"xxxxxxxxxxxxxxx" 24 | ] 25 | 26 | -------------------------------------------------------------------------------- /conf/input.zookeeper/zookeeper.toml: -------------------------------------------------------------------------------- 1 | # # collect interval 2 | # interval = 15 3 | 4 | [[instances]] 5 | # cluster_name = "dev-zk-cluster" 6 | # addresses = "127.0.0.1:2181" 7 | # timeout = 10 8 | 9 | # important! use global unique string to specify instance 10 | # labels = { instance="n9e-10.2.3.4:2181" } 11 | 12 | ## Optional TLS Config 13 | # use_tls = false 14 | # tls_min_version = "1.2" 15 | # tls_ca = "/etc/categraf/ca.pem" 16 | # tls_cert = "/etc/categraf/cert.pem" 17 | # tls_key = "/etc/categraf/key.pem" 18 | ## Use TLS but skip chain & host verification 19 | # insecure_skip_verify = true -------------------------------------------------------------------------------- /config/logs/constants.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package logs 9 | 10 | const ( 11 | // DateFormat is the default date format. 
12 | DateFormat = "2006-01-02T15:04:05.000000000Z" 13 | ) 14 | -------------------------------------------------------------------------------- /config/logs_none.go: -------------------------------------------------------------------------------- 1 | //go:build no_logs 2 | 3 | package config 4 | 5 | type Logs struct { 6 | } 7 | -------------------------------------------------------------------------------- /config/prometheus.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | type ( 4 | Prometheus struct { 5 | Enable bool `toml:"enable"` 6 | LogLevel string `toml:"log_level"` 7 | ScrapeConfigFile string `toml:"scrape_config_file"` 8 | WebAddress string `toml:"web_address"` 9 | StoragePath string `toml:"wal_storage_path"` 10 | 11 | MinBlockDuration Duration `toml:"min_block_duration"` 12 | MaxBlockDuration Duration `toml:"max_block_duration"` 13 | RetentionDuration Duration `toml:"retention_time"` 14 | RetentionSize string `toml:"retention_size"` 15 | } 16 | ) 17 | -------------------------------------------------------------------------------- /config/provider.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "flashcat.cloud/categraf/pkg/tls" 4 | 5 | type HTTPProviderConfig struct { 6 | tls.ClientConfig 7 | 8 | RemoteUrl string `toml:"remote_url"` 9 | Headers []string `toml:"headers"` 10 | AuthUsername string `toml:"basic_auth_user"` 11 | AuthPassword string `toml:"basic_auth_pass"` 12 | Timeout int `toml:"timeout"` 13 | ReloadInterval int `toml:"reload_interval"` 14 | } 15 | -------------------------------------------------------------------------------- /config/proxy.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/url" 7 | ) 8 | 9 | type HTTPProxy struct { 10 | HTTPProxyURL string `toml:"http_proxy"` 11 | } 12 | 13 | type proxyFunc func(req *http.Request) (*url.URL, error) 14 | 15 | func (p *HTTPProxy) Proxy() (proxyFunc, error) { 16 | if len(p.HTTPProxyURL) > 0 { 17 | address, err := url.Parse(p.HTTPProxyURL) 18 | if err != nil { 19 | return nil, fmt.Errorf("error parsing proxy url %q: %w", p.HTTPProxyURL, err) 20 | } 21 | return http.ProxyURL(address), nil 22 | } 23 | return http.ProxyFromEnvironment, nil 24 | } 25 | -------------------------------------------------------------------------------- /config/version.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | var Version = "unknown" 4 | -------------------------------------------------------------------------------- /doc/categraf-usage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/doc/categraf-usage.png -------------------------------------------------------------------------------- /doc/categraf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/doc/categraf.png -------------------------------------------------------------------------------- /doc/clickhouse.toml: -------------------------------------------------------------------------------- 1 | # clickhouse output plugin, sending metrics to clickhouse 2 | [metricshouse] 3 | enable = false 4 | debug = false 5 | endpoints = 
["10.1.1.7:9000", "10.1.1.8:9000"] 6 | database = "default" 7 | table = "" 8 | username = "" 9 | password = "" 10 | dial_timeout = "1s" 11 | max_open_conns = 10 12 | max_idle_conns = 5 13 | conn_max_lifetime = "1h" 14 | queue_size = 100000 15 | batch_size = 10000 16 | idle_duration = "30s" 17 | -------------------------------------------------------------------------------- /doc/flashduty.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/doc/flashduty.png -------------------------------------------------------------------------------- /doc/img/nightingale-template-center.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/doc/img/nightingale-template-center.png -------------------------------------------------------------------------------- /doc/laqun.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/doc/laqun.jpeg -------------------------------------------------------------------------------- /doc/why-choose-categraf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/doc/why-choose-categraf.png -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 2 | 3 | RUN echo 'hosts: files dns' >> /etc/nsswitch.conf 4 | 5 | RUN set -ex && \ 6 | mkdir -p /usr/bin /etc/categraf 7 | 8 | COPY categraf /usr/bin/categraf 9 | 10 | COPY conf /etc/categraf/conf 11 | 12 | COPY entrypoint.sh /entrypoint.sh 13 | 14 | CMD ["/entrypoint.sh"] 15 | -------------------------------------------------------------------------------- /docker/Dockerfile.goreleaser: -------------------------------------------------------------------------------- 1 | FROM --platform=$TARGETPLATFORM ubuntu:24.04 2 | 3 | RUN apt update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt -y install tzdata ca-certificates snmp snmpd wget curl vim iputils-ping net-tools freeipmi-tools smartmontools ncat lsof 4 | 5 | COPY docker/nsswitch.conf /etc/nsswitch.conf 6 | 7 | COPY categraf /usr/bin/categraf 8 | 9 | COPY docker/entrypoint.sh /entrypoint.sh 10 | 11 | COPY conf /etc/categraf/conf 12 | 13 | CMD ["/entrypoint.sh"] 14 | -------------------------------------------------------------------------------- /docker/Dockerfile.goreleaser.arm64: -------------------------------------------------------------------------------- 1 | FROM --platform=$TARGETPLATFORM ubuntu:24.04 2 | 3 | COPY docker/nsswitch.conf /etc/nsswitch.conf 4 | 5 | COPY categraf /usr/bin/categraf 6 | 7 | COPY docker/entrypoint.sh /entrypoint.sh 8 | 9 | COPY conf /etc/categraf/conf 10 | 11 | CMD ["/entrypoint.sh"] 12 | -------------------------------------------------------------------------------- /docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Allow categraf to send ICMP packets and bind to privileged ports 5 | setcap cap_net_raw,cap_net_bind_service+ep /usr/bin/categraf || echo "Failed to set additional capabilities on
/usr/bin/categraf" 6 | 7 | if [ $N9E_HOST ];then 8 | sed -i "s/127.0.0.1:17000/$N9E_HOST/g" /etc/categraf/conf/config.toml 9 | fi 10 | exec /usr/bin/categraf -configs=/etc/categraf/conf 11 | -------------------------------------------------------------------------------- /docker/nsswitch.conf: -------------------------------------------------------------------------------- 1 | # /etc/nsswitch.conf 2 | # 3 | # Example configuration of GNU Name Service Switch functionality. 4 | # If you have the `glibc-doc-reference' and `info' packages installed, try: 5 | # `info libc "Name Service Switch"' for information about this file. 6 | 7 | passwd: files 8 | group: files 9 | shadow: files 10 | gshadow: files 11 | 12 | hosts: files dns 13 | networks: files 14 | 15 | protocols: db files 16 | services: db files 17 | ethers: db files 18 | rpc: db files 19 | 20 | netgroup: nis 21 | 22 | hosts: files dns 23 | -------------------------------------------------------------------------------- /heartbeat/cpu/cpu.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | package cpu 7 | 8 | type Cpu struct{} 9 | 10 | const name = "cpu" 11 | 12 | func (self *Cpu) Name() string { 13 | return name 14 | } 15 | 16 | func (self *Cpu) Collect() (result interface{}, err error) { 17 | result, err = getCpuInfo() 18 | return 19 | } 20 | -------------------------------------------------------------------------------- /heartbeat/cpu/cpu_windows_arm64.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | package cpu 7 | 8 | func computeCoresAndProcessors() (cpuInfo CPU_INFO, err error) { 9 | return CPU_INFO{}, nil 10 | } 11 | -------------------------------------------------------------------------------- /heartbeat/filesystem/filesystem_common.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | package filesystem 7 | 8 | type FileSystem struct{} 9 | 10 | const name = "filesystem" 11 | 12 | func (self *FileSystem) Name() string { 13 | return name 14 | } 15 | 16 | func (self *FileSystem) Collect() (result interface{}, err error) { 17 | result, err = getFileSystemInfo() 18 | return 19 | } 20 | -------------------------------------------------------------------------------- /heartbeat/filesystem/filesystem_darwin.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 
5 | 6 | package filesystem 7 | 8 | var dfOptions = []string{"-l", "-k"} 9 | var expectedLength = 9 10 | 11 | func updatefileSystemInfo(values []string) map[string]string { 12 | return map[string]string{ 13 | "name": values[0], 14 | "kb_size": values[1], 15 | "mounted_on": values[8], 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /heartbeat/filesystem/filesystem_linux.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | package filesystem 7 | 8 | var dfOptions = []string{"-lP"} 9 | var expectedLength = 6 10 | 11 | func updatefileSystemInfo(values []string) map[string]string { 12 | return map[string]string{ 13 | "name": values[0], 14 | "kb_size": values[1], 15 | "mounted_on": values[5], 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /heartbeat/memory/memory_darwin.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | package memory 7 | 8 | import ( 9 | "os/exec" 10 | "regexp" 11 | "strings" 12 | ) 13 | 14 | func getMemoryInfo() (memoryInfo map[string]string, err error) { 15 | memoryInfo = make(map[string]string) 16 | 17 | out, err := exec.Command("sysctl", "-n", "hw.memsize").Output() 18 | if err == nil { 19 | memoryInfo["total"] = strings.Trim(string(out), "\n") 20 | } 21 | 22 | out, err = exec.Command("sysctl", "-n", "vm.swapusage").Output() 23 | if err == nil { 24 | swap := regexp.MustCompile("total = ").Split(string(out), 2)[1] 25 | memoryInfo["swap_total"] = strings.Split(swap, " ")[0] 26 | } 27 | 28 | return 29 | } 30 | -------------------------------------------------------------------------------- /heartbeat/network/ipconfig_test_sample.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/heartbeat/network/ipconfig_test_sample.txt -------------------------------------------------------------------------------- /heartbeat/network/network.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 
5 | 6 | //go:build linux || darwin 7 | // +build linux darwin 8 | 9 | package network 10 | 11 | func getNetworkInfo() (networkInfo map[string]interface{}, err error) { 12 | networkInfo = make(map[string]interface{}) 13 | 14 | macaddress, err := macAddress() 15 | if err != nil { 16 | return networkInfo, err 17 | } 18 | networkInfo["macaddress"] = macaddress 19 | 20 | ipAddress, err := externalIpAddress() 21 | if err != nil { 22 | return networkInfo, err 23 | } 24 | networkInfo["ipaddress"] = ipAddress 25 | 26 | ipAddressV6, err := externalIpv6Address() 27 | if err != nil { 28 | return networkInfo, err 29 | } 30 | // We append an IPv6 address to the payload only if IPv6 is enabled 31 | if ipAddressV6 != "" { 32 | networkInfo["ipaddressv6"] = ipAddressV6 33 | } 34 | 35 | return 36 | } 37 | -------------------------------------------------------------------------------- /heartbeat/platform/platform.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | //go:build linux || darwin 7 | // +build linux darwin 8 | 9 | package platform 10 | 11 | import ( 12 | "fmt" 13 | "os/exec" 14 | "regexp" 15 | "strings" 16 | ) 17 | 18 | // GetArchInfo() returns basic host architecture information 19 | func GetArchInfo() (archInfo map[string]interface{}, err error) { 20 | archInfo = make(map[string]interface{}) 21 | 22 | out, err := exec.Command("uname", unameOptions...).Output() 23 | if err != nil { 24 | return nil, err 25 | } 26 | line := fmt.Sprintf("%s", out) 27 | values := regexp.MustCompile(" +").Split(line, 7) 28 | updateArchInfo(archInfo, values) 29 | 30 | out, err = exec.Command("uname", "-v").Output() 31 | if err != nil { 32 | return nil, err 33 | } 34 | archInfo["kernel_version"] = strings.Trim(string(out), "\n") 35 | 36 | return 37 | } 38 | -------------------------------------------------------------------------------- /heartbeat/platform/platform_android.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | //go:build android 7 | // +build android 8 | 9 | package platform 10 | 11 | type Platform struct{} 12 | 13 | const name = "platform" 14 | 15 | func (self *Platform) Name() string { 16 | return name 17 | } 18 | 19 | func (self *Platform) Collect() (result interface{}, err error) { 20 | result, err = getPlatformInfo() 21 | return 22 | } 23 | 24 | func getPlatformInfo() (platformInfo map[string]interface{}, err error) { 25 | 26 | return 27 | } 28 | -------------------------------------------------------------------------------- /heartbeat/platform/platform_linux.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 
5 | 6 | package platform 7 | 8 | import "strings" 9 | 10 | var unameOptions = []string{"-s", "-n", "-r", "-m", "-p", "-i", "-o"} 11 | 12 | func updateArchInfo(archInfo map[string]interface{}, values []string) { 13 | archInfo["kernel_name"] = values[0] 14 | archInfo["hostname"] = values[1] 15 | archInfo["kernel_release"] = values[2] 16 | archInfo["machine"] = values[3] 17 | archInfo["processor"] = values[4] 18 | archInfo["hardware_platform"] = values[5] 19 | archInfo["os"] = strings.Trim(values[6], "\n") 20 | } 21 | -------------------------------------------------------------------------------- /heartbeat/platform/platform_windows_arm64.go: -------------------------------------------------------------------------------- 1 | // This file is licensed under the MIT License. 2 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 3 | // Copyright © 2015 Kentaro Kuribayashi 4 | // Copyright 2014-present Datadog, Inc. 5 | 6 | package platform 7 | 8 | type WKSTA_INFO_100 struct { 9 | wki100_platform_id uint32 10 | wki100_computername string 11 | wki100_langroup string 12 | wki100_ver_major uint32 13 | wki100_ver_minor uint32 14 | } 15 | 16 | type SERVER_INFO_101 struct { 17 | sv101_platform_id uint32 18 | sv101_name string 19 | sv101_version_major uint32 20 | sv101_version_minor uint32 21 | sv101_type uint32 22 | sv101_comment string 23 | } 24 | 25 | func platGetVersion(outdata *byte) (maj uint64, min uint64, err error) { 26 | return 0, 0, nil 27 | } 28 | 29 | func platGetServerInfo(data *byte) (si101 SERVER_INFO_101) { 30 | return SERVER_INFO_101{} 31 | } 32 | -------------------------------------------------------------------------------- /ibex/cmd_nix.go: -------------------------------------------------------------------------------- 1 | //go:build !no_ibex && !windows 2 | 3 | package ibex 4 | 5 | import ( 6 | "os/exec" 7 | "syscall" 8 | ) 9 | 10 | func CmdStart(cmd *exec.Cmd) error { 11 | cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 12 | return cmd.Start() 13 | } 14 | 15 | func CmdKill(cmd *exec.Cmd) error { 16 | return syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) 17 | } 18 | 19 | func ansiToUtf8(mbcs []byte) (string, error) { 20 | // fake 21 | return string(mbcs), nil 22 | } 23 | 24 | func utf8ToAnsi(utf8 string) (string, error) { 25 | // fake 26 | return utf8, nil 27 | } 28 | -------------------------------------------------------------------------------- /ibex/types/types.go: -------------------------------------------------------------------------------- 1 | //go:build !no_ibex 2 | 3 | package types 4 | 5 | type TaskMetaResponse struct { 6 | Message string 7 | Script string 8 | Args string 9 | Account string 10 | Stdin string 11 | } 12 | 13 | type ReportTask struct { 14 | Id int64 15 | Clock int64 16 | Status string 17 | Stdout string 18 | Stderr string 19 | } 20 | 21 | type ReportRequest struct { 22 | Ident string 23 | ReportTasks []ReportTask 24 | } 25 | 26 | type AssignTask struct { 27 | Id int64 28 | Clock int64 29 | Action string 30 | } 31 | 32 | type ReportResponse struct { 33 | Message string 34 | AssignTasks []AssignTask 35 | } 36 | -------------------------------------------------------------------------------- /inputs/README.md: -------------------------------------------------------------------------------- 1 | # inputs 2 | 3 | 每个采集插件就是一个目录,大家可以点击各个目录进去查看,每个插件的使用方式,都提供了 README 和默认配置,一目了然。如果想贡献插件,可以拷贝 tpl 目录的代码,基于 tpl 做改动。 -------------------------------------------------------------------------------- /inputs/activemq/README.md: 
-------------------------------------------------------------------------------- 1 | # activemq 2 | 3 | ActiveMQ 当前可以使用 jolokia_agent 插件来监控,通过读取 jmx 数据的方式获取监控指标,配置文件可以参考:[activemq.toml](../../conf/input.jolokia_agent_misc/activemq.toml) 4 | -------------------------------------------------------------------------------- /inputs/aliyun/internal/manager/ecs.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | const ( 8 | ecsNamespace = "acs_ecs_dashboard" 9 | ) 10 | 11 | func (m *Manager) EcsKey(instanceID string) string { 12 | return fmt.Sprintf("%s||%s", ecsNamespace, instanceID) 13 | } 14 | -------------------------------------------------------------------------------- /inputs/aliyun/internal/manager/manager.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | cms20190101 "github.com/alibabacloud-go/cms-20190101/v8/client" 5 | cms2021101 "github.com/alibabacloud-go/cms-export-20211101/v2/client" 6 | ) 7 | 8 | type ( 9 | Manager struct { 10 | cms *cmsClient 11 | cmsv2 *cmsV2Client 12 | } 13 | 14 | cmsClient struct { 15 | region string 16 | endpoint string 17 | apikey string 18 | apiSecret string 19 | 20 | *cms20190101.Client 21 | } 22 | cmsV2Client struct { 23 | region string 24 | endpoint string 25 | apikey string 26 | apiSecret string 27 | 28 | *cms2021101.Client 29 | } 30 | ) 31 | 32 | type Option func(manager *Manager) error 33 | 34 | func New(opts ...Option) (*Manager, error) { 35 | var ( 36 | err error 37 | ) 38 | 39 | m := &Manager{} 40 | for _, opt := range opts { 41 | err = opt(m) 42 | if err != nil { 43 | return nil, err 44 | } 45 | } 46 | return m, nil 47 | } 48 | -------------------------------------------------------------------------------- /inputs/apache/README.md: -------------------------------------------------------------------------------- 1 | forked from [apache/README.md](https://github.com/Lusitaniae/apache_exporter/tree/master/README.md) 2 | 3 | ``` 4 | 5 | [[instances]] 6 | ## apache 如何设置server-status页面 https://statuslist.app/apache/apache-status-page-simple-setup-guide/ 7 | 8 | ## 这里填写apache server-status页面的地址 9 | # scrape_uri = "http://localhost/server-status/?auto" 10 | 11 | ## 是否覆盖host 12 | # host_override = "" 13 | 14 | ## 是否跳过https证书验证 15 | # insecure = false 16 | 17 | ## 自定义请求header 18 | # custom_headers = {} 19 | 20 | ## 日志级别 21 | # level: debug,info,warn,error 22 | # log_level = "info" 23 | ``` -------------------------------------------------------------------------------- /inputs/arp_packet/README.md: -------------------------------------------------------------------------------- 1 | # 调整间隔时间 2 | 如有诉求对此插件本身的采集间隔时间调整的话就启用,单位为秒 3 | interval = 15 4 | 5 | # 获取被监控端设备的网卡名称 6 | 可用以下命令获取网卡名称列表 7 | ``` 8 | ip addr | grep '^[0-9]' |awk -F':' '{print $2}' 9 | 10 | lo 11 | eth0 12 | br-153e7f4f0c83 13 | br-2f302c2a8faa 14 | br-5ae0cdb82efc 15 | br-68cba8773a8c 16 | br-c50ca3122079 17 | docker0 18 | br-fd769e4347bd 19 | veth944ac75@if52 20 | ``` 21 | # 在数组instances中启用eth_device 22 | 将以上获取的网卡列表,根据自己的诉求填入,如eth0 23 | ``` 24 | eth_device="eth0" 25 | ``` 26 | # 测试是否能获取到值 27 | ``` 28 | ./categraf --test --inputs arp_packet 29 | 30 | ``` 31 | -------------------------------------------------------------------------------- /inputs/arp_packet/arp_packet_none.go: -------------------------------------------------------------------------------- 1 | package arp_packet 2 | 
-------------------------------------------------------------------------------- /inputs/bind/README.md: -------------------------------------------------------------------------------- 1 | forked from [telegraf/bind](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/bind) 2 | 3 | Configuration example: 4 | ``` 5 | [[instances]] 6 | urls = [ 7 | #"http://localhost:8053/xml/v3", 8 | ] 9 | 10 | timeout = "5s" 11 | gather_memory_contexts = true 12 | gather_views = true 13 | ``` -------------------------------------------------------------------------------- /inputs/bitbucket/README.md: -------------------------------------------------------------------------------- 1 | # bitbucket 2 | 3 | Bitbucket can currently be monitored with the jolokia_agent plugin, which collects metrics by reading JMX data; see the reference config: [bitbucket.toml](../../conf/input.jolokia_agent_misc/bitbucket.toml) 4 | -------------------------------------------------------------------------------- /inputs/cadvisor/cadvisor.go: -------------------------------------------------------------------------------- 1 | package cadvisor 2 | 3 | import ( 4 | "flashcat.cloud/categraf/config" 5 | "flashcat.cloud/categraf/inputs" 6 | ) 7 | 8 | const ( 9 | inputName = "cadvisor" 10 | ) 11 | 12 | type Cadvisor struct { 13 | config.PluginConfig 14 | Instances []*Instance `toml:"instances"` 15 | } 16 | 17 | func init() { 18 | inputs.Add(inputName, func() inputs.Input { 19 | return &Cadvisor{} 20 | }) 21 | } 22 | 23 | func (c *Cadvisor) Clone() inputs.Input { 24 | return &Cadvisor{} 25 | } 26 | 27 | func (c *Cadvisor) Name() string { 28 | return inputName 29 | } 30 | 31 | func (c *Cadvisor) GetInstances() []inputs.Instance { 32 | ret := make([]inputs.Instance, len(c.Instances)) 33 | for i := 0; i < len(c.Instances); i++ { 34 | ret[i] = c.Instances[i] 35 | } 36 | return ret 37 | } 38 | 39 | func (c *Cadvisor) Drop() { 40 | for i := 0; i < len(c.Instances); i++ { 41 | c.Instances[i].Drop() 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /inputs/cassandra/README.md: -------------------------------------------------------------------------------- 1 | # cassandra 2 | 3 | Cassandra can currently be monitored with the jolokia_agent plugin, which collects metrics by reading JMX data; see the reference config: [cassandra.toml](../../conf/input.jolokia_agent_misc/cassandra.toml) 4 | -------------------------------------------------------------------------------- /inputs/conntrack/README.md: -------------------------------------------------------------------------------- 1 | # conntrack 2 | 3 | Seasoned ops engineers have probably run into the "conntrack table full" error; this plugin monitors the state of the conntrack table, forked from [telegraf/conntrack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/conntrack) 4 | 5 | ## Measurements & Fields 6 | 7 | - conntrack 8 | - ip_conntrack_count (int, count): the number of entries in the conntrack table 9 | - ip_conntrack_max (int, size): the max capacity of the conntrack table 10 | 11 | ## Alerting 12 | 13 | ``` 14 | 100 * conntrack_ip_conntrack_count / conntrack_ip_conntrack_max > 80 15 | 100 * conntrack_nf_conntrack_count / conntrack_nf_conntrack_max > 80 16 | ``` -------------------------------------------------------------------------------- /inputs/conntrack/conntrack_nolinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package conntrack 5 | -------------------------------------------------------------------------------- /inputs/cpu/README.md: -------------------------------------------------------------------------------- 1 | # cpu 2 | 3 | CPU
采集插件很简单,自动采集本机 CPU 的使用率、空闲率等等,默认采集的是整机的,如果想采集单核的,就开启这个配置: 4 | 5 | ```ini 6 | collect_per_cpu = true 7 | ``` 8 | 9 | 其中 CPU 使用率的指标名字是 cpu_usage_active 10 | 11 | ## 监控大盘 12 | 13 | 该插件没有单独的监控大盘,OS 的监控大盘统一放到 system 下面了 -------------------------------------------------------------------------------- /inputs/dcgm/exporter_none.go: -------------------------------------------------------------------------------- 1 | package dcgm 2 | -------------------------------------------------------------------------------- /inputs/disk/README.md: -------------------------------------------------------------------------------- 1 | # disk 2 | 3 | 该插件采集磁盘利用率、inode利用率等,默认配置就是推荐配置,如果有发现不符合预期的情况再考虑调整。 4 | 5 | ## 监控大盘 6 | 7 | 该插件没有单独的监控大盘,OS 的监控大盘统一放到 system 下面了 -------------------------------------------------------------------------------- /inputs/diskio/README.md: -------------------------------------------------------------------------------- 1 | # diskio 2 | 3 | 采集硬盘IO的情况 4 | 5 | ## 监控大盘 6 | 7 | 该插件没有单独的监控大盘,OS 的监控大盘统一放到 system 下面了 -------------------------------------------------------------------------------- /inputs/docker/errors.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import "errors" 4 | 5 | var ( 6 | errInfoTimeout = errors.New("timeout retrieving docker engine info") 7 | errStatsTimeout = errors.New("timeout retrieving container stats") 8 | errInspectTimeout = errors.New("timeout retrieving container environment") 9 | errListTimeout = errors.New("timeout retrieving container list") 10 | errServiceTimeout = errors.New("timeout retrieving swarm service list") 11 | ) 12 | -------------------------------------------------------------------------------- /inputs/elasticsearch/collector/versions_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package collector 15 | 16 | var testElasticsearchVersions = []string{ 17 | "5.4.2", 18 | "5.6.16", 19 | "6.5.4", 20 | "6.8.8", 21 | "7.3.0", 22 | "7.6.2", 23 | "7.13.1", 24 | } 25 | -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/clusterhealth/1.7.6.json: -------------------------------------------------------------------------------- 1 | { 2 | "active_primary_shards": 5, 3 | "active_shards": 5, 4 | "cluster_name": "elasticsearch", 5 | "delayed_unassigned_shards": 0, 6 | "initializing_shards": 0, 7 | "number_of_data_nodes": 1, 8 | "number_of_in_flight_fetch": 0, 9 | "number_of_nodes": 1, 10 | "number_of_pending_tasks": 0, 11 | "relocating_shards": 0, 12 | "status": "yellow", 13 | "timed_out": false, 14 | "unassigned_shards": 5 15 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/clusterhealth/2.4.5.json: -------------------------------------------------------------------------------- 1 | { 2 | "active_primary_shards": 5, 3 | "active_shards": 5, 4 | "active_shards_percent_as_number": 50, 5 | "cluster_name": "elasticsearch", 6 | "delayed_unassigned_shards": 0, 7 | "initializing_shards": 0, 8 | "number_of_data_nodes": 1, 9 | "number_of_in_flight_fetch": 0, 10 | "number_of_nodes": 1, 11 | "number_of_pending_tasks": 0, 12 | "relocating_shards": 0, 13 | "status": "yellow", 14 | "task_max_waiting_in_queue_millis": 12, 15 | "timed_out": false, 16 | "unassigned_shards": 5 17 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/clusterhealth/5.4.2.json: -------------------------------------------------------------------------------- 1 | { 2 | "active_primary_shards": 5, 3 | "active_shards": 5, 4 | "active_shards_percent_as_number": 50, 5 | "cluster_name": "elasticsearch", 6 | "delayed_unassigned_shards": 0, 7 | "initializing_shards": 0, 8 | "number_of_data_nodes": 1, 9 | "number_of_in_flight_fetch": 0, 10 | "number_of_nodes": 1, 11 | "number_of_pending_tasks": 0, 12 | "relocating_shards": 0, 13 | "status": "yellow", 14 | "task_max_waiting_in_queue_millis": 12, 15 | "timed_out": false, 16 | "unassigned_shards": 5 17 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/clusterinfo/2.4.5.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster_name": "elasticsearch", 3 | "cluster_uuid": "3qps7bcWTqyzV49ApmPVfw", 4 | "name": "Mys-Tech", 5 | "tagline": "You Know, for Search", 6 | "version": { 7 | "build_hash": "c849dd13904f53e63e88efc33b2ceeda0b6a1276", 8 | "build_snapshot": false, 9 | "build_timestamp": "2017-04-24T16:18:17Z", 10 | "lucene_version": "5.5.4", 11 | "number": "2.4.5" 12 | } 13 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/clusterinfo/5.4.2.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster_name": "elasticsearch", 3 | "cluster_uuid": "kbqi7yhQT-WlPdGL2m0xJg", 4 | "name": "gOHPUga", 5 | "tagline": "You Know, for Search", 6 | "version": { 7 | "build_date": "2017-06-15T02:29:28.122Z", 8 | "build_hash": "929b078", 9 | "build_snapshot": false, 10 | "lucene_version": "6.5.1", 11 | "number": "5.4.2" 12 | } 13 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/clusterinfo/7.13.1.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "cluster_name": "docker-cluster", 3 | "cluster_uuid": "aCMrCY1VQpqJ6U4Sw_xdiw", 4 | "name": "e0630cfd8e1e", 5 | "tagline": "You Know, for Search", 6 | "version": { 7 | "build_date": "2021-05-28T17:40:59.346932922Z", 8 | "build_flavor": "default", 9 | "build_hash": "9a7758028e4ea59bcab41c12004603c5a7dd84a9", 10 | "build_snapshot": false, 11 | "build_type": "docker", 12 | "lucene_version": "8.8.2", 13 | "minimum_index_compatibility_version": "6.0.0-beta1", 14 | "minimum_wire_compatibility_version": "6.8.0", 15 | "number": "7.13.1" 16 | } 17 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/datastream/7.15.0.json: -------------------------------------------------------------------------------- 1 | { 2 | "_shards": { 3 | "failed": 0, 4 | "successful": 30, 5 | "total": 30 6 | }, 7 | "backing_indices": 7, 8 | "data_stream_count": 2, 9 | "data_streams": [ 10 | { 11 | "backing_indices": 5, 12 | "data_stream": "foo", 13 | "maximum_timestamp": 1656079894000, 14 | "store_size_bytes": 429205396 15 | }, 16 | { 17 | "backing_indices": 2, 18 | "data_stream": "bar", 19 | "maximum_timestamp": 1656028796000, 20 | "store_size_bytes": 673822720 21 | } 22 | ], 23 | "total_store_size_bytes": 1103028116 24 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/ilm_indices/6.6.0.json: -------------------------------------------------------------------------------- 1 | { 2 | "indices": { 3 | "facebook": { 4 | "action": "complete", 5 | "action_time_millis": 1660799138651, 6 | "index": "facebook", 7 | "lifecycle_date_millis": 1660799138565, 8 | "managed": true, 9 | "phase": "new", 10 | "phase_time_millis": 1660799138651, 11 | "policy": "my_policy", 12 | "step": "complete", 13 | "step_time_millis": 1660799138651 14 | }, 15 | "twitter": { 16 | "index": "twitter", 17 | "managed": false 18 | } 19 | } 20 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/ilm_status/6.6.0.json: -------------------------------------------------------------------------------- 1 | { 2 | "operation_mode": "RUNNING" 3 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/snapshots/1.7.6.json: -------------------------------------------------------------------------------- 1 | { 2 | "snapshots": [ 3 | { 4 | "duration_in_millis": 328, 5 | "end_time": "2018-09-04T09:09:02.755Z", 6 | "end_time_in_millis": 1536052142755, 7 | "failures": [], 8 | "indices": [ 9 | "foo_1", 10 | "foo_2" 11 | ], 12 | "shards": { 13 | "failed": 0, 14 | "successful": 10, 15 | "total": 10 16 | }, 17 | "snapshot": "snapshot_1", 18 | "start_time": "2018-09-04T09:09:02.427Z", 19 | "start_time_in_millis": 1536052142427, 20 | "state": "SUCCESS", 21 | "version": "1.7.6", 22 | "version_id": 1070699 23 | } 24 | ] 25 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/snapshots/2.4.5.json: -------------------------------------------------------------------------------- 1 | { 2 | "snapshots": [ 3 | { 4 | "duration_in_millis": 508, 5 | "end_time": "2018-09-04T09:25:26.326Z", 6 | "end_time_in_millis": 1536053126326, 7 | "failures": [], 8 | "indices": [ 9 | "foo_2", 10 | "foo_1" 11 | ], 12 | "shards": { 13 | "failed": 0, 14 | "successful": 10, 15 | "total": 10 16 | }, 17 | "snapshot": 
"snapshot_1", 18 | "start_time": "2018-09-04T09:25:25.818Z", 19 | "start_time_in_millis": 1536053125818, 20 | "state": "SUCCESS", 21 | "version": "2.4.5", 22 | "version_id": 2040599 23 | } 24 | ] 25 | } -------------------------------------------------------------------------------- /inputs/elasticsearch/fixtures/snapshots/5.4.2.json: -------------------------------------------------------------------------------- 1 | { 2 | "snapshots": [ 3 | { 4 | "duration_in_millis": 506, 5 | "end_time": "2018-09-04T09:29:14.477Z", 6 | "end_time_in_millis": 1536053354477, 7 | "failures": [], 8 | "indices": [ 9 | "foo_2", 10 | "foo_1" 11 | ], 12 | "shards": { 13 | "failed": 0, 14 | "successful": 10, 15 | "total": 10 16 | }, 17 | "snapshot": "snapshot_1", 18 | "start_time": "2018-09-04T09:29:13.971Z", 19 | "start_time_in_millis": 1536053353971, 20 | "state": "SUCCESS", 21 | "uuid": "VZ_c_kKISAW8rpcqiwSg0w", 22 | "version": "5.4.2", 23 | "version_id": 5040299 24 | } 25 | ] 26 | } -------------------------------------------------------------------------------- /inputs/exec/scripts/nginx/collect_nginx_conf_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 脚本用途:检测nginx配置是否异常 4 | # 告警条件:conf_status_code=1为异常 5 | 6 | # 监控指标名 7 | input_name="nginx" 8 | 9 | # 自定义标签 10 | cloud="my-cloud" 11 | region="my-region" 12 | azone="az1" 13 | product="my-product" 14 | 15 | nginx_service=$(/usr/sbin/nginx -t > /dev/null 2>&1) 16 | if [ $? -eq 0 ];then 17 | conf_status_code=0 18 | else 19 | conf_status_code=1 20 | fi 21 | 22 | echo "${input_name},cloud=${cloud},region=${region},azone=${azone},product=${product} conf_status_code=${conf_status_code}" 23 | -------------------------------------------------------------------------------- /inputs/exec/scripts/ssh/collect_ssh_conn_count.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 脚本用途:检测虚拟机登录用户数是否异常 4 | 5 | # 监控指标名 6 | input_name="system" 7 | 8 | # 自定义标签 9 | cloud="my-cloud" 10 | region="my-region" 11 | azone="az1" 12 | product="my-product" 13 | 14 | ssh_conn_count=`who | wc -l` 15 | echo "${input_name},cloud=${cloud},region=${region},azone=${azone},product=${product} ssh_conn_count=${ssh_conn_count}" 16 | -------------------------------------------------------------------------------- /inputs/filecount/filesystem_helpers.go: -------------------------------------------------------------------------------- 1 | package filecount 2 | 3 | import ( 4 | "io" 5 | "os" 6 | ) 7 | 8 | /* 9 | The code below is lifted from numerous articles and originates from Andrew Gerrand's 10 things you (probably) don't know about Go. 10 | It allows for mocking a filesystem; this allows for consistent testing of this code across platforms (directory sizes reported 11 | differently by different platforms, for example), while preserving the rest of the functionality as-is, without modification. 
12 | */ 13 | 14 | type fileSystem interface { 15 | Open(name string) (file, error) 16 | Stat(name string) (os.FileInfo, error) 17 | } 18 | 19 | type file interface { 20 | io.Closer 21 | io.Reader 22 | io.ReaderAt 23 | io.Seeker 24 | Stat() (os.FileInfo, error) 25 | } 26 | 27 | // osFS implements fileSystem using the local disk 28 | type osFS struct{} 29 | 30 | func (osFS) Open(name string) (file, error) { return os.Open(name) } 31 | func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) } 32 | -------------------------------------------------------------------------------- /inputs/gnmi/ieeefloat32.go: -------------------------------------------------------------------------------- 1 | package gnmi 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/binary" 6 | "fmt" 7 | "math" 8 | ) 9 | 10 | func string2float32(base64Str string) (float32, error) { 11 | // 解码Base64字符串 12 | decodedBytes, err := base64.StdEncoding.DecodeString(base64Str) 13 | if err != nil { 14 | return 0.0, fmt.Errorf("base64 string %s decode error: %v", base64Str, err) 15 | } 16 | 17 | // 确保解码后的字节切片长度为4(32位) 18 | if len(decodedBytes) != 4 { 19 | return 0.0, fmt.Errorf("length after decoding is not 4 bytes, data type is not float32") 20 | } 21 | 22 | // 将字节切片转换为uint32 23 | bits := binary.BigEndian.Uint32(decodedBytes) 24 | 25 | // 将uint32转换为float32 26 | return math.Float32frombits(bits), nil 27 | } 28 | 29 | func bytes2float32(bytes []uint8) (float32, error) { 30 | if len(bytes) != 4 { 31 | return 0.0, fmt.Errorf("length after decoding is not 4 bytes, data type is not float32") 32 | } 33 | 34 | // 将字节切片转换为uint32 35 | bits := binary.BigEndian.Uint32(bytes) 36 | 37 | // 将uint32转换为float32 38 | return math.Float32frombits(bits), nil 39 | } 40 | -------------------------------------------------------------------------------- /inputs/googlecloud/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # GCP 指标获取插件 4 | 需要权限 5 | ```toml 6 | https://www.googleapis.com/auth/monitoring.read 7 | ``` 8 | 9 | 配置 10 | ```toml 11 | #采集周期,建议 >= 1分钟 12 | interval=60 13 | [[instances]] 14 | #配置 project_id 15 | project_id="your-project-id" 16 | #配置认证的key文件 17 | credentials_file="/path/to/your/key.json" 18 | #或者配置认证的JSON 19 | credentials_json="xxx" 20 | 21 | # 指标的end time = now - delay 22 | #delay="2m" 23 | # 指标的start time = now - deley - period 24 | #period="1m" 25 | # 过滤器 26 | #filter="metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.labels.zone=\"asia-northeast1-a\"" 27 | # 请求超时时间 28 | #timeout="5s" 29 | # 指标列表的缓存时长 ,filter为空时 启用 30 | #cache_ttl="1h" 31 | 32 | # 给gce的instance_name 取个别名,放到label中 33 | #gce_host_tag="xxx" 34 | # 每次最多有多少请求同时发起 35 | #request_inflight=30 36 | 37 | # request_inflight 取值(0,100] 38 | # 想配置更大的值 ,前提是你知道你在做什么 39 | force_request_inflight= 200 40 | ``` 41 | -------------------------------------------------------------------------------- /inputs/googlecloud/gcp.go: -------------------------------------------------------------------------------- 1 | package googlecloud 2 | 3 | import ( 4 | "flashcat.cloud/categraf/config" 5 | "flashcat.cloud/categraf/inputs" 6 | ) 7 | 8 | const ( 9 | inputName = "googlecloud" 10 | ) 11 | 12 | type ( 13 | GoogleCloud struct { 14 | config.PluginConfig 15 | Instances []*Instance `toml:"instances"` 16 | } 17 | ) 18 | 19 | var _ inputs.Input = new(GoogleCloud) 20 | var _ inputs.InstancesGetter = new(GoogleCloud) 21 | 22 | func init() { 23 | inputs.Add(inputName, func() inputs.Input { 24 | return 
&GoogleCloud{} 25 | }) 26 | } 27 | 28 | func (g *GoogleCloud) Clone() inputs.Input { 29 | return &GoogleCloud{} 30 | } 31 | 32 | func (c *GoogleCloud) Name() string { 33 | return inputName 34 | } 35 | 36 | func (c *GoogleCloud) GetInstances() []inputs.Instance { 37 | ret := make([]inputs.Instance, len(c.Instances)) 38 | for i := 0; i < len(c.Instances); i++ { 39 | ret[i] = c.Instances[i] 40 | } 41 | return ret 42 | } 43 | 44 | func (c *GoogleCloud) Drop() { 45 | for i := 0; i < len(c.Instances); i++ { 46 | c.Instances[i].Drop() 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /inputs/googlecloud/internal/metrics.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "google.golang.org/genproto/googleapis/monitoring/v3" 5 | ) 6 | 7 | type ( 8 | Metric monitoring.TimeSeries 9 | ) 10 | -------------------------------------------------------------------------------- /inputs/hadoop_hdfs/README.md: -------------------------------------------------------------------------------- 1 | # hadoop-hdfs 2 | 3 | hadoop-hdfs can currently be monitored with the jolokia_agent plugin, which collects metrics by reading JMX data; see the sample configuration: [hadoop-hdfs.toml](../../conf/input.jolokia_agent_misc/hadoop-hdfs.toml) 4 | -------------------------------------------------------------------------------- /inputs/haproxy/README.md: -------------------------------------------------------------------------------- 1 | # HAProxy 2 | 3 | forked from [haproxy_exporter](https://github.com/prometheus/haproxy_exporter) 4 | 5 | Note: since HAProxy 2.0.0, the official source tree includes a Prometheus exporter module that can be compiled into the binary with a single build-time flag, exposing a native Prometheus endpoint with no separate exporter required.
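If you prefer that built-in exporter over this plugin, the HAProxy side is roughly the following sketch (directive names follow the upstream HAProxy documentation; the build-time object path differs between versions, e.g. contrib/prometheus-exporter in 2.0–2.3 vs. addons/promex in 2.4+, so check your source tree):

```
# haproxy.cfg: expose the bundled Prometheus exporter on :8405/metrics
frontend prometheus
    bind *:8405
    http-request use-service prometheus-exporter if { path /metrics }
    no log
```

categraf's prometheus input can then scrape http://<haproxy-host>:8405/metrics directly.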
6 | 7 | 8 | HAProxy configuration for `/stats`: 9 | 10 | ``` 11 | frontend stats 12 | bind *:8404 13 | stats enable 14 | stats uri /stats 15 | stats refresh 10s 16 | ``` -------------------------------------------------------------------------------- /inputs/http_response/README.md: -------------------------------------------------------------------------------- 1 | # http_response 2 | 3 | HTTP probing plugin for checking the reachability and latency of HTTP endpoints, as well as HTTPS certificate expiration time 4 | 5 | ## code meanings 6 | 7 | ``` 8 | Success = 0 9 | ConnectionFailed = 1 10 | Timeout = 2 11 | DNSError = 3 12 | AddressError = 4 13 | BodyMismatch = 5 14 | CodeMismatch = 6 15 | ``` 16 | 17 | ## Configuration 18 | 19 | The most important setting is targets, which lists the URLs to probe. For example, to monitor two endpoints: 20 | 21 | ```toml 22 | [[instances]] 23 | targets = [ 24 | "http://localhost:8080", 25 | "https://www.baidu.com" 26 | ] 27 | ``` 28 | 29 | All targets under one `[[instances]]` share that instance's settings (timeout, HTTP method, and so on). If some targets need different settings, split them into separate `[[instances]]` blocks, for example: 30 | 31 | ```toml 32 | [[instances]] 33 | targets = [ 34 | "http://localhost:8080", 35 | "https://www.baidu.com" 36 | ] 37 | method = "GET" 38 | 39 | [[instances]] 40 | targets = [ 41 | "http://localhost:9090" 42 | ] 43 | method = "POST" 44 | ``` 45 | 46 | ## Dashboard and alert rules 47 | 48 | In the same directory as this README, dashboard.json is the dashboard definition and alerts.json contains the alert rules; both can be imported into Nightingale. -------------------------------------------------------------------------------- /inputs/http_response/tls.go: -------------------------------------------------------------------------------- 1 | package http_response 2 | 3 | import ( 4 | "crypto/tls" 5 | "time" 6 | ) 7 | 8 | func getEarliestCertExpiry(state *tls.ConnectionState) time.Time { 9 | earliest := time.Time{} 10 | for _, cert := range state.PeerCertificates { 11 | if (earliest.IsZero() || cert.NotAfter.Before(earliest)) && !cert.NotAfter.IsZero() { 12 | earliest = cert.NotAfter 13 | } 14 | } 15 | return earliest 16 | } 17 | 18 | func getCertName(state *tls.ConnectionState) string { 19 | for _, cert := range state.PeerCertificates { 20 | if !cert.IsCA { 21 | return cert.Subject.CommonName 22 | } 23 | } 24 | return "" 25 | } 26 | -------------------------------------------------------------------------------- /inputs/ipmi/exporter/collector_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package exporter 5 | 6 | import ( 7 | "github.com/prometheus/client_golang/prometheus" 8 | ) 9 | 10 | type IPMIConfig struct { 11 | Timeout uint32 12 | } 13 | 14 | func Collect(ch chan<- prometheus.Metric, host, binPath string, config IPMIConfig, debugMod bool) { 15 | return 16 | } 17 | -------------------------------------------------------------------------------- /inputs/ipmi/ipmi.go: -------------------------------------------------------------------------------- 1 | package ipmi 2 | 3 | import ( 4 | "flashcat.cloud/categraf/config" 5 | "flashcat.cloud/categraf/inputs" 6 | ) 7 | 8 | const ( 9 | inputName = "ipmi" 10 | ) 11 | 12 | type Ipmi struct { 13 | config.PluginConfig 14 | Instances []*Instance `toml:"instances"` 15 | } 16 | 17 | func init() { 18 | inputs.Add(inputName, func() inputs.Input { 19 | return &Ipmi{} 20 | }) 21 | } 22 | 23 | func (i *Ipmi) Clone() inputs.Input { 24 | return &Ipmi{} 25 | } 26 | 27 | func (c *Ipmi) Name() string { 28 | return inputName 29 | } 30 | 31 | func (c *Ipmi) GetInstances() []inputs.Instance { 32 | ret := make([]inputs.Instance, len(c.Instances)) 33 | for i := 0; i < len(c.Instances); i++ { 34 | ret[i] = c.Instances[i] 35 | } 36 | return
ret 37 | } 38 | 39 | func (c *Ipmi) Drop() { 40 | for i := 0; i < len(c.Instances); i++ { 41 | c.Instances[i].Drop() 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /inputs/iptables/iptables_notlinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package iptables 5 | -------------------------------------------------------------------------------- /inputs/ipvs/ipvs.go: -------------------------------------------------------------------------------- 1 | package ipvs 2 | -------------------------------------------------------------------------------- /inputs/jboss/README.md: -------------------------------------------------------------------------------- 1 | # jboss 2 | 3 | jboss can currently be monitored with the jolokia_agent plugin, which collects metrics by reading JMX data; see the sample configuration: [jboss.toml](../../conf/input.jolokia_agent_misc/jboss.toml) 4 | -------------------------------------------------------------------------------- /inputs/kafka/exporter/scram_client.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "crypto/sha256" 5 | "crypto/sha512" 6 | "hash" 7 | 8 | "github.com/xdg/scram" 9 | ) 10 | 11 | var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } 12 | var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } 13 | 14 | type XDGSCRAMClient struct { 15 | *scram.Client 16 | *scram.ClientConversation 17 | scram.HashGeneratorFcn 18 | } 19 | 20 | func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { 21 | x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) 22 | if err != nil { 23 | return err 24 | } 25 | x.ClientConversation = x.Client.NewConversation() 26 | return nil 27 | } 28 | 29 | func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { 30 | response, err = x.ClientConversation.Step(challenge) 31 | return 32 | } 33 | 34 | func (x *XDGSCRAMClient) Done() bool { 35 | return x.ClientConversation.Done() 36 | } 37 | -------------------------------------------------------------------------------- /inputs/kafka_connect/README.md: -------------------------------------------------------------------------------- 1 | # kafka-connect 2 | 3 | kafka-connect can currently be monitored with the jolokia_agent plugin, which collects metrics by reading JMX data; see the sample configuration: [kafka-connect.toml](../../conf/input.jolokia_agent_misc/kafka-connect.toml) 4 | -------------------------------------------------------------------------------- /inputs/kernel/README.md: -------------------------------------------------------------------------------- 1 | # kernel 2 | 3 | Collects kernel-level information about the local host, such as OS boot time and the number of context switches 4 | 5 | ## Dashboard 6 | 7 | This plugin has no dedicated dashboard; the OS dashboards are consolidated under the system plugin -------------------------------------------------------------------------------- /inputs/kernel/kernel_notlinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package kernel 5 | -------------------------------------------------------------------------------- /inputs/kernel_vmstat/kernel_vmstat_notlinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package kernel_vmstat 5 | -------------------------------------------------------------------------------- /inputs/kube_state_metrics/kube-state-metrics-deploy.yaml:
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: kube-state-metrics 11 | template: 12 | metadata: 13 | labels: 14 | app: kube-state-metrics 15 | spec: 16 | serviceAccountName: kube-state-metrics 17 | containers: 18 | - name: kube-state-metrics 19 | image: quay.io/coreos/kube-state-metrics:v1.9.0 20 | ports: 21 | - containerPort: 8080 -------------------------------------------------------------------------------- /inputs/kube_state_metrics/kube-state-metrics-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | prometheus.io/scrape: 'true' 6 | prometheus.io/port: '8080' 7 | name: kube-state-metrics 8 | namespace: kube-system 9 | labels: 10 | app: kube-state-metrics 11 | spec: 12 | ports: 13 | - name: kube-state-metrics 14 | port: 8080 15 | protocol: TCP 16 | selector: 17 | app: kube-state-metrics -------------------------------------------------------------------------------- /inputs/kubernetes/kubernetes_pods.go: -------------------------------------------------------------------------------- 1 | package kubernetes 2 | 3 | type Pods struct { 4 | Kind string `json:"kind"` 5 | APIVersion string `json:"apiVersion"` 6 | Items []Item `json:"items"` 7 | } 8 | 9 | type Item struct { 10 | Metadata Metadata `json:"metadata"` 11 | } 12 | 13 | type Metadata struct { 14 | Name string `json:"name"` 15 | Namespace string `json:"namespace"` 16 | Labels map[string]string `json:"labels"` 17 | } 18 | -------------------------------------------------------------------------------- /inputs/linux_sysctl_fs/README.md: -------------------------------------------------------------------------------- 1 | # linux_sysctl_fs 2 | 3 | Collects a few values from /proc/sys/fs 4 | -------------------------------------------------------------------------------- /inputs/linux_sysctl_fs/linuxsysctlfsnotlinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package linux_sysctl_fs 5 | -------------------------------------------------------------------------------- /inputs/logstash/README.md: -------------------------------------------------------------------------------- 1 | # logstash 2 | 3 | logstash monitoring plugin, adapted from telegraf. 4 | 5 | ## Configuration 6 | 7 | See the sample [configuration](../../conf/input.logstash/logstash.toml) 8 | 9 | ## Dashboard and alert rules 10 | 11 | logstash-dash in this directory is a sample dashboard that can be imported into Nightingale directly. -------------------------------------------------------------------------------- /inputs/mem/README.md: -------------------------------------------------------------------------------- 1 | # mem 2 | 3 | Memory collection plugin; the default configuration is usually sufficient. 4 | 5 | ## Dashboard 6 | 7 | This plugin has no dedicated dashboard; the OS dashboards are consolidated under the system plugin -------------------------------------------------------------------------------- /inputs/mongodb/README.md: -------------------------------------------------------------------------------- 1 | # mongodb 2 | 3 | mongodb monitoring plugin, wrapped around mongodb-exporter (https://github.com/percona/mongodb_exporter). Versions v0.3.30–v0.3.42 were forked from [telegraf/mongodb](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mongodb). 4 | 5 | ## Configuration 6 | 7 | 8 | 9 | - Configuration file: see the [sample](../../conf/input.mongodb/mongodb.toml) 10 | - Permissions: the user configured to connect to MongoDB must be granted at least the following roles to collect metrics: 11 | ``` 12 | { 13 | "role":"clusterMonitor", 14 | "db":"admin" 15 | }, 16 | { 17 | "role":"read", 18 | "db":"local" 19 | } 20 | 21 | ``` 22 | A simple setup: 23 | ``` 24 | mongo -h xxx -u xxx -p xxx --authenticationDatabase admin 25 | > use admin 26 | > db.createUser({user:"categraf",pwd:"categraf",roles: [{role:"read",db:"local"},{"role":"clusterMonitor","db":"admin"}]}) 27 | ``` 28 | For more detailed permission configuration, see the [official documentation](https://www.mongodb.com/docs/manual/reference/built-in-roles/#mongodb-authrole-clusterMonitor) 29 | 30 | ## Dashboard and alert rules 31 | 32 | dashboard.json and alerts.json in this directory are the dashboard and alert rules; dashboard2.json is the dashboard for v0.3.30 and later. 33 | -------------------------------------------------------------------------------- /inputs/mtail/internal/exporter/json.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 3 | 4 | package exporter 5 | 6 | import ( 7 | "encoding/json" 8 | "expvar" 9 | "log" 10 | "net/http" 11 | ) 12 | 13 | var exportJSONErrors = expvar.NewInt("exporter_json_errors") 14 | 15 | // HandleJSON exports the metrics in JSON format via HTTP. 16 | func (e *Exporter) HandleJSON(w http.ResponseWriter, r *http.Request) { 17 | b, err := json.MarshalIndent(e.store, "", " ") 18 | if err != nil { 19 | exportJSONErrors.Add(1) 20 | log.Printf("error marshalling metrics into json:%s", err.Error()) 21 | http.Error(w, err.Error(), http.StatusInternalServerError) 22 | return 23 | } 24 | w.Header().Set("content-type", "application/json") 25 | if _, err := w.Write(b); err != nil { 26 | log.Println(err) 27 | http.Error(w, err.Error(), http.StatusInternalServerError) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /inputs/mtail/internal/logline/logline.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 3 | 4 | package logline 5 | 6 | import "context" 7 | 8 | // LogLine contains all the information about a line just read from a log. 9 | type LogLine struct { 10 | Context context.Context 11 | 12 | Filename string // The log filename that this line was read from 13 | Line string // The text of the log line itself up to the newline. 14 | } 15 | 16 | // New creates a new LogLine object. 17 | func New(ctx context.Context, filename string, line string) *LogLine { 18 | return &LogLine{ctx, filename, line} 19 | } 20 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/buildinfo.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 3 | 4 | package mtail 5 | 6 | import ( 7 | "fmt" 8 | "runtime" 9 | ) 10 | 11 | // BuildInfo records the compile-time information for use when reporting the mtail version.
12 | type BuildInfo struct { 13 | Branch string 14 | Version string 15 | Revision string 16 | } 17 | 18 | func (b BuildInfo) String() string { 19 | return fmt.Sprintf( 20 | "mtail version %s git revision %s go version %s go arch %s go os %s", 21 | b.Version, 22 | b.Revision, 23 | runtime.Version(), 24 | runtime.GOARCH, 25 | runtime.GOOS, 26 | ) 27 | } 28 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/golden/reader_test.golden: -------------------------------------------------------------------------------- 1 | counter bytes_total {operation=sent} 62793673 2011-02-23T05:54:10Z 2 | counter bytes_total {operation=received} 975017 2011-02-23T05:54:10Z 3 | counter connections_total 52 2011-02-22T21:54:13Z 4 | counter connection-time_total 1181011 2011-02-23T05:54:10Z 5 | counter transfers_total {operation=send,module=module} 2 2011-02-23T05:50:32Z 6 | counter transfers_total {operation=send,module=repo} 25 2011-02-23T05:51:14Z 7 | gauge foo {label=} 8 | counter bar 9 | gauge floaty 37.1 2017-06-15T18:09:37Z 10 | text stringy hi 2018-06-16T18:04:00Z 11 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/logo.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/inputs/mtail/internal/mtail/logo.ico -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/README: -------------------------------------------------------------------------------- 1 | This directory contains test input (.log) and expected output (.golden) files 2 | for the ex_test.go test program. It ensures that the example programs provided 3 | compile and run and generate the expected output. 4 | 5 | The golden file format is read by testutil/reader.go. 6 | 7 | All files in this directory are licensed under the Apache License 2.0. 
8 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/apache-combined.golden: -------------------------------------------------------------------------------- 1 | counter apache_http_bytes_total {request_method=GET,http_version=HTTP/1.1,request_status=200} 2602 2018-03-23T12:31:04.000000000Z 2 | counter apache_http_requests_total {request_method=GET,http_version=HTTP/1.1,request_status=200} 4 2018-03-23T12:31:04.000000000Z 3 | counter apache_http_requests_total {request_method=GET,http_version=HTTP/1.1,request_status=304} 1 2018-03-23T12:31:05.000000000Z 4 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/lighttpd_accesslog.golden: -------------------------------------------------------------------------------- 1 | counter request {status=200} 13 2010-04-08T20:43:27-07:00 2 | counter request {status=304} 3 2010-04-08T20:43:20-07:00 3 | counter time_taken {status=200} 15 2010-04-08T20:43:27-07:00 4 | counter time_taken {status=304} 4 2010-04-08T20:43:20-07:00 5 | counter bytes_out {subtotal=resp_body,status=200} 2338666 2010-04-08T20:43:27-07:00 6 | counter bytes_out {subtotal=resp_header,status=200} 3560 2010-04-08T20:43:27-07:00 7 | counter bytes_out {subtotal=resp_body,status=304} 0 2010-04-08T20:43:20-07:00 8 | counter bytes_out {subtotal=resp_header,status=304} 841 2010-04-08T20:43:20-07:00 9 | counter bytes_in {status=200} 4770 2010-04-08T20:43:27-07:00 10 | counter bytes_in {status=304} 1091 2010-04-08T20:43:20-07:00 11 | counter requests {proxy_cache=192.0.2.5} 16 2010-04-08T20:43:27-07:00 12 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/ntp4.golden: -------------------------------------------------------------------------------- 1 | counter int_syscalls 2 | counter recvbuf_overflows 3 | gauge last_recvbuf 4 | counter exits 1 0000-06-30T12:07:12Z 5 | counter starts 1 0000-06-30T12:04:43Z 6 | gauge sync_status 1 0000-12-01T17:42:09Z 7 | counter pll_changes 8 | gauge pll_status 9 | counter peer_syncs 1 0000-06-30T12:04:51Z 10 | counter driftfile_errors 11 | counter sync_lost_total 12 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 1s 3 | 4 | scrape_configs: 5 | - job_name: 'mtail' 6 | static_configs: 7 | - targets: ['localhost:3903'] 8 | 9 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/rsyncd.golden: -------------------------------------------------------------------------------- 1 | counter bytes_total {operation=sent} 62793673 2011-02-23T05:54:10Z 2 | counter bytes_total {operation=received} 975017 2011-02-23T05:54:10Z 3 | counter connections_total 52 2011-02-22T21:54:13Z 4 | counter connection-time_total 1181011 2011-02-23T05:54:10Z 5 | counter transfers_total {operation=send,module=module} 2 2011-02-23T05:50:32Z 6 | counter transfers_total {operation=send,module=repo} 25 2011-02-23T05:51:14Z 7 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/vsftpd_xferlog.golden: -------------------------------------------------------------------------------- 1 | counter transfers {direction=incoming} 10 2011-02-21T15:41:15Z 2 | counter 
transfer_time {direction=incoming} 10 2011-02-21T15:41:15Z 3 | counter bytes_transferred {direction=incoming} 6404 2011-02-21T15:41:15Z 4 | counter connects 5 | counter logins 6 | counter uploads 7 | counter session_time 8 | counter commands {command=} 9 | counter responses {response=} 10 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/xntp3_peerstats: -------------------------------------------------------------------------------- 1 | 54695 7690.466 64.113.32.5 93b4 0.002345 0.01001 0.00090 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/mtail/testdata/xntp3_peerstats.golden: -------------------------------------------------------------------------------- 1 | gauge peer_status {peer=64.113.32.5} 18 2008-08-17T02:08:10Z 2 | gauge peer_select {peer=64.113.32.5} 3 2008-08-17T02:08:10Z 3 | gauge peer_count {peer=64.113.32.5} 11 2008-08-17T02:08:10Z 4 | gauge peer_code {peer=64.113.32.5} 4 2008-08-17T02:08:10Z 5 | gauge peer_offset {peer=64.113.32.5} 0.002345 2008-08-17T02:08:10Z 6 | gauge peer_delay {peer=64.113.32.5} 0.01001 2008-08-17T02:08:10Z 7 | gauge peer_dispersion {peer=64.113.32.5} 0.00090 2008-08-17T02:08:10Z 8 | counter num_peerstats {peer=64.113.32.5} 1 2008-08-17T02:08:10Z 9 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/code/instr.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 3 | 4 | // Package code contains the bytecode instructions for the mtail virtual machine. 5 | package code 6 | 7 | import "fmt" 8 | 9 | type Instr struct { 10 | Opcode Opcode 11 | Operand interface{} 12 | SourceLine int // Line number of the original source file, zero-based numbering. 13 | } 14 | 15 | // debug print for instructions. 16 | func (i Instr) String() string { 17 | return fmt.Sprintf("{%s %v %d}", opNames[i.Opcode], i.Operand, i.SourceLine) 18 | } 19 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/code/object.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 3 | 4 | package code 5 | 6 | import ( 7 | "regexp" 8 | 9 | "flashcat.cloud/categraf/inputs/mtail/internal/metrics" 10 | ) 11 | 12 | // Object is the data and bytecode resulting from compiled program source. 13 | type Object struct { 14 | Program []Instr // The program bytecode. 15 | Strings []string // Static strings. 16 | Regexps []*regexp.Regexp // Static regular expressions. 17 | Metrics []*metrics.Metric // Metrics accessible to this program. 18 | } 19 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/compiler/fuzz/const-as-cond.mtail: -------------------------------------------------------------------------------- 1 | const A /n/ 2 | A { 3 | } 4 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/compiler/types/regexp.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 
3 | 4 | package types 5 | 6 | import ( 7 | "regexp/syntax" 8 | ) 9 | 10 | // ParseRegexp ensures we use the same regexp syntax.Flags across all 11 | // invocations of this method. 12 | func ParseRegexp(pattern string) (re *syntax.Regexp, err error) { 13 | re, err = syntax.Parse(pattern, syntax.Perl) 14 | if err != nil { 15 | return 16 | } 17 | re = re.Simplify() 18 | return 19 | } 20 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/1.mtail: -------------------------------------------------------------------------------- 1 | 1{} 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/284.mtail: -------------------------------------------------------------------------------- 1 | counter c by x 2 | /"(?P\S+)"/ + 3 | /$/ { 4 | c[$x]++ 5 | } 6 | ␤ 7 | "a" 8 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/capref-double-regexp-in-cond.mtail: -------------------------------------------------------------------------------- 1 | 0||0||//||/;0/{$0||//||/;0/{}} 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/cmp-to-none.mtail: -------------------------------------------------------------------------------- 1 | strptime("","")<5{} 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/const-a.mtail: -------------------------------------------------------------------------------- 1 | const A /n/ 2 | A { 3 | } 4 | 5 | A && 1 { 6 | } 7 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/const-as-cond.mtail: -------------------------------------------------------------------------------- 1 | const A /n/ 2 | A { 3 | } 4 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/const-unused.mtail: -------------------------------------------------------------------------------- 1 | const l /l/ + /f/ 2 | l 3 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/datum-string-concat.mtail: -------------------------------------------------------------------------------- 1 | text l 2 | l+=l 3 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/len.mtail: -------------------------------------------------------------------------------- 1 | len(2) 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/match-01e1.mtail: -------------------------------------------------------------------------------- 1 | 882=~01e1{} 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/match-str.mtail: -------------------------------------------------------------------------------- 1 | (0=~"") 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/match-to-int.mtail: -------------------------------------------------------------------------------- 1 | 6=~0{} 2 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/negate-none.mtail: 
-------------------------------------------------------------------------------- 1 | ~strptime("",""){} 2 | ␤ 3 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/retval-from-dec.mtail: -------------------------------------------------------------------------------- 1 | timer l 2 | l-- - l { 3 | } 4 | -------------------------------------------------------------------------------- /inputs/mtail/internal/runtime/fuzz/uninitialised.mtail: -------------------------------------------------------------------------------- 1 | gauge time/()/{time=$1 2 | }settime(time) 3 | -------------------------------------------------------------------------------- /inputs/mtail/internal/tailer/logstream/base.go: -------------------------------------------------------------------------------- 1 | // Copyright 2024 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 3 | 4 | package logstream 5 | 6 | import ( 7 | "flashcat.cloud/categraf/inputs/mtail/internal/logline" 8 | ) 9 | 10 | type streamBase struct { 11 | sourcename string // human readable name of the logstream source 12 | 13 | lines chan *logline.LogLine // outbound channel for lines 14 | } 15 | 16 | // Lines returns the output log line channel for this stream. The stream is 17 | // completed when this channel closes. 18 | func (s *streamBase) Lines() <-chan *logline.LogLine { 19 | return s.lines 20 | } 21 | -------------------------------------------------------------------------------- /inputs/mtail/internal/waker/waker.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Google Inc. All Rights Reserved. 2 | // This file is available under the Apache license. 3 | 4 | package waker 5 | 6 | // A Waker is used to signal to idle routines it's time to look for new work. 7 | type Waker interface { 8 | // Wake returns a channel that's closed when the idle routine should wake up. 9 | Wake() <-chan struct{} 10 | } 11 | -------------------------------------------------------------------------------- /inputs/mtail/timestamp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/inputs/mtail/timestamp.png -------------------------------------------------------------------------------- /inputs/mtail/timezone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/inputs/mtail/timezone.png -------------------------------------------------------------------------------- /inputs/mysql/processlist_by_user.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "database/sql" 5 | "log" 6 | 7 | "flashcat.cloud/categraf/pkg/tagx" 8 | "flashcat.cloud/categraf/types" 9 | ) 10 | 11 | func (ins *Instance) gatherProcesslistByUser(slist *types.SampleList, db *sql.DB, globalTags map[string]string) { 12 | if !ins.GatherProcessListProcessByUser { 13 | return 14 | } 15 | 16 | rows, err := db.Query(SQL_INFO_SCHEMA_PROCESSLIST_BY_USER) 17 | if err != nil { 18 | log.Println("E! 
failed to get processlist:", err) 19 | return 20 | } 21 | 22 | defer rows.Close() 23 | 24 | labels := tagx.Copy(globalTags) 25 | 26 | for rows.Next() { 27 | var user string 28 | var connections int64 29 | 30 | err = rows.Scan(&user, &connections) 31 | if err != nil { 32 | log.Println("E! failed to scan rows:", err) 33 | return 34 | } 35 | 36 | slist.PushFront(types.NewSample(inputName, "processlist_processes_by_user", connections, labels, map[string]string{"user": user})) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /inputs/mysql/schema_size.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "database/sql" 5 | "log" 6 | 7 | "flashcat.cloud/categraf/pkg/tagx" 8 | "flashcat.cloud/categraf/types" 9 | ) 10 | 11 | func (ins *Instance) gatherSchemaSize(slist *types.SampleList, db *sql.DB, globalTags map[string]string) { 12 | if !ins.GatherSchemaSize { 13 | return 14 | } 15 | 16 | rows, err := db.Query(SQL_QUERY_SCHEMA_SIZE) 17 | if err != nil { 18 | log.Println("E! failed to get schema size of", ins.Address, err) 19 | return 20 | } 21 | 22 | defer rows.Close() 23 | 24 | labels := tagx.Copy(globalTags) 25 | 26 | for rows.Next() { 27 | var schema string 28 | var size int64 29 | 30 | err = rows.Scan(&schema, &size) 31 | if err != nil { 32 | log.Println("E! failed to scan rows of", ins.Address, err) 33 | return 34 | } 35 | 36 | slist.PushFront(types.NewSample(inputName, "schema_size_bytes", size, labels, map[string]string{"schema": schema})) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /inputs/net/README.md: -------------------------------------------------------------------------------- 1 | # net 2 | 3 | Network traffic monitoring plugin: per-interface traffic, packet counts, packet errors, and so on 4 | 5 | ## Configuration 6 | 7 | The default configuration is usually fine. If there are interfaces you do not want to collect, or you only want specific ones, list them with the interfaces option. 8 | 9 | ## Dashboard 10 | 11 | This plugin has no dedicated dashboard; the OS dashboards are consolidated under the system plugin -------------------------------------------------------------------------------- /inputs/net/speed_nolinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | 3 | package net 4 | 5 | func Speed(iface string) (int64, error) { 6 | return 0, nil 7 | } 8 | -------------------------------------------------------------------------------- /inputs/net_response/README.md: -------------------------------------------------------------------------------- 1 | # net_response 2 | 3 | Network probing plugin, typically used to check whether a local port is listening or whether a remote port is reachable 4 | 5 | ## code meanings 6 | 7 | - 0: Success 8 | - 1: Timeout 9 | - 2: ConnectionFailed 10 | - 3: ReadFailed 11 | - 4: StringMismatch 12 | 13 | ## Configuration 14 | 15 | The most important setting is targets, which lists the endpoints to probe. For example: 16 | 17 | ```toml 18 | [[instances]] 19 | targets = [ 20 | "10.2.3.4:22", 21 | "localhost:6379", 22 | ":9090" 23 | ] 24 | ``` 25 | 26 | - `10.2.3.4:22` probes whether port 22 on host 10.2.3.4 is reachable 27 | - `localhost:6379` probes whether local port 6379 is reachable 28 | - `:9090` probes whether local port 9090 is reachable 29 | 30 | The metric or alert event only carries an IP and a port, so whoever receives the alert may not know which business module it belongs to. You can attach more meaningful information as labels, as in this example: 31 | 32 | ```toml 33 | labels = { region="cloud", product="n9e" } 34 | ``` 35 | 36 | This marks the target as belonging to the cloud region and the n9e product; both labels are attached to the time series and therefore show up in alerts as well. 37 | 38 | ## Dashboard and alert rules 39 | 40 | In the same directory as this README, dashboard.json is the dashboard definition and alerts.json contains the alert rules; both can be imported into Nightingale. -------------------------------------------------------------------------------- /inputs/net_response/alerts.json:
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "网络地址探活失败", 4 | "note": "", 5 | "severity": 2, 6 | "disabled": 0, 7 | "prom_for_duration": 60, 8 | "prom_ql": "net_response_result_code != 0", 9 | "prom_eval_interval": 15, 10 | "enable_stime": "00:00", 11 | "enable_etime": "23:59", 12 | "enable_days_of_week": [ 13 | "1", 14 | "2", 15 | "3", 16 | "4", 17 | "5", 18 | "6", 19 | "0" 20 | ], 21 | "enable_in_bg": 0, 22 | "notify_recovered": 1, 23 | "notify_channels": [], 24 | "notify_repeat_step": 60, 25 | "recover_duration": 0, 26 | "callbacks": [], 27 | "runbook_url": "", 28 | "append_tags": [] 29 | } 30 | ] -------------------------------------------------------------------------------- /inputs/netstat/README.md: -------------------------------------------------------------------------------- 1 | # netstat 2 | 3 | This plugin collects network connection statistics, such as how many connections are in time_wait or established state 4 | 5 | # Dashboard 6 | 7 | This plugin has no dedicated dashboard; the OS dashboards are consolidated under the system plugin -------------------------------------------------------------------------------- /inputs/netstat/ext.go: -------------------------------------------------------------------------------- 1 | package netstat 2 | 3 | import ( 4 | "path/filepath" 5 | "strconv" 6 | ) 7 | 8 | // ProcNetstat models the content of /proc/<pid>/net/netstat. 9 | type ProcNetstat struct { 10 | // The process ID. 11 | PID int 12 | TcpExt map[string]interface{} 13 | IpExt map[string]interface{} 14 | } 15 | 16 | // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an 17 | // interface to kernel data structures. 18 | type FS string 19 | 20 | // Path appends the given path elements to the filesystem path, adding separators 21 | // as necessary. 22 | func (fs FS) Path(p ...string) string { 23 | return filepath.Join(append([]string{string(fs)}, p...)...) 24 | } 25 | 26 | // Proc provides information about a running process. 27 | type Proc struct { 28 | // The process ID. 29 | PID int 30 | 31 | fs FS 32 | } 33 | 34 | func (p Proc) path(pa ...string) string { 35 | if p.PID == 0 { 36 | return p.fs.Path(pa...) 37 | } 38 | return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
39 | } 40 | -------------------------------------------------------------------------------- /inputs/netstat/ext_notlinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | 3 | package netstat 4 | 5 | func (p Proc) Netstat() (*ProcNetstat, error) { 6 | return nil, nil 7 | } 8 | -------------------------------------------------------------------------------- /inputs/netstat_filter/README.md: -------------------------------------------------------------------------------- 1 | # netstat_filter 2 | 3 | This plugin collects TCP connection information and filters/aggregates it by user-defined conditions, so you can monitor exactly the connections you care about 4 | ## Metrics 5 | tcp_established 6 | tcp_syn_sent 7 | tcp_syn_recv 8 | tcp_fin_wait1 9 | tcp_fin_wait2 10 | tcp_time_wait 11 | tcp_close 12 | tcp_close_wait 13 | tcp_last_ack 14 | tcp_listen 15 | tcp_closing 16 | tcp_none 17 | tcp_send_queue 18 | tcp_recv_queue 19 | 20 | ## How it works 21 | After filtering by source IP, source port, destination IP and destination port, the plugin collects the Recv-Q and Send-Q of the matching sockets. These values reflect the quality of a specific connection very well: for example, when the RTT is too high and the server's ACKs arrive slowly, Send-Q stays above 0 for a long time. Monitoring this lets you spot the problem early and optimize the network or the application in time. 22 | 23 | When the filter matches multiple connections, their send and recv values are summed. 24 | For example: 25 | with ``raddr_port = 11883`` in the configuration, 26 | if the local host has connections to port 11883 on several different IPs, the results of all of those connections are added together; the same happens with many concurrent connections. In short, the coarser the filter, the more connections get summed into one value. 27 | 28 | To configure multiple rules, duplicate the ``[[instances]]`` block. 29 | 30 | ## Notes 31 | The netstat_filter_tcp_send_queue and netstat_filter_tcp_recv_queue metrics currently only support Linux; on Windows they default to 0. 32 | -------------------------------------------------------------------------------- /inputs/netstat_filter/entry.go: -------------------------------------------------------------------------------- 1 | package netstat 2 | 3 | import "net" 4 | 5 | // Entry holds the information of a /proc/net/* entry. 6 | // For example, /proc/net/tcp: 7 | // sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 8 | // 0: 0100007F:13AD 00000000:0000 0A 00000000:00000000 00:00000000 00000000 1000 0 18083222 9 | type Entry struct { 10 | Proto string 11 | SrcIP net.IP 12 | SrcPort uint 13 | DstIP net.IP 14 | DstPort uint 15 | Txq uint 16 | Rxq uint 17 | UserId int 18 | INode int 19 | } 20 | 21 | // NewEntry creates a new entry with values from /proc/net/ 22 | func NewEntry(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint, txq uint, rxq uint, userId int, iNode int) Entry { 23 | return Entry{ 24 | Proto: proto, 25 | SrcIP: srcIP, 26 | SrcPort: srcPort, 27 | DstIP: dstIP, 28 | DstPort: dstPort, 29 | Txq: txq, 30 | Rxq: rxq, 31 | UserId: userId, 32 | INode: iNode, 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /inputs/netstat_filter/netstat_tcp_filter.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package netstat 5 | 6 | import ( 7 | "fmt" 8 | "net" 9 | ) 10 | 11 | func FilterEntries(entries []Entry, srcIP string, srcPort uint32, dstIP string, dstPort uint32) map[string]struct { 12 | Txq int 13 | Rxq int 14 | } { 15 | result := make(map[string]struct { 16 | Txq int 17 | Rxq int 18 | }) 19 | 20 | for _, entry := range entries { 21 | // Check whether the source IP, source port, destination IP and destination port match the given filters 22 | if (len(srcIP) == 0 || entry.SrcIP.Equal(net.ParseIP(srcIP))) && 23 | (srcPort == 0 || entry.SrcPort == uint(srcPort)) && 24 | (len(dstIP) == 0 || entry.DstIP.Equal(net.ParseIP(dstIP))) && 25 | (dstPort == 0 || entry.DstPort == uint(dstPort)) { 26 | // Build a unique key for this filter combination 27 | key := fmt.Sprintf("%s-%d-%s-%d", srcIP, srcPort, dstIP, dstPort) 28 | // Copy the current value so its fields can be updated 29 | temp := result[key] 30 | temp.Txq += int(entry.Txq) 31 | temp.Rxq += int(entry.Rxq) 32 | // Store the updated value back into result[key] 33 | result[key] = temp 34 | } 35 | } 36 | return result 37 | } 38 | -------------------------------------------------------------------------------- /inputs/netstat_filter/netstat_tcp_filter_nolinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package netstat 5 | 6 | func FilterEntries(entries []Entry, srcIP string, srcPort uint32, dstIP string, dstPort uint32) map[string]struct { 7 | Txq int 8 | Rxq int 9 | } { 10 | result := make(map[string]struct { 11 | Txq int 12 | Rxq int 13 | }) 14 | return result 15 | } 16 | -------------------------------------------------------------------------------- /inputs/netstat_filter/netstat_tcp_nolinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package netstat 5 | 6 | func Parse(proto string) ([]Entry, error) { 7 | entries := make([]Entry, 0) 8 | 9 | entries = append(entries, NewEntry( 10 | proto, 11 | nil, 12 | 0, 13 | nil, 14 | 0, 15 | 0, 16 | 0, 17 | 0, 18 | 0, 19 | )) 20 | 21 | return entries, nil 22 | } 23 | -------------------------------------------------------------------------------- /inputs/nginx_vts/README.md: -------------------------------------------------------------------------------- 1 | # nginx_vts 2 | 3 | nginx_vts can already expose metrics in prometheus format, so this collection plugin is no longer really needed: simply use categraf's prometheus input to scrape the prometheus output of nginx_vts. 4 | 5 | ## Configuration 6 | 7 | Assuming the nginx_vts module is exposed under `/vts`, requesting `/vts/format/prometheus` returns the prometheus metrics. 8 | 9 | ## Dashboard 10 | 11 | https://github.com/flashcatcloud/categraf/blob/main/inputs/nginx_vts/dashboards.json 12 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/collector_darwin.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | func paramsInit(params map[string]string) { 4 | pathInit(params) 5 | diskstatsCollectorInit(params) 6 | filesystemCollectorInit(params) 7 | netDevCollectorInit(params) 8 | ntpCollectorInit(params) 9 | powerSupplyClassCollectorInit(params) 10 | runitCollectorInit(params) 11 | supervisordCollectorInit(params) 12 | textFileCollectorInit(params) 13 | fileCollectorInit(params) 14 | crontabCollectorInit(params) 15 | } 16 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/collector_linux.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | func paramsInit(params map[string]string) { 4 | pathInit(params) 5 | arpCollectorInit(params) 6 | bcacheCollectorInit(params) 7 | cpuCollectorInit(params) 8 | diskstatsCollectorInit(params) 9 | ethtoolCollectorInit(params) 10 | filesystemCollectorInit(params) 11 | hwMonCollectorInit(params) 12 | ipvsCollectorInit(params) 13 | netClassCollectorInit(params) 14 | netDevCollectorInit(params) 15 | netStatCollectorInit(params) 16 | ntpCollectorInit(params) 17 | perfCollectorInit(params) 18 | powerSupplyClassCollectorInit(params) 19 | qdiscStatCollectorInit(params) 20 | raplCollectorInit(params) 21 | runitCollectorInit(params) 22 | statCollectorInit(params) 23 | supervisordCollectorInit(params) 24 | sysctlCollectorInit(params) 25 | systemdCollectorInit(params) 26 | tapestatsCollectorInit(params) 27 | textFileCollectorInit(params) 28 | vmStatCollectorInit(params) 29 | wifiCollectorInit(params) 30 |
fileCollectorInit(params) 31 | crontabCollectorInit(params) 32 | } 33 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/collector_windows.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | func paramsInit(params map[string]string) { 4 | pathInit(params) 5 | ntpCollectorInit(params) 6 | runitCollectorInit(params) 7 | supervisordCollectorInit(params) 8 | textFileCollectorInit(params) 9 | fileCollectorInit(params) 10 | crontabCollectorInit(params) 11 | } 12 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/ethtool/bond0/statistics: -------------------------------------------------------------------------------- 1 | ERROR: 1 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/ethtool/eth0/driver: -------------------------------------------------------------------------------- 1 | # ethtool -i eth0 2 | driver: e1000e 3 | version: 5.11.0-22-generic 4 | firmware-version: 0.5-4 5 | expansion-rom-version: 6 | bus-info: 0000:00:1f.6 7 | supports-statistics: yes 8 | supports-test: yes 9 | supports-eeprom-access: yes 10 | supports-register-dump: yes 11 | supports-priv-flags: yes 12 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/ethtool/eth0/settings: -------------------------------------------------------------------------------- 1 | # ethtool eth0 2 | Settings for eth0: 3 | Supported ports: [ TP MII ] 4 | Supported link modes: 10baseT/Half 10baseT/Full 5 | 100baseT/Half 100baseT/Full 6 | 1000baseT/Full 10000baseT/Full 7 | Supported pause frame use: Symmetric 8 | Supports auto-negotiation: Yes 9 | Supported FEC modes: Not reported 10 | Advertised link modes: 10baseT/Half 10baseT/Full 11 | 100baseT/Half 100baseT/Full 12 | 1000baseT/Full 13 | Advertised pause frame use: Symmetric 14 | Advertised auto-negotiation: Yes 15 | Advertised FEC modes: Not reported 16 | Speed: 1000Mb/s 17 | Duplex: Full 18 | Auto-negotiation: on 19 | Port: Twisted Pair 20 | PHYAD: 1 21 | Transceiver: internal 22 | MDI-X: off (auto) 23 | netlink error: Operation not permitted 24 | Current message level: 0x00000007 (7) 25 | drv probe link 26 | Link detected: yes 27 | 28 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/ethtool/eth0/statistics: -------------------------------------------------------------------------------- 1 | # ethtool -S eth0 2 | NIC statistics: 3 | tx_packets: 961500 4 | rx_packets: 1260062 5 | tx_errors: 0 6 | rx_errors: 0 7 | rx_missed: 401 8 | align_errors: 0 9 | tx_single_collisions: 0 10 | tx_multi_collisions: 0 11 | rx_unicast: 1230297 12 | rx_broadcast: 5792 13 | rx_multicast: 23973 14 | tx_aborted: 0 15 | tx_underrun: 0 16 | duplicate metric: 1 17 | duplicate_metric: 2 18 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/1/stat: -------------------------------------------------------------------------------- 1 | 1 (systemd) S 0 1 1 0 -1 4194560 9061 9416027 94 2620 36 98 54406 13885 20 0 1 0 29 109604864 2507 18446744073709551615 1 1 0 0 0 0 671173123 4096 1260 0 0 0 17 0 0 0 19 0 0 0 0 0 0 0 0 0 0 2 | -------------------------------------------------------------------------------- 
/inputs/node_exporter/collector/fixtures/proc/10/stat: -------------------------------------------------------------------------------- 1 | 17 (khungtaskd) S 2 0 0 0 -1 2129984 0 0 0 0 14 0 0 0 20 0 1 0 24 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/11/.missing_stat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/inputs/node_exporter/collector/fixtures/proc/11/.missing_stat -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/11/stat: -------------------------------------------------------------------------------- 1 | 11 (rcu_preempt) I 2 0 0 0 -1 2129984 0 0 0 0 0 346 0 0 -2 0 1 0 32 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 1 1 0 0 0 0 0 0 0 0 0 0 0 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/buddyinfo: -------------------------------------------------------------------------------- 1 | Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 2 | Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 3 | Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 4 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/cgroups: -------------------------------------------------------------------------------- 1 | #subsys_name hierarchy num_cgroups enabled 2 | cpuset 5 47 1 3 | cpu 3 172 1 4 | cpuacct 3 172 1 5 | blkio 6 170 1 6 | memory 7 234 1 7 | devices 11 170 1 8 | freezer 9 47 1 9 | net_cls 2 47 1 10 | perf_event 8 47 1 11 | hugetlb 12 47 1 12 | pids 10 170 1 13 | rdma 4 1 1 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/drbd: -------------------------------------------------------------------------------- 1 | version: 8.4.3 (api:1/proto:86-101) 2 | srcversion: 1A9F77B1CA5FF92235C2213 3 | 4 | 1: cs:Connected ro:Primary/Primary ds:UpToDate/UpToDate C r----- 5 | ns:17324442 nr:10961011 dw:28263521 dr:118696670 al:1100 bm:221 lo:12345 pe:12346 ua:12347 ap:12348 ep:1 wo:d oos:12349 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/loadavg: -------------------------------------------------------------------------------- 1 | 0.21 0.37 0.39 1/719 19737 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/arp: -------------------------------------------------------------------------------- 1 | IP address HW type Flags HW address Mask Device 2 | 192.168.1.1 0x1 0x2 cc:aa:dd:ee:aa:bb * eth0 3 | 192.168.1.2 0x1 0x2 bb:cc:dd:ee:ff:aa * eth0 4 | 192.168.1.3 0x1 0x2 aa:bb:cc:dd:ee:ff * eth0 5 | 192.168.1.4 0x1 0x2 dd:ee:ff:aa:bb:cc * eth1 6 | 192.168.1.5 0x1 0x2 ee:ff:aa:bb:cc:dd * eth1 7 | 192.168.1.6 0x1 0x2 ff:aa:bb:cc:dd:ee * eth1 8 | 10.0.0.1 0x1 0x2 de:ad:be:ef:00:00 * nope 9 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/ip_vs: -------------------------------------------------------------------------------- 1 | IP Virtual Server version 1.2.1 
(size=4096) 2 | Prot LocalAddress:Port Scheduler Flags 3 | -> RemoteAddress:Port Forward Weight ActiveConn InActConn 4 | TCP C0A80016:0CEA wlc 5 | -> C0A85216:0CEA Tunnel 100 248 2 6 | -> C0A85318:0CEA Tunnel 100 248 2 7 | -> C0A85315:0CEA Tunnel 100 248 1 8 | TCP C0A80039:0CEA wlc 9 | -> C0A85416:0CEA Tunnel 0 0 0 10 | -> C0A85215:0CEA Tunnel 100 1499 0 11 | -> C0A83215:0CEA Tunnel 100 1498 0 12 | TCP C0A80037:0CEA wlc 13 | -> C0A8321A:0CEA Tunnel 0 0 0 14 | -> C0A83120:0CEA Tunnel 100 0 0 15 | FWM 10001000 wlc 16 | -> C0A8321A:0CEA Tunnel 20 64 1 17 | -> C0A83120:0CEA Tunnel 100 321 5 18 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/ip_vs_stats: -------------------------------------------------------------------------------- 1 | Total Incoming Outgoing Incoming Outgoing 2 | Conns Packets Packets Bytes Bytes 3 | 16AA370 E33656E5 0 51D8C8883AB3 0 4 | 5 | Conns/s Pkts/s Pkts/s Bytes/s Bytes/s 6 | 4 1FB3C 0 1282A8F 0 7 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/rpc/nfs: -------------------------------------------------------------------------------- 1 | net 70 70 69 45 2 | rpc 1218785755 374636 1218815394 3 | proc2 18 16 57 74 52 71 73 45 86 0 52 83 61 17 53 50 23 70 82 4 | proc3 22 0 1061909262 48906 4077635 117661341 5 29391916 2570425 2993289 590 0 0 7815 15 1130 0 3983 92385 13332 2 1 23729 5 | proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 11 58 32 67 13 28 35 90 1 26 0 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/rpc/nfsd: -------------------------------------------------------------------------------- 1 | rc 0 6 18622 2 | fh 0 0 0 0 0 3 | io 157286400 72864 4 | th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 5 | ra 32 0 0 0 0 0 0 0 0 0 0 0 6 | net 972 55 917 1 7 | rpc 18628 3 1 2 0 8 | proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 9 | proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 10 | proc4 2 2 10853 11 | proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 12 | wdeleg_getattr 15 13 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/sockstat: -------------------------------------------------------------------------------- 1 | sockets: used 229 2 | TCP: inuse 4 orphan 0 tw 4 alloc 17 mem 1 3 | UDP: inuse 0 mem 0 4 | UDPLITE: inuse 0 5 | RAW: inuse 0 6 | FRAG: inuse 0 memory 0 7 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/sockstat6: -------------------------------------------------------------------------------- 1 | TCP6: inuse 17 2 | UDP6: inuse 9 3 | UDPLITE6: inuse 0 4 | RAW6: inuse 1 5 | FRAG6: inuse 0 memory 0 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/softnet_stat: -------------------------------------------------------------------------------- 1 | 00049279 00000000 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 2 | 000dfb82 00000029 0000000a 00000000 00000000 00000000 00000000 00000000 
00000000 00000000 00000000 3 | 00551c3f 00000000 00000055 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 4 | 002f8339 00000000 00000032 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/stat/arp_cache: -------------------------------------------------------------------------------- 1 | entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls 2 | 00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c 3 | 00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 4 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/stat/ndisc_cache: -------------------------------------------------------------------------------- 1 | entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls 2 | 00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb 3 | 00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 4 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/stat/nf_conntrack: -------------------------------------------------------------------------------- 1 | entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart 2 | 00000021 00000000 00000000 00000000 00000003 0000588a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 3 | 00000021 00000000 00000000 00000000 00000002 000056a4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000002 4 | 00000021 00000000 00000000 00000000 00000001 000058d4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000001 5 | 00000021 00000000 00000000 00000000 0000002f 00005688 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000004 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/net/udp: -------------------------------------------------------------------------------- 1 | sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 2 | 0: 00000000:0016 00000000:0000 0A 00000015:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 3 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/pressure/cpu: -------------------------------------------------------------------------------- 1 | some avg10=0.00 avg60=0.00 avg300=0.00 total=14036781 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/pressure/io: -------------------------------------------------------------------------------- 1 | some avg10=0.18 avg60=0.34 avg300=0.10 total=159886802 2 | full avg10=0.18 
avg60=0.34 avg300=0.10 total=159229614 3 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/pressure/memory: -------------------------------------------------------------------------------- 1 | some avg10=0.00 avg60=0.00 avg300=0.00 total=0 2 | full avg10=0.00 avg60=0.00 avg300=0.00 total=0 3 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/schedstat: -------------------------------------------------------------------------------- 1 | version 15 2 | timestamp 15819019232 3 | cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 4 | domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 5 | cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 6 | domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 7 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/self/stat: -------------------------------------------------------------------------------- 1 | 17 (khungtaskd) S 2 0 0 0 -1 2129984 0 0 0 0 14 0 0 0 20 0 1 0 24 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/slabinfo: -------------------------------------------------------------------------------- 1 | slabinfo - version: 2.1 2 | # name : tunables : slabdata 3 | tw_sock_TCP 704 864 256 32 2 : tunables 0 0 0 : slabdata 27 27 0 4 | dmaengine-unmap-128 1206 1320 1088 30 8 : tunables 0 0 0 : slabdata 44 44 0 5 | kmalloc-8192 132 148 8192 4 8 : tunables 0 0 0 : slabdata 37 37 0 6 | kmem_cache 320 320 256 32 2 : tunables 0 0 0 : slabdata 10 10 0 7 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/softirqs: -------------------------------------------------------------------------------- 1 | CPU0 CPU1 2 | HI: 7 1 3 | TIMER: 424191 108342 4 | NET_TX: 2301 2430 5 | NET_RX: 43066 104508 6 | BLOCK: 23776 24115 7 | IRQ_POLL: 0 0 8 | TASKLET: 372 1899 9 | SCHED: 378895 152852 10 | HRTIMER: 40 346 11 | RCU: 155929 146631 12 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/abdstats: -------------------------------------------------------------------------------- 1 | 7 1 0x01 21 5712 73163810083184 309946154984654 2 | name type data 3 | struct_size 4 2520 4 | linear_cnt 4 62 5 | linear_data_size 4 223232 6 | scatter_cnt 4 1 7 | scatter_data_size 4 16384 8 | scatter_chunk_waste 4 0 9 | scatter_order_0 4 0 10 | scatter_order_1 4 0 11 | scatter_order_2 4 1 12 | scatter_order_3 4 0 13 | scatter_order_4 4 0 14 | scatter_order_5 4 0 15 | scatter_order_6 4 0 16 | scatter_order_7 4 0 17 | scatter_order_8 4 0 18 | scatter_order_9 4 0 19 | scatter_order_10 4 0 
20 | scatter_page_multi_chunk 4 0 21 | scatter_page_multi_zone 4 0 22 | scatter_page_alloc_retry 4 0 23 | scatter_sg_table_retry 4 0 24 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/dmu_tx: -------------------------------------------------------------------------------- 1 | 5 1 0x01 11 528 8010436841 354962070418194 2 | name type data 3 | dmu_tx_assigned 4 3532844 4 | dmu_tx_delay 4 0 5 | dmu_tx_error 4 0 6 | dmu_tx_suspended 4 0 7 | dmu_tx_group 4 0 8 | dmu_tx_memory_reserve 4 0 9 | dmu_tx_memory_reclaim 4 0 10 | dmu_tx_dirty_throttle 4 0 11 | dmu_tx_dirty_delay 4 0 12 | dmu_tx_dirty_over_max 4 0 13 | dmu_tx_quota 4 0 14 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/fm: -------------------------------------------------------------------------------- 1 | 0 1 0x01 4 192 8007255140 354329591145385 2 | name type data 3 | erpt-dropped 4 18 4 | erpt-set-failed 4 0 5 | fmri-set-failed 4 0 6 | payload-set-failed 4 0 7 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/pool1/io: -------------------------------------------------------------------------------- 1 | 12 3 0x00 1 80 79205351707403 395818011156865 2 | nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt 3 | 1884160 3206144 22 132 7155162 104112268 79210489694949 24168078 104112268 79210489849220 0 0 4 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-1: -------------------------------------------------------------------------------- 1 | 23 1 0x01 7 2160 221578688875 6665999035587 2 | name type data 3 | dataset_name 7 pool1 4 | writes 4 0 5 | nwritten 4 0 6 | reads 4 0 7 | nread 4 0 8 | nunlinks 4 0 9 | nunlinked 4 0 10 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-2: -------------------------------------------------------------------------------- 1 | 24 1 0x01 7 2160 221611904716 7145015038451 2 | name type data 3 | dataset_name 7 pool1/dataset1 4 | writes 4 4 5 | nwritten 4 12302 6 | reads 4 2 7 | nread 4 28 8 | nunlinks 4 3 9 | nunlinked 4 3 10 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/pool1/state: -------------------------------------------------------------------------------- 1 | ONLINE 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/pool2/state: -------------------------------------------------------------------------------- 1 | SUSPENDED 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/poolz1/io: -------------------------------------------------------------------------------- 1 | 16 3 0x00 1 80 79568650431241 395832279341621 2 | nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt 3 | 2826240 2680501248 33 25294 9673715628 6472105124093 110734831833266 9829091640 6472105124093 110734831944501 0 0 4 | -------------------------------------------------------------------------------- 
/inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-1: -------------------------------------------------------------------------------- 1 | 30 1 0x01 7 2160 217993779684 2621674546179 2 | name type data 3 | dataset_name 7 poolz1 4 | writes 4 0 5 | nwritten 4 0 6 | reads 4 0 7 | nread 4 0 8 | nunlinks 4 0 9 | nunlinked 4 0 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-2: -------------------------------------------------------------------------------- 1 | 31 1 0x01 7 2160 218133979890 3024169078920 2 | name type data 3 | dataset_name 7 poolz1/dataset1 4 | writes 4 10 5 | nwritten 4 32806 6 | reads 4 2 7 | nread 4 28 8 | nunlinks 4 14 9 | nunlinked 4 14 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/poolz1/state: -------------------------------------------------------------------------------- 1 | DEGRADED 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/vdev_cache_stats: -------------------------------------------------------------------------------- 1 | 8 1 0x01 3 144 8012540758 352116106118781 2 | name type data 3 | delegations 4 40 4 | hits 4 0 5 | misses 4 0 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/vdev_mirror_stats: -------------------------------------------------------------------------------- 1 | 18 1 0x01 7 1904 73163813004224 309980651991187 2 | name type data 3 | rotating_linear 4 0 4 | rotating_offset 4 0 5 | rotating_seek 4 0 6 | non_rotating_linear 4 0 7 | non_rotating_seek 4 0 8 | preferred_found 4 0 9 | preferred_not_found 4 94 10 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/xuio_stats: -------------------------------------------------------------------------------- 1 | 2 1 0x01 6 288 8009100742 353415816865654 2 | name type data 3 | onloan_read_buf 4 32 4 | onloan_write_buf 4 0 5 | read_buf_copied 4 0 6 | read_buf_nocopy 4 0 7 | write_buf_copied 4 0 8 | write_buf_nocopy 4 0 9 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/zfetchstats: -------------------------------------------------------------------------------- 1 | 4 1 0x01 11 528 8010434610 345692669858836 2 | name type data 3 | hits 4 7067992 4 | misses 4 11 5 | colinear_hits 4 0 6 | colinear_misses 4 11 7 | stride_hits 4 7067990 8 | stride_misses 4 0 9 | reclaim_successes 4 0 10 | reclaim_failures 4 11 11 | streams_resets 4 0 12 | streams_noresets 4 2 13 | bogus_streams 4 0 14 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/spl/kstat/zfs/zil: -------------------------------------------------------------------------------- 1 | 7 1 0x01 13 624 8012538347 351689526932992 2 | name type data 3 | zil_commit_count 4 10 4 | zil_commit_writer_count 4 0 5 | zil_itx_count 4 0 6 | zil_itx_indirect_count 4 0 7 | zil_itx_indirect_bytes 4 0 8 | zil_itx_copied_count 4 0 9 | zil_itx_copied_bytes 4 0 10 | zil_itx_needcopy_count 4 0 11 | zil_itx_needcopy_bytes 4 18446744073709537686 12 | zil_itx_metaslab_normal_count 4 0 13 | 
zil_itx_metaslab_normal_bytes 4 0 14 | zil_itx_metaslab_slog_count 4 0 15 | zil_itx_metaslab_slog_bytes 4 0 16 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/fs/file-nr: -------------------------------------------------------------------------------- 1 | 1024 0 1631329 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/kernel/pid_max: -------------------------------------------------------------------------------- 1 | 123 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/kernel/random/entropy_avail: -------------------------------------------------------------------------------- 1 | 1337 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/kernel/random/poolsize: -------------------------------------------------------------------------------- 1 | 4096 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/kernel/seccomp/actions_avail: -------------------------------------------------------------------------------- 1 | kill_process kill_thread trap errno user_notif trace log allow 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/kernel/threads-max: -------------------------------------------------------------------------------- 1 | 7801 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/net/netfilter/nf_conntrack_count: -------------------------------------------------------------------------------- 1 | 123 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/net/netfilter/nf_conntrack_max: -------------------------------------------------------------------------------- 1 | 65536 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/pid_max: -------------------------------------------------------------------------------- 1 | 123 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/proc/sys/threads-max: -------------------------------------------------------------------------------- 1 | 7801 -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/qdisc/results.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "IfaceName": "wlan0", 4 | "Bytes": 42, 5 | "Packets": 42, 6 | "Requeues": 1, 7 | "Kind": "fq", 8 | "Drops": 1 9 | }, 10 | { 11 | "IfaceName": "eth0", 12 | "Bytes": 83, 13 | "Packets": 83, 14 | "Requeues": 2, 15 | "Kind": "pfifo_fast" 16 | } 17 | ] 18 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/client_side_timestamp.out: -------------------------------------------------------------------------------- 1 | # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise 2 | # TYPE node_textfile_scrape_error gauge 3 | node_textfile_scrape_error 1 4 | 
-------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/client_side_timestamp/metrics.prom: -------------------------------------------------------------------------------- 1 | metric_with_custom_timestamp 1 1441205977284 2 | normal_metric 2 3 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_different_help.out: -------------------------------------------------------------------------------- 1 | # HELP events_total A nice help message. 2 | # TYPE events_total counter 3 | events_total{file="a",foo="bar"} 10 4 | events_total{file="a",foo="baz"} 20 5 | # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 6 | # TYPE node_textfile_mtime_seconds gauge 7 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/a.prom"} 1 8 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/b.prom"} 1 9 | # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise 10 | # TYPE node_textfile_scrape_error gauge 11 | node_textfile_scrape_error 0 12 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_different_help/a.prom: -------------------------------------------------------------------------------- 1 | # HELP events_total A nice help message. 2 | # TYPE events_total counter 3 | events_total{foo="bar",file="a"} 10 4 | events_total{foo="baz",file="a"} 20 5 | 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_different_help/b.prom: -------------------------------------------------------------------------------- 1 | # HELP events_total A different help message. 2 | # TYPE events_total counter 3 | events_total{foo="bar",file="b"} 30 4 | events_total{foo="baz",file="b"} 40 5 | 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_empty_help.out: -------------------------------------------------------------------------------- 1 | # HELP events_total Metric read from fixtures/textfile/metrics_merge_empty_help/a.prom, fixtures/textfile/metrics_merge_empty_help/b.prom 2 | # TYPE events_total counter 3 | events_total{file="a",foo="bar"} 10 4 | events_total{file="a",foo="baz"} 20 5 | events_total{file="b",foo="bar"} 30 6 | events_total{file="b",foo="baz"} 40 7 | # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
8 | # TYPE node_textfile_mtime_seconds gauge 9 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_empty_help/a.prom"} 1 10 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_empty_help/b.prom"} 1 11 | # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise 12 | # TYPE node_textfile_scrape_error gauge 13 | node_textfile_scrape_error 0 14 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_empty_help/a.prom: -------------------------------------------------------------------------------- 1 | # HELP events_total 2 | # TYPE events_total counter 3 | events_total{foo="bar",file="a"} 10 4 | events_total{foo="baz",file="a"} 20 5 | 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_empty_help/b.prom: -------------------------------------------------------------------------------- 1 | # HELP events_total 2 | # TYPE events_total counter 3 | events_total{foo="bar",file="b"} 30 4 | events_total{foo="baz",file="b"} 40 5 | 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_no_help.out: -------------------------------------------------------------------------------- 1 | # HELP events_total Metric read from fixtures/textfile/metrics_merge_no_help/a.prom, fixtures/textfile/metrics_merge_no_help/b.prom 2 | # TYPE events_total counter 3 | events_total{file="a",foo="bar"} 10 4 | events_total{file="a",foo="baz"} 20 5 | events_total{file="b",foo="bar"} 30 6 | events_total{file="b",foo="baz"} 40 7 | # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 8 | # TYPE node_textfile_mtime_seconds gauge 9 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_no_help/a.prom"} 1 10 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_no_help/b.prom"} 1 11 | # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise 12 | # TYPE node_textfile_scrape_error gauge 13 | node_textfile_scrape_error 0 14 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_no_help/a.prom: -------------------------------------------------------------------------------- 1 | # TYPE events_total counter 2 | events_total{foo="bar",file="a"} 10 3 | events_total{foo="baz",file="a"} 20 4 | 5 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_no_help/b.prom: -------------------------------------------------------------------------------- 1 | # TYPE events_total counter 2 | events_total{foo="bar",file="b"} 30 3 | events_total{foo="baz",file="b"} 40 4 | 5 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_same_help.out: -------------------------------------------------------------------------------- 1 | # HELP events_total The same help. 2 | # TYPE events_total counter 3 | events_total{file="a",foo="bar"} 10 4 | events_total{file="a",foo="baz"} 20 5 | events_total{file="b",foo="bar"} 30 6 | events_total{file="b",foo="baz"} 40 7 | # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
8 | # TYPE node_textfile_mtime_seconds gauge 9 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_same_help/a.prom"} 1 10 | node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_same_help/b.prom"} 1 11 | # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise 12 | # TYPE node_textfile_scrape_error gauge 13 | node_textfile_scrape_error 0 14 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_same_help/a.prom: -------------------------------------------------------------------------------- 1 | # HELP events_total The same help. 2 | # TYPE events_total counter 3 | events_total{foo="bar",file="a"} 10 4 | events_total{foo="baz",file="a"} 20 5 | 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/metrics_merge_same_help/b.prom: -------------------------------------------------------------------------------- 1 | # HELP events_total The same help. 2 | # TYPE events_total counter 3 | events_total{foo="bar",file="b"} 30 4 | events_total{foo="baz",file="b"} 40 5 | 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/no_metric_files.out: -------------------------------------------------------------------------------- 1 | # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise 2 | # TYPE node_textfile_scrape_error gauge 3 | node_textfile_scrape_error 0 4 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/no_metric_files/non_matching_file.txt: -------------------------------------------------------------------------------- 1 | This file should be ignored. 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/nonexistent_path.out: -------------------------------------------------------------------------------- 1 | # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise 2 | # TYPE node_textfile_scrape_error gauge 3 | node_textfile_scrape_error 1 4 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/two_metric_files/metrics1.prom: -------------------------------------------------------------------------------- 1 | testmetric1_1{foo="bar"} 10 2 | testmetric1_2{foo="baz"} 20 3 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/two_metric_files/metrics2.prom: -------------------------------------------------------------------------------- 1 | testmetric2_1{foo="bar"} 30 2 | testmetric2_2{foo="baz"} 40 3 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/textfile/two_metric_files/non_matching_file.txt: -------------------------------------------------------------------------------- 1 | This file should be ignored. 
2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/usr/lib/os-release: -------------------------------------------------------------------------------- 1 | NAME="Ubuntu" 2 | VERSION="20.04.2 LTS (Focal Fossa)" 3 | ID=ubuntu 4 | ID_LIKE=debian 5 | PRETTY_NAME="Ubuntu 20.04.2 LTS" 6 | VERSION_ID="20.04" 7 | HOME_URL="https://www.ubuntu.com/" 8 | SUPPORT_URL="https://help.ubuntu.com/" 9 | BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 10 | PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 11 | VERSION_CODENAME=focal 12 | UBUNTU_CODENAME=focal 13 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/wifi/interfaces.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "wlan0", 4 | "type": 2, 5 | "frequency": 2412 6 | }, 7 | { 8 | "name": "wlan1", 9 | "type": 3, 10 | "frequency": 2412 11 | }, 12 | { 13 | "type": 10 14 | } 15 | ] 16 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/wifi/wlan0/bss.json: -------------------------------------------------------------------------------- 1 | { 2 | "ssid": "Example", 3 | "bssid": "ABEiM0RV", 4 | "status": 1 5 | } 6 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures/wifi/wlan0/stationinfo.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "hardwareaddr": "qrvM3e7/", 4 | "connected": 30000000000, 5 | "inactive": 400000000, 6 | "receivebitrate": 128000000, 7 | "transmitbitrate": 164000000, 8 | "signal": -52, 9 | "transmitretries": 10, 10 | "transmitfailed": 2, 11 | "beaconloss": 1 12 | }, 13 | { 14 | "hardwareaddr": "AQIDBAUG", 15 | "connected": 60000000000, 16 | "inactive": 800000000, 17 | "receivebitrate": 256000000, 18 | "transmitbitrate": 328000000, 19 | "signal": -26, 20 | "transmitretries": 20, 21 | "transmitfailed": 4, 22 | "beaconloss": 2 23 | } 24 | ] 25 | 26 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures_bindmount/proc/mounts: -------------------------------------------------------------------------------- 1 | /dev/nvme1n0 /host ext4 rw,seclabel,relatime,data=ordered 0 0 2 | /dev/nvme1n1 /host/media/volume1 ext4 rw,seclabel,relatime,data=ordered 0 0 3 | /dev/nvme1n2 /host/media/volume2 ext4 rw,seclabel,relatime,data=ordered 0 0 4 | tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0 5 | tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 6 | tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0 7 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/fixtures_hidepid/proc/mounts: -------------------------------------------------------------------------------- 1 | rootfs / rootfs rw 0 0 2 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/other_os/kvm_bsd.h: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // +build !nomeminfo 15 | // +build freebsd dragonfly 16 | 17 | #include 18 | 19 | int _kvm_swap_used_pages(uint64_t *out); 20 | -------------------------------------------------------------------------------- /inputs/node_exporter/collector/time_other.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | //go:build !linux && !notime 15 | // +build !linux,!notime 16 | 17 | package collector 18 | 19 | import ( 20 | "github.com/prometheus/client_golang/prometheus" 21 | ) 22 | 23 | func (c *timeCollector) update(ch chan<- prometheus.Metric) error { 24 | return nil 25 | } 26 | -------------------------------------------------------------------------------- /inputs/ntp/README.md: -------------------------------------------------------------------------------- 1 | # ntp 2 | 3 | ntp 插件用于计算时间偏移。生产环境所有的机器,都应该是一致的时间,如果有些机器时间滞后,有些机器时间超前,在一些分布式系统中会是非常严重的问题。所有的大型互联网公司,一定会在机器交付给业务方之前,就由系统运维人员统一做好了时间调整,并启用 ntpd 或 chrony 4 | 5 | ## 监控手段 6 | 7 | 监控机器时间偏移量,只需要给出 ntp 服务端地址,Categraf 就会周期性去请求,对比本机时间,得到偏移量,监控指标是 ntp_offset_ms 顾名思义,单位是毫秒,一般这个值不能超过 1000 8 | 9 | ## 监控规则 10 | 11 | 该 README 所在目录的同级目录下有 alerts.json 就是告警规则,导入夜莺即可使用 -------------------------------------------------------------------------------- /inputs/ntp/alerts.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "NTP时间偏移太大", 4 | "note": "", 5 | "severity": 2, 6 | "disabled": 0, 7 | "prom_for_duration": 60, 8 | "prom_ql": "ntp_offset_ms > 1000 or ntp_offset_ms < -1000", 9 | "prom_eval_interval": 15, 10 | "enable_stime": "00:00", 11 | "enable_etime": "23:59", 12 | "enable_days_of_week": [ 13 | "1", 14 | "2", 15 | "3", 16 | "4", 17 | "5", 18 | "6", 19 | "0" 20 | ], 21 | "enable_in_bg": 0, 22 | "notify_recovered": 1, 23 | "notify_channels": [], 24 | "notify_repeat_step": 60, 25 | "recover_duration": 0, 26 | "callbacks": [], 27 | "runbook_url": "", 28 | "append_tags": [] 29 | } 30 | ] -------------------------------------------------------------------------------- /inputs/ntp/ntp_test.go: -------------------------------------------------------------------------------- 1 | package ntp 2 | 3 | import ( 4 | "log" 5 | "testing" 6 | "time" 7 | 8 | "github.com/toolkits/pkg/nux" 9 | ) 10 | 11 | func TestGetTwoTime(t *testing.T) { 12 | orgTime := time.Now() 13 | log.Println("Begin") 14 | serverReciveTime, serverTransmitTime, err := nux.NtpTwoTime("ntp1.aliyun.com", 20) 15 | if 
err != nil { 16 | log.Println(err) 17 | return 18 | } 19 | dstTime := time.Now() 20 | 21 | // https://en.wikipedia.org/wiki/Network_Time_Protocol 22 | duration := ((serverReciveTime.UnixNano() - orgTime.UnixNano()) + (serverTransmitTime.UnixNano() - dstTime.UnixNano())) / 2 23 | 24 | delta := duration / 1e6 // convert to ms 25 | log.Println(delta) 26 | } 27 | -------------------------------------------------------------------------------- /inputs/nvidia_smi/types.go: -------------------------------------------------------------------------------- 1 | package nvidia_smi 2 | 3 | import ( 4 | "errors" 5 | "regexp" 6 | ) 7 | 8 | // qField stands for query field - the field name before the query. 9 | type qField string 10 | 11 | // rField stands for returned field - the field name as returned by the nvidia-smi. 12 | type rField string 13 | 14 | type MetricInfo struct { 15 | metricName string 16 | valueMultiplier float64 17 | } 18 | 19 | var ( 20 | ErrUnexpectedQueryField = errors.New("unexpected query field") 21 | ErrParseNumber = errors.New("could not parse number from value") 22 | 23 | numericRegex = regexp.MustCompile("[+-]?([0-9]*[.])?[0-9]+") 24 | 25 | requiredFields = []qField{ 26 | uuidQField, 27 | nameQField, 28 | driverModelCurrentQField, 29 | driverModelPendingQField, 30 | vBiosVersionQField, 31 | driverVersionQField, 32 | } 33 | ) 34 | -------------------------------------------------------------------------------- /inputs/nvidia_smi/util.go: -------------------------------------------------------------------------------- 1 | package nvidia_smi 2 | 3 | import ( 4 | "regexp" 5 | "strconv" 6 | "strings" 7 | ) 8 | 9 | const ( 10 | hexToDecimalBase = 16 11 | hexToDecimalUIntBitSize = 64 12 | ) 13 | 14 | var ( 15 | matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") 16 | ) 17 | 18 | func hexToDecimal(hex string) (float64, error) { 19 | s := hex 20 | s = strings.ReplaceAll(s, "0x", "") 21 | s = strings.ReplaceAll(s, "0X", "") 22 | parsed, err := strconv.ParseUint(s, hexToDecimalBase, hexToDecimalUIntBitSize) 23 | 24 | return float64(parsed), err 25 | } 26 | -------------------------------------------------------------------------------- /inputs/phpfpm/README.md: -------------------------------------------------------------------------------- 1 | # phpfpm 2 | 3 | *PHP-FPM* monitoring plugin, adapted from telegraf's phpfpm plugin. 4 | 5 | The plugin requires enabling the *pm.status_path* option in the php-fpm configuration: 6 | ``` 7 | pm.status_path = /status 8 | ``` 9 | 10 | 11 | ## Configuration 12 | 13 | See the sample [configuration](../../conf/input.phpfpm/phpfpm.toml) file. 14 | 15 | ### Notes: 16 | 1. The following options only take effect for HTTP URLs 17 | - response_timeout 18 | - username & password 19 | - headers 20 | - TLS config 21 | 2. When using a Unix socket, categraf and the socket path must be on the same host, and the user running categraf must have read permission on that path. 22 | ## Dashboards and alerts 23 | 24 | To be updated...
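For reference, the payload served at `pm.status_path` is plain `key: value` text (append `?full` for per-worker detail). A minimal standalone sketch of fetching and parsing that text over HTTP — assuming a hypothetical endpoint at `http://127.0.0.1/status`; this is not code taken from the plugin, which also supports fcgi/Unix-socket access, auth and TLS — could look like this:

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

// fetchStatus is a standalone example (not plugin code): it grabs the php-fpm
// status page and returns its "key: value" lines as a map,
// e.g. stats["active processes"] == "1".
func fetchStatus(url string) (map[string]string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	stats := make(map[string]string)
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		// each line looks like "accepted conn:   12073"
		k, v, ok := strings.Cut(sc.Text(), ":")
		if !ok {
			continue
		}
		stats[strings.TrimSpace(k)] = strings.TrimSpace(v)
	}
	return stats, sc.Err()
}

func main() {
	stats, err := fetchStatus("http://127.0.0.1/status")
	if err != nil {
		panic(err)
	}
	fmt.Println(stats["active processes"], stats["idle processes"])
}
```

The sketch is only meant to show the shape of the data the plugin consumes; the plugin itself handles the transport and authentication details listed in the notes above.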
-------------------------------------------------------------------------------- /inputs/ping/alerts.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "PING地址探测失败", 4 | "note": "", 5 | "severity": 2, 6 | "disabled": 0, 7 | "prom_for_duration": 60, 8 | "prom_ql": "ping_result_code != 0", 9 | "prom_eval_interval": 15, 10 | "enable_stime": "00:00", 11 | "enable_etime": "23:59", 12 | "enable_days_of_week": [ 13 | "1", 14 | "2", 15 | "3", 16 | "4", 17 | "5", 18 | "6", 19 | "0" 20 | ], 21 | "enable_in_bg": 0, 22 | "notify_recovered": 1, 23 | "notify_channels": [], 24 | "notify_repeat_step": 60, 25 | "recover_duration": 0, 26 | "callbacks": [], 27 | "runbook_url": "", 28 | "append_tags": [] 29 | } 30 | ] -------------------------------------------------------------------------------- /inputs/ping/dashboard-2.0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/inputs/ping/dashboard-2.0.png -------------------------------------------------------------------------------- /inputs/postgresql/postgresql.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/inputs/postgresql/postgresql.png -------------------------------------------------------------------------------- /inputs/processes/README.md: -------------------------------------------------------------------------------- 1 | # processes 2 | 3 | 统计进程数量,比如 running 的有多少,sleeping 的有多少,total 有多少 4 | 5 | ## 监控大盘 6 | 7 | 该插件没有单独的监控大盘,OS 的监控大盘统一放到 system 下面了 -------------------------------------------------------------------------------- /inputs/processes/processes_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package processes 5 | -------------------------------------------------------------------------------- /inputs/procstat/native_finder_notwindows.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package procstat 5 | 6 | import ( 7 | "strings" 8 | ) 9 | 10 | // Pattern matches on the process name 11 | func (pg *NativeFinder) Pattern(pattern string, filters ...Filter) ([]PID, error) { 12 | var pids []PID 13 | procs, err := pg.FastProcessList() 14 | if err != nil { 15 | return pids, err 16 | } 17 | PROCS: 18 | for _, p := range procs { 19 | for _, filter := range filters { 20 | if !filter(p) { 21 | continue PROCS 22 | } 23 | } 24 | name, err := p.Exe() 25 | if err != nil { 26 | // skip, this can be caused by the pid no longer existing 27 | // or you having no permissions to access it 28 | continue 29 | } 30 | if strings.Contains(name, pattern) { 31 | pids = append(pids, PID(p.Pid)) 32 | } 33 | } 34 | return pids, err 35 | } 36 | -------------------------------------------------------------------------------- /inputs/procstat/native_finder_windows.go: -------------------------------------------------------------------------------- 1 | package procstat 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | // Pattern matches on the process name 8 | func (pg *NativeFinder) Pattern(pattern string, filters ...Filter) ([]PID, error) { 9 | var pids []PID 10 | procs, err := pg.FastProcessList() 11 | if err != nil { 12 | return pids, err 13 | } 14 | PROCS: 15 | for 
_, p := range procs { 16 | for _, filter := range filters { 17 | if !filter(p) { 18 | continue PROCS 19 | } 20 | } 21 | name, err := p.Name() 22 | if err != nil { 23 | // skip, this can be caused by the pid no longer existing 24 | // or you having no permissions to access it 25 | continue 26 | } 27 | if strings.Contains(name, pattern) { 28 | pids = append(pids, PID(p.Pid)) 29 | } 30 | } 31 | return pids, err 32 | } 33 | -------------------------------------------------------------------------------- /inputs/procstat/title_capture_notwindows.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package procstat 5 | 6 | func getWindowTitleByPid(pid uint32) string { 7 | return "" 8 | } 9 | -------------------------------------------------------------------------------- /inputs/procstat/win_service_notwindows.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package procstat 5 | 6 | import ( 7 | "fmt" 8 | ) 9 | 10 | func queryPidWithWinServiceName(_ string) (uint32, error) { 11 | return 0, fmt.Errorf("os not support win_service option") 12 | } 13 | -------------------------------------------------------------------------------- /inputs/rabbitmq/README.md: -------------------------------------------------------------------------------- 1 | # rabbitmq 2 | 3 | RabbitMQ monitoring plugin, forked from [telegraf/rabbitmq](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/rabbitmq). In practice this plugin is rarely needed any more: starting with RabbitMQ 3.8, Prometheus support is built in, so once it is enabled RabbitMQ exposes a metrics endpoint directly and Categraf can simply pull from that endpoint. 4 | 5 | Enable the RabbitMQ prometheus plugin: 6 | 7 | ```shell 8 | rabbitmq-plugins enable rabbitmq_prometheus 9 | ``` 10 | 11 | Once enabled, RabbitMQ listens on port 15692 by default; visiting [http://localhost:15692/metrics](http://localhost:15692/metrics) shows the monitoring data in Prometheus exposition format. 12 | 13 | From there, scrape it with Categraf's prometheus plugin; this rabbitmq plugin is then unnecessary. 14 | 15 | The dashboard.json placed in the same directory as this README is built for RabbitMQ 3.8 and later and can be imported into Nightingale. -------------------------------------------------------------------------------- /inputs/redis/README.md: -------------------------------------------------------------------------------- 1 | # redis 2 | 3 | Redis monitoring works by connecting to Redis, running the INFO command, parsing the result, and reporting it as metrics (a standalone sketch of this idea follows the redis_sentinel section below). 4 | 5 | ## Configuration 6 | 7 | The redis plugin is configured in `conf/input.redis/redis.toml`. The simplest configuration looks like this: 8 | 9 | ```toml 10 | [[instances]] 11 | address = "127.0.0.1:6379" 12 | username = "" 13 | password = "" 14 | labels = { instance="n9e-10.23.25.2:6379" } 15 | ``` 16 | 17 | To monitor several Redis instances, just add more instances blocks: 18 | 19 | ```toml 20 | [[instances]] 21 | address = "10.23.25.2:6379" 22 | username = "" 23 | password = "" 24 | labels = { instance="n9e-10.23.25.2:6379" } 25 | 26 | [[instances]] 27 | address = "10.23.25.3:6379" 28 | username = "" 29 | password = "" 30 | labels = { instance="n9e-10.23.25.3:6379" } 31 | ``` 32 | 33 | It is recommended to attach an instance label via labels, which makes it easier to reuse dashboards later. 34 | 35 | ## Dashboards and alerts 36 | 37 | In the same directory as this README, dashboard.json is the dashboard definition and alerts.json holds the alert rules; both can be imported into Nightingale. 38 | -------------------------------------------------------------------------------- /inputs/redis_sentinel/README.md: -------------------------------------------------------------------------------- 1 | # redis_sentinel 2 | 3 | forked from [telegraf/redis_sentinel](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/redis_sentinel) 4 |
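As noted in the redis README above, the plugin's principle is simply: connect, issue INFO, and turn the returned `key:value` lines into metrics. A minimal standalone illustration of that idea using only the Go standard library — it assumes the server requires no AUTH, and it is not the plugin's actual implementation, which goes through a proper Redis client — might look like:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
)

// redisInfo is a standalone illustration (not plugin code): it sends a raw
// INFO command and splits the bulk-string reply into key/value pairs,
// skipping section headers such as "# Memory".
func redisInfo(addr string) (map[string]string, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("INFO\r\n")); err != nil {
		return nil, err
	}

	r := bufio.NewReader(conn)
	header, err := r.ReadString('\n') // bulk-string header, e.g. "$3096\r\n"
	if err != nil {
		return nil, err
	}
	size, err := strconv.Atoi(strings.TrimSpace(strings.TrimPrefix(header, "$")))
	if err != nil {
		return nil, err
	}
	body := make([]byte, size)
	if _, err := io.ReadFull(r, body); err != nil {
		return nil, err
	}

	info := make(map[string]string)
	for _, line := range strings.Split(string(body), "\r\n") {
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if k, v, ok := strings.Cut(line, ":"); ok {
			info[k] = v
		}
	}
	return info, nil
}

func main() {
	info, err := redisInfo("127.0.0.1:6379")
	if err != nil {
		panic(err)
	}
	fmt.Println("used_memory =", info["used_memory"])
}
```

The actual plugin then maps these pairs onto gauges and attaches the labels from the instance configuration.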
-------------------------------------------------------------------------------- /inputs/smart/smart.go: -------------------------------------------------------------------------------- 1 | package smart 2 | 3 | import ( 4 | _ "embed" 5 | "flashcat.cloud/categraf/config" 6 | "flashcat.cloud/categraf/inputs" 7 | "os" 8 | ) 9 | 10 | const ( 11 | inputName = "smart" 12 | ) 13 | 14 | type ( 15 | Smart struct { 16 | config.PluginConfig 17 | Instances []*Instance `toml:"instances"` 18 | } 19 | ) 20 | 21 | func init() { 22 | // Set LC_NUMERIC to uniform numeric output from cli tools 23 | _ = os.Setenv("LC_NUMERIC", "en_US.UTF-8") 24 | 25 | inputs.Add(inputName, func() inputs.Input { 26 | return &Smart{} 27 | }) 28 | } 29 | 30 | func (s *Smart) Clone() inputs.Input { 31 | return &Smart{} 32 | } 33 | 34 | func (s *Smart) Name() string { 35 | return inputName 36 | } 37 | 38 | func (s *Smart) GetInstances() []inputs.Instance { 39 | ret := make([]inputs.Instance, len(s.Instances)) 40 | for i := 0; i < len(s.Instances); i++ { 41 | ret[i] = s.Instances[i] 42 | } 43 | return ret 44 | } 45 | -------------------------------------------------------------------------------- /inputs/snmp/README.md: -------------------------------------------------------------------------------- 1 | forked from [telegraf/snmp](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/snmp) 2 | 3 | 目前只修改了netsnmp的部分 ,配置中为了兼容,保留了path参数。 4 | 5 | snmp_up代表设备是否存活,1 存活 0不存活,依赖ICMP 6 | 7 | 配置示例 8 | ``` 9 | [[instances]] 10 | agents = ["udp://172.30.15.189:161"] 11 | 12 | timeout = "5s" 13 | version = 2 14 | community = "public" 15 | agent_host_tag = "ident" 16 | retries = 1 17 | 18 | [[instances.field]] 19 | oid = "RFC1213-MIB::sysUpTime.0" 20 | name = "uptime" 21 | 22 | [[instances.field]] 23 | oid = "RFC1213-MIB::sysName.0" 24 | name = "source" 25 | is_tag = true 26 | 27 | [[instances.table]] 28 | oid = "IF-MIB::ifTable" 29 | name = "interface" 30 | inherit_tags = ["source"] 31 | 32 | [[instances.table.field]] 33 | oid = "IF-MIB::ifDescr" 34 | name = "ifDescr" 35 | is_tag = true 36 | 37 | ``` -------------------------------------------------------------------------------- /inputs/snmp_trap/gosmi.go: -------------------------------------------------------------------------------- 1 | package snmp_trap 2 | 3 | import ( 4 | "flashcat.cloud/categraf/pkg/snmp" 5 | ) 6 | 7 | type gosmiTranslator struct { 8 | } 9 | 10 | func (t *gosmiTranslator) lookup(oid string) (snmp.MibEntry, error) { 11 | return snmp.TrapLookup(oid) 12 | } 13 | 14 | func newGosmiTranslator(paths []string) (*gosmiTranslator, error) { 15 | err := snmp.LoadMibsFromPath(paths, &snmp.GosmiMibLoader{}) 16 | if err == nil { 17 | return &gosmiTranslator{}, nil 18 | } 19 | return nil, err 20 | } 21 | -------------------------------------------------------------------------------- /inputs/sockstat/sockstat_notlinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | 3 | package sockstat 4 | 5 | // ParseNetSockstat retrieves IPv4 socket statistics. 6 | func ParseNetSockstat() (*NetSockstat, error) { 7 | return nil, nil 8 | } 9 | 10 | // ParseNetSockstat6 retrieves IPv6 socket statistics. 11 | // 12 | // If IPv6 is disabled on this kernel, the returned error can be checked with 13 | // os.IsNotExist. 
14 | func ParseNetSockstat6() (*NetSockstat, error) { 15 | return nil, nil 16 | } 17 | -------------------------------------------------------------------------------- /inputs/sqlserver/README.md: -------------------------------------------------------------------------------- 1 | # SQL Server 2 | 3 | forked from [telegraf/sqlserver](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sqlserver). This plugin collects SQL Server monitoring metrics; the Azure-related collection has been removed and only the on-premises SQL Server case is kept. 4 | 5 | ## Create a monitoring account as follows; it is used to read the monitoring data 6 | USE master; 7 | 8 | CREATE LOGIN [categraf] WITH PASSWORD = N'mystrongpassword'; 9 | 10 | GRANT VIEW SERVER STATE TO [categraf]; 11 | 12 | GRANT VIEW ANY DEFINITION TO [categraf]; 13 | For the plugin configuration, the connection string then takes this form: Data Source=10.19.1.1;Initial Catalog=hc;User ID=sa;Password=mystrongpassword; -------------------------------------------------------------------------------- /inputs/switch_legacy/lastcache.go: -------------------------------------------------------------------------------- 1 | package switch_legacy 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/gaochao1/sw" 7 | ) 8 | 9 | type LastifMap struct { 10 | lock *sync.RWMutex 11 | ifstat map[string][]sw.IfStats 12 | } 13 | 14 | func NewLastifMap() *LastifMap { 15 | return &LastifMap{ 16 | lock: new(sync.RWMutex), 17 | ifstat: make(map[string][]sw.IfStats), 18 | } 19 | } 20 | 21 | func (m *LastifMap) Get(k string) []sw.IfStats { 22 | m.lock.RLock() 23 | defer m.lock.RUnlock() 24 | if val, ok := m.ifstat[k]; ok { 25 | return val 26 | } 27 | return nil 28 | } 29 | 30 | func (m *LastifMap) Set(k string, v []sw.IfStats) { 31 | m.lock.Lock() 32 | m.ifstat[k] = v 33 | m.lock.Unlock() 34 | } 35 | 36 | func (m *LastifMap) Check(k string) bool { 37 | m.lock.RLock() 38 | _, ok := m.ifstat[k] 39 | m.lock.RUnlock() 40 | return ok 41 | } 42 | -------------------------------------------------------------------------------- /inputs/system/README.md: -------------------------------------------------------------------------------- 1 | # system 2 | 3 | Collection plugin for system-load related metrics. 4 | 5 | ## Dashboards and alerts 6 | 7 | The dashboard and alert-rule JSON files live in the same directory as this README; import them into Nightingale to use them. -------------------------------------------------------------------------------- /inputs/systemd/README.md: -------------------------------------------------------------------------------- 1 | # systemd plugin 2 | Forked from [node_exporter](https://github.com/prometheus/node_exporter/blob/master/collector/systemd_linux.go) with modifications. 3 | 4 | ## Configuration 5 | ```toml 6 | enable=false # set to true to enable collection 7 | #unit_include=".+" 8 | #unit_exclude="" 9 | enable_start_time_metrics=true # whether to collect service unit start times, in seconds 10 | enable_task_metrics=true # whether to collect service unit task metrics 11 | enable_restarts_metrics=true # whether to collect service unit restart counts 12 | ``` 13 | -------------------------------------------------------------------------------- /inputs/systemd/systemd_nonlinux.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | // +build !linux 3 | 4 | package systemd 5 | 6 | import ( 7 | "flashcat.cloud/categraf/types" 8 | ) 9 | 10 | func (s *Systemd) Init() error { 11 | return nil 12 | } 13 | 14 | func (s *Systemd) Gather(slist *types.SampleList) { 15 | } 16 | -------------------------------------------------------------------------------- /inputs/tpl/README.md: -------------------------------------------------------------------------------- 1 | # tpl 2 | 3 | This is not a plugin; it is the plugin development template. To develop a new collection plugin, copy the tpl code and adapt it as your starting point. -------------------------------------------------------------------------------- /inputs/tpl/tpl.go:
-------------------------------------------------------------------------------- 1 | package tpl 2 | 3 | import ( 4 | "flashcat.cloud/categraf/config" 5 | "flashcat.cloud/categraf/inputs" 6 | ) 7 | 8 | const inputName = "plugin_tpl" 9 | 10 | type PluginTpl struct { 11 | config.PluginConfig 12 | Instances []*Instance `toml:"instances"` 13 | } 14 | 15 | func init() { 16 | inputs.Add(inputName, func() inputs.Input { 17 | return &PluginTpl{} 18 | }) 19 | } 20 | 21 | func (pt *PluginTpl) Clone() inputs.Input { 22 | return &PluginTpl{} 23 | } 24 | 25 | func (pt *PluginTpl) Name() string { 26 | return inputName 27 | } 28 | 29 | func (pt *PluginTpl) GetInstances() []inputs.Instance { 30 | ret := make([]inputs.Instance, len(pt.Instances)) 31 | for i := 0; i < len(pt.Instances); i++ { 32 | ret[i] = pt.Instances[i] 33 | } 34 | return ret 35 | } 36 | 37 | type Instance struct { 38 | config.InstanceConfig 39 | } 40 | -------------------------------------------------------------------------------- /inputs/weblogic/README.md: -------------------------------------------------------------------------------- 1 | # weblogic 2 | 3 | WebLogic can currently be monitored with the jolokia_agent plugin, which collects metrics by reading JMX data; see the sample configuration: [weblogic.toml](../../conf/input.jolokia_agent_misc/weblogic.toml) 4 | -------------------------------------------------------------------------------- /inputs/whois/README.md: -------------------------------------------------------------------------------- 1 | # whois 2 | 3 | Domain probing plugin. It reports a domain's registration (creation) time and expiration time; the values are UTC Unix timestamps. 4 | 5 | 6 | ## Configuration 7 | 8 | The core setting is domain, which names the target to probe, for example to monitor one domain: 9 | It is commented out by default, and while commented out the plugin stays disabled. 10 | 11 | ```toml 12 | # [[instances]] 13 | ## Used to collect domain name information. 14 | # domain = "baidu.com" 15 | ``` 16 | Note that what you configure here is a domain name, not a URL. 17 | 18 | ## Metrics 19 | 20 | whois_domain_createddate domain creation timestamp 21 | whois_domain_updateddate domain last-update timestamp 22 | whois_domain_expirationdate domain expiration timestamp 23 | 24 | ## Notes 25 | Do not set the interval too short: it causes frequent request timeouts and adds little value, so keep the collection period as long as you reasonably can. A standalone example of turning the expiration timestamp into days-until-expiry follows the k8s Service manifests below. -------------------------------------------------------------------------------- /k8s/controller-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: kube-system 5 | name: kube-controller-manager 6 | labels: 7 | k8s-app: kube-controller-manager 8 | spec: 9 | selector: 10 | component: kube-controller-manager 11 | type: ClusterIP 12 | clusterIP: None 13 | ports: 14 | - name: https 15 | port: 10257 16 | targetPort: 10257 17 | -------------------------------------------------------------------------------- /k8s/etcd-service-http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: kube-system 5 | name: etcd 6 | labels: 7 | k8s-app: etcd 8 | spec: 9 | selector: 10 | component: etcd 11 | type: ClusterIP 12 | clusterIP: None 13 | ports: 14 | - name: http 15 | port: 2381 16 | targetPort: 2381 17 | protocol: TCP 18 | -------------------------------------------------------------------------------- /k8s/etcd-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: kube-system 5 | name: etcd 6 | labels: 7 | k8s-app: etcd 8 | spec: 9 | selector: 10 | component: etcd 11 | type: ClusterIP 12 | clusterIP: None 13 | ports: 14 | - name: https 15 | port: 2379 16 | targetPort: 2379 17 | protocol: TCP 18 |
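Picking up the whois plugin from above: since whois_domain_expirationdate is a plain UTC Unix timestamp, the quantity an alert actually cares about is the number of days left until expiry (roughly `whois_domain_expirationdate - time() < 30 * 86400` in PromQL for a 30-day warning). A standalone sketch of that conversion; the timestamp below is a made-up example, not real plugin output:

```go
package main

import (
	"fmt"
	"time"
)

// daysUntil converts a UTC Unix timestamp (as reported by
// whois_domain_expirationdate) into the number of days from now.
func daysUntil(expirationUnix int64) float64 {
	return time.Until(time.Unix(expirationUnix, 0)).Hours() / 24
}

func main() {
	const exp = int64(1767139200) // hypothetical expiry: 2025-12-31T00:00:00Z (example value)
	fmt.Printf("domain expires in %.1f days\n", daysUntil(exp))
}
```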
-------------------------------------------------------------------------------- /k8s/images/apiserver-dash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/k8s/images/apiserver-dash.jpg -------------------------------------------------------------------------------- /k8s/images/cm-dash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/k8s/images/cm-dash.jpg -------------------------------------------------------------------------------- /k8s/images/coredns-dash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/k8s/images/coredns-dash.jpg -------------------------------------------------------------------------------- /k8s/images/etcd-dash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/k8s/images/etcd-dash.jpg -------------------------------------------------------------------------------- /k8s/images/scheduler-dash.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/k8s/images/scheduler-dash.jpg -------------------------------------------------------------------------------- /k8s/scheduler-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: kube-system 5 | name: kube-scheduler 6 | labels: 7 | k8s-app: kube-scheduler 8 | spec: 9 | selector: 10 | component: kube-scheduler 11 | type: ClusterIP 12 | clusterIP: None 13 | ports: 14 | - name: https 15 | port: 10259 16 | targetPort: 10259 17 | protocol: TCP 18 | -------------------------------------------------------------------------------- /k8s/secret.yaml: -------------------------------------------------------------------------------- 1 | ### k8s 1.24及以上版本需要手动创建secret 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: categraf-secret 6 | annotations: 7 | kubernetes.io/service-account.name: "categraf-serviceaccount" 8 | type: kubernetes.io/service-account-token 9 | -------------------------------------------------------------------------------- /logs/README.md: -------------------------------------------------------------------------------- 1 | # logs 2 | 3 | forked from datadog logs-agent 4 | -------------------------------------------------------------------------------- /logs/auditor/api.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | //Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package auditor 9 | 10 | import ( 11 | "encoding/json" 12 | ) 13 | 14 | // v2: In the third version of the auditor, we dropped Timestamp and used a generic Offset instead to reinforce the separation of concerns 15 | // between the auditor and log sources. 
16 | 17 | func unmarshalRegistry(b []byte) (map[string]*RegistryEntry, error) { 18 | var r JSONRegistry 19 | err := json.Unmarshal(b, &r) 20 | if err != nil { 21 | return nil, err 22 | } 23 | registry := make(map[string]*RegistryEntry) 24 | for identifier, entry := range r.Registry { 25 | newEntry := entry 26 | registry[identifier] = &newEntry 27 | } 28 | return registry, nil 29 | } 30 | -------------------------------------------------------------------------------- /logs/client/destination.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package client 9 | 10 | // Destination sends a payload to a specific endpoint over a given network protocol. 11 | type Destination interface { 12 | Send(payload []byte) error 13 | SendAsync(payload []byte) 14 | Close() 15 | } 16 | -------------------------------------------------------------------------------- /logs/client/destinations.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package client 9 | 10 | // Destinations holds the main destination and additional ones to send logs to. 11 | type Destinations struct { 12 | Main Destination 13 | Additionals []Destination 14 | } 15 | 16 | // NewDestinations returns a new destinations composite. 17 | func NewDestinations(main Destination, additionals []Destination) *Destinations { 18 | return &Destinations{ 19 | Main: main, 20 | Additionals: additionals, 21 | } 22 | } 23 | 24 | func (ds *Destinations) Close() { 25 | ds.Main.Close() 26 | for _, dest := range ds.Additionals { 27 | dest.Close() 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /logs/client/errors.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package client 9 | 10 | // RetryableError represents an error that can occur when sending a payload. 11 | type RetryableError struct { 12 | err error 13 | } 14 | 15 | // NewRetryableError returns a new destination error. 16 | func NewRetryableError(err error) *RetryableError { 17 | return &RetryableError{ 18 | err: err, 19 | } 20 | } 21 | 22 | // RetryableError returns the message of the error. 
23 | func (e *RetryableError) Error() string { 24 | return e.err.Error() 25 | } 26 | -------------------------------------------------------------------------------- /logs/client/kafka/scram_client.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import ( 4 | "crypto/sha256" 5 | "crypto/sha512" 6 | 7 | "github.com/xdg-go/scram" 8 | ) 9 | 10 | var ( 11 | SHA256 scram.HashGeneratorFcn = sha256.New 12 | SHA512 scram.HashGeneratorFcn = sha512.New 13 | ) 14 | 15 | type XDGSCRAMClient struct { 16 | *scram.Client 17 | *scram.ClientConversation 18 | scram.HashGeneratorFcn 19 | } 20 | 21 | func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { 22 | x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) 23 | if err != nil { 24 | return err 25 | } 26 | x.ClientConversation = x.Client.NewConversation() 27 | return nil 28 | } 29 | 30 | func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { 31 | response, err = x.ClientConversation.Step(challenge) 32 | return 33 | } 34 | 35 | func (x *XDGSCRAMClient) Done() bool { 36 | return x.ClientConversation.Done() 37 | } 38 | -------------------------------------------------------------------------------- /logs/client/kafka/topic_json.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | package kafka 4 | 5 | import ( 6 | _ "github.com/mailru/easyjson/gen" 7 | ) 8 | 9 | // easyjson:json 10 | type ( 11 | Data struct { 12 | Topic string `json:"topic"` 13 | MsgKey string `json:"msg_key"` 14 | } 15 | ) 16 | -------------------------------------------------------------------------------- /logs/client/tcp/prefixer.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package tcp 9 | 10 | // prefixer prepends a prefix to a message. 11 | type prefixer struct { 12 | prefix []byte 13 | } 14 | 15 | // newPrefixer returns a prefixer that prepends the given prefix to a message. 16 | func newPrefixer(prefix string) *prefixer { 17 | return &prefixer{ 18 | prefix: []byte(prefix), 19 | } 20 | } 21 | 22 | // apply prepends the prefix to the message. 23 | func (p *prefixer) apply(content []byte) []byte { 24 | return append(p.prefix, content...) 25 | } 26 | -------------------------------------------------------------------------------- /logs/decoder/line_handler.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package decoder 9 | 10 | // truncatedFlag is the flag that is added at the beginning 11 | // and/or at the end of every truncated line. 12 | var truncatedFlag = []byte("...TRUNCATED...") 13 | 14 | // escapedLineFeed is used to escape the new line character 15 | // for multiline messages. 16 | // New line characters need to be escaped because they are used 17 | // as delimiters for transport.
18 | var escapedLineFeed = []byte(`\n`) 19 | 20 | // LineHandler handles raw lines to form structured lines 21 | type LineHandler interface { 22 | Handle(input *Message) 23 | Start() 24 | Stop() 25 | } 26 | -------------------------------------------------------------------------------- /logs/diagnostic/noop_message_receiver.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package diagnostic 9 | 10 | import "flashcat.cloud/categraf/logs/message" 11 | 12 | // NoopMessageReceiver for cases where diagnosing messages is unsupported or not needed (serverless, tests) 13 | type NoopMessageReceiver struct{} 14 | 15 | // HandleMessage does nothing with the message 16 | func (n *NoopMessageReceiver) HandleMessage(m message.Message, redactedMsg []byte) {} 17 | -------------------------------------------------------------------------------- /logs/input/container/noop.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package container 9 | 10 | import ( 11 | "flashcat.cloud/categraf/logs/restart" 12 | ) 13 | 14 | // noopLauncher does nothing. 15 | type noopLauncher struct{} 16 | 17 | // NewNoopLauncher returns a new noopLauncher. 18 | func NewNoopLauncher() restart.Restartable { 19 | return &noopLauncher{} 20 | } 21 | 22 | // Start does nothing. 23 | func (l *noopLauncher) Start() {} 24 | 25 | // Stop does nothing. 26 | func (l *noopLauncher) Stop() {} 27 | -------------------------------------------------------------------------------- /logs/input/file/open_file_nix.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs && !windows 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package file 9 | 10 | import ( 11 | "os" 12 | ) 13 | 14 | // openFile opens a file with the standard Open method on *nix OSes 15 | func openFile(path string) (*os.File, error) { 16 | return os.Open(path) 17 | } 18 | -------------------------------------------------------------------------------- /logs/input/file/rotate_windows.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs && windows 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package file 9 | 10 | import ( 11 | "os" 12 | ) 13 | 14 | // DidRotate is not implemented on Windows; log rotations are handled by the 15 | // tailer for now.
16 | func DidRotate(file *os.File, lastReadOffset int64) (bool, error) { 17 | return false, nil 18 | } 19 | -------------------------------------------------------------------------------- /logs/input/journald/launcher_nosystemd.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs && !systemd 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package journald 9 | 10 | import ( 11 | logsconfig "flashcat.cloud/categraf/config/logs" 12 | "flashcat.cloud/categraf/logs/auditor" 13 | "flashcat.cloud/categraf/logs/pipeline" 14 | ) 15 | 16 | // Launcher is not supported in environments without systemd. 17 | type Launcher struct{} 18 | 19 | // NewLauncher returns a new Launcher 20 | func NewLauncher(sources *logsconfig.LogSources, pipelineProvider pipeline.Provider, registry auditor.Registry) *Launcher { 21 | return &Launcher{} 22 | } 23 | 24 | // Start does nothing 25 | func (l *Launcher) Start() {} 26 | 27 | // Stop does nothing 28 | func (l *Launcher) Stop() {} 29 | -------------------------------------------------------------------------------- /logs/input/listener/errors.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package listener 9 | 10 | import ( 11 | "strings" 12 | ) 13 | 14 | // isClosedConnError returns true if the error is related to a closed connection, 15 | // for more details, see: https://golang.org/src/internal/poll/fd.go#L18. 16 | func isClosedConnError(err error) bool { 17 | return strings.Contains(err.Error(), "use of closed network connection") 18 | } 19 | -------------------------------------------------------------------------------- /logs/restart/restart.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package restart 9 | 10 | // Restartable represents a startable and stoppable object 11 | type Restartable interface { 12 | Startable 13 | Stoppable 14 | } 15 | -------------------------------------------------------------------------------- /logs/restart/serial_stop.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc.
7 | 8 | package restart 9 | 10 | // serialStopper implements the logic to stop different components from a data pipeline in series 11 | type serialStopper struct { 12 | components []Stoppable 13 | } 14 | 15 | // NewSerialStopper returns a new serialGroup 16 | func NewSerialStopper(components ...Stoppable) Stopper { 17 | return &serialStopper{ 18 | components: components, 19 | } 20 | } 21 | 22 | // Add appends new elements to the array of components to stop 23 | func (g *serialStopper) Add(components ...Stoppable) { 24 | g.components = append(g.components, components...) 25 | } 26 | 27 | // Stop stops all components one after another 28 | func (g *serialStopper) Stop() { 29 | for _, stopper := range g.components { 30 | stopper.Stop() 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /logs/restart/start.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package restart 9 | 10 | // Startable represents a startable object 11 | type Startable interface { 12 | Start() 13 | } 14 | 15 | // Starter starts a group of startable objects from a data pipeline 16 | type Starter interface { 17 | Startable 18 | Add(components ...Startable) 19 | } 20 | -------------------------------------------------------------------------------- /logs/restart/starter.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package restart 9 | 10 | // Starter implements the logic to start different components from a data pipeline in series 11 | type starter struct { 12 | components []Startable 13 | } 14 | 15 | // NewStarter returns a new starter 16 | func NewStarter(components ...Startable) Starter { 17 | return &starter{ 18 | components: components, 19 | } 20 | } 21 | 22 | // Add appends new elements to the array of components to start 23 | func (s *starter) Add(components ...Startable) { 24 | s.components = append(s.components, components...) 25 | } 26 | 27 | // Start starts all components one after another 28 | func (s *starter) Start() { 29 | for _, c := range s.components { 30 | c.Start() 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /logs/restart/stop.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 
7 | 8 | package restart 9 | 10 | // Stoppable represents a stoppable object 11 | type Stoppable interface { 12 | Stop() 13 | } 14 | 15 | // Stopper stops a group of stoppable objects from a data pipeline 16 | type Stopper interface { 17 | Stoppable 18 | Add(components ...Stoppable) 19 | } 20 | -------------------------------------------------------------------------------- /logs/util/debug.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "strings" 5 | 6 | coreconfig "flashcat.cloud/categraf/config" 7 | ) 8 | 9 | func Debug() bool { 10 | if coreconfig.Config.DebugMode && strings.Contains(coreconfig.Config.InputFilters, "logs-agent") { 11 | return true 12 | } 13 | return false 14 | } 15 | -------------------------------------------------------------------------------- /logs/util/docker/rancher.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package docker 9 | 10 | import ( 11 | "log" 12 | "net" 13 | ) 14 | 15 | const rancherIPLabel = "io.rancher.container.ip" 16 | 17 | // FindRancherIPInLabels looks for the `io.rancher.container.ip` label and parses it. 18 | // Rancher 1.x containers don't have docker networks as the orchestrator provides its own CNI. 19 | func FindRancherIPInLabels(labels map[string]string) (string, bool) { 20 | cidr, found := labels[rancherIPLabel] 21 | if found { 22 | ipv4Addr, _, err := net.ParseCIDR(cidr) 23 | if err != nil { 24 | log.Printf("error while retrieving Rancher IP: %q is not valid", cidr) 25 | return "", false 26 | } 27 | return ipv4Addr.String(), true 28 | } 29 | 30 | return "", false 31 | } 32 | -------------------------------------------------------------------------------- /logs/util/kubernetes/kubelet/file_stat.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | package kubelet 4 | 5 | import "os" 6 | 7 | // FileExists returns true if a file exists and is accessible, false otherwise 8 | func FileExists(path string) bool { 9 | _, err := os.Stat(path) 10 | return err == nil 11 | } 12 | -------------------------------------------------------------------------------- /logs/util/kubernetes/time.go: -------------------------------------------------------------------------------- 1 | //go:build !no_logs 2 | 3 | // Unless explicitly stated otherwise all files in this repository are licensed 4 | // under the Apache License Version 2.0. 5 | // This product includes software developed at Datadog (https://www.datadoghq.com/). 6 | // Copyright 2016-present Datadog, Inc. 7 | 8 | package kubernetes 9 | 10 | import ( 11 | "fmt" 12 | "time" 13 | ) 14 | 15 | // TimeWithoutWall fixes the `wall` issue in unit tests. 16 | // THIS FUNCTION SHOULD NOT BE USED OUTSIDE OF TESTS. 17 | // Unstructured serializes time to string in RFC3339 without Nano seconds. 18 | // when it's parsed back, the Go time.Time does not have the `wall` field as it's used for nanosecs. 
19 | func TimeWithoutWall(t time.Time) time.Time { 20 | text := t.Format(time.RFC3339) 21 | time, err := time.Parse(time.RFC3339, text) 22 | if err != nil { 23 | panic(fmt.Sprintf("Impossible to unmarshall text: '%s'", text)) 24 | } 25 | 26 | return time 27 | } 28 | -------------------------------------------------------------------------------- /main_posix.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | 3 | package main 4 | 5 | import ( 6 | "os" 7 | "os/signal" 8 | "syscall" 9 | 10 | "flashcat.cloud/categraf/agent" 11 | "flashcat.cloud/categraf/config" 12 | "flashcat.cloud/categraf/pkg/pprof" 13 | ) 14 | 15 | func runAgent(ag *agent.Agent) { 16 | initLog(config.Config.Log.FileName) 17 | ag.Start() 18 | go profile() 19 | handleSignal(ag) 20 | } 21 | 22 | func doOSsvc() { 23 | } 24 | 25 | var ( 26 | pprofStart uint32 27 | ) 28 | 29 | func profile() { 30 | sc := make(chan os.Signal, 1) 31 | signal.Notify(sc, syscall.SIGUSR2) 32 | for { 33 | sig := <-sc 34 | switch sig { 35 | case syscall.SIGUSR2: 36 | go pprof.Go() 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /parser/parser.go: -------------------------------------------------------------------------------- 1 | package parser 2 | 3 | import ( 4 | "flashcat.cloud/categraf/types" 5 | ) 6 | 7 | type Parser interface { 8 | Parse(input []byte, slist *types.SampleList) error 9 | } 10 | -------------------------------------------------------------------------------- /pkg/cfg/scan.go: -------------------------------------------------------------------------------- 1 | package cfg 2 | 3 | import "os" 4 | 5 | type scanner struct { 6 | data []byte 7 | err error 8 | } 9 | 10 | func NewFileScanner() *scanner { 11 | return &scanner{} 12 | } 13 | 14 | func (s *scanner) Err() error { 15 | return s.err 16 | } 17 | 18 | func (s *scanner) Data() []byte { 19 | return s.data 20 | } 21 | 22 | func (s *scanner) Read(file string) { 23 | if s.err == nil { 24 | s.data, s.err = os.ReadFile(file) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /pkg/cmdx/cmd_notwindows.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package cmdx 5 | 6 | import ( 7 | "os/exec" 8 | "syscall" 9 | "time" 10 | ) 11 | 12 | func CmdWait(cmd *exec.Cmd, timeout time.Duration) (error, bool) { 13 | var err error 14 | 15 | done := make(chan error) 16 | go func() { 17 | done <- cmd.Wait() 18 | }() 19 | 20 | select { 21 | case <-time.After(timeout): 22 | go func() { 23 | <-done // allow goroutine to exit 24 | }() 25 | 26 | // IMPORTANT: cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} is necessary before cmd.Start() 27 | err = syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) 28 | return err, true 29 | case err = <-done: 30 | return err, false 31 | } 32 | } 33 | 34 | func CmdStart(cmd *exec.Cmd) error { 35 | cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 36 | return cmd.Start() 37 | } 38 | -------------------------------------------------------------------------------- /pkg/cmdx/cmd_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package cmdx 5 | 6 | import ( 7 | "os/exec" 8 | "syscall" 9 | "time" 10 | ) 11 | 12 | func CmdWait(cmd *exec.Cmd, timeout time.Duration) (error, bool) { 13 | var err error 14 | 15 | done := make(chan 
error) 16 | go func() { 17 | done <- cmd.Wait() 18 | }() 19 | 20 | select { 21 | case <-time.After(timeout): 22 | go func() { 23 | <-done // allow goroutine to exit 24 | }() 25 | 26 | err = cmd.Process.Signal(syscall.SIGKILL) 27 | return err, true 28 | case err = <-done: 29 | return err, false 30 | } 31 | } 32 | 33 | func CmdStart(cmd *exec.Cmd) error { 34 | return cmd.Start() 35 | } 36 | -------------------------------------------------------------------------------- /pkg/cmdx/cmdx.go: -------------------------------------------------------------------------------- 1 | package cmdx 2 | 3 | import ( 4 | "os/exec" 5 | "time" 6 | ) 7 | 8 | func RunTimeout(cmd *exec.Cmd, timeout time.Duration) (error, bool) { 9 | err := CmdStart(cmd) 10 | if err != nil { 11 | return err, false 12 | } 13 | 14 | return CmdWait(cmd, timeout) 15 | } 16 | -------------------------------------------------------------------------------- /pkg/dock/docker.go: -------------------------------------------------------------------------------- 1 | package dock 2 | 3 | import "strings" 4 | 5 | // Adapts some of the logic from the actual Docker library's image parsing 6 | // routines: 7 | // https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go 8 | func ParseImage(image string) (string, string) { 9 | domain := "" 10 | remainder := "" 11 | 12 | i := strings.IndexRune(image, '/') 13 | 14 | if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") { 15 | remainder = image 16 | } else { 17 | domain, remainder = image[:i], image[i+1:] 18 | } 19 | 20 | imageName := "" 21 | imageVersion := "unknown" 22 | 23 | i = strings.LastIndex(remainder, ":") 24 | if i > -1 { 25 | imageVersion = remainder[i+1:] 26 | imageName = remainder[:i] 27 | } else { 28 | imageName = remainder 29 | } 30 | 31 | if domain != "" { 32 | imageName = domain + "/" + imageName 33 | } 34 | 35 | return imageName, imageVersion 36 | } 37 | -------------------------------------------------------------------------------- /pkg/globpath/testdata/nested1/nested2/nested.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flashcatcloud/categraf/ae854f34ac0737f13e3ef91ae509494d7c29b7cf/pkg/globpath/testdata/nested1/nested2/nested.txt -------------------------------------------------------------------------------- /pkg/globpath/testdata/test.conf: -------------------------------------------------------------------------------- 1 | # this is a fake testing config file 2 | # for testing the filestat plugin 3 | 4 | option1 = "foo" 5 | option2 = "bar" 6 | -------------------------------------------------------------------------------- /pkg/httpx/proxy.go: -------------------------------------------------------------------------------- 1 | package httpx 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "net/url" 7 | ) 8 | 9 | func GetProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) { 10 | if httpProxy == "" { 11 | return http.ProxyFromEnvironment 12 | } 13 | proxyURL, err := url.Parse(httpProxy) 14 | if err != nil { 15 | return func(_ *http.Request) (*url.URL, error) { 16 | return nil, errors.New("bad proxy: " + err.Error()) 17 | } 18 | } 19 | return func(r *http.Request) (*url.URL, error) { 20 | return proxyURL, nil 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /pkg/netx/netx.go: -------------------------------------------------------------------------------- 1 | package netx 2 | 3 | import ( 4 | 
"fmt" 5 | "net" 6 | ) 7 | 8 | func LocalAddressByInterfaceName(interfaceName string) (net.Addr, error) { 9 | i, err := net.InterfaceByName(interfaceName) 10 | if err != nil { 11 | return nil, err 12 | } 13 | 14 | addrs, err := i.Addrs() 15 | if err != nil { 16 | return nil, err 17 | } 18 | 19 | for _, addr := range addrs { 20 | if naddr, ok := addr.(*net.IPNet); ok { 21 | // leaving port set to zero to let kernel pick 22 | return &net.TCPAddr{IP: naddr.IP}, nil 23 | } 24 | } 25 | 26 | return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName) 27 | } 28 | -------------------------------------------------------------------------------- /pkg/osx/osx.go: -------------------------------------------------------------------------------- 1 | package osx 2 | 3 | import "os" 4 | 5 | // getEnv returns the value of an environment variable, or returns the provided fallback value 6 | func GetEnv(key, fallback string) string { 7 | if value, ok := os.LookupEnv(key); ok { 8 | return value 9 | } 10 | return fallback 11 | } 12 | -------------------------------------------------------------------------------- /pkg/osx/proc.go: -------------------------------------------------------------------------------- 1 | package osx 2 | 3 | import "os" 4 | 5 | func GetHostProc() string { 6 | procPath := "/proc" 7 | if os.Getenv("HOST_PROC") != "" { 8 | procPath = os.Getenv("HOST_PROC") 9 | } 10 | return procPath 11 | } 12 | -------------------------------------------------------------------------------- /pkg/pprof/profile.go: -------------------------------------------------------------------------------- 1 | package pprof 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net" 7 | "net/http" 8 | "sync/atomic" 9 | ) 10 | 11 | var ( 12 | pprof uint32 13 | addr string 14 | ) 15 | 16 | func Go() { 17 | 18 | if !atomic.CompareAndSwapUint32(&pprof, 0, 1) { 19 | log.Println("pprofile already started,", addr) 20 | return 21 | } 22 | listener, err := net.Listen("tcp", "127.0.0.1:0") 23 | if err != nil { 24 | log.Println(err) 25 | return 26 | } 27 | addr = fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", listener.Addr().(*net.TCPAddr).Port) 28 | log.Printf("pprof started at %s", addr) 29 | 30 | err = http.Serve(listener, nil) 31 | if err != nil { 32 | log.Println(err) 33 | return 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /pkg/prom/prom.go: -------------------------------------------------------------------------------- 1 | package prom 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | ) 7 | 8 | func ValidName(s string) string { 9 | nameRe := regexp.MustCompile("([^a-zA-Z0-9_])") 10 | s = nameRe.ReplaceAllString(s, "_") 11 | s = strings.ToLower(s) 12 | return s 13 | } 14 | 15 | func BuildMetric(names ...string) string { 16 | var b strings.Builder 17 | for i := 0; i < len(names); i++ { 18 | if names[i] != "" { 19 | if b.Len() > 0 { 20 | b.WriteString("_") 21 | } 22 | b.WriteString(names[i]) 23 | } 24 | } 25 | 26 | return b.String() 27 | } 28 | -------------------------------------------------------------------------------- /pkg/proxy/dialer.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "time" 7 | 8 | netProxy "golang.org/x/net/proxy" 9 | ) 10 | 11 | type ProxiedDialer struct { 12 | dialer netProxy.Dialer 13 | } 14 | 15 | func (pd *ProxiedDialer) Dial(network, addr string) (net.Conn, error) { 16 | return pd.dialer.Dial(network, addr) 17 | } 18 | 19 | func (pd 
*ProxiedDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { 20 | if contextDialer, ok := pd.dialer.(netProxy.ContextDialer); ok { 21 | return contextDialer.DialContext(ctx, network, addr) 22 | } 23 | 24 | contextDialer := contextDialerShim{pd.dialer} 25 | return contextDialer.DialContext(ctx, network, addr) 26 | } 27 | 28 | func (pd *ProxiedDialer) DialTimeout(network, addr string, timeout time.Duration) (net.Conn, error) { 29 | ctx := context.Background() 30 | if timeout.Seconds() != 0 { 31 | var cancel context.CancelFunc 32 | ctx, cancel = context.WithTimeout(ctx, timeout) 33 | defer cancel() 34 | } 35 | 36 | return pd.DialContext(ctx, network, addr) 37 | } 38 | -------------------------------------------------------------------------------- /pkg/proxy/socks5.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "golang.org/x/net/proxy" 5 | ) 6 | 7 | type Socks5ProxyConfig struct { 8 | Socks5ProxyEnabled bool `toml:"socks5_enabled"` 9 | Socks5ProxyAddress string `toml:"socks5_address"` 10 | Socks5ProxyUsername string `toml:"socks5_username"` 11 | Socks5ProxyPassword string `toml:"socks5_password"` 12 | } 13 | 14 | func (c *Socks5ProxyConfig) GetDialer() (proxy.Dialer, error) { 15 | var auth *proxy.Auth 16 | if c.Socks5ProxyPassword != "" || c.Socks5ProxyUsername != "" { 17 | auth = new(proxy.Auth) 18 | auth.User = c.Socks5ProxyUsername 19 | auth.Password = c.Socks5ProxyPassword 20 | } 21 | return proxy.SOCKS5("tcp", c.Socks5ProxyAddress, auth, proxy.Direct) 22 | } 23 | -------------------------------------------------------------------------------- /pkg/snmp/translator.go: -------------------------------------------------------------------------------- 1 | package snmp 2 | 3 | type TranslatorPlugin interface { 4 | SetTranslator(name string) // Agent calls this on inputs before Init 5 | } 6 | -------------------------------------------------------------------------------- /pkg/stringx/strx.go: -------------------------------------------------------------------------------- 1 | package stringx 2 | 3 | import ( 4 | "unicode" 5 | ) 6 | 7 | func SnakeCase(in string) string { 8 | runes := []rune(in) 9 | length := len(runes) 10 | 11 | var out []rune 12 | for i := 0; i < length; i++ { 13 | if runes[i] == '.' 
{ 14 | continue 15 | } 16 | if i > 0 { 17 | if (runes[i] == '_' && runes[i-1] == '_') || 18 | (i == length-1 && runes[i] == '_') { 19 | continue 20 | } 21 | if unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) { 22 | if runes[i-1] != '_' { 23 | out = append(out, '_') 24 | } 25 | } 26 | } 27 | out = append(out, unicode.ToLower(runes[i])) 28 | } 29 | 30 | return string(out) 31 | } 32 | -------------------------------------------------------------------------------- /pkg/tagx/tagx.go: -------------------------------------------------------------------------------- 1 | package tagx 2 | 3 | func Copy(raw map[string]string) map[string]string { 4 | ret := make(map[string]string) 5 | for k, v := range raw { 6 | ret[k] = v 7 | } 8 | return ret 9 | } 10 | -------------------------------------------------------------------------------- /scripts/ci/go_version_check.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eo pipefail 4 | 5 | function main () { 6 | local -r required_version="1.21" # set the required minimum Go version 7 | 8 | local -r current_version=$(go version | awk '{print $3}') # get the current Go version 9 | 10 | if [[ "$(printf '%s\n' "$required_version" "$current_version" | sort -V | head -n1)" != "$required_version" ]]; then 11 | >&2 echo "Error: Go version $required_version or higher is required, but found $current_version" 12 | exit 1 13 | fi 14 | } 15 | 16 | main -------------------------------------------------------------------------------- /scripts/ci/go_vet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | go list ./... | grep -vE "/inputs/mtail/internal/runtime/compiler/parser|pkg/otel/fanoutconsumer|pkg/otel/pipelines|agent/install|agent/update" | xargs go vet -tests=false -------------------------------------------------------------------------------- /scripts/ci/static_check.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | # static check docs: https://staticcheck.dev/docs/checks/ 6 | 7 | ( 8 | GOOS=linux go list ./... | xargs staticcheck -go=1.21 -tests=false -f binary 9 | GOOS=darwin go list ./... | xargs staticcheck -go=1.21 -tests=false -f binary 10 | ) | staticcheck -merge -f=text | tee static-check.log 11 | lines=$(cat static-check.log | wc -l) 12 | rm -f static-check.log 13 | if [[ $lines -eq 0 ]]; then 14 | echo "static check pass" 15 | exit 0 16 | else 17 | echo "static check failed" 18 | exit 1 19 | fi -------------------------------------------------------------------------------- /scripts/win_run.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal enabledelayedexpansion 3 | 4 | if "%1"=="" ( 5 | echo Please input the argument: start or stop 6 | goto :end 7 | ) 8 | 9 | if /i "%1"=="start" ( 10 | call :startLogic 11 | goto :end 12 | ) 13 | 14 | if /i "%1"=="stop" ( 15 | call :stopLogic 16 | goto :end 17 | ) 18 | 19 | echo Invalid argument, please input start or stop 20 | 21 | :startLogic 22 | echo Executing start logic... 23 | start "categraf" /min categraf.exe 24 | echo categraf.exe process started 25 | goto :eof 26 | 27 | :stopLogic 28 | echo Executing stop logic...
29 | for /f "tokens=2" %%A in ('tasklist -v ^| findstr categraf.exe') do ( 30 | set pid=%%A 31 | goto :killProcess 32 | ) 33 | 34 | :killProcess 35 | if "%pid%"=="" ( 36 | echo Process not found: categraf.exe 37 | ) else ( 38 | echo Preparing to terminate process: categraf.exe, PID: %pid% 39 | taskkill /pid %pid% /f 40 | echo Process terminated 41 | ) 42 | 43 | goto :eof 44 | 45 | :end 46 | pause 47 | 48 | -------------------------------------------------------------------------------- /types/error.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "errors" 4 | 5 | var ErrInstancesEmpty = errors.New("instances empty") 6 | -------------------------------------------------------------------------------- /types/sample_list.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "container/list" 5 | "reflect" 6 | ) 7 | 8 | type SampleList struct { 9 | SafeList[*Sample] 10 | } 11 | 12 | func NewSampleList() *SampleList { 13 | return &SampleList{*NewSafeList[*Sample]()} 14 | } 15 | 16 | func (l *SampleList) PushSample(prefix, metric string, value interface{}, labels ...map[string]string) *list.Element { 17 | v := NewSample(prefix, metric, value, labels...) 18 | e := l.PushFront(v) 19 | return e 20 | } 21 | 22 | func (l *SampleList) PushSamples(prefix string, fields map[string]interface{}, labels ...map[string]string) { 23 | vs := make([]*Sample, 0, len(fields)) 24 | for metric, value := range fields { 25 | v := NewSample(prefix, metric, convertPtrToValue(value), labels...) 26 | vs = append(vs, v) 27 | } 28 | l.PushFrontN(vs) 29 | } 30 | 31 | func convertPtrToValue(value interface{}) interface{} { 32 | if value == nil { 33 | return value 34 | } 35 | v := reflect.ValueOf(value) 36 | if v.Kind() == reflect.Ptr { 37 | v = v.Elem() 38 | } 39 | return v.Interface() 40 | } 41 | --------------------------------------------------------------------------------
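A minimal usage sketch of the SampleList API shown in types/sample_list.go above. This is not a file from the repository: the package main wrapper, the "demo" prefix, the metric names, and the label values are invented for illustration; only NewSampleList, PushSample, and PushSamples are taken from the code above, and how the agent later drains the list is not shown in this section.

package main

import (
	"flashcat.cloud/categraf/types"
)

func main() {
	slist := types.NewSampleList()

	// Push a single sample: prefix + metric name + value + optional labels.
	slist.PushSample("demo", "up", 1, map[string]string{"instance": "localhost"})

	// Push a batch of fields that share a prefix and labels; pointer values
	// are dereferenced (see convertPtrToValue) before each Sample is built.
	latency := 3.14
	slist.PushSamples("demo", map[string]interface{}{
		"requests_total":  42,
		"latency_seconds": &latency,
	}, map[string]string{"region": "cn-beijing"})
}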