├── tests
├── __init__.py
├── core
│ ├── __init__.py
│ ├── fixtures
│ │ ├── __init__.py
│ │ ├── checks
│ │ │ ├── __init__.py
│ │ │ ├── invalid_conf.yaml
│ │ │ ├── invalid_check_1.py
│ │ │ ├── valid_conf.yaml
│ │ │ ├── invalid_check_2.py
│ │ │ ├── valid_conf_2.yaml
│ │ │ ├── valid_check_1.py
│ │ │ ├── valid_check_2.py
│ │ │ └── valid_sub_check.py
│ │ ├── flare
│ │ │ ├── apikey.conf
│ │ │ ├── apikeys.conf
│ │ │ ├── datadog-agent-1.tar.bz2
│ │ │ └── password_uri.yaml
│ │ ├── target_module.py
│ │ └── config
│ │ │ ├── one_endpoint.conf
│ │ │ ├── multiple_apikeys.conf
│ │ │ ├── multiple_endpoints_bad.conf
│ │ │ ├── multiple_endpoints.conf
│ │ │ └── bad.conf
│ ├── test_utils_process.py
│ ├── test_utils_net.py
│ ├── test_ec2.py
│ ├── test_emitter.py
│ └── test_proxy.py
└── checks
│ ├── __init__.py
│ ├── mock
│ │ ├── __init__.py
│ │ ├── test_docker.py
│ │ ├── test_ganglia.py
│ │ ├── test_system_swap.py
│ │ ├── test_windows_service.py
│ │ ├── test_w32logevent.py
│ │ ├── test_mesos_slave.py
│ │ └── test_riakcs.py
│ ├── integration
│ │ ├── __init__.py
│ │ ├── test_sysstat.py
│ │ ├── test_disk.py
│ │ ├── test_linux_proc_extras.py
│ │ ├── test_windows_service.py
│ │ └── test_lighttpd.py
│ └── fixtures
│ │ ├── wmi
│ │ │ ├── win32_service_down
│ │ │ ├── win32_service_up
│ │ │ ├── win32_perfformatteddata_perfproc_process
│ │ │ ├── win32_process
│ │ │ ├── win32_perfformatteddata_perfdisk_logicaldisk
│ │ │ ├── win32_perfformatteddata_perfproc_process_alt
│ │ │ ├── win32_perfrawdata_perfos_system_unknown
│ │ │ ├── win32_perfrawdata_perfos_system_current
│ │ │ ├── win32_perfrawdata_perfos_system_previous
│ │ │ ├── win32_ntlogevent
│ │ │ ├── win32_perfformatteddata_w3svc_webservice
│ │ │ └── win32_perfformatteddata_w3svc_webservice_2008
│ │ ├── cacti
│ │ │ └── whitelist.txt
│ │ ├── nagios
│ │ │ ├── host-perfdata
│ │ │ └── service-perfdata
│ │ ├── powerdns-recursor
│ │ │ └── recursor.conf
│ │ ├── spark
│ │ │ ├── rdd_metrics
│ │ │ ├── apps_metrics
│ │ │ ├── spark_apps
│ │ │ └── executor_metrics
│ │ ├── mapreduce
│ │ │ ├── apps_metrics
│ │ │ ├── task_metrics
│ │ │ ├── job_metrics
│ │ │ └── job_counter_metrics
│ │ ├── disk
│ │ │ ├── centos-df-Tk
│ │ │ ├── debian-df-Tk
│ │ │ └── freebsd-df-Tk
│ │ ├── mesos_master
│ │ │ └── roles.json
│ │ ├── hdfs_datanode
│ │ │ └── hdfs_datanode_jmx
│ │ ├── yarn
│ │ │ ├── nodes_metrics
│ │ │ ├── cluster_metrics
│ │ │ └── apps_metrics
│ │ ├── network
│ │ │ ├── ss_ipv4
│ │ │ ├── ss_ipv6
│ │ │ └── netstat
│ │ ├── varnish
│ │ │ └── varnishadm_dump
│ │ ├── vsphere
│ │ │ └── vsphere_topology.json
│ │ ├── riakcs
│ │ │ └── riakcs_out.python
│ │ ├── hdfs_namenode
│ │ │ ├── hdfs_namesystem
│ │ │ └── hdfs_namesystem_state
│ │ ├── mesos_slave
│ │ │ └── stats.json
│ │ └── cassandra
│ │ │ └── cassandra.yaml
├── utils
├── __init__.py
├── service_discovery
│ ├── __init__.py
│ ├── sd_backend.py
│ └── config.py
├── http.py
├── singleton.py
├── containers.py
├── deprecations.py
├── pip-allow-failures.sh
├── shell.py
├── logger.py
├── ntp.py
├── net.py
└── pidfile.py
├── win32
├── __init__.py
├── shell.py
└── common.py
├── checks
├── libs
│ ├── __init__.py
│ ├── wmi
│ │ └── __init__.py
│ ├── vmware
│ │ └── __init__.py
│ ├── jmxterm-1.0-DATADOG-uber.jar
│ └── jmxfetch-0.12.0-jar-with-dependencies.jar
├── system
│ └── __init__.py
└── metric_types.py
├── dogstream
├── __init__.py
└── common.py
├── ci
├── resources
│ ├── pgbouncer
│ │ ├── users.txt
│ │ └── pgbouncer.ini
│ ├── snmp
│ │ └── snmpd.conf
│ ├── elasticsearch
│ │ └── elasticsearch.yml
│ ├── zookeeper
│ │ └── zoo.cfg
│ ├── redis
│ │ ├── noauth.conf
│ │ ├── slave_unhealthy.conf
│ │ ├── slave_healthy.conf
│ │ └── auth.conf
│ ├── postgres
│ │ ├── postgres.sql
│ │ ├── dogs.sql
│ │ └── datadog_test.sql
│ ├── supervisord
│ │ ├── program_0.sh
│ │ ├── program_1.sh
│ │ ├── program_2.sh
│ │ └── supervisord.conf
│ ├── phpfpm
│ │ ├── php-fpm.conf
│ │ └── nginx.conf
│ ├── tomcat
│ │ ├── setenv.sh
│ │ └── jmx.yaml
│ ├── fluentd
│ │ └── td-agent.conf
│ ├── kong
│ │ ├── kong_install.sh
│ │ ├── setup_serf.sh
│ │ ├── setup_uuid.sh
│ │ ├── setup_dnsmasq.sh
│ │ ├── setup_openresty.sh
│ │ └── setup_lua.sh
│ ├── haproxy
│ │ ├── haproxy-open.cfg
│ │ └── haproxy.cfg
│ ├── lighttpd
│ │ └── lighttpd.conf
│ ├── nginx
│ │ ├── testing.crt
│ │ └── testing.key
│ └── go_expvar
│ │ └── test_expvar.go
├── ssh.rb
├── checks_mock.rb
├── docker_daemon.rb
├── core_integration.rb
├── skeleton.rb
├── windows.rb
├── fluentd.rb
├── go_expvar.rb
├── zookeeper.rb
├── memcache.rb
├── mysql.rb
├── snmp.rb
├── gearman.rb
├── tokumx.rb
└── system.rb
├── packaging
├── osx
│ ├── app
│ │ ├── Agent.icns
│ │ └── Info.plist
│ ├── com.datadoghq.Agent.plist.example
│ └── supervisor.conf
├── datadog-agent
│ ├── win32
│ │ ├── wix
│ │ │ ├── FindReplace.exe
│ │ │ ├── confd.xslt
│ │ │ ├── FindReplace
│ │ │ │ └── FindReplace.go
│ │ │ └── files.xslt
│ │ ├── install_files
│ │ │ ├── config_icon.ico
│ │ │ ├── win32service.pyd
│ │ │ ├── dd_agent_win_256.ico
│ │ │ ├── guidata
│ │ │ │ └── images
│ │ │ │ │ ├── txt.png
│ │ │ │ │ ├── agent.png
│ │ │ │ │ ├── apply.png
│ │ │ │ │ ├── delete.png
│ │ │ │ │ ├── edit.png
│ │ │ │ │ ├── info.png
│ │ │ │ │ ├── start.png
│ │ │ │ │ ├── stop.png
│ │ │ │ │ ├── filesave.png
│ │ │ │ │ ├── restart.png
│ │ │ │ │ ├── settings.png
│ │ │ │ │ ├── not_found.png
│ │ │ │ │ └── agent.svg
│ │ │ ├── datadog-cert.pem
│ │ │ └── license.rtf
│ │ └── README.md
│ ├── smartos
│ │ ├── dd-agent
│ │ └── dd-agent.xml
│ └── source
│ │ └── supervisor.conf
├── debian
│ ├── start_agent.sh
│ └── datadog-agent.service
├── desc
├── centos
│ └── setup-supervisor.py
└── supervisor_32.conf
├── requirements-test.txt
├── conf.d
├── system_swap.yaml.example
├── auto_conf
│ ├── etcd.yaml
│ ├── couch.yaml
│ ├── riak.yaml
│ ├── elastic.yaml
│ ├── mcache.yaml
│ ├── redisdb.yaml
│ ├── apache.yaml
│ ├── kyototycoon.yaml
│ ├── couchbase.yaml
│ └── consul.yaml
├── linux_proc_extras.yaml.example
├── btrfs.yaml.example
├── riak.yaml.example
├── statsd.yaml.example
├── system_core.yaml.example
├── mesos.yaml.example
├── pgbouncer.yaml.example
├── couchbase.yaml.example
├── nagios.yaml.example
├── dns_check.yaml.example
├── marathon.yaml.example
├── riakcs.yaml.example
├── ceph.yaml.example
├── kong.yaml.example
├── ntp.yaml.default
├── kafka_consumer.yaml.example
├── gunicorn.yaml.example
├── kyototycoon.yaml.example
├── hdfs_datanode.yaml.example
├── jenkins.yaml.example
├── hdfs_namenode.yaml.example
├── gearmand.yaml.example
├── ssh_check.yaml.example
├── fluentd.yaml.example
├── apache.yaml.example
├── mcache.yaml.example
├── lighttpd.yaml.example
├── network.yaml.default
├── mesos_master.yaml.example
├── agent_metrics.yaml.default
├── mesos_slave.yaml.example
├── zk.yaml.example
├── nginx.yaml.example
├── powerdns_recursor.yaml.example
├── postfix.yaml.example
├── etcd.yaml.example
├── yarn.yaml.example
├── hdfs.yaml.example
├── activemq_xml.yaml.example
├── tokumx.yaml.example
├── windows_service.yaml.example
├── tcp_check.yaml.example
├── couch.yaml.example
├── php_fpm.yaml.example
├── varnish.yaml.example
├── spark.yaml.example
├── kubernetes.yaml.example
├── teamcity.yaml.example
├── disk.yaml.default
├── directory.yaml.example
├── cacti.yaml.example
├── sqlserver.yaml.example
├── go_expvar.yaml.example
├── iis.yaml.example
├── rabbitmq.yaml.example
└── redisdb.yaml.example
├── tox.ini
├── Gemfile
├── MANIFEST.in
├── .gitignore
├── .rubocop.yml
├── .github
├── PULL_REQUEST_TEMPLATE.md
└── ISSUE_TEMPLATE.md
├── checks.d
├── system_swap.py
├── system_core.py
└── ntp.py
├── supervisord.dev.conf
├── datadog-cert.pem
├── LICENSE
└── requirements-opt.txt
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/win32/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/checks/libs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/dogstream/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/core/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/checks/libs/wmi/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/checks/system/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/checks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/checks/libs/vmware/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/checks/mock/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/core/fixtures/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/checks/integration/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/utils/service_discovery/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ci/resources/pgbouncer/users.txt:
--------------------------------------------------------------------------------
1 | "datadog" "datadog"
2 |
--------------------------------------------------------------------------------
/ci/resources/snmp/snmpd.conf:
--------------------------------------------------------------------------------
1 | rocommunity public
2 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_service_down:
--------------------------------------------------------------------------------
1 | State Stopped
2 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_service_up:
--------------------------------------------------------------------------------
1 | State Running
2 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/invalid_conf.yaml:
--------------------------------------------------------------------------------
1 | init_config:
2 |
--------------------------------------------------------------------------------
/ci/resources/elasticsearch/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | node:
2 | name: batman
3 |
--------------------------------------------------------------------------------
/tests/core/fixtures/flare/apikey.conf:
--------------------------------------------------------------------------------
1 | api_key: aaaaaaaaaaaaaaaaaaaaaa
2 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/cacti/whitelist.txt:
--------------------------------------------------------------------------------
1 | localhost*load*rrd
2 | localhost*hdd*free*
3 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/invalid_check_1.py:
--------------------------------------------------------------------------------
1 | class InvalidCheck(object):
2 | pass
3 |
--------------------------------------------------------------------------------
/ci/resources/zookeeper/zoo.cfg:
--------------------------------------------------------------------------------
1 | tickTime=2000
2 | dataDir=/tmp/zookeeper
3 | clientPort=2181
4 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/valid_conf.yaml:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - host: localhost
5 |
--------------------------------------------------------------------------------
/tests/core/fixtures/target_module.py:
--------------------------------------------------------------------------------
1 | default_target = 'DEFAULT'
2 | specified_target = 'SPECIFIED'
3 |
--------------------------------------------------------------------------------
/packaging/osx/app/Agent.icns:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/osx/app/Agent.icns
--------------------------------------------------------------------------------
/requirements-test.txt:
--------------------------------------------------------------------------------
1 | nose==1.3.4
2 | flake8==2.5.1
3 | mock==1.0.1
4 | pep8==1.5.7
5 | pylint==1.5.5
6 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfformatteddata_perfproc_process:
--------------------------------------------------------------------------------
1 | IOReadBytesPerSec 20455
2 | IDProcess 4036
3 |
--------------------------------------------------------------------------------
/ci/resources/redis/noauth.conf:
--------------------------------------------------------------------------------
1 | daemonize yes
2 | pidfile /tmp/dd-redis-noauth.pid
3 | bind 127.0.0.1
4 | port 16379
5 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_process:
--------------------------------------------------------------------------------
1 | CommandLine C:\\ProgramFiles(x86)\\Google\\Chrome\\Application\\chrome.exe\
2 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/invalid_check_2.py:
--------------------------------------------------------------------------------
1 | import nothing # noqa
2 |
3 | class InvalidCheck(object):
4 | pass
5 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfformatteddata_perfdisk_logicaldisk:
--------------------------------------------------------------------------------
1 | AvgDiskBytesPerWrite 1536
2 | FreeMegabytes 19742
3 |
--------------------------------------------------------------------------------
/checks/libs/jmxterm-1.0-DATADOG-uber.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/checks/libs/jmxterm-1.0-DATADOG-uber.jar
--------------------------------------------------------------------------------
/conf.d/system_swap.yaml.example:
--------------------------------------------------------------------------------
1 | # This check takes no initial configuration
2 | init_config:
3 |
4 | instances:
5 | [{}]
6 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/valid_conf_2.yaml:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - host: localhost
5 | - host: localh0st
6 |
--------------------------------------------------------------------------------
/conf.d/auto_conf/etcd.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - etcd
3 |
4 | init_config:
5 |
6 | instances:
7 | - url: "http://%%host%%:%%port_0%%"
8 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfformatteddata_perfproc_process_alt:
--------------------------------------------------------------------------------
1 | IOReadBytesPerSec 20455
2 | ResultNotMatchingAnyTargetProperty 0
3 |
--------------------------------------------------------------------------------
/conf.d/auto_conf/couch.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - couchdb
3 |
4 | init_config:
5 |
6 | instances:
7 | - server: http://%%host%%:%%port%%
8 |
--------------------------------------------------------------------------------
/conf.d/auto_conf/riak.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - riak
3 |
4 | init_config:
5 |
6 | instances:
7 | - url: http://%%host%%:%%port%%/stats
8 |
--------------------------------------------------------------------------------
/conf.d/linux_proc_extras.yaml.example:
--------------------------------------------------------------------------------
1 | # There's no configuration necessary for this check.
2 | init_config:
3 |
4 | instances:
5 | - tags: []
6 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/wix/FindReplace.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/wix/FindReplace.exe
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfrawdata_perfos_system_unknown:
--------------------------------------------------------------------------------
1 | UnknownCounter 999 123456
2 | Timestamp_Sys100NS 52
3 | Frequency_Sys100NS 0.5
4 |
--------------------------------------------------------------------------------
/tests/core/fixtures/flare/apikeys.conf:
--------------------------------------------------------------------------------
1 | api_key: aaaaaaaaaaaaaaaaaaaaaa ,bbbbbbbbbbbbbbbbbbbbbb, cccccccccccccccccccccc,dddddddddddddddddddddd
2 |
--------------------------------------------------------------------------------
/tests/core/fixtures/flare/datadog-agent-1.tar.bz2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/tests/core/fixtures/flare/datadog-agent-1.tar.bz2
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude = venv/*,embedded/*,.git/*
3 | max-line-length = 700
4 | ignore = E128,E203,E226,E231,E241,E251,E261,E265,E302,E303
5 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 |
3 | gem 'addressable'
4 | gem 'colorize'
5 | gem 'httparty'
6 | gem 'rake'
7 | gem 'rubocop', '~>0.38.0'
8 |
--------------------------------------------------------------------------------
/ci/resources/redis/slave_unhealthy.conf:
--------------------------------------------------------------------------------
1 | daemonize yes
2 | pidfile /tmp/dd-redis-noauth.pid
3 | bind 127.0.0.1
4 | port 46379
5 | slaveof 127.0.0.1 55555
--------------------------------------------------------------------------------
/conf.d/auto_conf/elastic.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - elasticsearch
3 |
4 | init_config:
5 |
6 | instances:
7 | - url: "http://%%host%%:%%port%%"
8 |
--------------------------------------------------------------------------------
/checks/libs/jmxfetch-0.12.0-jar-with-dependencies.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/checks/libs/jmxfetch-0.12.0-jar-with-dependencies.jar
--------------------------------------------------------------------------------
/ci/resources/redis/slave_healthy.conf:
--------------------------------------------------------------------------------
1 | daemonize yes
2 | pidfile /tmp/dd-redis-noauth.pid
3 | bind 127.0.0.1
4 | port 36379
5 | slaveof 127.0.0.1 16379
6 |
--------------------------------------------------------------------------------
/conf.d/auto_conf/mcache.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - memcached
3 |
4 | init_config:
5 |
6 | instances:
7 | - url: "%%host%%"
8 | port: "%%port%%"
9 |
--------------------------------------------------------------------------------
/conf.d/auto_conf/redisdb.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - redis
3 |
4 | init_config:
5 |
6 | instances:
7 | - host: "%%host%%"
8 | port: "%%port%%"
9 |
--------------------------------------------------------------------------------
/conf.d/btrfs.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | # Not required for this check
3 |
4 | instances:
5 | - excluded_devices: [] # List of devices to exclude
6 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include datadog.conf.example
2 | include checks/libs/jmxterm-1.0-DATADOG-uber.jar
3 | include checks/libs/jmxfetch-0.12.0-jar-with-dependencies.jar
4 |
--------------------------------------------------------------------------------
/conf.d/auto_conf/apache.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - httpd
3 |
4 | init_config:
5 |
6 | instances:
7 | - apache_status_url: http://%%host%%/server-status?auto
8 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/config_icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/config_icon.ico
--------------------------------------------------------------------------------
/conf.d/auto_conf/kyototycoon.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - kyototycoon
3 |
4 | init_config:
5 |
6 | instances:
7 | - report_url: http://%%host%%:%%port%%/rpc/report
8 |
--------------------------------------------------------------------------------
/conf.d/riak.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - url: http://127.0.0.1:8098/stats
5 | # tags:
6 | # - optional_tag1
7 | # - optional_tag2
8 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/win32service.pyd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/win32service.pyd
--------------------------------------------------------------------------------
/conf.d/statsd.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - host: localhost
5 | port: 8126
6 | # tags:
7 | # - optional_tag1
8 | # - optional_tag2
9 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/dd_agent_win_256.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/dd_agent_win_256.ico
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/txt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/txt.png
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfrawdata_perfos_system_current:
--------------------------------------------------------------------------------
1 | CounterRawCount 500 65536
2 | CounterCounter 500 272696320
3 | Timestamp_Sys100NS 52
4 | Frequency_Sys100NS 0.5
5 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfrawdata_perfos_system_previous:
--------------------------------------------------------------------------------
1 | CounterRawCount 300 65536
2 | CounterCounter 300 272696320
3 | Timestamp_Sys100NS 50
4 | Frequency_Sys100NS 0.5
5 |
--------------------------------------------------------------------------------
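The `_current` and `_previous` fixtures above form a sample pair for rate computation from raw WMI counters. A minimal sketch of the documented PERF_COUNTER_COUNTER formula applied to those values (the helper name is hypothetical, not the checks' actual parser):

# Rate for a raw PERF_COUNTER_COUNTER sample pair:
# (N1 - N0) / ((D1 - D0) / F), where D is Timestamp_Sys100NS
# and F is Frequency_Sys100NS.
def counter_rate(n0, n1, d0, d1, freq):
    return (n1 - n0) / ((d1 - d0) / freq)

# CounterCounter goes 300 -> 500 while Timestamp_Sys100NS goes 50 -> 52
# at frequency 0.5, giving 200 / 4.0.
print(counter_rate(300.0, 500.0, 50.0, 52.0, 0.5))  # 50.0
--------------------------------------------------------------------------------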
/ci/resources/redis/auth.conf:
--------------------------------------------------------------------------------
1 | daemonize yes
2 | pidfile /tmp/dd-redis-auth.pid
3 | bind 127.0.0.1
4 | port 26379
5 | requirepass datadog-is-devops-best-friend
6 | slaveof 127.0.0.1 16379
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/agent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/agent.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/apply.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/apply.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/delete.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/delete.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/edit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/edit.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/info.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/info.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/start.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/start.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/stop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/stop.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/filesave.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/filesave.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/restart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/restart.png
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/settings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/settings.png
--------------------------------------------------------------------------------
/tests/checks/fixtures/nagios/host-perfdata:
--------------------------------------------------------------------------------
1 | [HOSTPERFDATA] 1339511443 localhost 0.017 PING OK - Packet loss = 0%, RTA = 0.05 ms rta=0.048000ms;3000.000000;5000.000000;0.000000 pl=0%;80;100;0
2 |
--------------------------------------------------------------------------------
/conf.d/system_core.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # No configuration is needed for this check.
5 | # A single instance needs to be defined with any value.
6 | - foo: bar
7 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/not_found.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tes/dd-agent/master/packaging/datadog-agent/win32/install_files/guidata/images/not_found.png
--------------------------------------------------------------------------------
/conf.d/auto_conf/couchbase.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - couchbase
3 |
4 | init_config:
5 |
6 | instances:
7 | - server: http://%%host%%:%%port%%
8 | user: Administrator
9 | password: password
10 |
--------------------------------------------------------------------------------
/conf.d/mesos.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | # time to wait on a Mesos API request
3 | # default_timeout: 5
4 |
5 | instances:
6 | # url: the API endpoint of your Mesos master
7 | # - url: "https://server:port"
8 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/valid_check_1.py:
--------------------------------------------------------------------------------
1 | from checks import AgentCheck
2 |
3 | OUTPUT = 'valid_check_1'
4 |
5 | class ValidCheck(AgentCheck):
6 |
7 | def check(self, instance):
8 | return OUTPUT
9 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/valid_check_2.py:
--------------------------------------------------------------------------------
1 | from checks import AgentCheck
2 |
3 | OUTPUT = 'valid_check_2'
4 |
5 | class ValidCheck(AgentCheck):
6 |
7 | def check(self, instance):
8 | return OUTPUT
9 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/powerdns-recursor/recursor.conf:
--------------------------------------------------------------------------------
1 | local-port=5353
2 |
3 | experimental-webserver=on
4 | experimental-webserver-address=127.0.0.1
5 | experimental-webserver-port=8082
6 | experimental-api-key=pdns_api_key
7 |
--------------------------------------------------------------------------------
/conf.d/auto_conf/consul.yaml:
--------------------------------------------------------------------------------
1 | docker_images:
2 | - consul
3 |
4 | init_config:
5 |
6 | instances:
7 | - url: "http://%%host%%:%%port%%"
8 | catalog_checks: yes
9 | new_leader_checks: yes
10 | # service_whitelist:
11 |
--------------------------------------------------------------------------------
/ci/resources/postgres/postgres.sql:
--------------------------------------------------------------------------------
1 | CREATE USER datadog WITH PASSWORD 'datadog';
2 | GRANT SELECT ON pg_stat_database TO datadog;
3 | CREATE DATABASE datadog_test;
4 | GRANT ALL PRIVILEGES ON DATABASE datadog_test TO datadog;
5 | CREATE DATABASE dogs;
6 |
--------------------------------------------------------------------------------
/tests/core/fixtures/checks/valid_sub_check.py:
--------------------------------------------------------------------------------
1 | from tests.core.fixtures.checks.valid_check_2 import ValidCheck
2 |
3 | OUTPUT = 'valid_check_1'
4 |
5 | class InheritedCheck(ValidCheck):
6 |
7 | def check(self, instance):
8 | return OUTPUT
9 |
--------------------------------------------------------------------------------
/conf.d/pgbouncer.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # - host: localhost
5 | # port: 15433
6 | # username: my_username
7 | # password: my_password
8 | # tags:
9 | # - optional_tag1
10 | # - optional_tag2
11 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/README.md:
--------------------------------------------------------------------------------
1 | # Building for Windows
2 |
3 | In `cmd.exe` or PowerShell, run the following from the dd-agent repo's root:
4 |
5 | `powershell -File .\packaging\datadog-agent\win32\build.ps1`
6 |
7 | This will generate a `.msi` file in the `build/` folder.
8 |
--------------------------------------------------------------------------------
/ci/resources/supervisord/program_0.sh:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # dummy program that runs for 10 seconds and dies
6 | echo 'test' >> $VOLATILE_DIR/supervisor/started_0
7 | sleep 10
8 |
--------------------------------------------------------------------------------
/ci/resources/supervisord/program_1.sh:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # dummy program that runs for 20 seconds and dies
6 | echo 'test' >> $VOLATILE_DIR/supervisor/started_1
7 | sleep 20
8 |
--------------------------------------------------------------------------------
/ci/resources/supervisord/program_2.sh:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # dummy program that runs for 30 seconds and dies
6 | echo 'test' >> $VOLATILE_DIR/supervisor/started_2
7 | sleep 30
8 |
--------------------------------------------------------------------------------
/checks/metric_types.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | class MetricTypes(object):
6 |
7 | GAUGE = 'gauge'
8 | COUNTER = 'counter'
9 | RATE = 'rate'
10 | COUNT = 'count'
11 |
--------------------------------------------------------------------------------
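A small usage sketch (the validator function is hypothetical, not part of the module): these constants can serve as the canonical set when checking a submitted metric type string.

VALID_TYPES = frozenset([MetricTypes.GAUGE, MetricTypes.COUNTER,
                         MetricTypes.RATE, MetricTypes.COUNT])

def is_valid_metric_type(metric_type):
    # True only for the four type strings defined on MetricTypes.
    return metric_type in VALID_TYPES

assert is_valid_metric_type('gauge')
assert not is_valid_metric_type('histogram')
--------------------------------------------------------------------------------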
/tests/checks/fixtures/spark/rdd_metrics:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "id": 6,
4 | "name": "PythonRDD",
5 | "numPartitions": 2,
6 | "numCachedPartitions": 2,
7 | "storageLevel": "Memory Serialized 1x Replicated",
8 | "memoryUsed": 284,
9 | "diskUsed": 0
10 | }
11 | ]
--------------------------------------------------------------------------------
/ci/resources/phpfpm/php-fpm.conf:
--------------------------------------------------------------------------------
1 | [www]
2 | user = nobody
3 | group = nobody
4 | listen = 127.0.0.1:9000
5 | pm = dynamic
6 | pm.max_children = 5
7 | pm.start_servers = 2
8 | pm.min_spare_servers = 1
9 | pm.max_spare_servers = 3
10 | pm.status_path = /status
11 | ping.path = /ping
12 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_ntlogevent:
--------------------------------------------------------------------------------
1 | EventCode 0
2 | EventIdentifier 0
3 | EventType 0
4 | InsertionStrings [insertionstring]
5 | Logfile Application
6 | Message SomeMessage
7 | SourceName MSQLSERVER
8 | TimeGenerated 21001224113047.000000-480
9 | User FooUser
10 | Type Error
11 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/spark/apps_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "apps": {
3 | "app": [
4 | {
5 | "id": "application_1459362484344_0011",
6 | "name": "PySpark",
7 | "trackingUrl": "http://localhost:8088/proxy/application_1459362484344_0011/"
8 | }
9 | ]
10 | }
11 | }
--------------------------------------------------------------------------------
/conf.d/couchbase.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - server: http://localhost:8091
5 | user: Administrator
6 | password: password
7 | # timeout: 10 # Optional timeout to http connections to the API
8 | # tags:
9 | # - optional_tag1
10 | # - optional_tag2
11 |
--------------------------------------------------------------------------------
/packaging/debian/start_agent.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # (C) Datadog, Inc. 2010-2016
3 | # All rights reserved
4 | # Licensed under Simplified BSD License (see LICENSE)
5 |
6 | PATH=/opt/datadog-agent/embedded/bin:/opt/datadog-agent/bin:$PATH
7 |
8 | exec /opt/datadog-agent/bin/supervisord -c /etc/dd-agent/supervisor.conf
9 |
--------------------------------------------------------------------------------
/ci/resources/tomcat/setenv.sh:
--------------------------------------------------------------------------------
1 | CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote"
2 | CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote.port=8090"
3 | CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
4 | CATALINA_OPTS="$CATALINA_OPTS -Dcom.sun.management.jmxremote.ssl=false"
5 |
--------------------------------------------------------------------------------
/conf.d/nagios.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | # check_freq: 15
3 |
4 | instances:
5 | - nagios_conf: /etc/nagios3/nagios.cfg
6 | # # Default to True
7 | # collect_events: True
8 | #
9 | # # Default to False
10 | # collect_host_performance_data: False
11 | # collect_service_performance_data: False
12 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/mapreduce/apps_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "apps": {
3 | "app": [
4 | {
5 | "id": "application_1453738555560_0001",
6 | "trackingUrl": "http://localhost:8088/proxy/application_1453738555560_0001/",
7 | "user": "vagrant",
8 | "name": "WordCount"
9 | }
10 | ]
11 | }
12 | }
--------------------------------------------------------------------------------
/conf.d/dns_check.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | default_timeout: 4
3 |
4 | instances:
5 | - hostname: www.example.org
6 | nameserver: 127.0.0.1
7 | timeout: 8
8 |
9 | # Specify an (optional) `record_type` to customize the record type
10 | # queried by the check (default: "A")
11 | # record_type: A
12 |
--------------------------------------------------------------------------------
/conf.d/marathon.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | # time to wait on a Marathon API request
3 | # default_timeout: 5
4 |
5 | instances:
6 | # url: the API endpoint of your Marathon master
7 | # - url: "https://server:port"
8 | #
9 | # if marathon is protected by basic auth
10 | # user: "username"
11 | # password: "password"
12 |
--------------------------------------------------------------------------------
/tests/core/fixtures/config/one_endpoint.conf:
--------------------------------------------------------------------------------
1 | [Main]
2 |
3 | # The host of the Datadog intake server to send agent data to
4 | dd_url: https://app.datadoghq.com
5 |
6 | # The Datadog api key to associate your agent's data with your organization.
7 | # Can be found here:
8 | # https://app.datadoghq.com/account/settings
9 | api_key: 1234
10 |
--------------------------------------------------------------------------------
/tests/core/fixtures/flare/password_uri.yaml:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - server: mongodb://datadog:V3pZC7ghx1ne82XkyqLnOW36@localhost:27017/admin
5 | tags:
6 | - foo
7 |
8 | # - server: mongodb://datadog:V3pZC7ghx1ne82XkyqLnOW36@localhost:27017/movies
9 | # tags:
10 | # - bar
11 |
--------------------------------------------------------------------------------
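This fixture exercises credential scrubbing of server URIs in flare output. A hedged sketch of that kind of substitution (the flare's actual pattern may differ):

import re

def scrub_uri_password(uri):
    # Mask the password in user:password@host style URIs.
    return re.sub(r'://([^:@/]+):([^@/]+)@', r'://\1:********@', uri)

print(scrub_uri_password(
    'mongodb://datadog:V3pZC7ghx1ne82XkyqLnOW36@localhost:27017/admin'))
# mongodb://datadog:********@localhost:27017/admin
--------------------------------------------------------------------------------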
/tests/checks/fixtures/spark/spark_apps:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "id": "app_001",
4 | "name": "PySparkShell",
5 | "attempts": [
6 | {
7 | "startTime": "2016-04-12T12:48:17.576GMT",
8 | "endTime": "1969-12-31T23:59:59.999GMT",
9 | "sparkUser": "",
10 | "completed": false
11 | }
12 | ]
13 | }
14 | ]
--------------------------------------------------------------------------------
/ci/resources/pgbouncer/pgbouncer.ini:
--------------------------------------------------------------------------------
1 | [databases]
2 | datadog_test = host=127.0.0.1 port=15432 dbname=datadog_test
3 |
4 | [pgbouncer]
5 | listen_port = 15433
6 | listen_addr = *
7 | auth_type = md5
8 | auth_file = USERS_TXT
9 | admin_users = datadog
10 | logfile = /tmp/pgbouncer.log
11 | pidfile = /tmp/pgbouncer.pid
12 |
--------------------------------------------------------------------------------
/packaging/debian/datadog-agent.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description="Datadog Agent"
3 | After=network.target
4 |
5 | [Service]
6 | Type=forking
7 | User=dd-agent
8 | ExecStart=/opt/datadog-agent/bin/start_agent.sh
9 | ExecStop=/opt/datadog-agent/bin/supervisorctl -c /etc/dd-agent/supervisor.conf shutdown
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/conf.d/riakcs.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - access_id: access-key
5 | access_secret: access-secret
6 | #is_secure: True # Uncomment and change to false if you are not using ssl
7 | #host: localhost # Hostname/IP of your riakcs node
8 | #port: 8080 # port used by your riakcs node
9 | #s3_root: s3.amazonaws.com #
10 |
--------------------------------------------------------------------------------
/tests/core/fixtures/config/multiple_apikeys.conf:
--------------------------------------------------------------------------------
1 | [Main]
2 |
3 | # The host of the Datadog intake server to send agent data to
4 | dd_url: https://app.datadoghq.com
5 |
6 | # The Datadog api key to associate your agent's data with your organization.
7 | # Can be found here:
8 | # https://app.datadoghq.com/account/settings
9 | api_key: 1234, 5678 , 901
10 |
--------------------------------------------------------------------------------
/utils/http.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | import requests
6 |
7 |
8 | DEFAULT_TIMEOUT = 10
9 |
10 |
11 | def retrieve_json(url, timeout=DEFAULT_TIMEOUT):
12 | r = requests.get(url, timeout=timeout)
13 | r.raise_for_status()
14 | return r.json()
15 |
--------------------------------------------------------------------------------
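A minimal usage sketch for `retrieve_json` (the URL is illustrative; conf.d/riak.yaml.example points at a similar endpoint):

from utils.http import retrieve_json

# Fetches and decodes a JSON payload with the default 10s timeout;
# raises requests.exceptions.HTTPError on a non-2xx response.
stats = retrieve_json('http://127.0.0.1:8098/stats')
--------------------------------------------------------------------------------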
/conf.d/ceph.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # - tags:
5 | # - name:mars_cluster
6 | #
7 | # ceph_cmd: /usr/bin/ceph
8 | #
9 | # If your environment requires sudo, please add a line like:
10 | # dd-agent ALL=(ALL) NOPASSWD:/usr/bin/ceph
11 | # to your sudoers file, and uncomment the below option.
12 | #
13 | # use_sudo: True
14 |
--------------------------------------------------------------------------------
/ci/resources/fluentd/td-agent.conf:
--------------------------------------------------------------------------------
1 | # NOTE: the <source>/<match>/<server> tags in this file were lost to
2 | # extraction; the block structure below is reconstructed, and the two
3 | # match patterns are assumptions.
4 | <source>
5 |   type monitor_agent
6 |   bind 0.0.0.0
7 |   port 24220
8 | </source>
9 |
10 | <match plg1.**>
11 |   id plg1
12 |   type forward
13 |   <server>
14 |     host localhost
15 |   </server>
16 | </match>
17 |
18 | <match plg2.**>
19 |   id plg2
20 |   type forward
21 |   <server>
22 |     host localhost
23 |   </server>
24 | </match>
25 |
--------------------------------------------------------------------------------
/conf.d/kong.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # For every instance, you need a `kong_status_url` and can optionally
5 | # supply a list of tags.
6 | #
7 | - kong_status_url: http://localhost:8001/status/
8 | # tags:
9 | # - instance:foo
10 | #
11 | # - kong_status_url: http://example2.com:8001/status/
12 | # tags:
13 | # - instance:bar
14 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/disk/centos-df-Tk:
--------------------------------------------------------------------------------
1 | Filesystem Type 1K-blocks Used Available Use% Mounted on
2 | /dev/sda3 ext4 93421512 70963180 17706052 81% /
3 | tmpfs tmpfs 32969616 0 32969616 0% /dev/shm
4 | /dev/sda1 ext4 512752 90460 395412 19% /boot
5 | 10.1.5.223:/vil/cor
6 | nfs 1020054752 56080768 963973984 6% /cor
7 |
--------------------------------------------------------------------------------
/tests/core/fixtures/config/multiple_endpoints_bad.conf:
--------------------------------------------------------------------------------
1 | [Main]
2 |
3 | # The host of the Datadog intake server to send agent data to
4 | dd_url: https://app.datadoghq.com, https://app.example.com
5 |
6 | # The Datadog api key to associate your agent's data with your organization.
7 | # Can be found here:
8 | # https://app.datadoghq.com/account/settings
9 | api_key: 1234, 5678, 901
10 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/mesos_master/roles.json:
--------------------------------------------------------------------------------
1 | {
2 | "roles": [
3 | {
4 | "weight": 1,
5 | "resources": {
6 | "ports": "[31915-31915]",
7 | "mem": 100,
8 | "disk": 0,
9 | "cpus": 1
10 | },
11 | "name": "*",
12 | "frameworks": [
13 | "20150403-140128-251789322-5050-6047-0000"
14 | ]
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/tests/core/fixtures/config/multiple_endpoints.conf:
--------------------------------------------------------------------------------
1 | [Main]
2 |
3 | # The host of the Datadog intake server to send agent data to
4 | dd_url: https://app.datadoghq.com, https://app.example.com, https://app.example.com
5 |
6 | # The Datadog api key to associate your agent's data with your organization.
7 | # Can be found here:
8 | # https://app.datadoghq.com/account/settings
9 | api_key: 1234,5678, 901
10 |
--------------------------------------------------------------------------------
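The endpoint fixtures above (one endpoint, multiple api keys, and matched vs. mismatched url/key lists) target the parsing of comma-separated `dd_url` and `api_key` values. A sketch of that pairing logic, assuming one key per URL (not the agent's actual implementation):

def parse_endpoints(dd_url, api_key):
    urls = [u.strip() for u in dd_url.split(',')]
    keys = [k.strip() for k in api_key.split(',')]
    # multiple_endpoints_bad.conf has two urls for three keys,
    # which cannot be paired.
    if len(urls) != len(keys):
        raise ValueError('%d urls for %d api keys' % (len(urls), len(keys)))
    return zip(urls, keys)

print(parse_endpoints(
    'https://app.datadoghq.com, https://app.example.com',
    '1234, 5678'))  # [('https://app.datadoghq.com', '1234'), ...]
--------------------------------------------------------------------------------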
/conf.d/ntp.yaml.default:
--------------------------------------------------------------------------------
1 | # This file is overwritten upon Agent upgrade.
2 | # To make modifications to the check configuration, please copy this file
3 | # to `ntp.yaml` and make your changes on that file.
4 |
5 | init_config:
6 |
7 | instances:
8 | - offset_threshold: 60
9 |
10 | # Optional params:
11 | #
12 | # host: pool.ntp.org
13 | # port: ntp
14 | # version: 3
15 | # timeout: 5
16 |
--------------------------------------------------------------------------------
/packaging/desc:
--------------------------------------------------------------------------------
1 | Datadog Monitoring Agent
2 | The Datadog Monitoring Agent is a lightweight process that monitors system
3 | processes and services, and sends information back to your Datadog account.
4 | .
5 | This package installs and runs the advanced Agent daemon, which queues and
6 | forwards metrics from your applications as well as system services.
7 | .
8 | See http://www.datadoghq.com/ for more information
9 |
--------------------------------------------------------------------------------
/utils/singleton.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | class Singleton(type):
6 | _instances = {}
7 |
8 | def __call__(cls, *args, **kwargs):
9 |
10 | if cls not in cls._instances:
11 | cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
12 | return cls._instances[cls]
13 |
--------------------------------------------------------------------------------
/ci/resources/tomcat/jmx.yaml:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - conf:
5 | - include:
6 | attribute:
7 | bufferSize:
8 | alias: my.metric.buf
9 | metric_type: gauge
10 | domain: Catalina
11 | type: Connector
12 | - include:
13 | domain: Catalina
14 | type: ThreadPool
15 | host: localhost
16 | port: 8090
17 | name: jmx_instance1
--------------------------------------------------------------------------------
/conf.d/kafka_consumer.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | # Customize the ZooKeeper connection timeout here
3 | # zk_timeout: 5
4 | # Customize the Kafka connection timeout here
5 | # kafka_timeout: 5
6 |
7 | instances:
8 | # - kafka_connect_str: localhost:9092
9 | # zk_connect_str: localhost:2181
10 | # zk_prefix: /0.8
11 | # consumer_groups:
12 | # my_consumer:
13 | # my_topic: [0, 1, 4, 12]
14 |
--------------------------------------------------------------------------------
/ci/resources/kong/kong_install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | pushd $INTEGRATIONS_DIR/kong
6 | wget -O $VOLATILE_DIR/kong.tar.gz https://github.com/Mashape/kong/archive/0.8.1.tar.gz
7 | tar xvzf $VOLATILE_DIR/kong.tar.gz -C . --strip-components=1
8 | mkdir $LUAJIT_DIR/include/$LUA_VERSION/uuid
9 | cp $UUID_DIR/usr/include/uuid/* $LUAJIT_DIR/include/$LUA_VERSION/uuid
10 | cp $UUID_DIR/usr/lib/libuuid* $LUAJIT_DIR/lib
11 | popd
12 |
--------------------------------------------------------------------------------
/ci/resources/postgres/dogs.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE breed (id SERIAL, name VARCHAR(255));
2 | CREATE TABLE kennel (id SERIAL, address VARCHAR(255));
3 | INSERT INTO kennel (address) VALUES ('Midtown, New York'), ('Boston');
4 | SELECT * FROM kennel;
5 | CREATE INDEX breed_names ON breed(name);
6 | INSERT INTO breed (name) VALUES ('Labrador Retriver'), ('German Shepherd'), ('Yorkshire Terrier'), ('Golden Retriever'), ('Bulldog');
7 | SELECT * FROM breed WHERE name = 'Labrador';
8 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/smartos/dd-agent:
--------------------------------------------------------------------------------
1 | #!/bin/sh -x
2 | dd_base=/opt/local/datadog
3 | source $dd_base/venv/bin/activate
4 |
5 | case "$1" in
6 | start)
7 | cd $dd_base && $dd_base/venv/bin/supervisord -n -c $dd_base/agent/supervisor.conf
8 | ;;
9 |
10 | stop)
11 | cd $dd_base && $dd_base/venv/bin/supervisorctl -c $dd_base/agent/supervisor.conf shutdown
12 | ;;
13 |
14 | *)
15 | echo "Usage: $0 {start|stop}"
16 | exit 2
17 | esac
18 | exit $?
19 |
--------------------------------------------------------------------------------
/ci/resources/kong/setup_serf.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if [ "$TEST_SUITE" == "unit" ]; then
6 | echo "Exiting, no integration tests"
7 | exit
8 | fi
9 |
10 | pushd $INTEGRATIONS_DIR/
11 | mkdir -p $SERF_DIR
12 |
13 | if [ ! "$(ls -A $SERF_DIR)" ]; then
14 | pushd $SERF_DIR
15 | wget https://releases.hashicorp.com/serf/${SERF_VERSION}/serf_${SERF_VERSION}_linux_amd64.zip
16 | unzip serf_${SERF_VERSION}_linux_amd64.zip
17 | popd
18 | fi
19 | popd
20 |
--------------------------------------------------------------------------------
/ci/resources/postgres/datadog_test.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE persons (personid SERIAL, lastname VARCHAR(255), firstname VARCHAR(255), address VARCHAR(255), city VARCHAR(255));
2 | INSERT INTO persons (lastname, firstname, address, city) VALUES ('Cavaille', 'Leo', 'Midtown', 'New York'), ('Someveryveryveryveryveryveryveryveryveryverylongname', 'something', 'Avenue des Champs Elysees', 'Beautiful city of lights');
3 | SELECT * FROM persons;
4 | SELECT * FROM persons;
5 | SELECT * FROM persons;
6 |
--------------------------------------------------------------------------------
/conf.d/gunicorn.yaml.example:
--------------------------------------------------------------------------------
1 | # NB: This check requires the python environment on which gunicorn runs to
2 | # have the `setproctitle` module installed (https://pypi.python.org/pypi/setproctitle/)
3 |
4 | init_config:
5 |
6 | instances:
7 | # The name of the gunicorn process. For the following gunicorn server ...
8 | #
9 | # gunicorn --name my_web_app my_web_app_config.ini
10 | #
11 | # ... we'd use the name `my_web_app`.
12 | #
13 | # - proc_name: my_web_app
14 |
--------------------------------------------------------------------------------
/conf.d/kyototycoon.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | # The Kyoto Tycoon check does not require any init_config
3 |
4 | instances:
5 | # Add one or more instances, which accept report_url,
6 | # name, and optionally tags keys. The report URL should
7 | # be a URL to the Kyoto Tycoon "report" RPC endpoint.
8 | #
9 | # Complete example:
10 | #
11 | - report_url: http://localhost:1978/rpc/report
12 | # name: my_kyoto_instance
13 | # tags:
14 | # foo: bar
15 | # baz: bat
16 |
--------------------------------------------------------------------------------
/conf.d/hdfs_datanode.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | #
5 | # The HDFS DataNode check retrieves metrics from the HDFS DataNode's JMX
6 | # interface. This check must be installed on a HDFS DataNode. The HDFS
7 | # DataNode JMX URI is composed of the DataNode's hostname and port.
8 | #
9 | # The hostname and port can be found in the hdfs-site.xml conf file under
10 | # the property dfs.datanode.http.address
11 | #
12 | - hdfs_datanode_jmx_uri: http://localhost:50075
--------------------------------------------------------------------------------
/conf.d/jenkins.yaml.example:
--------------------------------------------------------------------------------
1 | # DEPRECATED:
2 | # This Jenkins check is deprecated and not actively developed anymore. It will be
3 | # removed in a future version of the Datadog Agent. Please move to using the Datadog
4 | # plugin for Jenkins. More information can be found on the Jenkins Integration panel
5 | # under the Configuration tab (https://app.datadoghq.com/account/settings#integrations/jenkins)
6 |
7 | init_config:
8 |
9 | instances:
10 | - name: default
11 | jenkins_home: /var/lib/jenkins
12 |
--------------------------------------------------------------------------------
/ci/resources/kong/setup_uuid.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 |
6 | pushd $INTEGRATIONS_DIR
7 | mkdir -p $UUID_DIR
8 |
9 | rsync rsync://rsync.kernel.org/pub/linux/utils/util-linux/v2.27/util-linux-2.27.tar.gz util-linux-2.27.tar.gz
10 | tar xzf util-linux-2.27.tar.gz
11 |
12 | echo $TRAVIS_PYTHON_VERSION
13 | pushd util-linux-2.27
14 | ./configure \
15 | --disable-use-tty-group \
16 | PYTHON_CFLAGS="-I/usr/include/python$TRAVIS_PYTHON_VERSION"
17 | make
18 | make install DESTDIR=$UUID_DIR
19 | popd
20 | popd
--------------------------------------------------------------------------------
/conf.d/hdfs_namenode.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | #
5 | # The HDFS NameNode check retrieves metrics from the HDFS NameNode's JMX
6 | # interface. This check must be installed on the NameNode. The HDFS
7 | # NameNode JMX URI is composed of the NameNode's hostname and port.
8 | #
9 | # The hostname and port can be found in the hdfs-site.xml conf file under
10 | # the property dfs.http.address or dfs.namenode.http-address
11 | #
12 | - hdfs_namenode_jmx_uri: http://localhost:50070
--------------------------------------------------------------------------------
/conf.d/gearmand.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - server: localhost
5 | port: 4730
6 | # Use the `tasks` parameter to specify the tasks you'd like to
7 | # collect metrics on (up to 200 tasks).
8 | #
9 | # If you have fewer than 200 tasks, you don't need to set this parameter;
10 | # metrics will be collected for all tasks by default.
11 | # tasks:
12 | # - task1
13 | # - task2
14 | # tags:
15 | # - optional_tag_1
16 | # - optional_tag_2
17 |
--------------------------------------------------------------------------------
/conf.d/ssh_check.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - host: localhost # required
5 | username: test # required
6 | # password: abcd # optional
7 | # port: 22 # optional, leaving blank defaults to port 22
8 | # sftp_check: True # optional, leaving blank defaults to True
9 | # private_key_file: # optional, file path to private key
10 | # add_missing_keys: True # optional, leaving blank defaults to False
11 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/hdfs_datanode/hdfs_datanode_jmx:
--------------------------------------------------------------------------------
1 | {"beans":[{"name":"Hadoop:service=DataNode,name=FSDatasetState","modelerType":"FSDatasetState","tag.Context":"FSDatasetState","tag.StorageInfo":"FSDataset{dirpath='[/hadoop/hdfs/data/current]'}","tag.Hostname":"dev.minerkasch.com","Capacity":41167421440,"DfsUsed":501932032,"Remaining":27914526720,"NumFailedVolumes":0,"LastVolumeFailureDate":0,"EstimatedCapacityLostTotal":0,"CacheUsed":0,"CacheCapacity":0,"NumBlocksCached":0,"NumBlocksFailedToCache":0,"NumBlocksFailedToUnCache":0}]}
--------------------------------------------------------------------------------
/conf.d/fluentd.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # Every instance requires a `monitor_agent_url`
5 | # Optional, set `plugin_ids` to monitor a specific scope of plugins.
6 | - monitor_agent_url: http://example.com:24220/api/plugins.json
7 | plugin_ids:
8 | - plg1
9 | - plg2
10 | # Optional, set 'tag_by' to specify how to tag metrics. By default, metrics are tagged with `plugin_id`
11 | - monitor_agent_url: http://example.com:24220/api/plugins.json
12 | tag_by: type
13 |
14 |
--------------------------------------------------------------------------------
/conf.d/apache.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - apache_status_url: http://localhost/server-status?auto
5 | # apache_user: example_user
6 | # apache_password: example_password
7 | # tags:
8 | # - optional_tag
9 |
10 | # The (optional) disable_ssl_validation will instruct the check
11 | # to skip the validation of the SSL certificate of the URL being tested.
12 | # Defaults to false, set to true if you want to disable SSL certificate validation.
13 | #
14 | # disable_ssl_validation: false
15 |
--------------------------------------------------------------------------------
/conf.d/mcache.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - url: localhost # url used to connect to the memcached instance
5 | # socket: /socket/path # used if url is omitted; the 'dd-agent' user must have read/write permission
6 | # port: 11211 # If this line is not present, port will default to 11211
7 | # tags:
8 | # - optional_tag
9 |
10 | # options:
11 | # items: false # set to true to collect memcached "items" stats.
12 | # slabs: false # set to true to collect memcached "slabs" stats.
13 |
14 |
--------------------------------------------------------------------------------
/conf.d/lighttpd.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # Each instance requires a `lighttpd_status_url` and (optionally)
5 | # a list of tags.
6 |
7 | - lighttpd_status_url: http://localhost/server-status?auto
8 | # tags:
9 | # - instance:foo
10 | #
11 | # - lighttpd_status_url: http://example2.com:1234/server-status?auto
12 | # tags:
13 | # - instance:bar
14 | #
15 | # # Lighttpd2 status url
16 | # - lighttpd_status_url: http://example.com/server-status?format=plain
17 | # tags:
18 | # - instance:l2
19 |
--------------------------------------------------------------------------------
/ci/resources/kong/setup_dnsmasq.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if [ "$TEST_SUITE" == "unit" ]; then
6 | echo "Exiting, no integration tests"
7 | exit
8 | fi
9 |
10 | pushd $INTEGRATIONS_DIR/
11 | mkdir -p $DNSMASQ_DIR
12 |
13 | if [ ! "$(ls -A $DNSMASQ_DIR)" ]; then
14 | pushd $DNSMASQ_DIR
15 | wget http://www.thekelleys.org.uk/dnsmasq/dnsmasq-${DNSMASQ_VERSION}.tar.gz
16 | tar xzf dnsmasq-${DNSMASQ_VERSION}.tar.gz
17 |
18 | pushd dnsmasq-${DNSMASQ_VERSION}
19 | make install DESTDIR=$DNSMASQ_DIR
20 | popd
21 |
22 | popd
23 | fi
24 | popd
25 |
--------------------------------------------------------------------------------
/conf.d/network.yaml.default:
--------------------------------------------------------------------------------
1 | # This file is overwritten upon Agent upgrade.
2 | # To make modifications to the check configuration, please copy this file
3 | # to `network.yaml` and make your changes on that file.
4 |
5 | init_config:
6 |
7 | instances:
8 | # Network check only supports one configured instance
9 | - collect_connection_state: false
10 | excluded_interfaces:
11 | - lo
12 | - lo0
13 | # Optionally completely ignore any network interface
14 | # matching the given regex:
15 | # excluded_interface_re: my-network-interface.*
16 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/disk/debian-df-Tk:
--------------------------------------------------------------------------------
1 | Filesystem Type 1K-blocks Used Available Use% Mounted on
2 | /dev/sda1 ext4 5 4 1 80% /
3 | udev devtmpfs 2020720 8 2020712 1% /dev
4 | tmpfs tmpfs 404968 328 404640 1% /run
5 | none tmpfs 5120 0 5120 0% /run/lock
6 | none tmpfs 2024828 0 2024828 0% /run/shm
7 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/nagios/service-perfdata:
--------------------------------------------------------------------------------
1 | [SERVICEPERFDATA] 1339511383 localhost Current Load 0.003 0.112 (Return code of 127 is out of bounds - plugin may be missing)
2 | [SERVICEPERFDATA] 1339511443 localhost Current Users 0.030 0.182 USERS OK - 1 users currently logged in users=1;20;50;0
3 | [SERVICEPERFDATA] 1339511503 localhost PING 4.006 0.126 PING OK - Packet loss = 0%, RTA = 0.06 ms rta=0.065000ms;100.000000;500.000000;0.000000 pl=0%;20;60;0
4 | [SERVICEPERFDATA] 1339511563 localhost Root Partition 0.009 0.168 DISK OK - free space: / 4467 MB (64% inode=96%): /=2470MB;5852;6583;0;7315
5 |
--------------------------------------------------------------------------------
/conf.d/mesos_master.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | default_timeout: 10
3 |
4 | instances:
5 | - url: "http://localhost:5050"
6 |
7 | # The (optional) disable_ssl_validation will instruct the check
8 | # to skip the validation of the SSL certificate of the master.
9 | # This is mostly useful for certificates that are not signed by a
10 | # public authority.
11 | # When true, the check logs a warning in collector.log
12 | # Defaults to false, set to true if you want to disable
13 | # SSL certificate validation.
14 | #
15 | # disable_ssl_validation: true
16 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/yarn/nodes_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "nodes": {
3 | "node": [
4 | {
5 | "rack": "/default-rack",
6 | "state": "NEW",
7 | "id": "h2:1235",
8 | "nodeHostName": "h2",
9 | "nodeHTTPAddress": "h2:2",
10 | "healthStatus": "Healthy",
11 | "lastHealthUpdate": 1324056895432,
12 | "healthReport": "Healthy",
13 | "numContainers": 0,
14 | "usedMemoryMB": 0,
15 | "availMemoryMB": 8192,
16 | "usedVirtualCores": 0,
17 | "availableVirtualCores": 8
18 | }
19 | ]
20 | }
21 | }
--------------------------------------------------------------------------------
/utils/containers.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | def freeze(o):
6 | """
7 | Freezes any mutable object including dictionaries and lists for hashing.
8 | Accepts nested dictionaries.
9 | """
10 | if isinstance(o, dict):
11 | return frozenset((k, freeze(v)) for k, v in o.iteritems())
12 |
13 | if isinstance(o, list):
14 | return tuple([freeze(v) for v in o])
15 |
16 | return o
17 |
18 | def hash_mutable(m):
19 | return hash(freeze(m))
20 |
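21 | # Illustrative usage sketch (hypothetical values): `freeze` makes nested
22 | # structures hashable, so two configs with the same content hash identically
23 | # regardless of dict key order:
24 | #
25 | #   conf_a = {'tags': ['env:prod'], 'port': 8080}
26 | #   conf_b = {'port': 8080, 'tags': ['env:prod']}
27 | #   assert hash_mutable(conf_a) == hash_mutable(conf_b)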
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.zip
2 | Gemfile.lock
3 | *.pyc
4 | *.*~
5 | build/*
6 | datadog.conf
7 | datadog_agent.egg-info/*
8 | artifacts/*
9 | node_modules/*
10 | .DS_Store
11 | venv/*
12 | vendor/
13 | *.swp
14 | *.log
15 | !nagios.log
16 | *.pid
17 | *.deb
18 | *.rpm
19 | *.egg-info*
20 | dist/*
21 | *.msi
22 | *.wixobj
23 | *.wixpdb
24 | *.exe
25 | nosetests*.xml
26 | stats.dat
27 | conf.d/*.yaml
28 | !conf.d/network.yaml
29 | packaging/build/
30 | packaging/root/
31 | Vagrantfile
32 | .vagrant/*
33 | embedded/*
34 | .pip-cache/*
35 | .cache
36 | dump.rdb
37 | tests/core/fixtures/flare/dd*
38 | .python-version
39 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfformatteddata_w3svc_webservice:
--------------------------------------------------------------------------------
1 | ServiceUptime 251
2 | TotalBytesSent 0
3 | TotalBytesReceived 0
4 | TotalBytesTransferred 0
5 | CurrentConnections 0
6 | TotalFilesSent 0
7 | TotalFilesReceived 0
8 | TotalConnectionAttemptsallinstances 0
9 | TotalGetRequests 0
10 | TotalPostRequests 0
11 | TotalHeadRequests 0
12 | TotalPutRequests 0
13 | TotalDeleteRequests 0
14 | TotalOptionsRequests 0
15 | TotalTraceRequests 0
16 | TotalNotFoundErrors 0
17 | TotalLockedErrors 0
18 | TotalAnonymousUsers 0
19 | TotalNonAnonymousUsers 0
20 | TotalCGIRequests 0
21 | TotalISAPIExtensionRequests 0
22 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/wmi/win32_perfformatteddata_w3svc_webservice_2008:
--------------------------------------------------------------------------------
1 | ServiceUptime 251
2 | TotalBytesSent 0
3 | TotalBytesReceived 0
4 | TotalBytesTransfered 0
5 | CurrentConnections 0
6 | TotalFilesSent 0
7 | TotalFilesReceived 0
8 | TotalConnectionAttemptsallinstances 0
9 | TotalGetRequests 0
10 | TotalPostRequests 0
11 | TotalHeadRequests 0
12 | TotalPutRequests 0
13 | TotalDeleteRequests 0
14 | TotalOptionsRequests 0
15 | TotalTraceRequests 0
16 | TotalNotFoundErrors 0
17 | TotalLockedErrors 0
18 | TotalAnonymousUsers 0
19 | TotalNonAnonymousUsers 0
20 | TotalCGIRequests 0
21 | TotalISAPIExtensionRequests 0
22 |
--------------------------------------------------------------------------------
/.rubocop.yml:
--------------------------------------------------------------------------------
1 | # We allow longer lines in our style
2 | Metrics/LineLength:
3 | Max: 150
4 |
5 | # TODO/FIXME:
6 | # At some point, we might just want to do the changes and get rid of those
7 |
8 | # Offense count: 1
9 | # Configuration parameters: CountComments.
10 | Metrics/ClassLength:
11 | Max: 128
12 |
13 | # Offense count: 2
14 | # Configuration parameters: CountComments.
15 | Metrics/MethodLength:
16 | Max: 17
17 |
18 | # Offense count: 2
19 | Style/Documentation:
20 | Enabled: false
21 |
22 | # Offense count: 1
23 | # Configuration parameters: Methods.
24 | Style/SingleLineBlockParams:
25 | Enabled: false
26 |
--------------------------------------------------------------------------------
/win32/shell.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | import traceback
6 |
7 | def shell():
8 | from config import get_version, set_win32_requests_ca_bundle_path
9 |
10 | set_win32_requests_ca_bundle_path()
11 | print """
12 | Datadog Agent v%s - Python Shell
13 |
14 | """ % (get_version())
15 | while True:
16 | cmd = raw_input('>>> ')
17 | try:
18 | exec(cmd)
19 | except Exception as e:
20 | print traceback.format_exc()  # format_exc() takes no exception argument
21 |
22 | if __name__ == "__main__":
23 | shell()
24 |
--------------------------------------------------------------------------------
/conf.d/agent_metrics.yaml.default:
--------------------------------------------------------------------------------
1 | # This file is overwritten upon Agent upgrade.
2 | # To make modifications to the check configuration, please copy this file
3 | # to `agent_metrics.yaml` and make your changes on that file.
4 |
5 | init_config:
6 | process_metrics:
7 | - name: memory_info
8 | type: gauge
9 | active: yes
10 | - name: io_counters
11 | type: rate
12 | active: yes
13 | - name: num_threads
14 | type: gauge
15 | active: yes
16 | - name: connections
17 | type: gauge
18 | active: no
19 |
20 | instances:
21 | [{}]
22 |
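23 | # Note: `[{}]` above declares a single instance with no instance-level options;
24 | # this check is configured entirely by the `process_metrics` list in `init_config`.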
--------------------------------------------------------------------------------
/utils/deprecations.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | import logging
6 | from os.path import basename
7 | import sys
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def deprecate_old_command_line_tools():
13 | name = basename(sys.argv[0])
14 | if name in ['dd-forwarder', 'dogstatsd', 'dd-agent']:
15 | log.warn("Using this command is deprecated and will be removed in a future version,"
16 | " for more information see "
17 | "https://github.com/DataDog/dd-agent/wiki/Deprecation-notice--(old-command-line-tools)")
18 |
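19 | # Note: matching is done on basename(sys.argv[0]), so the warning fires no
20 | # matter which path the deprecated entry point (e.g. /usr/bin/dd-forwarder)
21 | # is invoked through.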
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | *Note: Please remember to review the Datadog [Contribution Guidelines](https://github.com/DataDog/dd-agent/blob/master/CONTRIBUTING.md)
2 | if you have not yet done so.*
3 |
4 |
5 | ### What does this PR do?
6 |
7 | A brief description of the change being made with this pull request.
8 |
9 | ### Motivation
10 |
11 | What inspired you to submit this pull request?
12 |
13 | ### Testing Guidelines
14 |
15 | An overview on [testing](https://github.com/DataDog/dd-agent/blob/master/tests/README.md)
16 | is available in our contribution guidelines.
17 |
18 | ### Additional Notes
19 |
20 | Anything else we should know when reviewing?
21 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/network/ss_ipv4:
--------------------------------------------------------------------------------
1 | Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port
2 | udp ESTAB 0 0 127.0.0.1:48135 127.0.0.1:8125
3 | udp UNCONN 0 0 127.0.0.1:8125 *:*
4 | tcp LISTEN 0 128 *:6379 *:*
5 | tcp LISTEN 0 128 *:6380 *:*
6 | tcp TIME-WAIT 0 0 127.0.0.1:80 127.0.0.1:51650
7 | tcp TIME-WAIT 0 0 127.0.0.1:58414 127.0.0.1:9200
8 | tcp ESTAB 0 0 10.0.2.15:45637 10.0.2.15:9300
9 |
--------------------------------------------------------------------------------
/utils/service_discovery/sd_backend.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # std
6 | import logging
7 |
8 | # project
9 | from utils.service_discovery.sd_docker_backend import SDDockerBackend
10 |
11 | log = logging.getLogger(__name__)
12 |
13 | AUTO_CONFIG_DIR = 'auto_conf/'
14 | SD_BACKENDS = ['docker']
15 |
16 |
17 | def get_sd_backend(agentConfig):
18 | if agentConfig.get('service_discovery_backend') == 'docker':
19 | return SDDockerBackend(agentConfig)
20 | else:
21 | log.error("Service discovery backend not supported. This feature won't be enabled")
22 |
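23 | # Usage sketch (illustrative): get_sd_backend({'service_discovery_backend': 'docker'})
24 | # returns an SDDockerBackend; any other backend value logs an error and the
25 | # function implicitly returns None.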
--------------------------------------------------------------------------------
/ci/resources/phpfpm/nginx.conf:
--------------------------------------------------------------------------------
1 | worker_processes 1;
2 | events {
3 | worker_connections 1024;
4 | }
5 | http {
6 | include mime.types;
7 | default_type application/octet-stream;
8 | sendfile on;
9 | keepalive_timeout 65;
10 | server {
11 | listen 42424;
12 | server_name localhost;
13 | location ~ /(status|ping|\.*\.php)$ {
14 | root html;
15 | fastcgi_pass 127.0.0.1:9000;
16 | fastcgi_index index.php;
17 | fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
18 | include fastcgi_params;
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/conf.d/mesos_slave.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | default_timeout: 10
3 |
4 | instances:
5 | - url: "http://localhost:5051"
6 | # master_port: 5050
7 | # tasks:
8 | # - "hello"
9 |
10 | # The (optional) disable_ssl_validation will instruct the check
11 | # to skip the validation of the SSL certificate of the slave metric endpoint.
12 | # This is mostly useful for certificates that are not signed by a
13 | # public authority.
14 | # When true, the check logs a warning in collector.log
15 | # Defaults to false, set to true if you want to disable
16 | # SSL certificate validation.
17 | #
18 | # disable_ssl_validation: true
19 |
20 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/network/ss_ipv6:
--------------------------------------------------------------------------------
1 | Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port
2 | udp UNCONN 0 0 :::111 :::*
3 | udp UNCONN 0 0 fe80::a00:27ff:fe1c:3c4:123 :::*
4 | udp ESTAB 0 0 fe80::a00:27ff:fee9:10ee:123 :::*
5 | tcp LISTEN 0 128 :::6380 :::*
6 | tcp TIME-WAIT 0 0 ::ffff:127.0.0.1:58488 ::ffff:127.0.0.1:7199
7 | tcp ESTAB 0 0 ::ffff:127.0.0.1:42395 ::ffff:127.0.0.1:2181
8 | tcp CLOSING 0 0 ::ffff:127.0.0.1:58439 ::ffff:127.0.0.1:7199
9 |
--------------------------------------------------------------------------------
/tests/checks/integration/test_sysstat.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | import logging
3 | import os
4 | import unittest
5 |
6 | # 3p
7 | from nose.plugins.attrib import attr
8 |
9 | logging.basicConfig(level=logging.DEBUG)
10 | logger = logging.getLogger(__file__)
11 |
12 | from checks.system.unix import Cpu
13 |
14 |
15 | @attr(requires='system')
16 | class TestSystem(unittest.TestCase):
17 |
18 | def testCPU(self):
19 | global logger
20 | logger.info(os.environ['PATH'])
21 | cpu = Cpu(logger)
22 | res = cpu.check({})
23 | # Make sure the CPU time percentages sum to ~100% (within 5 points; macs may report 99%)
24 | assert abs(reduce(lambda a, b: a+b, res.values(), 0) - 100) <= 5, res
25 |
--------------------------------------------------------------------------------
/checks.d/system_swap.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # 3p
6 | import psutil
7 |
8 | # project
9 | from checks import AgentCheck
10 | from utils.platform import Platform
11 |
12 |
13 | class SystemSwap(AgentCheck):
14 |
15 | def check(self, instance):
16 |
17 | if Platform.is_linux():
18 | procfs_path = self.agentConfig.get('procfs_path', '/proc').rstrip('/')
19 | psutil.PROCFS_PATH = procfs_path
20 |
21 | swap_mem = psutil.swap_memory()
22 | self.rate('system.swap.swapped_in', swap_mem.sin)
23 | self.rate('system.swap.swapped_out', swap_mem.sout)
24 |
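25 | # Note: psutil's swap_memory().sin/.sout are cumulative byte counters, and
26 | # self.rate() turns consecutive samples into a per-second rate, so these two
27 | # metrics report bytes swapped in/out per second between check runs.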
--------------------------------------------------------------------------------
/conf.d/zk.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - host: localhost
5 | port: 2181
6 | # timeout: 3
7 | # tags:
8 | # - optional_tag1
9 | # - optional_tag2
10 |
11 | # If `expected_mode` is defined we'll send a service check where the
12 | # status is determined by whether the current mode matches the expected.
13 | # Options: leader, follower, standalone
14 | # expected_mode: leader
15 |
16 | # Whether to report the current instance mode as a 0/1 gauge
17 | # For example if the current instance mode is `observer` - `zookeeper.instances.observer` reports as 1
18 | # and `zookeeper.instances.(leader|follower|standalone|etc.)` reports as 0
19 | # report_instance_mode: true
20 |
--------------------------------------------------------------------------------
/conf.d/nginx.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # For every instance, you need an `nginx_status_url` and can optionally
5 | # supply a list of tags. This plugin requires nginx to be compiled with
6 | # the nginx stub status module option, and activated with the correct
7 | # configuration stanza. On debian/ubuntu, this is included in the
8 | # `nginx-extras` package. For more details, see:
9 | #
10 | # http://docs.datadoghq.com/integrations/nginx/
11 | #
12 |
13 | - nginx_status_url: http://localhost/nginx_status/
14 | # tags:
15 | # - instance:foo
16 | #
17 | # - nginx_status_url: http://example2.com:1234/nginx_status/
18 | # ssl_validation: False
19 | # tags:
20 | # - instance:bar
21 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/yarn/cluster_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "clusterMetrics": {
3 | "appsSubmitted": 0,
4 | "appsCompleted": 0,
5 | "appsPending": 0,
6 | "appsRunning": 0,
7 | "appsFailed": 0,
8 | "appsKilled": 0,
9 | "reservedMB": 0,
10 | "availableMB": 17408,
11 | "allocatedMB": 0,
12 | "reservedVirtualCores": 0,
13 | "availableVirtualCores": 7,
14 | "allocatedVirtualCores": 1,
15 | "containersAllocated": 0,
16 | "containersReserved": 0,
17 | "containersPending": 0,
18 | "totalMB": 17408,
19 | "totalVirtualCores": 8,
20 | "totalNodes": 1,
21 | "lostNodes": 0,
22 | "unhealthyNodes": 0,
23 | "decommissionedNodes": 0,
24 | "rebootedNodes": 0,
25 | "activeNodes": 1
26 | }
27 | }
--------------------------------------------------------------------------------
/conf.d/powerdns_recursor.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # The PowerDNS Recursor check retrieves metrics from the Recursor experimental
5 | # web server.
6 | # See https://doc.powerdns.com/3/recursor/settings/#experimental-webserver
7 | # The API key has to be set as well:
8 | # https://doc.powerdns.com/3/recursor/settings/#experimental-api-key
9 | #
10 | # This check works with PowerDNS Recursor 3.x.
11 |
12 |
13 |
14 | # Host running the recursor.
15 | - host: 127.0.0.1
16 | # Recursor web server port.
17 | port: 8082
18 | # Recursor web server api key.
19 | api_key: pdns_api_key
20 |
21 | # Optional tags to be applied to every emitted metric.
22 | # tags:
23 | # - key:value
24 | # - instance:production
25 |
--------------------------------------------------------------------------------
/tests/core/test_utils_process.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | import os
3 | import unittest
4 |
5 | # project
6 | from utils.process import is_my_process, pid_exists
7 |
8 |
9 | class UtilsProcessTest(unittest.TestCase):
10 | def test_my_own_pid(self):
11 | my_pid = os.getpid()
12 | self.assertTrue(pid_exists(my_pid))
13 | self.assertTrue(is_my_process(my_pid))
14 |
15 | def test_nonexistent_pid(self):
16 | # Scan PIDs upward until we find one that is free
17 | for pid in xrange(30000):
18 | if not pid_exists(pid):
19 | return
20 | raise Exception("Probably a bug in pid_exists or more than 30000 procs!!")
21 |
22 | def test_existing_process(self):
23 | self.assertFalse(is_my_process(1))
24 |
--------------------------------------------------------------------------------
/packaging/osx/app/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | <key>CFBundleDisplayName</key>
6 | <string>Datadog Agent</string>
7 | <key>CFBundleExecutable</key>
8 | <string>gui</string>
9 | <key>CFBundleIconFile</key>
10 | <string>Agent.icns</string>
11 | <key>CFBundlePackageType</key>
12 | <string>APPL</string>
13 | <key>CFBundleIdentifier</key>
14 | <string>com.datadoghq.agent</string>
15 | <key>CFBundleSignature</key>
16 | <string>????</string>
17 | <key>LSUIElement</key>
18 | <true/>
19 | </dict>
20 | </plist>
--------------------------------------------------------------------------------
/conf.d/postfix.yaml.example:
--------------------------------------------------------------------------------
1 | # The user running dd-agent must have passwordless sudo access for the find
2 | # command to run the postfix check. Here's an example:
3 | #
4 | # example /etc/sudoers entry:
5 | # dd-agent ALL=(ALL) NOPASSWD:/usr/bin/find
6 | #
7 | # Redhat/CentOS/Amazon Linux flavours will need to add:
8 | # Defaults:dd-agent !requiretty
9 |
10 | init_config:
11 |
12 | instances:
13 | - directory: /var/spool/postfix
14 | queues:
15 | - incoming
16 | - active
17 | - deferred
18 | tags:
19 | - optional_tag1
20 | - optional_tag2
21 | - directory: /var/spool/postfix-2
22 | queues:
23 | - incoming
24 | - active
25 | - deferred
26 | tags:
27 | - optional_tag3
28 | - optional_tag4
29 |
--------------------------------------------------------------------------------
/packaging/osx/com.datadoghq.Agent.plist.example:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | <key>RunAtLoad</key>
6 | <true/>
7 | <key>Label</key>
8 | <string>com.datadoghq.agent</string>
9 | <key>ProgramArguments</key>
10 | <array>
11 | <string>/opt/datadog-agent/bin/datadog-agent</string>
12 | <string>restart</string>
13 | </array>
14 | <key>UserName</key>
15 | <string>USER_NAME</string>
16 | <key>StandardOutPath</key>
17 | <string>/var/log/datadog/launchd.log</string>
18 | <key>StandardErrorPath</key>
19 | <string>/var/log/datadog/launchd.log</string>
20 | </dict>
21 | </plist>
--------------------------------------------------------------------------------
/ci/ssh.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | namespace :ci do
8 | namespace :ssh do |flavor|
9 | task before_install: ['ci:common:before_install']
10 |
11 | task install: ['ci:common:install']
12 |
13 | task before_script: ['ci:common:before_script']
14 |
15 | task script: ['ci:common:script'] do
16 | this_provides = [
17 | 'ssh'
18 | ]
19 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
20 | end
21 |
22 | task before_cache: ['ci:common:before_cache']
23 |
24 | task cleanup: ['ci:common:cleanup']
25 |
26 | task :execute do
27 | Rake::Task['ci:common:execute'].invoke(flavor)
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/guidata/images/agent.svg:
--------------------------------------------------------------------------------
1 | <!-- SVG markup stripped during extraction; image content not recoverable -->
--------------------------------------------------------------------------------
/tests/checks/fixtures/mapreduce/task_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "tasks": {
3 | "task": [
4 | {
5 | "startTime": 1453761318527,
6 | "finishTime": 0,
7 | "elapsedTime": 99869037,
8 | "progress": 49.11076,
9 | "id": "task_1453738555560_0001_m_000000",
10 | "state": "RUNNING",
11 | "type": "MAP",
12 | "successfulAttempt": "",
13 | "status": "map > map"
14 | },
15 | {
16 | "startTime": 1453761318527,
17 | "finishTime": 0,
18 | "elapsedTime": 123456,
19 | "progress": 32.42940,
20 | "id": "task_1453738555560_0001_r_000000",
21 | "state": "RUNNING",
22 | "type": "REDUCE",
23 | "successfulAttempt": "",
24 | "status": "map > map"
25 | }
26 | ]
27 | }
28 | }
--------------------------------------------------------------------------------
/dogstream/common.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | import calendar
6 | from datetime import datetime
7 |
8 | MAX_TITLE_LEN = 100
9 |
10 |
11 | class ParseError(Exception):
12 | pass
13 |
14 |
15 | def parse_date(date_val, date_format=None):
16 | if date_format:
17 | dt = datetime.strptime(date_val, date_format)
18 | else:
19 | to_try = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S,%f']
20 |
21 | for fmt in to_try:
22 | try:
23 | dt = datetime.strptime(date_val, fmt)
24 | break
25 | except Exception:
26 | pass
27 | else:
28 | raise ParseError(date_val)
29 |
30 | return calendar.timegm(dt.timetuple())
31 |
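32 | # Illustrative example: both default formats parse to the same UTC epoch second,
33 | # since timetuple() drops the microsecond part:
34 | #   parse_date('2016-03-01 12:00:00')     -> 1456833600
35 | #   parse_date('2016-03-01 12:00:00,500') -> 1456833600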
--------------------------------------------------------------------------------
/ci/checks_mock.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | namespace :ci do
8 | namespace :checks_mock do |flavor|
9 | task before_install: ['ci:common:before_install']
10 |
11 | task install: ['ci:common:install']
12 |
13 | task before_script: ['ci:common:before_script']
14 |
15 | task script: ['ci:common:script'] do
16 | this_provides = [
17 | 'checks_mock'
18 | ]
19 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
20 | end
21 |
22 | task before_cache: ['ci:common:before_cache']
23 |
24 | task cleanup: ['ci:common:cleanup']
25 |
26 | task :execute do
27 | Rake::Task['ci:common:execute'].invoke(flavor)
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/ci/docker_daemon.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | namespace :ci do
8 | namespace :docker_daemon do |flavor|
9 | task before_install: ['ci:common:before_install']
10 |
11 | task install: ['ci:common:install']
12 |
13 | task before_script: ['ci:common:before_script']
14 |
15 | task script: ['ci:common:script'] do
16 | this_provides = [
17 | 'docker_daemon'
18 | ]
19 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
20 | end
21 |
22 | task before_cache: ['ci:common:before_cache']
23 |
24 | task cleanup: ['ci:common:cleanup']
25 |
26 | task :execute do
27 | Rake::Task['ci:common:execute'].invoke(flavor)
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/ci/core_integration.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | namespace :ci do
8 | namespace :core_integration do |flavor|
9 | task before_install: ['ci:common:before_install']
10 |
11 | task install: ['ci:common:install']
12 |
13 | task before_script: ['ci:common:before_script']
14 |
15 | task script: ['ci:common:script'] do
16 | this_provides = [
17 | 'core_integration'
18 | ]
19 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
20 | end
21 |
22 | task before_cache: ['ci:common:before_cache']
23 |
24 | task cleanup: ['ci:common:cleanup']
25 |
26 | task :execute do
27 | Rake::Task['ci:common:execute'].invoke(flavor)
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/ci/resources/kong/setup_openresty.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | pushd $INTEGRATIONS_DIR/
6 | mkdir -p $OPENRESTY_DIR
7 |
8 | if [ ! "$(ls -A $OPENRESTY_DIR)" ]; then
9 | # Download OpenSSL
10 | OPENSSL_BASE=openssl-$OPENSSL_VERSION
11 | curl https://www.openssl.org/source/$OPENSSL_BASE.tar.gz | tar xz
12 |
13 | # Download OpenResty
14 | OPENRESTY_BASE=openresty-$OPENRESTY_VERSION
15 | curl https://openresty.org/download/$OPENRESTY_BASE.tar.gz | tar xz
16 |
17 | pushd $OPENRESTY_BASE
18 |
19 | ./configure \
20 | --prefix=$OPENRESTY_DIR \
21 | --with-luajit=$LUAJIT_DIR \
22 | --with-openssl=../$OPENSSL_BASE \
23 | --with-pcre-jit \
24 | --with-ipv6 \
25 | --with-http_realip_module \
26 | --with-http_ssl_module \
27 | --with-http_stub_status_module
28 |
29 | make
30 | make install
31 | popd
32 | fi
33 | popd
--------------------------------------------------------------------------------
/utils/pip-allow-failures.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # (C) Datadog, Inc. 2010-2016
3 | # All rights reserved
4 | # Licensed under Simplified BSD License (see LICENSE)
5 |
6 |
7 | PIP_COMMAND=${PIP_COMMAND:-pip}
8 | PIP_OPTIONS=${PIP_OPTIONS:-}
9 |
10 | while read dependency; do
11 | dependency_stripped="$(echo "${dependency}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"
12 | case "$dependency_stripped" in
13 | # Skip comments
14 | \#*)
15 | continue
16 | ;;
17 | # Skip blank lines
18 | "")
19 | continue
20 | ;;
21 | *)
22 | if $PIP_COMMAND install $PIP_OPTIONS "$dependency_stripped" 2>&1; then
23 | echo "$dependency_stripped is installed"
24 | else
25 | echo "Could not install $dependency_stripped, skipping"
26 | fi
27 | ;;
28 | esac
29 | done < "$1"
30 |
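31 | # Usage (illustrative): ./utils/pip-allow-failures.sh requirements-opt.txt
32 | # installs each requirement on its own line, logging (instead of aborting on)
33 | # any dependency that fails to install.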
--------------------------------------------------------------------------------
/tests/core/fixtures/config/bad.conf:
--------------------------------------------------------------------------------
1 | [Main]
2 |
3 | # The host of the Datadog intake server to send agent data to
4 | dd_url: https://app.datadoghq.com
5 |
6 | # The Datadog api key to associate your agent's data with your organization.
7 | # Can be found here:
8 | # https://app.datadoghq.com/account/settings
9 | api_key: 1234
10 |
11 | # Force the hostname to whatever you want.
12 | #hostname: mymachine.mydomain
13 |
14 | # Use the amazon EC2 instance-id instead of hostname (unless hostname is
15 | # explicitly set)
16 | use_ec2_instance_id: no
17 |
18 | # Use mount points instead of volumes to track disk and fs metrics
19 | use_mount: no
20 |
21 |
22 | # Start a graphite listener on this port
23 | # graphite_listen_port: 17124
24 |
25 | nagios_log: /var/log/nagios3/nagios.log
26 | nagios_perf_cfg: /var/log/blah.log
27 |
28 | graphite_listen_port: 17126
29 |
--------------------------------------------------------------------------------
/win32/common.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | import ctypes
6 |
7 | def handle_exe_click(name):
8 | ''' When the executables are clicked directly in the UI, we must let the
9 | user know that they have to install the program as a service instead of
10 | running it directly. '''
11 | message = """To use %(name)s, you must install it as a service.
12 |
13 | To install %(name)s as a service, you must run the following in the console:
14 |
15 | %(name)s.exe install
16 |
17 | For all available options, including how to install the service for a particular user, run the following in a console:
18 |
19 | %(name)s.exe help
20 | """ % ({'name': name})
21 | MessageBox = ctypes.windll.user32.MessageBoxA
22 | MessageBox(None, message, 'Install as a Service', 0)
23 |
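24 | # Illustrative call: handle_exe_click('ddagent') pops a blocking dialog telling
25 | # the user to run `ddagent.exe install` rather than launching the exe directly.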
--------------------------------------------------------------------------------
/utils/shell.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # stdlib
6 | import os
7 |
8 |
9 | def which(program):
10 | """ shutil.which() goodness in python2.7
11 | Taken from
12 | http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
13 | """
14 | def is_exe(fpath):
15 | return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
16 |
17 | fpath, fname = os.path.split(program)
18 | if fpath:
19 | if is_exe(program):
20 | return program
21 | else:
22 | for path in os.environ["PATH"].split(os.pathsep):
23 | path = path.strip('"')
24 | exe_file = os.path.join(path, program)
25 | if is_exe(exe_file):
26 | return exe_file
27 |
28 | return None
29 |
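30 | # Illustrative: which('sh') returns something like '/bin/sh' on most unixes;
31 | # which('definitely-not-a-binary') returns None.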
--------------------------------------------------------------------------------
/conf.d/etcd.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # API endpoint of your etcd instance
5 | - url: "https://server:port"
6 | # Change the time to wait on an etcd API request
7 | # timeout: 5
8 |
9 | # If certificate-based authentication of clients is enabled on your etcd server,
10 | # specify the key file and the certificate file that the check should use.
11 | # ssl_keyfile: /path/to/key/file
12 | # ssl_certfile: /path/to/certificate/file
13 |
14 | # Set to `false` to disable the validation of the server's SSL certificates (default: true).
15 | # ssl_cert_validation: true
16 |
17 | # If ssl_cert_validation is enabled, you can provide a custom file
18 | # that lists trusted CA certificates (optional).
19 | # ssl_ca_certs: /path/to/CA/certificate/file
20 |
21 | # optionally, add tags:
22 | # tags:
23 | # - foo
24 | # - bar
25 |
--------------------------------------------------------------------------------
/conf.d/yarn.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # The YARN check retrieves metrics from YARN's ResourceManager. This
5 | # check must be run from the Master Node and the ResourceManager URI must
6 | # be specified below. The ResourceManager URI is composed of the
7 | # ResourceManager's hostname and port.
8 | #
9 | # The ResourceManager hostname can be found in the yarn-site.xml conf file
10 | # under the property yarn.resourcemanager.address
11 | #
12 | # The ResourceManager port can be found in the yarn-site.xml conf file under
13 | # the property yarn.resourcemanager.webapp.address
14 | #
15 | - resourcemanager_uri: http://localhost:8088
16 |
17 | # A required friendly name for the cluster.
18 | # cluster_name: MyYarnCluster
19 |
20 | # Optional tags to be applied to every emitted metric.
21 | # tags:
22 | # - key:value
23 | # - instance:production
--------------------------------------------------------------------------------
/conf.d/hdfs.yaml.example:
--------------------------------------------------------------------------------
1 | # DEPRECATED:
2 | # This check is deprecated and will be removed in a future version of the agent
3 | # Please use the `hdfs_namenode` and `hdfs_datanode` checks instead
4 |
5 | init_config:
6 | # HDFS check does not require any init_config
7 |
8 | instances:
9 | # Each instance requires a namenode.
10 | # Port defaults to 8020.
11 | # When using HDFS in High Availability mode,
12 | # you can specify multiple urls and ports.
13 | # WARNING: HA Mode is only available with Snakebite > 2.2.0
14 | # You have to manually install it with the following command:
15 | # sudo /opt/datadog-agent/embedded/bin/pip install --upgrade snakebite
16 | #
17 | - namenodes:
18 | - url: localhost
19 | port: 8020
20 | # - url: namenode2.example.com # Optional, to be set when using HA Mode
21 | # port: 8020 # Optional, to be set when using HA Mode
22 | #
23 | # tags:
24 | # - optional_tag
25 |
--------------------------------------------------------------------------------
/ci/resources/supervisord/supervisord.conf:
--------------------------------------------------------------------------------
1 | [unix_http_server]
2 | file=VOLATILE_DIR/supervisor.sock
3 |
4 | [rpcinterface:supervisor]
5 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
6 |
7 | [supervisorctl]
8 | serverurl=unix://VOLATILE_DIR/supervisor/supervisor.sock
9 |
10 | [supervisord]
11 | logfile=VOLATILE_DIR/supervisor/supervisord.log
12 | logfile_maxbytes=50MB
13 | logfile_backups=10
14 | loglevel=info
15 | pidfile=VOLATILE_DIR/supervisor/supervisord.pid
16 | childlogdir=VOLATILE_DIR/supervisor
17 |
18 | [program:program_0]
19 | command=/bin/sh VOLATILE_DIR/supervisor/program_0.sh
20 | autostart=true
21 | autorestart=false
22 |
23 | [program:program_1]
24 | command=/bin/sh VOLATILE_DIR/supervisor/program_1.sh
25 | autostart=true
26 | autorestart=false
27 |
28 | [program:program_2]
29 | command=/bin/sh VOLATILE_DIR/supervisor/program_2.sh
30 | autostart=true
31 | autorestart=false
32 |
--------------------------------------------------------------------------------
/conf.d/activemq_xml.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - url: http://localhost:8161
5 | # the url will probably be something like http://<hostname>:8161
6 | # the agent check will append /admin/xml/queues.jsp to the url
7 | # username: username
8 | # password: password
9 | # suppress_errors: false # suppress connection errors if url is expected to be sometimes offline (eg standby host)
10 |
11 | #detailed_queues: # Optional. If you have more than 300 queues you need to list the ones you want to track
12 | # - queue1
13 | # - queue2
14 |
15 |
16 | #detailed_topics: # Optional. If you have more than 300 topics you need to list the ones you want to track
17 | # - topic1
18 | # - topic2
19 |
20 | #detailed_subscribers: # Optional. If you have more than 300 subscribers you need to list the ones you want to track
21 | # - subscriber1
22 | # - subscriber2
--------------------------------------------------------------------------------
/tests/checks/fixtures/spark/executor_metrics:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "id": "driver",
4 | "hostPort": "10.0.2.15:33870",
5 | "rddBlocks": 99,
6 | "memoryUsed": 98,
7 | "diskUsed": 97,
8 | "activeTasks": 96,
9 | "failedTasks": 95,
10 | "completedTasks": 94,
11 | "totalTasks": 93,
12 | "totalDuration": 92,
13 | "totalInputBytes": 91,
14 | "totalShuffleRead": 90,
15 | "totalShuffleWrite": 89,
16 | "maxMemory": 278019440,
17 | "executorLogs": {}
18 | },
19 | {
20 | "id": "1",
21 | "hostPort": "10.0.2.15:33870",
22 | "rddBlocks": 1,
23 | "memoryUsed": 2,
24 | "diskUsed": 3,
25 | "activeTasks": 4,
26 | "failedTasks": 5,
27 | "completedTasks": 6,
28 | "totalTasks": 7,
29 | "totalDuration": 8,
30 | "totalInputBytes": 9,
31 | "totalShuffleRead": 10,
32 | "totalShuffleWrite": 11,
33 | "maxMemory": 555755765,
34 | "executorLogs": {}
35 | }
36 | ]
--------------------------------------------------------------------------------
/tests/checks/fixtures/varnish/varnishadm_dump:
--------------------------------------------------------------------------------
1 | Backend b0 is Sick
2 | Current states good: 0 threshold: 3 window: 5
3 | Average responsetime of good probes: 0.000000
4 | Oldest Newest
5 | ================================================================
6 | 4444444444444444444444444444444444444444444444444444444444444444 Good IPv4
7 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX Good Xmit
8 | RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR Good Recv
9 | ---------------------------------------------------------------- Happy
10 | Backend b1 is Sick
11 | Current states good: 0 threshold: 3 window: 5
12 | Average responsetime of good probes: 0.000000
13 | Oldest Newest
14 | ================================================================
15 | ---------------------------------------------------------------- Happy
16 |
--------------------------------------------------------------------------------
/conf.d/tokumx.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # Specify the MongoDB URI, with database to use for reporting (defaults to "admin")
5 | # E.g. mongodb://datadog:LnCbkX4uhpuLHSUrcayEoAZA@localhost:27017/my-db
6 | - server: mongodb://localhost:27017
7 | # tags:
8 | # - optional_tag1
9 | # - optional_tag2
10 |
11 | # Optional SSL parameters, see https://github.com/mongodb/mongo-python-driver/blob/2.6.3/pymongo/mongo_client.py#L193-L212
12 | # for more details
13 | #
14 | # ssl: False # Optional (default to False)
15 | # ssl_keyfile: # Path to the private keyfile used to identify the local connection against mongod.
16 | # ssl_certfile: # Path to the certificate file used to identify the local connection against mongod.
17 | # ssl_cert_reqs: # Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided.
18 | # ssl_ca_certs: # Path to the ca_certs file
19 |
--------------------------------------------------------------------------------
/conf.d/windows_service.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # For each instance you define what host to connect to (defaulting to the
5 | # current host) as well as a list of services you care about. The service
6 | # names should match the Service name in the properties and NOT the display
7 | # name in the services.msc list.
8 | #
9 | # If you want to check services on a remote host, you have to specify a
10 | # hostname and (optional) credentials
11 | #
12 | #- host: MYREMOTESERVER
13 | # username: MYREMOTESERVER\fred
14 | # password: mysecretpassword
15 | # services:
16 | # - ServiceExample
17 | # - AnotherService
18 | #
19 | # The sample configuration will monitor the WMI Performance Adapter service,
20 | # named "wmiApSrv" in the service properties.
21 | #
22 | - host: . # "." means the current host
23 | services:
24 | - wmiApSrv
25 |
--------------------------------------------------------------------------------
/checks.d/system_core.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # 3p
6 | import psutil
7 |
8 | # project
9 | from checks import AgentCheck
10 | from utils.platform import Platform
11 |
12 |
13 | class SystemCore(AgentCheck):
14 |
15 | def check(self, instance):
16 |
17 | if Platform.is_linux():
18 | procfs_path = self.agentConfig.get('procfs_path', '/proc').rstrip('/')
19 | psutil.PROCFS_PATH = procfs_path
20 |
21 | cpu_times = psutil.cpu_times(percpu=True)
22 | self.gauge("system.core.count", len(cpu_times))
23 |
24 | for i, cpu in enumerate(cpu_times):
25 | for key, value in cpu._asdict().iteritems():
26 | self.rate(
27 | "system.core.{0}".format(key),
28 | 100.0 * value,
29 | tags=["core:{0}".format(i)]
30 | )
31 |
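32 | # Note: psutil's cpu_times are cumulative seconds per state; multiplying by 100
33 | # and submitting with self.rate() yields, approximately, the percentage of time
34 | # each core spent in each state between two check runs.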
--------------------------------------------------------------------------------
/ci/skeleton.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | namespace :ci do
8 | namespace :my_new_flavor do |flavor|
9 | task before_install: ['ci:common:before_install']
10 |
11 | task install: ['ci:common:install']
12 |
13 | task before_script: ['ci:common:before_script']
14 | # If you need to wait for a program to start, please use Wait.for,
15 | # see https://github.com/DataDog/dd-agent/pull/1547
16 |
17 | task script: ['ci:common:script'] do
18 | this_provides = [
19 | 'my_new_flavor'
20 | ]
21 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
22 | end
23 |
24 | task before_cache: ['ci:common:before_cache']
25 |
26 | task cleanup: ['ci:common:cleanup']
27 |
28 | task :execute do
29 | Rake::Task['ci:common:execute'].invoke(flavor)
30 | end
31 | end
32 | end
33 |
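34 | # To run a flavor locally (illustrative):
35 | #   bundle exec rake ci:my_new_flavor:execute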
--------------------------------------------------------------------------------
/tests/checks/fixtures/vsphere/vsphere_topology.json:
--------------------------------------------------------------------------------
1 | {"childEntity": [{"hostFolder": {"childEntity": [{"host": [{"spec": "HostSystem", "name": "host1", "vm": []}, {"spec": "HostSystem", "name": "host2", "vm": []}], "spec": "ClusterComputeResource", "name": "compute_resource1"}]}, "spec": "Datacenter", "name": "datacenter1"}, {"childEntity": [{"hostFolder": {"childEntity": [{"host": [{"vm": [{"runtime": {"powerState": "poweredOn"}, "spec": "VirtualMachine", "name": "vm1"}, {"runtime": {"powerState": "poweredOn"}, "spec": "VirtualMachine", "name": "vm2", "label": true}, {"runtime": {"powerState": "poweredOff"}, "spec": "VirtualMachine", "name": "vm3", "label": true}, {"runtime": {"powerState": "poweredOn"}, "spec": "VirtualMachine", "name": "vm4", "label": true}], "spec": "HostSystem", "name": "host3"}], "spec": "ClusterComputeResource", "name": "compute_resource2"}]}, "spec": "Datacenter", "name": "datacenter2"}], "spec": "Folder", "name": "folder1"}], "spec": "Folder", "name": "rootFolder"}
2 |
--------------------------------------------------------------------------------
/ci/resources/haproxy/haproxy-open.cfg:
--------------------------------------------------------------------------------
1 | # Basic configuration
2 | global
3 | log 127.0.0.1 local0
4 | maxconn 4096
5 |
6 | defaults
7 | log global
8 | mode http
9 | option httplog
10 | option dontlognull
11 | retries 3
12 | option redispatch
13 | option forwardfor
14 | timeout client 1000
15 | timeout server 1000
16 | timeout connect 1000
17 |
18 | frontend public
19 | bind 127.0.0.1:3836 # DTDG
20 | default_backend datadog
21 |
22 | backend datadog
23 | stats uri /stats
24 | stats refresh 5s
25 |
26 | balance roundrobin
27 | server singleton:8080 127.0.0.1:8080
28 | server singleton:8081 127.0.0.1:8081
29 | server otherserver 127.0.0.1:1234
30 |
31 | backend anotherbackend
32 | stats uri /stats
33 | stats refresh 5s
34 |
35 | balance roundrobin
36 | server singleton:8080 127.0.0.1:8080
37 | server singleton:8081 127.0.0.1:8081
38 | server otherserver 127.0.0.1:1234
39 |
--------------------------------------------------------------------------------
/conf.d/tcp_check.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - name: My first service
5 | host: myhost.example.com
6 | port: 8080
7 | timeout: 1
8 |
9 | # The (optional) window and threshold parameters allow you to trigger
10 | # alerts only if the check fails x times within the last y attempts
11 | # where x is the threshold and y is the window.
12 | #
13 | # threshold: 3
14 | # window: 5
15 |
16 | # The (optional) collect_response_time parameter will instruct the
17 | # check to create a metric 'network.tcp.response_time', tagged with
18 | # the url, reporting the response time in seconds.
19 | #
20 | # collect_response_time: false
21 |
22 | # The (optional) skip_event parameter will instruct the check to not
23 | # create any event to avoid duplicates with a server side service check.
24 | # This defaults to False.
25 | #
26 | skip_event: true
27 |
28 | # - name: My second service
29 | # host: 127.0.0.1
30 | # port: 80
31 |
--------------------------------------------------------------------------------
/tests/checks/mock/test_docker.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 | # stdlib
5 | import mock
6 | import unittest
7 |
8 | from utils.dockerutil import DockerUtil
9 |
10 |
11 | class TestDockerutil(unittest.TestCase):
12 | def setUp(self):
13 | self.dockerutil = DockerUtil()
14 |
15 | @mock.patch('utils.dockerutil.DockerUtil.client')
16 | def test_get_events(self, mocked_client):
17 | mocked_client.events.return_value = [
18 | {'status': 'stop', 'id': '1234567890', 'from': '1234567890', 'time': 1423247867}
19 | ]
20 | events_generator, _ = self.dockerutil.get_events()
21 | self.assertEqual(len(list(events_generator)), 1)
22 |
23 | # bug in dockerpy, we should be resilient
24 | mocked_client.events.return_value = [u'an error from Docker API here']
25 | events_generator, _ = self.dockerutil.get_events()
26 | self.assertEqual(len(list(events_generator)), 0)
27 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/riakcs/riakcs_out.python:
--------------------------------------------------------------------------------
1 | {'object_head': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'bucket_list_pool': [5, 0, 0], 'object_put': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'object_delete': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'bucket_put_acl': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'block_get': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'legend': [['meter_count', 'meter_rate', 'latency_mean', 'latency_median', 'latency_95', 'latency_99'], ['workers', 'overflow', 'size']], 'service_get_buckets': [1, 4.5063025253016977e-10, 0.0, 0.0, 0.0, 0.0], 'block_delete': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'object_get': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'bucket_create': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'block_put': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'bucket_get_acl': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'request_pool': [127, 0, 1], 'manifest_siblings_bp_sleep': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'bucket_list_keys': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'object_get_acl': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'object_put_acl': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'bucket_delete': [0, 0.0, 0.0, 0.0, 0.0, 0.0], 'block_get_retry': [0, 0.0, 0.0, 0.0, 0.0, 0.0]}
--------------------------------------------------------------------------------
/conf.d/couch.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # The `db_whitelist` should contain the names of the databases meant to be checked.
5 | # If no whitelist is specified, all databases will be checked.
6 | #
7 | # The `db_blacklist` should contain the names of any databases meant to be excluded
8 | # from being checked. If a database is listed in both the blacklist and whitelist,
9 | # the blacklist will take precedence.
10 | #
11 | # Note that no more than 50 databases will be checked: if you have
12 | # whitelisted more than 50, or have more than 50 databases and no whitelist,
13 | # only the first 50 databases will be checked.
14 | #
15 | - server: http://localhost:5984
16 | # user: username # optional
17 | # password: password # optional
18 | # timeout: 5 # in seconds
19 | # db_whitelist:
20 | # - db1
21 | # - db2
22 | # - db3
23 | # db_blacklist:
24 | # - db1
25 | # - db2
26 | # - db3
27 |
28 |
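A hedged sketch of the whitelist/blacklist/50-database rule spelled out in the comments above (not the actual couch check code):

```python
MAX_DB_COUNT = 50  # per the comments above, at most 50 databases are checked

def databases_to_check(all_dbs, db_whitelist=None, db_blacklist=None):
    dbs = [db for db in all_dbs if not db_whitelist or db in db_whitelist]
    # the blacklist takes precedence over the whitelist
    dbs = [db for db in dbs if not db_blacklist or db not in db_blacklist]
    return dbs[:MAX_DB_COUNT]
```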
--------------------------------------------------------------------------------
/ci/resources/haproxy/haproxy.cfg:
--------------------------------------------------------------------------------
1 | # Basic configuration
2 | global
3 | log 127.0.0.1 local0
4 | maxconn 4096
5 |
6 | defaults
7 | log global
8 | mode http
9 | option httplog
10 | option dontlognull
11 | retries 3
12 | option redispatch
13 | option forwardfor
14 | timeout client 1000
15 | timeout server 1000
16 | timeout connect 1000
17 |
18 | frontend public
19 | bind 127.0.0.1:3835 # DTDG
20 | default_backend datadog
21 |
22 | backend datadog
23 | stats uri /stats
24 | stats auth datadog:isdevops
25 | stats refresh 5s
26 |
27 | balance roundrobin
28 | server singleton:8080 127.0.0.1:8080
29 | server singleton:8081 127.0.0.1:8081
30 | server otherserver 127.0.0.1:1234
31 |
32 | backend anotherbackend
33 | stats uri /stats
34 | stats auth datadog:isdevops
35 | stats refresh 5s
36 |
37 | balance roundrobin
38 | server singleton:8080 127.0.0.1:8080
39 | server singleton:8081 127.0.0.1:8081
40 | server otherserver 127.0.0.1:1234
41 |
--------------------------------------------------------------------------------
/tests/checks/mock/test_ganglia.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | from cStringIO import StringIO
3 | import logging
4 | import subprocess
5 | import time
6 | import unittest
7 |
8 | # 3p
9 | import xml.etree.ElementTree as tree
10 |
11 | # project
12 | from checks.ganglia import Ganglia
13 | from tests.checks.common import Fixtures
14 |
15 |
16 | class TestGanglia(unittest.TestCase):
17 | def testSpeed(self):
18 | # Pretend to be gmetad and serve a large piece of content
19 | original_file = Fixtures.file('ganglia.txt')
20 | subprocess.Popen("nc -l 8651 < %s" % original_file, shell=True)
21 | # Wait for 1 second
22 | time.sleep(1)
23 |
24 | g = Ganglia(logging.getLogger(__file__))
25 | parsed = StringIO(g.check({'ganglia_host': 'localhost', 'ganglia_port': 8651}))
26 | original = Fixtures.file('ganglia.txt')
27 | x1 = tree.parse(parsed)
28 | x2 = tree.parse(original)
29 | # Cursory test
30 | self.assertEqual([c.tag for c in x1.getroot()], [c.tag for c in x2.getroot()])
31 |
--------------------------------------------------------------------------------
/tests/checks/mock/test_system_swap.py:
--------------------------------------------------------------------------------
1 | import mock
2 |
3 | from tests.checks.common import AgentCheckTest
4 |
5 | class _PSUtilSwapStatsMock(object):
6 | def __init__(self, sin, sout):
7 | self.sin = sin
8 | self.sout = sout
9 |
10 | ORIG_SWAP_IN = 115332743168
11 | ORIG_SWAP_OUT = 22920884224
12 |
13 | SWAP_IN_INCR = 2
14 | SWAP_OUT_INCR = 4
15 |
16 | MOCK_PSUTIL_SWAP_STATS = [
17 | _PSUtilSwapStatsMock(ORIG_SWAP_IN, ORIG_SWAP_OUT),
18 | _PSUtilSwapStatsMock(ORIG_SWAP_IN + SWAP_IN_INCR, ORIG_SWAP_OUT + SWAP_OUT_INCR),
19 | ]
20 |
21 | class SystemSwapTestCase(AgentCheckTest):
22 |
23 | CHECK_NAME = 'system_swap'
24 |
25 | @mock.patch('psutil.swap_memory', side_effect=MOCK_PSUTIL_SWAP_STATS)
26 | def test_system_swap(self, mock_swap_stats):
27 | self.run_check_twice({"instances": [{}]}) # Run check twice, sleeping for 1 sec in between
28 | self.assertMetric('system.swap.swapped_in', value=SWAP_IN_INCR, count=1)
29 | self.assertMetric('system.swap.swapped_out', value=SWAP_OUT_INCR, count=1)
30 |
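The `side_effect` list feeds two successive `psutil.swap_memory()` snapshots to the two check runs; the asserted values are simply the deltas between the snapshots (a hedged reading of what `run_check_twice` exercises here):

```python
# deltas between the two mocked snapshots match the asserted metric values
delta_in = MOCK_PSUTIL_SWAP_STATS[1].sin - MOCK_PSUTIL_SWAP_STATS[0].sin     # == SWAP_IN_INCR
delta_out = MOCK_PSUTIL_SWAP_STATS[1].sout - MOCK_PSUTIL_SWAP_STATS[0].sout  # == SWAP_OUT_INCR
```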
--------------------------------------------------------------------------------
/tests/checks/fixtures/yarn/apps_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "apps": {
3 | "app": [
4 | {
5 | "finishedTime": 1326815598530,
6 | "amContainerLogs": "http://host.domain.com:8042/node/containerlogs/container_1326815542473_0001_01_000001",
7 | "trackingUI": "History",
8 | "state": "RUNNING",
9 | "user": "user1",
10 | "id": "application_1326815542473_0001",
11 | "clusterId": 1326815542473,
12 | "finalStatus": "SUCCEEDED",
13 | "amHostHttpAddress": "host.domain.com:8042",
14 | "progress": 100,
15 | "name": "word count",
16 | "startedTime": 1326815573334,
17 | "elapsedTime": 25196,
18 | "diagnostics": "",
19 | "trackingUrl": "http://host.domain.com:8088/proxy/application_1326815542473_0001/jobhistory/job/job_1326815542473_1_1",
20 | "queue": "default",
21 | "allocatedMB": 0,
22 | "allocatedVCores": 0,
23 | "runningContainers": 0,
24 | "memorySeconds": 151730,
25 | "vcoreSeconds": 103
26 | }
27 | ]
28 | }
29 | }
--------------------------------------------------------------------------------
/ci/resources/lighttpd/lighttpd.conf:
--------------------------------------------------------------------------------
1 | server.modules = (
2 | "mod_access",
3 | "mod_alias",
4 | "mod_compress",
5 | "mod_redirect",
6 | "mod_rewrite",
7 | "mod_status"
8 | )
9 |
10 | server.document-root = "/var/www"
11 | server.upload-dirs = ( "/var/cache/lighttpd/uploads" )
12 | server.errorlog = "%PATH%/error.log"
13 | server.pid-file = "%PATH%/lighttpd.pid"
14 | server.username = "www-data"
15 | server.groupname = "www-data"
16 | server.port = 9449
17 | status.status-url = "/server-status"
18 | index-file.names = ( "index.php", "index.html",
19 | "index.htm", "default.htm",
20 | " index.lighttpd.html" )
21 |
22 | url.access-deny = ( "~", ".inc" )
23 |
24 | static-file.exclude-extensions = ( ".php", ".pl", ".fcgi" )
25 |
26 | ## Use ipv6 if available
27 | #include_shell "/usr/share/lighttpd/use-ipv6.pl"
28 |
29 | dir-listing.encoding = "utf-8"
30 | server.dir-listing = "enable"
31 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/hdfs_namenode/hdfs_namesystem:
--------------------------------------------------------------------------------
1 | {"beans":[{"name":"Hadoop:service=NameNode,name=FSNamesystem","modelerType":"FSNamesystem","tag.Context":"dfs","tag.HAState":"active","tag.TotalSyncTimes":"15 ","tag.Hostname":"dev.minerkasch.com","MissingBlocks":0,"MissingReplOneBlocks":0,"ExpiredHeartbeats":0,"TransactionsSinceLastCheckpoint":7,"TransactionsSinceLastLogRoll":7,"LastWrittenTransactionId":255775,"LastCheckpointTime":1455829366300,"CapacityTotal":41167421440,"CapacityTotalGB":38.0,"CapacityUsed":504754176,"CapacityUsedGB":0.0,"CapacityRemaining":27087769600,"CapacityRemainingGB":25.0,"CapacityUsedNonDFS":13574897664,"TotalLoad":2,"SnapshottableDirectories":0,"Snapshots":0,"LockQueueLength":0,"BlocksTotal":27689,"NumFilesUnderConstruction":0,"NumActiveClients":0,"FilesTotal":82990,"PendingReplicationBlocks":0,"UnderReplicatedBlocks":27689,"CorruptBlocks":1,"ScheduledReplicationBlocks":0,"PendingDeletionBlocks":0,"ExcessBlocks":0,"PostponedMisreplicatedBlocks":0,"PendingDataNodeMessageCount":0,"MillisSinceLastLoadedEdits":0,"BlockCapacity":2097152,"StaleDataNodes":0,"TotalFiles":82990,"TotalSyncCount":8}]}
--------------------------------------------------------------------------------
/tests/checks/mock/test_windows_service.py:
--------------------------------------------------------------------------------
1 | # project
2 | from checks import AgentCheck
3 | from tests.core.test_wmi import TestCommonWMI
4 | from tests.checks.common import AgentCheckTest
5 |
6 |
7 | class WindowsServiceTestCase(AgentCheckTest, TestCommonWMI):
8 | CHECK_NAME = 'windows_service'
9 |
10 | WIN_SERVICES_CONFIG = {
11 | 'host': ".",
12 | 'services': ["WinHttpAutoProxySvc", "WSService"]
13 | }
14 |
15 | def test_check(self):
16 | """
17 | Returns the right service checks
18 | """
19 | # Run check
20 | config = {
21 | 'instances': [self.WIN_SERVICES_CONFIG]
22 | }
23 |
24 | self.run_check(config)
25 |
26 | # Test service checks
27 | self.assertServiceCheck('windows_service.state', status=AgentCheck.OK, count=1,
28 | tags=[u'service:WinHttpAutoProxySvc'])
29 | self.assertServiceCheck('windows_service.state', status=AgentCheck.CRITICAL, count=1,
30 | tags=[u'service:WSService'])
31 |
32 | self.coverage_report()
33 |
--------------------------------------------------------------------------------
/packaging/centos/setup-supervisor.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/python
2 | # (C) Datadog, Inc. 2010-2016
3 | # All rights reserved
4 | # Licensed under Simplified BSD License (see LICENSE)
5 |
6 | import ConfigParser
7 | import sys
8 |
9 |
10 | def main():
11 |
12 | if len(sys.argv) < 3 or len(sys.argv) > 4:
13 | return False
14 |
15 | source = sys.argv[1]
16 | dest = sys.argv[2]
17 |
18 | # Read config files
19 | new_config = ConfigParser.RawConfigParser()
20 | current_config = ConfigParser.RawConfigParser()
21 |
22 | new_config.read(source)
23 | current_config.read(dest)
24 |
25 | print "Cleaning up supervisord configuration"
26 | # Remove sections from new_config in current_config
27 | for section in new_config.sections():
28 | if current_config.has_section(section):
29 | if section != "supervisorctl" and section != "supervisord":
30 | current_config.remove_section(section)
31 |
32 | # Write out config
33 | f = open(dest,'wb')
34 | current_config.write(f)
35 | f.close()
36 |
37 | if __name__ == "__main__":
38 | main()
39 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/mapreduce/job_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "jobs": {
3 | "job": [
4 | {
5 | "startTime": 1453761316277,
6 | "finishTime": 0,
7 | "elapsedTime": 99221829,
8 | "id": "job_1453738555560_0001",
9 | "name": "WordCount",
10 | "user": "vagrant",
11 | "state": "RUNNING",
12 | "mapsTotal": 1,
13 | "mapsCompleted": 0,
14 | "reducesTotal": 1,
15 | "reducesCompleted": 0,
16 | "mapProgress": 48.335266,
17 | "reduceProgress": 0.0,
18 | "mapsPending": 0,
19 | "mapsRunning": 1,
20 | "reducesPending": 1,
21 | "reducesRunning": 0,
22 | "uberized": false,
23 | "diagnostics": "",
24 | "newReduceAttempts": 1,
25 | "runningReduceAttempts": 0,
26 | "failedReduceAttempts": 0,
27 | "killedReduceAttempts": 0,
28 | "successfulReduceAttempts": 0,
29 | "newMapAttempts": 0,
30 | "runningMapAttempts": 1,
31 | "failedMapAttempts": 1,
32 | "killedMapAttempts": 0,
33 | "successfulMapAttempts": 0
34 | }
35 | ]
36 | }
37 | }
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/wix/confd.xslt:
--------------------------------------------------------------------------------
[XML markup lost in extraction: the elements of this XSL transform were stripped, leaving only stray line numbers and two literal "yes" text nodes.]
--------------------------------------------------------------------------------
/tests/checks/integration/test_disk.py:
--------------------------------------------------------------------------------
1 | # 3p
2 | from nose.plugins.attrib import attr
3 |
4 | # project
5 | from tests.checks.common import AgentCheckTest
6 |
7 |
8 | @attr(requires='system')
9 | class TestCheckDisk(AgentCheckTest):
10 | CHECK_NAME = 'disk'
11 |
12 | DISK_GAUGES = [
13 | 'system.disk.total',
14 | 'system.disk.used',
15 | 'system.disk.free',
16 | 'system.disk.in_use',
17 | ]
18 |
19 | INODE_GAUGES = [
20 | 'system.fs.inodes.total',
21 | 'system.fs.inodes.used',
22 | 'system.fs.inodes.free',
23 | 'system.fs.inodes.in_use'
24 | ]
25 |
26 | # Really a basic check to see if all metrics are there
27 | def test_check(self):
28 | self.run_check({'instances': [{'use_mount': 'no'}]})
29 |
30 | # Assert metrics
31 | for metric in self.DISK_GAUGES + self.INODE_GAUGES:
32 | self.assertMetric(metric, tags=[])
33 |
34 | self.coverage_report()
35 |
36 | # A config with two instances should raise an exception
37 | def test_bad_config(self):
38 | self.assertRaises(Exception,
39 | lambda: self.run_check({'instances': [{}, {}]}))
40 |
--------------------------------------------------------------------------------
/tests/checks/integration/test_linux_proc_extras.py:
--------------------------------------------------------------------------------
1 | # 3p
2 | from nose.plugins.attrib import attr
3 |
4 | # project
5 | from tests.checks.common import AgentCheckTest
6 |
7 | @attr('linux')
8 | @attr(requires='linux')
9 | class TestCheckLinuxProcExtras(AgentCheckTest):
10 | CHECK_NAME = 'linux_proc_extras'
11 |
12 | INODE_GAUGES = [
13 | 'system.inodes.total',
14 | 'system.inodes.used'
15 | ]
16 |
17 | PROC_COUNTS = [
18 | 'system.linux.context_switches',
19 | 'system.linux.processes_created',
20 | 'system.linux.interrupts'
21 | ]
22 |
23 | ENTROPY_GAUGES = [
24 | 'system.entropy.available'
25 | ]
26 |
27 | PROCESS_STATS_GAUGES = [
28 | 'system.processes.states',
29 | 'system.processes.priorities'
30 | ]
31 |
32 | # Really a basic check to see if all metrics are there
33 | def test_check(self):
34 | self.run_check({'instances': []})
35 |
36 | # Assert metrics
37 | for metric in self.PROC_COUNTS + self.INODE_GAUGES + self.ENTROPY_GAUGES + self.PROCESS_STATS_GAUGES:
38 | self.assertMetric(metric, tags=[])
39 |
40 | self.coverage_report()
41 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/wix/FindReplace/FindReplace.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "strings"
7 |
8 | "io/ioutil"
9 | )
10 |
11 | func check(e error) {
12 | if e != nil {
13 | // panic terminates the program; the os.Exit(1) that used to follow was unreachable
14 | panic(e)
15 | }
16 |
17 | }
18 |
19 | func main() {
20 | args := os.Args
21 |
22 | if len(args) != 4 {
23 | fmt.Println("Usage: FindReplace.exe path_to_file search_text replace_text\n")
24 | os.Exit(0)
25 | }
26 |
27 | filePath := args[1]
28 | searchText := args[2]
29 | replaceText := args[3]
30 |
31 | if strings.Trim(replaceText, " ") == "" {
32 | fmt.Println("Replace text can't be empty")
33 | os.Exit(0)
34 | }
35 |
36 | parts := strings.Split(replaceText, ":")
37 | if len(parts) == 2 && strings.Trim(parts[1], " ") == "" {
38 | fmt.Println("You can't specify an empty key.")
39 | os.Exit(0)
40 | }
41 |
42 | contents, err := ioutil.ReadFile(filePath)
43 | check(err)
44 | contentsString := string(contents)
45 | newContents := strings.Replace(contentsString, searchText, replaceText, -1)
46 | newContentsByte := []byte(newContents)
47 | err = ioutil.WriteFile(filePath, newContentsByte, 0644)
48 | check(err)
49 | os.Exit(0)
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/tests/core/test_utils_net.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | from unittest import TestCase
3 | import socket
4 |
5 | # 3p
6 | from nose.plugins.skip import SkipTest
7 |
8 | # project
9 | from utils.net import inet_pton, _inet_pton_win
10 | from utils.net import IPV6_V6ONLY, IPPROTO_IPV6
11 |
12 |
13 | class TestUtilsNet(TestCase):
14 | def test__inet_pton_win(self):
15 |
16 | if _inet_pton_win != inet_pton:
17 | raise SkipTest('socket.inet_pton is available, no need to test')
18 |
19 | # only test what we need this function for
20 | self.assertEqual(inet_pton(socket.AF_INET, '192.168.1.1'), '\xc0\xa8\x01\x01')
21 | self.assertRaises(socket.error, inet_pton, socket.AF_INET, 'foo')
22 | self.assertEqual(inet_pton(socket.AF_INET6, '::1'),
23 | '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01')
24 | self.assertRaises(socket.error, inet_pton, socket.AF_INET6, 'foo')
25 |
26 | def test_constants(self):
27 | if not hasattr(socket, 'IPPROTO_IPV6'):
28 | self.assertEqual(IPPROTO_IPV6, 41)
29 |
30 | if not hasattr(socket, 'IPV6_V6ONLY'):
31 | self.assertEqual(IPV6_V6ONLY, 27)
32 |
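`test_constants` only asserts the fallback values when the platform's `socket` module lacks them; a plausible sketch of how `utils.net` could define such fallbacks, assuming a simple `getattr` pattern (not the confirmed implementation):

```python
import socket

# fall back to the well-known protocol/socket-option numbers when the
# platform's socket module does not expose them
IPPROTO_IPV6 = getattr(socket, 'IPPROTO_IPV6', 41)
IPV6_V6ONLY = getattr(socket, 'IPV6_V6ONLY', 27)
```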
--------------------------------------------------------------------------------
/supervisord.dev.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = supervisord.log
3 | logfile_maxbytes = 50MB
4 | loglevel = info
5 | nodaemon = true
6 | identifier = supervisor
7 | nocleanup = true
8 | pidfile = supervisord.pid
9 | directory = .
10 |
11 | [rpcinterface:supervisor]
12 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
13 |
14 | [inet_http_server]
15 | port = 127.0.0.1:9002
16 |
17 | [supervisorctl]
18 | prompt = datadog
19 |
20 | [program:collector]
21 | command=python agent.py foreground --use-local-forwarder
22 | stdout_logfile=collector.log
23 | redirect_stderr=true
24 | priority=999
25 | startsecs=2
26 |
27 | [program:forwarder]
28 | command=python ddagent.py
29 | stdout_logfile=forwarder.log
30 | redirect_stderr=true
31 | priority=998
32 | startsecs=3
33 |
34 | [program:dogstatsd]
35 | command=python dogstatsd.py --use-local-forwarder
36 | stdout_logfile=dogstatsd.log
37 | redirect_stderr=true
38 | priority=998
39 | startsecs=3
40 |
41 | [program:jmxfetch]
42 | command=python jmxfetch.py
43 | stdout_logfile=jmxfetch.log
44 | redirect_stderr=true
45 | priority=999
46 | startsecs=3
47 |
48 | [group:datadog-agent]
49 | programs=forwarder,collector,dogstatsd,jmxfetch
50 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/network/netstat:
--------------------------------------------------------------------------------
1 | Active Internet connections (servers and established)
2 | Proto Recv-Q Send-Q Local Address Foreign Address State
3 | tcp 0 128 0.0.0.0:6379 0.0.0.0:* LISTEN
4 | tcp 0 128 0.0.0.0:6380 0.0.0.0:* LISTEN
5 | tcp 0 0 127.0.0.1:80 127.0.0.1:51650 TIME_WAIT
6 | tcp 0 0 127.0.0.1:58414 127.0.0.1:9200 TIME_WAIT
7 | tcp 0 0 10.0.2.15:45637 10.0.2.15:9300 ESTABLISHED
8 | tcp6 0 128 :::6380 :::* LISTEN
9 | tcp6 0 0 127.0.0.1:58488 127.0.0.1:7199 TIME_WAIT
10 | tcp6 0 0 127.0.0.1:42395 127.0.0.1:2181 ESTABLISHED
11 | tcp6 0 0 127.0.0.1:58439 127.0.0.1:7199 CLOSING
12 | udp 0 0 127.0.0.1:48135 127.0.0.1:8125 ESTABLISHED
13 | udp 0 0 127.0.0.1:8125 0.0.0.0:*
14 | udp6 0 0 fe80::a00:27ff:fee9:123 :::* ESTABLISHED
15 | udp6 0 0 fe80::a00:27ff:fe1c:123 :::*
16 | udp6 0 0 :::111 :::*
17 |
--------------------------------------------------------------------------------
/tests/core/test_ec2.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | import time
3 | import types
4 |
5 | # 3p
6 | import unittest
7 |
8 | # project
9 | from utils.cloud_metadata import EC2
10 |
11 |
12 | class TestEC2(unittest.TestCase):
13 |
14 | def test_metadata(self):
15 | # Reset metadata just to be sure
16 | EC2.metadata = {}
17 | # Test gathering metadata from ec2
18 | start = time.time()
19 | d = EC2.get_metadata({'collect_instance_metadata': True})
20 | end = time.time()
21 | self.assertTrue(isinstance(d, types.DictType))
22 | # Either we're on ec2 or we're not (at least 7 attributes expected)
23 | assert len(d) == 0 or len(d) >= 7, d
24 | if "instance-id" in d:
25 | assert d["instance-id"].startswith("i-"), d
26 | assert end - start <= 1.15, "It took %s seconds to get ec2 metadata" % (end-start)
27 |
28 | def test_is_default_hostname(self):
29 | for hostname in ['ip-172-31-16-235', 'domU-12-31-38-00-A4-A2', 'domU-12-31-39-02-14-35']:
30 | self.assertTrue(EC2.is_default(hostname))
31 | for hostname in ['i-672d49da', 'localhost', 'robert.redf.org']:
32 | self.assertFalse(EC2.is_default(hostname))
33 |
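`test_is_default_hostname` suggests `EC2.is_default` flags the hostname patterns EC2 assigns by default (`ip-...` and `domU-...`); a hypothetical sketch consistent with those test cases, not the actual implementation:

```python
import re

def is_default_hostname(hostname):
    # default EC2 hostnames look like 'ip-172-31-16-235' or 'domU-12-31-38-00-A4-A2'
    return bool(re.match(r'(ip-|domu-)', hostname.lower()))

assert is_default_hostname('ip-172-31-16-235')
assert not is_default_hostname('i-672d49da')
```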
--------------------------------------------------------------------------------
/ci/resources/nginx/testing.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDGzCCAgOgAwIBAgIJANPtTQnZmjnPMA0GCSqGSIb3DQEBBQUAMCQxEDAOBgNV
3 | BAoMB1Rlc3RpbmcxEDAOBgNVBAMMB1Rlc3RpbmcwHhcNMTUwNzIzMTg0MzAzWhcN
4 | MTYwNzIyMTg0MzAzWjAkMRAwDgYDVQQKDAdUZXN0aW5nMRAwDgYDVQQDDAdUZXN0
5 | aW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA9EXrQeJEeuqIzdVP
6 | NlrglH1E2RiPh5pALrprJTYVZTbGRcubB0wkEMED8TdQTIuZIt56DWFcz6/e9L8v
7 | qexR9Vwa8u0vH9L4gJ4vOdSfiaPh66aRoGhlRaWzaqmbSjGN+06am0EWoLVqCS2+
8 | 9jQ4lva+atCVFMkOenX3niBsKIVI4euEwU7rtQ+0PUJVmEjo6krWukdBEhozpCq6
9 | Zm2B5sqAEYz8hWQpAM6hM58xYL41lSGAV0Cmh37mXMMdIOcK1DY3/pUpCTS5TcSp
10 | xCce61MEZhAHbfJlwJzHem5MSXgxoCzymT7/Ik08GYItAIo0Y0xg3Vhw8Je5pvHW
11 | oz4fhQIDAQABo1AwTjAdBgNVHQ4EFgQUek/ZgsnuBl4acJ/srxIIkJDaUQ0wHwYD
12 | VR0jBBgwFoAUek/ZgsnuBl4acJ/srxIIkJDaUQ0wDAYDVR0TBAUwAwEB/zANBgkq
13 | hkiG9w0BAQUFAAOCAQEASugib099pwRa3nNFwBslQIRFItk6M1izC3SKaTlhrgQl
14 | cxI6Z7RrgpWVC7MEi4Spy+UICdpJh+b8o75XgZPrPgcL6wT0/UYXBGCGTed54RMH
15 | X0OYIkvRQBVk8pm4IteWeQxMyCG+kGc9QTQ1M3KW+Rk2t5KP23bKBiZB986tBQgf
16 | 7uAyJdYU8gtrjJfPkxpWRoltDO53GG1GJOcVZrnIgSzzwP9TLW9PoiyLkBKcZWDJ
17 | 37y6Hq73qaPcTk8RV6Zayxbrc2CoxMTd9J09p1CTDBCpYTKBqwD+9wYykBdVHRr4
18 | BhuJBJNVqBflrncOUKkF+mQjUHc3fb2AiYjvyr2n4w==
19 | -----END CERTIFICATE-----
20 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/mapreduce/job_counter_metrics:
--------------------------------------------------------------------------------
1 | {
2 | "jobCounters": {
3 | "id": "job_1453738555560_0001",
4 | "counterGroup": [
5 | {
6 | "counterGroupName": "org.apache.hadoop.mapreduce.FileSystemCounter",
7 | "counter": [
8 | {
9 | "name": "FILE_BYTES_READ",
10 | "totalCounterValue": 0,
11 | "mapCounterValue": 1,
12 | "reduceCounterValue": 2
13 | },
14 | {
15 | "name": "FILE_BYTES_WRITTEN",
16 | "totalCounterValue": 3,
17 | "mapCounterValue": 4,
18 | "reduceCounterValue": 5
19 | }
20 | ]
21 | },
22 | {
23 | "counterGroupName": "org.apache.hadoop.mapreduce.TaskCounter",
24 | "counter": [
25 | {
26 | "name": "MAP_INPUT_RECORDS",
27 | "totalCounterValue": 6,
28 | "mapCounterValue": 7,
29 | "reduceCounterValue": 8
30 | },
31 | {
32 | "name": "MAP_OUTPUT_RECORDS",
33 | "totalCounterValue": 9,
34 | "mapCounterValue": 10,
35 | "reduceCounterValue": 11
36 | }
37 | ]
38 | }
39 | ]
40 | }
41 | }
--------------------------------------------------------------------------------
/ci/windows.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | namespace :ci do
8 | namespace :windows do |flavor|
9 | task before_install: ['ci:common:before_install']
10 |
11 | task install: ['ci:common:install']
12 |
13 | task before_script: ['ci:common:before_script'] do
14 | # Set up an IIS website
15 | site_name = 'Test-Website-1'
16 | site_folder = File.join(ENV['INTEGRATIONS_DIR'], "iis_#{site_name}")
17 | sh %(powershell New-Item -ItemType Directory -Force #{site_folder})
18 | sh %(powershell Import-Module WebAdministration)
19 | # Create the new website
20 | sh %(powershell New-Website -Name #{site_name} -Port 8080 -PhysicalPath #{site_folder})
21 | end
22 |
23 | task script: ['ci:common:script'] do
24 | this_provides = [
25 | 'windows'
26 | ]
27 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
28 | end
29 |
30 | task before_cache: ['ci:common:before_cache']
31 |
32 | task cleanup: ['ci:common:cleanup']
33 |
34 | task :execute do
35 | Rake::Task['ci:common:execute'].invoke(flavor)
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/conf.d/php_fpm.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - # Get metrics from your FPM pool with this URL
5 | status_url: http://localhost/status
6 | # Get a reliable service check of your FPM pool with that one
7 | ping_url: http://localhost/ping
8 | # Set the expected reply to the ping.
9 | ping_reply: pong
10 | # These 2 URLs should follow the options from your FPM pool
11 | # See http://php.net/manual/en/install.fpm.configuration.php
12 | # * pm.status_path
13 | # * ping.path
14 | # You should configure your fastcgi passthru (nginx/apache) to
15 | # catch these URLs and redirect them through the FPM pool target
16 | # you want to monitor (FPM `listen` directive in the config, usually
17 | # a UNIX socket or TCP socket).
18 | #
19 | # Use this if you have basic authentication on these pages
20 | # user: bits
21 | # password: D4T4D0G
22 | #
23 | # If your FPM pool is only accessible via a specific HTTP vhost, you can
24 | # pass in a custom Host header like so
25 | # http_host: such.production.host
26 | #
27 | # Array of custom tags
28 | # By default metrics and service check will be tagged by pool and host
29 | # tags:
30 | # - instance:foo
31 |
--------------------------------------------------------------------------------
/ci/fluentd.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2014-2016
2 | # (C) Takumi Sakamoto 2014
3 | # All rights reserved
4 | # Licensed under Simplified BSD License (see LICENSE)
5 |
6 | require './ci/common'
7 |
8 | namespace :ci do
9 | namespace :fluentd do |flavor|
10 | task before_install: ['ci:common:before_install']
11 |
12 | task install: ['ci:common:install'] do
13 | sh %(gem install fluentd -v 0.12.22 --no-ri --no-rdoc)
14 | end
15 |
16 | task before_script: ['ci:common:before_script'] do
17 | pid = spawn %(fluentd -c $TRAVIS_BUILD_DIR/ci/resources/fluentd/td-agent.conf)
18 | Process.detach(pid)
19 | sh %(echo #{pid} > $VOLATILE_DIR/fluentd.pid)
20 | # Waiting for fluentd to start
21 | Wait.for 24_220
22 | end
23 |
24 | task script: ['ci:common:script'] do
25 | this_provides = [
26 | 'fluentd'
27 | ]
28 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
29 | end
30 |
31 | task before_cache: ['ci:common:before_cache']
32 |
33 | task cleanup: ['ci:common:cleanup'] do
34 | sh %(kill `cat $VOLATILE_DIR/fluentd.pid`)
35 | end
36 |
37 | task :execute do
38 | Rake::Task['ci:common:execute'].invoke(flavor)
39 | end
40 | end
41 | end
42 |
--------------------------------------------------------------------------------
/conf.d/varnish.yaml.example:
--------------------------------------------------------------------------------
1 | # If you're running Varnish 4.1+, you must add the dd-agent user to the varnish group:
2 | # $ sudo usermod -G varnish -a dd-agent
3 |
4 | init_config:
5 |
6 | instances:
7 | # The full path to the varnishstat binary
8 | - varnishstat: /usr/bin/varnishstat
9 |
10 | # The (optional) name will be used in the varnishstat command for the
11 | # -n argument and will add a name:$instancename tag to all metrics.
12 | # name: myvarnishinstance
13 |
14 | # The (optional) list of tags will be applied to every emitted metric.
15 | # tags:
16 | # - instance:production
17 |
18 | # The (optional) path to the varnishadm binary will signal the check to
19 | # emit a service check status on backend health using `debug.health`.
20 | # The service check will be tagged by backend.
21 | # NOTE: The Agent must be able to run varnishadm with root
22 | # privileges. You can configure your sudoers file for this:
23 | #
24 | # example /etc/sudoers entry:
25 | # dd-agent ALL=(ALL) NOPASSWD:/usr/bin/varnishadm
26 | #
27 | # varnishadm: /usr/bin/varnishadm
28 |
29 | # The (optional) path to the varnish secretfile will be used in the
30 | # varnishadm command, if enabled.
31 | # secretfile: /etc/varnish/secret
32 |
--------------------------------------------------------------------------------
/packaging/datadog-agent/source/supervisor.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = logs/supervisord.log
3 | logfile_maxbytes = 50MB
4 | loglevel = info
5 | nodaemon = true
6 | identifier = supervisord
7 | nocleanup = true
8 | pidfile = %(here)s/../run/supervisord.pid
9 |
10 | [rpcinterface:supervisor]
11 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
12 |
13 | [unix_http_server]
14 | file = %(here)s/../run/agent-supervisor.sock
15 |
16 | [supervisorctl]
17 | prompt = datadog
18 | serverurl = unix://%(here)s/../run/agent-supervisor.sock
19 |
20 | [program:collector]
21 | command=python agent/agent.py foreground --use-local-forwarder
22 | redirect_stderr=true
23 | priority=999
24 | startsecs=2
25 | environment=LANG=POSIX,PYTHONPATH='agent/checks/libs:$PYTHONPATH'
26 |
27 | [program:forwarder]
28 | command=python agent/ddagent.py --use_simple_http_client=1
29 | redirect_stderr=true
30 | priority=998
31 | startsecs=3
32 |
33 | [program:dogstatsd]
34 | command=python agent/dogstatsd.py --use-local-forwarder
35 | redirect_stderr=true
36 | priority=998
37 | startsecs=3
38 |
39 | [program:jmxfetch]
40 | command=python agent/jmxfetch.py
41 | redirect_stderr=true
42 | priority=999
43 | startsecs=3
44 |
45 | [group:datadog-agent]
46 | programs=forwarder,collector,dogstatsd,jmxfetch
47 |
--------------------------------------------------------------------------------
/tests/checks/integration/test_windows_service.py:
--------------------------------------------------------------------------------
1 | # 3p
2 | from nose.plugins.attrib import attr
3 |
4 | # project
5 | from tests.checks.common import AgentCheckTest
6 |
7 | INSTANCE = {
8 | 'host': '.',
9 | 'services': ['EventLog', 'Dnscache', 'NonExistingService'],
10 | }
11 |
12 | INVALID_HOST_INSTANCE = {
13 | 'host': 'nonexistinghost',
14 | 'services': ['EventLog'],
15 | }
16 |
17 |
18 | @attr('windows')
19 | @attr(requires='windows')
20 | class WindowsServiceTest(AgentCheckTest):
21 | CHECK_NAME = 'windows_service'
22 |
23 | SERVICE_CHECK_NAME = 'windows_service.state'
24 |
25 | def test_basic_check(self):
26 | self.run_check({'instances': [INSTANCE]})
27 | self.assertServiceCheckOK(self.SERVICE_CHECK_NAME, tags=['service:EventLog'], count=1)
28 | self.assertServiceCheckOK(self.SERVICE_CHECK_NAME, tags=['service:Dnscache'], count=1)
29 | self.assertServiceCheckCritical(self.SERVICE_CHECK_NAME, tags=['service:NonExistingService'], count=1)
30 | self.coverage_report()
31 |
32 | def test_invalid_host(self):
33 | self.run_check({'instances': [INVALID_HOST_INSTANCE]})
34 | self.assertServiceCheckCritical(self.SERVICE_CHECK_NAME, tags=['host:nonexistinghost', 'service:EventLog'], count=1)
35 | self.coverage_report()
36 |
--------------------------------------------------------------------------------
/utils/logger.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # stdlib
6 | from functools import wraps
7 | from logging import LogRecord
8 | import re
9 |
10 |
11 | def log_exceptions(logger):
12 | """
13 | A decorator that catches any exceptions thrown by the decorated function and
14 | logs them along with a traceback.
15 | """
16 | def decorator(func):
17 | @wraps(func)
18 | def wrapper(*args, **kwargs):
19 | try:
20 | result = func(*args, **kwargs)
21 | except Exception:
22 | logger.exception(
23 | u"Uncaught exception while running {0}".format(func.__name__)
24 | )
25 | raise
26 | return result
27 | return wrapper
28 | return decorator
29 |
30 |
31 | class RedactedLogRecord(LogRecord, object):
32 | """
33 | Custom LogRecord that obfuscates API key logging.
34 | """
35 | API_KEY_PATTERN = re.compile(r'api_key=*\w+(\w{5})')
36 | API_KEY_REPLACEMENT = r'api_key=*************************\1'
37 |
38 | def getMessage(self):
39 | message = super(RedactedLogRecord, self).getMessage()
40 |
41 | return re.sub(self.API_KEY_PATTERN, self.API_KEY_REPLACEMENT, message)
42 |
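A short usage example for the `log_exceptions` decorator above (the function name `flush_metrics` is purely illustrative): the exception is logged with a traceback and then re-raised.

```python
import logging
from utils.logger import log_exceptions

log = logging.getLogger('demo')

@log_exceptions(log)
def flush_metrics():
    raise ValueError("boom")

# calling flush_metrics() logs "Uncaught exception while running flush_metrics"
# with a traceback, then re-raises the ValueError
```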
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/wix/files.xslt:
--------------------------------------------------------------------------------
[XML markup lost in extraction: the elements of this XSL transform were stripped; no text content survives.]
--------------------------------------------------------------------------------
/conf.d/spark.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | #
5 | # The Spark check can retrieve metrics from Standalone Spark and YARN. Both
6 | # methods require the spark_url to be configured.
7 | #
8 | # For Spark Standalone, `spark_url` must be set to the Spark master's web
9 | # UI. This is http://localhost:8080 by default.
10 | #
11 | # For YARN, `spark_url` must be set to YARN's ResourceManager address. The
12 | # ResourceManager hostname can be found in the yarn-site.xml conf file
13 | # under the property yarn.resourcemanager.address. The ResourceManager port
14 | # can be found in the yarn-site.xml conf file under the property
15 | # yarn.resourcemanager.webapp.address. This is http://localhost:8088 by default.
16 | #
17 | # The use of `resourcemanager_uri` has been deprecated, but is still functional.
18 | - spark_url: http://localhost:8088
19 |
20 | # To enable monitoring of a Standalone Spark cluster, the spark cluster
21 | # mode must be set. Uncomment the cluster mode that applies.
22 | # spark_cluster_mode: spark_yarn_mode
23 | # spark_cluster_mode: spark_standalone_mode
24 |
25 | # A required friendly name for the cluster.
26 | # cluster_name: MySparkCluster
27 |
28 | # Optional tags to be applied to every emitted metric.
29 | # tags:
30 | # - key:value
31 | # - instance:production
--------------------------------------------------------------------------------
/ci/go_expvar.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | namespace :ci do
8 | namespace :go_expvar do |flavor|
9 | task before_install: ['ci:common:before_install']
10 |
11 | task install: ['ci:common:install']
12 |
13 | task before_script: ['ci:common:before_script'] do
14 | pid = spawn %(go run $TRAVIS_BUILD_DIR/ci/resources/go_expvar/test_expvar.go)
15 | Process.detach(pid)
16 | sh %(echo #{pid} > $VOLATILE_DIR/go_expvar.pid)
17 | Wait.for 8079
18 | 2.times do
19 | sh %(curl -s http://localhost:8079?user=123456)
20 | end
21 | end
22 |
23 | task script: ['ci:common:script'] do
24 | this_provides = [
25 | 'go_expvar'
26 | ]
27 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
28 | end
29 |
30 | task before_cache: ['ci:common:before_cache']
31 |
32 | task cleanup: ['ci:common:cleanup'] do
33 | sh %(kill -INT `cat $VOLATILE_DIR/go_expvar.pid` || true)
34 | sh %(rm -f $VOLATILE_DIR/go_expvar.pid)
35 | # There are two processes running when launching `go run` on Mac
36 | sh %(pkill 'test_expvar' || true)
37 | end
38 |
39 | task :execute do
40 | Rake::Task['ci:common:execute'].invoke(flavor)
41 | end
42 | end
43 | end
44 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/hdfs_namenode/hdfs_namesystem_state:
--------------------------------------------------------------------------------
1 | {"beans":[{"name":"Hadoop:service=NameNode,name=FSNamesystemState","modelerType":"org.apache.hadoop.hdfs.server.namenode.FSNamesystem","CapacityTotal":41167421440,"CapacityUsed":501932032,"CapacityRemaining":27878948864,"TotalLoad":2,"SnapshotStats":"{\"SnapshottableDirectories\":0,\"Snapshots\":0}","FsLockQueueLength":0,"BlocksTotal":27661,"MaxObjects":0,"FilesTotal":82950,"PendingReplicationBlocks":0,"UnderReplicatedBlocks":27661,"ScheduledReplicationBlocks":0,"PendingDeletionBlocks":0,"BlockDeletionStartTime":1454387198306,"FSState":"Operational","NumLiveDataNodes":1,"NumDeadDataNodes":0,"NumDecomLiveDataNodes":0,"NumDecomDeadDataNodes":0,"VolumeFailuresTotal":0,"EstimatedCapacityLostTotal":0,"NumDecommissioningDataNodes":0,"NumStaleDataNodes":0,"NumStaleStorages":0,"TopUserOpCounts":"{\"timestamp\":\"2016-02-02T04:06:06+0000\",\"windows\":[{\"windowLenMs\":60000,\"ops\":[]},{\"windowLenMs\":300000,\"ops\":[{\"opType\":\"listStatus\",\"topUsers\":[{\"user\":\"mapred\",\"count\":3}],\"totalCount\":3},{\"opType\":\"*\",\"topUsers\":[{\"user\":\"mapred\",\"count\":3}],\"totalCount\":3}]},{\"windowLenMs\":1500000,\"ops\":[{\"opType\":\"listStatus\",\"topUsers\":[{\"user\":\"mapred\",\"count\":27}],\"totalCount\":27},{\"opType\":\"*\",\"topUsers\":[{\"user\":\"mapred\",\"count\":27}],\"totalCount\":27}]}]}","TotalSyncCount":2,"TotalSyncTimes":"4 "}]}
--------------------------------------------------------------------------------
/conf.d/kubernetes.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | # tags:
3 | # - optional_tag1
4 | # - optional_tag2
5 |
6 | instances:
7 | # The kubernetes check retrieves metrics from cadvisor running under kubelet.
8 | # By default we will assume we're running under docker and will use the address
9 | # of the default router to reach the cadvisor api.
10 | #
11 | # To override, e.g. in the case of a standalone cadvisor instance, use the following:
12 | #
13 | # host: localhost
14 | # port: 4194
15 | # method: http
16 | - port: 4194
17 |
18 | # collect_events controls whether the agent should fetch events from the kubernetes API and
19 | # ingest them in Datadog. To avoid duplicates, only one agent at a time across the entire
20 | # cluster should have this feature enabled. To enable the feature, set the parameter to `true`.
21 | # collect_events: false
22 |
23 | # use_histogram controls whether metrics are reported per container or aggregated.
24 | # When false, we send detailed metrics for each individual container, tagged by
25 | # container id to keep them unique.
26 | # When true, we aggregate data based on container image.
27 | #
28 | # use_histogram: false
29 |
30 | # kubelet_port: 10255
31 | #
32 | # We can define a whitelist of patterns that permit publishing raw metrics.
33 | # enabled_rates:
34 | # - cpu.*
35 | # - network.*
36 | #
37 | # enabled_gauges:
38 | # - filesystem.*
39 |
--------------------------------------------------------------------------------
/utils/ntp.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # stdlib
6 | import os
7 | import random
8 |
9 | # project
10 | from config import get_confd_path
11 | from util import check_yaml
12 | from utils.singleton import Singleton
13 |
14 |
15 | class NTPUtil():
16 | __metaclass__ = Singleton
17 |
18 | DEFAULT_VERSION = 3
19 | DEFAULT_TIMEOUT = 1 # in seconds
20 | DEFAULT_PORT = "ntp"
21 |
22 | def __init__(self, config=None):
23 | try:
24 | if config:
25 | ntp_config = config
26 | else:
27 | ntp_config = check_yaml(os.path.join(get_confd_path(), 'ntp.yaml'))
28 | settings = ntp_config['instances'][0]
29 | except Exception:
30 | settings = {}
31 |
32 | self.host = settings.get('host') or "{0}.datadog.pool.ntp.org".format(random.randint(0, 3))
33 | self.version = int(settings.get("version") or NTPUtil.DEFAULT_VERSION)
34 | self.port = settings.get('port') or NTPUtil.DEFAULT_PORT
35 | self.timeout = float(settings.get('timeout') or NTPUtil.DEFAULT_TIMEOUT)
36 |
37 | self.args = {
38 | 'host': self.host,
39 | 'port': self.port,
40 | 'version': self.version,
41 | 'timeout': self.timeout,
42 | }
43 |
44 | @classmethod
45 | def _drop(cls):
46 | if cls in cls._instances:
47 | del cls._instances[cls]
48 |
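A usage sketch for `NTPUtil` with an inline config (the server name is illustrative); note that the `Singleton` metaclass means later constructions return the same instance until `_drop()` is called.

```python
from utils.ntp import NTPUtil

util = NTPUtil({'instances': [{'host': 'pool.ntp.org', 'timeout': 5}]})
print(util.args)
# -> {'host': 'pool.ntp.org', 'port': 'ntp', 'version': 3, 'timeout': 5.0}

NTPUtil._drop()  # reset the singleton so a new config can take effect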
--------------------------------------------------------------------------------
/conf.d/teamcity.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | # Add your different projects in here to monitor their build
4 | # success with Datadog events
5 | instances:
6 | # A custom unique name per build configuration that will show
7 | # in the events
8 | - name: My Website
9 |
10 | # Specify the server name of your teamcity instance here
11 | # Guest authentication must be on if you want the check to be able to get data
12 | # When using the optional basic_http_authentication use
13 | # server: user:password@teamcity.mycompany.com
14 | server: teamcity.mycompany.com
15 |
16 | # This is the internal build ID of the build configuration you wish to track.
17 | # You can find it labelled as "Build configuration ID" when editing the configuration in question.
18 | build_configuration: MyWebsite_Deploy
19 |
20 | # Optional, this turns on basic http authentication. Defaults to False.
21 | # basic_http_authentication: true
22 |
23 | # Optional, if you wish to override the host that is affected by this build configuration.
24 | # Defaults to the host that the agent is running on.
25 | # host_affected: msicalweb6
26 |
27 | # Optional, this changes the event message slightly to specify that TeamCity was used to deploy something
28 | # rather than just that a successful build happened
29 | # is_deployment: true
30 |
31 | # Optional, this turns off ssl certificate validation. Defaults to True.
32 | # ssl_validation: false
33 |
34 | # Optional, any additional tags you'd like to add to the event
35 | # tags:
36 | # - test
37 |
38 |
39 |
--------------------------------------------------------------------------------
/conf.d/disk.yaml.default:
--------------------------------------------------------------------------------
1 | # This file is overwritten upon Agent upgrade.
2 | # To make modifications to the check configuration, please copy this file
3 | # to `disk.yaml` and make your changes on that file.
4 |
5 | init_config:
6 |
7 | instances:
8 | # The use_mount parameter will instruct the check to collect disk
9 | # and fs metrics using mount points instead of volumes
10 | - use_mount: no
11 | # The (optional) excluded_filesystems parameter will instruct the check to
12 | # ignore disks using these filesystems
13 | # excluded_filesystems:
14 | # - tmpfs
15 |
16 | # The (optional) excluded_disks parameter will instruct the check to
17 | # ignore this list of disks
18 | # excluded_disks:
19 | # - /dev/sda1
20 | # - /dev/sda2
21 | #
22 | # The (optional) excluded_disk_re parameter will instruct the check to
23 | # ignore all disks matching this regex
24 | # excluded_disk_re: /dev/sde.*
25 | #
26 | # The (optional) tag_by_filesystem parameter will instruct the check to
27 | # tag all disks with their filesystem (for ex: filesystem:nfs)
28 | # tag_by_filesystem: no
29 | #
30 | # The (optional) excluded_mountpoint_re parameter will instruct the check to
31 | # ignore all mountpoints matching this regex
32 | # excluded_mountpoint_re: /mnt/somebody-elses-problem.*
33 | #
34 | # The (optional) all_partitions parameter will instruct the check to
35 | # get metrics for all partitions. use_mount should be set to yes (to avoid
36 | # collecting empty device names) when using this option.
37 | # all_partitions: no
38 |
--------------------------------------------------------------------------------
/datadog-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
3 | MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
4 | IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
5 | MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
6 | FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
7 | bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
8 | dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
9 | H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
10 | uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
11 | mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
12 | a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
13 | E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
14 | WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
15 | VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
16 | Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
17 | cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
18 | IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
19 | AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
20 | YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
21 | 6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
22 | Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
23 | c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
24 | mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
25 | -----END CERTIFICATE-----
26 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/mesos_slave/stats.json:
--------------------------------------------------------------------------------
1 | {
2 | "valid_status_updates": 1,
3 | "uptime": 280965.77977984,
4 | "total_frameworks": 1,
5 | "system/mem_total_bytes": 513798144,
6 | "system/mem_free_bytes": 34271232,
7 | "system/load_5min": 0.08,
8 | "system/load_1min": 0.1,
9 | "system/load_15min": 0.06,
10 | "system/cpus_total": 1,
11 | "started_tasks": 0,
12 | "staged_tasks": 1,
13 | "slave/valid_status_updates": 1,
14 | "slave/valid_framework_messages": 0,
15 | "slave/uptime_secs": 280965.78028288,
16 | "slave/tasks_starting": 0,
17 | "slave/tasks_staging": 0,
18 | "slave/executors_registering": 0,
19 | "slave/disk_used": 0,
20 | "slave/disk_total": 35164,
21 | "slave/disk_percent": 0,
22 | "slave/cpus_used": 1.1,
23 | "slave/cpus_total": 1,
24 | "slave/cpus_percent": 1.1,
25 | "registered": 1,
26 | "failed_tasks": 0,
27 | "finished_tasks": 0,
28 | "invalid_status_updates": 0,
29 | "killed_tasks": 0,
30 | "launched_tasks_gauge": 1,
31 | "lost_tasks": 0,
32 | "queued_tasks_gauge": 0,
33 | "recovery_errors": 0,
34 | "slave/executors_running": 1,
35 | "slave/executors_terminated": 0,
36 | "slave/executors_terminating": 0,
37 | "slave/frameworks_active": 1,
38 | "slave/invalid_framework_messages": 0,
39 | "slave/invalid_status_updates": 0,
40 | "slave/mem_percent": 0.540983606557377,
41 | "slave/mem_total": 244,
42 | "slave/mem_used": 132,
43 | "slave/recovery_errors": 0,
44 | "slave/registered": 1,
45 | "slave/tasks_failed": 0,
46 | "slave/tasks_finished": 0,
47 | "slave/tasks_killed": 0,
48 | "slave/tasks_lost": 0,
49 | "slave/tasks_running": 1
50 | }
51 |
--------------------------------------------------------------------------------
/utils/service_discovery/config.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # std
6 | import logging
7 |
8 | # project
9 | from utils.service_discovery.sd_backend import SD_BACKENDS
10 | from utils.service_discovery.config_stores import extract_sd_config, SD_CONFIG_BACKENDS
11 |
12 | log = logging.getLogger(__name__)
13 |
14 |
15 | def extract_agent_config(config):
16 | # gets merged into the real agentConfig
17 | agentConfig = {}
18 |
19 | backend = config.get('Main', 'service_discovery_backend')
20 | agentConfig['service_discovery'] = True
21 |
22 | conf_backend = None
23 | if config.has_option('Main', 'sd_config_backend'):
24 | conf_backend = config.get('Main', 'sd_config_backend')
25 |
26 | if backend not in SD_BACKENDS:
27 | log.error("The backend {0} is not supported. "
28 | "Service discovery won't be enabled.".format(backend))
29 | agentConfig['service_discovery'] = False
30 |
31 | if conf_backend is None:
32 | log.warning('No configuration backend provided for service discovery. '
33 | 'Only auto config templates will be used.')
34 | elif conf_backend not in SD_CONFIG_BACKENDS:
35 | log.error("The config backend {0} is not supported. "
36 | "Only auto config templates will be used.".format(conf_backend))
37 | conf_backend = None
38 | agentConfig['sd_config_backend'] = conf_backend
39 |
40 | additional_config = extract_sd_config(config)
41 | agentConfig.update(additional_config)
42 | return agentConfig
43 |
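A hedged usage sketch for `extract_agent_config` (the backend names below are hypothetical; whether they pass validation depends on the contents of SD_BACKENDS/SD_CONFIG_BACKENDS, and `extract_sd_config` must accept the same config object):

```python
import ConfigParser
from utils.service_discovery.config import extract_agent_config

cfg = ConfigParser.RawConfigParser()
cfg.add_section('Main')
cfg.set('Main', 'service_discovery_backend', 'docker')  # hypothetical backend name
cfg.set('Main', 'sd_config_backend', 'etcd')            # hypothetical store name

agent_conf = extract_agent_config(cfg)
# agent_conf['service_discovery'] is False if 'docker' is not in SD_BACKENDS;
# agent_conf['sd_config_backend'] is None if 'etcd' is not in SD_CONFIG_BACKENDS
```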
--------------------------------------------------------------------------------
/ci/resources/kong/setup_lua.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # A script for setting up the environment for travis-ci testing.
4 | # Sets up Lua and Luarocks.
5 | # LUA must be "lua5.1", "lua5.2" or "luajit".
6 | # luajit2.0 - master v2.0
7 | # luajit2.1 - master v2.1
8 |
9 | set -e
10 |
11 |
12 | pushd $INTEGRATIONS_DIR/
13 |
14 |
15 | ############
16 | # Lua/LuaJIT
17 | ############
18 |
19 | mkdir -p $LUAJIT_DIR
20 |
21 | if [ ! "$(ls -A $LUAJIT_DIR)" ]; then
22 | LUAJIT_BASE="LuaJIT"
23 | git clone https://github.com/luajit/luajit $LUAJIT_BASE
24 | pushd $LUAJIT_BASE
25 |
26 | if [ "$LUAJIT_VERSION" == "2.1" ]; then
27 | git checkout v2.1
28 | perl -i -pe 's/INSTALL_TNAME=.+/INSTALL_TNAME= luajit/' Makefile
29 | else
30 | git checkout v2.0.4
31 | fi
32 |
33 | make
34 | make install PREFIX=$LUAJIT_DIR
35 | popd
36 |
37 | ln -sf $LUAJIT_DIR/bin/luajit $LUAJIT_DIR/bin/lua
38 | rm -rf $LUAJIT_BASE
39 | else
40 | echo "Lua found from cache at $LUAJIT_DIR"
41 | fi
42 |
43 | ##########
44 | # Luarocks
45 | ##########
46 |
47 | mkdir -p $LUAROCKS_DIR
48 | if [ ! "$(ls -A $LUAROCKS_DIR)" ]; then
49 | LUAROCKS_BASE=luarocks-$LUAROCKS_VERSION
50 | git clone https://github.com/keplerproject/luarocks.git $LUAROCKS_BASE
51 |
52 | pushd $LUAROCKS_BASE
53 | git checkout v$LUAROCKS_VERSION
54 | ./configure \
55 | --prefix=$LUAROCKS_DIR \
56 | --with-lua-bin=$LUAJIT_DIR/bin \
57 | --with-lua-include=$LUAJIT_DIR/include/luajit-$LUAJIT_VERSION
58 | make build
59 | make install
60 | popd
61 |
62 | rm -rf $LUAROCKS_BASE
63 | else
64 | echo "Luarocks found from cache at $LUAROCKS_DIR"
65 | fi
66 |
67 | popd
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/datadog-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
3 | MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
4 | IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
5 | MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
6 | FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
7 | bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
8 | dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
9 | H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
10 | uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
11 | mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
12 | a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
13 | E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
14 | WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
15 | VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
16 | Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
17 | cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
18 | IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
19 | AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
20 | YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
21 | 6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
22 | Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
23 | c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
24 | mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
25 | -----END CERTIFICATE-----
26 |
--------------------------------------------------------------------------------
/tests/core/test_emitter.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # 3p
3 | import unittest
4 |
5 | # project
6 | from emitter import remove_control_chars
7 | from emitter import remove_control_chars_from
8 |
9 |
10 | class TestEmitter(unittest.TestCase):
11 |
12 | def test_remove_control_chars(self):
13 | messages = [
14 | (u'#és9df\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00>\x00\x01\x00\x00\x00\x06@\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00´wer0sf®ré', u'#és9dfELF>@@´wer0sf®ré'),
15 | ('AAAAAA', 'AAAAAA'),
16 | (u'_e{2,19}:t4|♬ †øU †øU ¥ºu T0µ ♪', u'_e{2,19}:t4|♬ †øU †øU ¥ºu T0µ ♪')
17 | ]
18 |
19 | for bad, good in messages:
20 | self.assertTrue(remove_control_chars(bad) == good, (bad,good))
21 |
22 | def test_remove_control_chars_from(self):
23 | bad_messages = [
24 | ({"processes":[1234,[[u'☢cd≤Ω≈ç√∫˜µ≤\r\n', 0, 2.2,12,34,'compiz\r\n',1]]]},
25 | {"processes":[1234,[[u'☢cd≤Ω≈ç√∫˜µ≤', 0, 2.2,12,34,'compiz',1]]]})
26 | ]
27 | good_messages = [
28 | {"processes":[1234,[[u'db🖫', 0, 2.2,12,34,u'☢compiz☢',1]]]}
29 | ]
30 |
31 | def is_converted_same(msg):
32 | new_msg = remove_control_chars_from(msg, None)
33 | if str(new_msg) == str(msg):
34 | return True
35 | return False
36 |
37 | for bad, good in bad_messages:
38 | self.assertFalse(is_converted_same(bad))
39 | self.assertTrue(remove_control_chars_from(bad, None) == good)
40 |
41 | for msg in good_messages:
42 | self.assertTrue(is_converted_same(msg))
43 |
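The fixtures above imply that control characters (including `\r\n`) are stripped while printable Unicode such as ♬ or ☢ survives; a hypothetical sketch of such a filter, not the actual emitter code:

```python
import re

# strip C0 and C1 control characters, keep printable Unicode
CONTROL_CHARS = re.compile(u'[\x00-\x1f\x7f-\x9f]')

def strip_control_chars(s):
    return CONTROL_CHARS.sub(u'', s)

assert strip_control_chars(u'compiz\r\n') == u'compiz'
```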
--------------------------------------------------------------------------------
/tests/checks/mock/test_w32logevent.py:
--------------------------------------------------------------------------------
1 | # project
2 | from tests.core.test_wmi import TestCommonWMI
3 | from tests.checks.common import AgentCheckTest
4 |
5 | from mock import patch
6 |
7 | def to_time(wmi_ts):
8 | "Just return any time struct"
9 | return (2100, 12, 24, 11, 30, 47, 0, 0)
10 |
11 | def from_time(year=0, month=0, day=0, hours=0, minutes=0,
12 | seconds=0, microseconds=0, timezone=0):
13 | "Just return any WMI date"
14 | return "20151224113047.000000-480"
15 |
16 | class W32LogEventTestCase(AgentCheckTest, TestCommonWMI):
17 | CHECK_NAME = 'win32_event_log'
18 |
19 | WIN_LOGEVENT_CONFIG = {
20 | 'host': ".",
21 | 'tags': ["mytag1", "mytag2"],
22 | 'sites': ["Default Web Site", "Failing site"],
23 | 'logfile': ["Application"],
24 | 'type': ["Error", "Warning"],
25 | 'source_name': ["MSSQLSERVER"]
26 | }
27 |
28 | @patch('checks.wmi_check.to_time', side_effect=to_time)
29 | @patch('checks.wmi_check.from_time', side_effect=from_time)
30 | def test_check(self, from_time, to_time):
31 | """
32 | Returns the right metrics and service checks
33 | """
34 | # Run check
35 | config = {
36 | 'instances': [self.WIN_LOGEVENT_CONFIG]
37 | }
38 | self.run_check_twice(config)
39 |
40 | self.assertEvent('SomeMessage', count=1,
41 | tags=self.WIN_LOGEVENT_CONFIG['tags'],
42 | msg_title='Application/MSQLSERVER',
43 | event_type='win32_log_event', alert_type='error',
44 | source_type_name='event viewer')
45 |
46 | self.coverage_report()
47 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Simplified BSD License
2 |
3 | Copyright (c) 2009, Boxed Ice
4 | Copyright (c) 2010-2016, Datadog
5 | All rights reserved.
6 |
7 | Redistribution and use in source and binary forms, with or without
8 | modification, are permitted provided that the following conditions are met:
9 |
10 | * Redistributions of source code must retain the above copyright notice,
11 | this list of conditions and the following disclaimer.
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 | * Neither the name of the copyright holder nor the names of its contributors
16 | may be used to endorse or promote products derived from this software
17 | without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/disk/freebsd-df-Tk:
--------------------------------------------------------------------------------
1 | Filesystem Type 1024-blocks Used Avail Capacity Mounted on
2 | zroot zfs 5 4 1 80% /
3 | devfs devfs 1 1 0 100% /dev
4 | zroot/tmp zfs 6013913 88 6013825 0% /tmp
5 | zroot/usr zfs 7317053 1303228 6013825 18% /usr
6 | zroot/usr/home zfs 6013857 32 6013825 0% /usr/home
7 | zroot/usr/home/vagrant zfs 6077010 63185 6013825 1% /usr/home/vagrant
8 | zroot/usr/ports zfs 6013858 33 6013825 0% /usr/ports
9 | zroot/usr/ports/distfiles zfs 6013856 31 6013825 0% /usr/ports/distfiles
10 | zroot/usr/ports/packages zfs 6013856 31 6013825 0% /usr/ports/packages
11 | zroot/usr/src zfs 6013856 31 6013825 0% /usr/src
12 | zroot/var zfs 6159848 146023 6013825 2% /var
13 | zroot/var/crash zfs 6013856 31 6013825 0% /var/crash
14 | zroot/var/db zfs 6013910 85 6013825 0% /var/db
15 | zroot/var/db/pkg zfs 6038024 24199 6013825 0% /var/db/pkg
16 | zroot/var/empty zfs 6013856 31 6013825 0% /var/empty
17 | zroot/var/log zfs 6013882 57 6013825 0% /var/log
18 | zroot/var/mail zfs 6013856 31 6013825 0% /var/mail
19 | zroot/var/run zfs 6013882 57 6013825 0% /var/run
20 | zroot/var/tmp zfs 6013857 32 6013825 0% /var/tmp
21 |
--------------------------------------------------------------------------------
/ci/resources/go_expvar/test_expvar.go:
--------------------------------------------------------------------------------
1 | /* Demo of the expvar package. You register metrics by creating them with the
2 | New* constructors (NewInt, NewString, ...), then updating them.
3 |
4 | You can access the exposed metrics via HTTP at /debug/vars; you'll get a JSON
5 | object with your exposed variables and some pre-defined system ones.
6 |
7 | You can use a monitoring system such as Nagios or OpenNMS to monitor the
8 | system and plot the change of the data over time.
9 |
10 | After you run the server, try "curl http://localhost:8079?user=lassie" several times and then
11 | "curl http://localhost:8079/debug/vars | python -m json.tool".
12 | */
13 | package main
14 |
15 | import (
16 | "expvar"
17 | "fmt"
18 | "io"
19 | "net/http"
20 | "runtime"
21 | )
22 |
23 | // Two metrics, these are exposed by "magic" :)
24 | // Number of calls to our server.
25 | var numCalls = expvar.NewInt("num_calls")
26 |
27 | // Last user.
28 | var lastUser = expvar.NewString("last_user")
29 |
30 | func HelloServer(w http.ResponseWriter, req *http.Request) {
31 | user := req.FormValue("user")
32 |
33 | // Update metrics
34 | numCalls.Add(1)
35 | lastUser.Set(user)
36 |
37 | msg := fmt.Sprintf("G'day %s\n", user)
38 | io.WriteString(w, msg)
39 | }
40 |
41 | func main() {
42 | // In some situations, the CI tests for the go_expvar check would fail due
43 | // to the Golang runtime not having run GC yet. The reason this is needed
44 | // is that the get_gc_collection_histogram() function in go_expvar.py
45 | // short-circuits if there have been no GCs. This causes the pause_ns
46 | // metric to not be present, thus causing tests to fail. So trigger GC.
47 | runtime.GC()
48 |
49 | http.HandleFunc("/", HelloServer)
50 | http.ListenAndServe(":8079", nil)
51 | }
52 |
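53 | // Editor's sketch (illustrative, not part of the original demo): the same
54 | // package can also expose computed values via expvar.Publish and expvar.Func,
55 | // which are re-evaluated on every /debug/vars scrape:
56 | //
57 | //     expvar.Publish("goroutines", expvar.Func(func() interface{} {
58 | //         return runtime.NumGoroutine()
59 | //     }))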
--------------------------------------------------------------------------------
/ci/zookeeper.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | def zk_version
8 | ENV['FLAVOR_VERSION'] || '3.4.7'
9 | end
10 |
11 | def zk_rootdir
12 | "#{ENV['INTEGRATIONS_DIR']}/zk_#{zk_version}"
13 | end
14 |
15 | namespace :ci do
16 | namespace :zookeeper do |flavor|
17 | task before_install: ['ci:common:before_install']
18 |
19 | task install: ['ci:common:install'] do
20 | unless Dir.exist? File.expand_path(zk_rootdir)
21 | sh %(curl -s -L\
22 | -o $VOLATILE_DIR/zookeeper-#{zk_version}.tar.gz\
23 | http://archive.apache.org/dist/zookeeper/zookeeper-#{zk_version}/zookeeper-#{zk_version}.tar.gz)
24 | sh %(mkdir -p #{zk_rootdir})
25 | sh %(tar zxf $VOLATILE_DIR/zookeeper-#{zk_version}.tar.gz\
26 | -C #{zk_rootdir} --strip-components=1)
27 | end
28 | end
29 |
30 | task before_script: ['ci:common:before_script'] do
31 | sh %(mkdir -p $VOLATILE_DIR/zookeeper)
32 | sh %(cp $TRAVIS_BUILD_DIR/ci/resources/zookeeper/zoo.cfg\
33 | #{zk_rootdir}/conf/)
34 | sh %(#{zk_rootdir}/bin/zkServer.sh start)
35 | end
36 |
37 | task script: ['ci:common:script'] do
38 | this_provides = [
39 | 'zookeeper'
40 | ]
41 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
42 | end
43 |
44 | task before_cache: ['ci:common:before_cache']
45 |
46 | task cleanup: ['ci:common:cleanup'] do
47 | sh %(#{zk_rootdir}/bin/zkServer.sh stop)
48 | end
49 |
50 | task :execute do
51 | Rake::Task['ci:common:execute'].invoke(flavor)
52 | end
53 | end
54 | end
55 |
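56 | # Editor's usage sketch: the tasks above are normally driven through the
57 | # `execute` entry point, with the version picked up from the environment, e.g.
58 | #
59 | #   FLAVOR_VERSION=3.4.7 rake ci:zookeeper:execute
60 | #
61 | # (FLAVOR_VERSION is optional; zk_version falls back to 3.4.7.)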
--------------------------------------------------------------------------------
/requirements-opt.txt:
--------------------------------------------------------------------------------
1 | ######################## WARNING ##########################
2 | # This file currently determines the python deps installed
3 | # for the dev env, the source install and the Win build.
4 | # It is NOT used for the DEB/RPM packages and the OS X
5 | # build. For these please update:
6 | # https://github.com/DataDog/omnibus-software
7 | ###########################################################
8 |
9 | # core/optional
10 | # tornado can work without pycurl and use the simple http client
11 | # but some features won't work, like the ability to use a proxy
12 | # Requires a compiler and the curl headers+lib
13 | # On Windows, a manual install of pycurl might be easier.
14 | pycurl==7.19.5.1
15 |
16 | # core-ish/system -> system check on windows
17 | # checks.d/process.py
18 | # checks.d/gunicorn.py
19 | # checks.d/btrfs.py
20 | # checks.d/system_core.py
21 | psutil==3.3.0
22 |
23 | # checks.d/snmp.py
24 | # Require a compiler because pycrypto is a dep
25 | pysnmp-mibs==0.1.4
26 | pysnmp==4.2.5
27 |
28 | # checks.d/mongo.py
29 | # checks.d/tokumx.py
30 | # Require a compiler
31 | # TODO: our checks are not compatible with 3.x
32 | pymongo==3.2
33 |
34 | # checks.d/kafka_consumer.py
35 | # Requires a compiler because zope.interface is a dep
36 | kazoo==1.3.1
37 |
38 | # checks.d/ssh_check.py
39 | winrandom-ctypes
40 | # Require a compiler because pycrypto is a dep
41 | paramiko==1.15.2
42 |
43 | # checks.d/pgbouncer.py
44 | # Require libpq
45 | psycopg2==2.6
46 |
47 | # checks.d/win32_event_log.py
48 | # checks.d/wmi.py
49 | # It's a pure python module, it doesn't require anything to install,
50 | # but needs the pywin32 extension to work
51 | wmi==1.4.9
52 |
53 | # checks.d/directory.py
54 | scandir==1.2
55 |
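56 | # Editor's note: in a dev env these can be installed with, e.g.
57 | #   pip install -r requirements-opt.txt
58 | # (several of them need the compilers/headers called out above).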
--------------------------------------------------------------------------------
/packaging/datadog-agent/smartos/dd-agent.xml:
--------------------------------------------------------------------------------
(XML markup lost in extraction -- this SMF service manifest survives only as
its human-readable service name, "Datadog Agent", at line 39.)
--------------------------------------------------------------------------------
/ci/memcache.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | # TODO: make this available in the matrix
8 | def memcache_version
9 | '1.4.22'
10 | end
11 |
12 | def memcache_rootdir
13 | "#{ENV['INTEGRATIONS_DIR']}/memcache_#{memcache_version}"
14 | end
15 |
16 | namespace :ci do
17 | namespace :memcache do |flavor|
18 | task before_install: ['ci:common:before_install']
19 |
20 | task install: ['ci:common:install'] do
21 | unless Dir.exist? File.expand_path(memcache_rootdir)
22 | # Downloads
23 | # http://memcached.org/files/memcached-#{memcache_version}.tar.gz
24 | sh %(curl -s -L\
25 | -o $VOLATILE_DIR/memcached-#{memcache_version}.tar.gz\
26 | https://s3.amazonaws.com/dd-agent-tarball-mirror/memcached-#{memcache_version}.tar.gz)
27 | sh %(mkdir -p #{memcache_rootdir})
28 | sh %(tar zxf $VOLATILE_DIR/memcached-#{memcache_version}.tar.gz\
29 | -C #{memcache_rootdir} --strip-components=1)
30 | sh %(cd #{memcache_rootdir} && ./configure && make -j $CONCURRENCY)
31 | end
32 | end
33 |
34 | task before_script: ['ci:common:before_script'] do
35 | sh %(#{memcache_rootdir}/memcached -d)
36 | end
37 |
38 | task script: ['ci:common:script'] do
39 | this_provides = [
40 | 'memcache'
41 | ]
42 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
43 | end
44 |
45 | task cleanup: ['ci:common:cleanup']
46 | # FIXME: stop memcache
47 |
48 | task before_cache: ['ci:common:before_cache']
49 |
50 | task :execute do
51 | Rake::Task['ci:common:execute'].invoke(flavor)
52 | end
53 | end
54 | end
55 |
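56 | # Editor's sketch for the FIXME above, assuming memcached is launched with a
57 | # pidfile (it supports `-d -P <pidfile>`), mirroring the snmp/tokumx cleanup:
58 | #
59 | #   task before_script: ['ci:common:before_script'] do
60 | #     sh %(#{memcache_rootdir}/memcached -d -P $VOLATILE_DIR/memcached.pid)
61 | #   end
62 | #
63 | #   task cleanup: ['ci:common:cleanup'] do
64 | #     sh %(kill `cat $VOLATILE_DIR/memcached.pid`)
65 | #   end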
--------------------------------------------------------------------------------
/tests/core/test_proxy.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | from unittest import TestCase
3 |
4 | # 3p
5 | from requests.utils import get_environ_proxies
6 |
7 | # project
8 | from utils.proxy import set_no_proxy_settings
9 |
10 |
11 | class TestProxy(TestCase):
12 | def test_no_proxy(self):
13 | """
14 | Starting with Agent 5.0.0, there should always be a local forwarder
15 | running and all payloads should go through it. So we should make sure
16 | that we pass the no_proxy environment variable that will be used by requests
17 | (See: https://github.com/kennethreitz/requests/pull/945 )
18 | """
19 | from os import environ as env
20 |
21 | env["http_proxy"] = "http://localhost:3128"
22 | env["https_proxy"] = env["http_proxy"]
23 | env["HTTP_PROXY"] = env["http_proxy"]
24 | env["HTTPS_PROXY"] = env["http_proxy"]
25 |
26 | set_no_proxy_settings()
27 |
28 | self.assertTrue("no_proxy" in env)
29 |
30 | self.assertEquals(env["no_proxy"], "127.0.0.1,localhost,169.254.169.254")
31 | self.assertEquals({}, get_environ_proxies(
32 | "http://localhost:17123/api/v1/series"))
33 |
34 | expected_proxies = {
35 | 'http': 'http://localhost:3128',
36 | 'https': 'http://localhost:3128',
37 | 'no': '127.0.0.1,localhost,169.254.169.254'
38 | }
39 | environ_proxies = get_environ_proxies("https://www.google.com")
40 | self.assertEquals(expected_proxies, environ_proxies, (expected_proxies, environ_proxies))
41 |
42 | # Clear the env variables set
43 | env.pop("http_proxy", None)
44 | env.pop("https_proxy", None)
45 | env.pop("HTTP_PROXY", None)
46 | env.pop("HTTPS_PROXY", None)
47 |
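48 | # Editor's note: a sketch of a safer cleanup pattern -- registering the pops
49 | # with addCleanup at the top of the test would restore the environment even
50 | # when an assertion above fails:
51 | #
52 | #     for var in ("http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY"):
53 | #         self.addCleanup(env.pop, var, None)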
--------------------------------------------------------------------------------
/tests/checks/mock/test_mesos_slave.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | import json
3 |
4 | # 3p
5 | from mock import patch
6 | from nose.plugins.attrib import attr
7 |
8 | # project
9 | from checks import AgentCheck
10 | from tests.checks.common import AgentCheckTest, Fixtures, get_check_class
11 |
12 |
13 | def _mocked_get_state(*args, **kwargs):
14 | state = json.loads(Fixtures.read_file('state.json'))
15 | return state
16 | def _mocked_get_stats(*args, **kwargs):
17 | stats = json.loads(Fixtures.read_file('stats.json'))
18 | return stats
19 |
20 | @attr(requires='mesos_slave')
21 | class TestMesosSlave(AgentCheckTest):
22 | CHECK_NAME = 'mesos_slave'
23 |
24 | def test_checks(self):
25 | config = {
26 | 'init_config': {},
27 | 'instances': [
28 | {
29 | 'url': 'http://localhost:5050',
30 | 'tasks': ['hello']
31 | }
32 | ]
33 | }
34 |
35 | klass = get_check_class('mesos_slave')
36 | with patch.object(klass, '_get_state', _mocked_get_state):
37 | with patch.object(klass, '_get_stats', _mocked_get_stats):
38 | check = klass('mesos_slave', {}, {})
39 | self.run_check_twice(config)
40 | metrics = {}
41 | for d in (check.SLAVE_TASKS_METRICS, check.SYSTEM_METRICS, check.SLAVE_RESOURCE_METRICS,
42 | check.SLAVE_EXECUTORS_METRICS, check.STATS_METRICS):
43 | metrics.update(d)
44 | [self.assertMetric(v[0]) for k, v in check.TASK_METRICS.iteritems()]
45 | [self.assertMetric(v[0]) for k, v in metrics.iteritems()]
46 | self.assertServiceCheck('hello.ok', count=1, status=AgentCheck.OK)
47 |
--------------------------------------------------------------------------------
/ci/mysql.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | # FIXME: use our own brew of MySQL like other flavors
8 |
9 | namespace :ci do
10 | namespace :mysql do |flavor|
11 | task before_install: ['ci:common:before_install']
12 |
13 | task install: ['ci:common:install']
14 |
15 | task before_script: ['ci:common:before_script'] do
16 | sh %(mysql -e "create user 'dog'@'localhost' identified by 'dog'" -uroot)
17 | sh %(mysql -e "GRANT PROCESS, REPLICATION CLIENT ON *.* TO 'dog'@'localhost' WITH MAX_USER_CONNECTIONS 5;" -uroot)
18 | sh %(mysql -e "CREATE DATABASE testdb;" -uroot)
19 | sh %(mysql -e "CREATE TABLE testdb.users (name VARCHAR(20), age INT);" -uroot)
20 | sh %(mysql -e "GRANT SELECT ON testdb.users TO 'dog'@'localhost';" -uroot)
21 | sh %(mysql -e "INSERT INTO testdb.users (name,age) VALUES('Alice',25);" -uroot)
22 | sh %(mysql -e "INSERT INTO testdb.users (name,age) VALUES('Bob',20);" -uroot)
23 | sh %(mysql -e "GRANT SELECT ON performance_schema.* TO 'dog'@'localhost';" -uroot)
24 | sh %(mysql -e "USE testdb; SELECT * FROM users ORDER BY name;" -uroot)
25 | end
26 |
27 | task script: ['ci:common:script'] do
28 | this_provides = [
29 | 'mysql'
30 | ]
31 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
32 | end
33 |
34 | task before_cache: ['ci:common:before_cache']
35 |
36 | task cleanup: ['ci:common:cleanup'] do
37 | sh %(mysql -e "DROP USER 'dog'@'localhost';" -uroot)
38 | sh %(mysql -e "DROP DATABASE testdb;" -uroot)
39 | end
40 |
41 | task :execute do
42 | Rake::Task['ci:common:execute'].invoke(flavor)
43 | end
44 | end
45 | end
46 |
--------------------------------------------------------------------------------
/conf.d/directory.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # This config is for the Directory Check which is used to report metrics
5 | # for the files in a given directory
6 | #
7 | # For each instance, the 'directory' parameter is required; all others are optional.
8 | #
9 | # WARNING: Ensure the user account running the Agent (typically dd-agent) has read
10 | # access to the monitored directory and files.
11 | #
12 | # NOTE: on Windows, please make sure you escape backslashes, otherwise the YAML
13 | # parser will fail (e.g. - directory: "C:\\Users\\foo\\Downloads").
14 | #
15 | # Instances take the following parameters:
16 | # "directory" - string, the directory to monitor. Required
17 | # "name" - string, tag metrics with specified name. defaults to the "directory"
18 | # "dirtagname" - string, the name of the tag used for the directory. defaults to "name"
19 | # "filetagname" - string, the name of the tag used for each file. defaults to "filename"
20 | # "filegauges" - boolean, when true stats will be an individual gauge per file (max. 20 files!) and not a histogram of the whole directory. default False
21 | # "pattern" - string, the `fnmatch` pattern to use when reading the "directory"'s files. The pattern will be matched against the files' absolute paths. default "*"
22 | # "recursive" - boolean, when true the stats will recurse into directories. default False
23 | # "countonly" - boolean, when true the stats will only count the number of files matching the pattern. Useful for very large directories.
24 |
25 | - directory: "/path/to/directory"
26 | # name: "tag_value"
27 | # dirtagname: "tag_dirname"
28 | # filetagname: "tag_filename"
29 | # filegauges: False
30 | # pattern: "*.log"
31 | # recursive: True
32 | # countonly: False
33 |
--------------------------------------------------------------------------------
/ci/resources/nginx/testing.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD0RetB4kR66ojN
3 | 1U82WuCUfUTZGI+HmkAuumslNhVlNsZFy5sHTCQQwQPxN1BMi5ki3noNYVzPr970
4 | vy+p7FH1XBry7S8f0viAni851J+Jo+HrppGgaGVFpbNqqZtKMY37TpqbQRagtWoJ
5 | Lb72NDiW9r5q0JUUyQ56dfeeIGwohUjh64TBTuu1D7Q9QlWYSOjqSta6R0ESGjOk
6 | KrpmbYHmyoARjPyFZCkAzqEznzFgvjWVIYBXQKaHfuZcwx0g5wrUNjf+lSkJNLlN
7 | xKnEJx7rUwRmEAdt8mXAnMd6bkxJeDGgLPKZPv8iTTwZgi0AijRjTGDdWHDwl7mm
8 | 8dajPh+FAgMBAAECggEAaq9q22CGRiTO0Q8bxIKWWWQIwMRwU2o0I/R5PUxMteLh
9 | X7RYJizEB6k1HpHo+TVzEX6XUea3EWIff0dM+uritMWgY37huQV8UThFKf3KG+Q+
10 | lJwwOB3ANVX0cV5YG2RfPWYMMkiQKGpzQEUBhSgzmwNzENW+dtCFCUkid6ZzdpI9
11 | UKqXhRj7c9FF5/24P9ERCkoIG0+L1SXynqduqCVnKy4UOgptsGryago4C9NA+RpF
12 | UnYb8bEgkO4mSrr9ozzLkM3lz8XZhk0AmkKCsKw/PWJ+EJ/Ydy4A8/lF9MYjiqEv
13 | AWip22O7WEvkQzhSg0ymGI8cSfwUdm7xqgvVWs4K9QKBgQD/oKA4TUYzFWzVWA7w
14 | Evs4V5ImuHyuO5nJRKwJainct5LJVab300sM2LwPHxiZvnyExcFCT5vwuY/JxfWO
15 | klShbP52GSB9oEFS58HaQg78YqAtuWnu1HtYbWFl4NZMLqLZV8PLWlbW+aMdl4FA
16 | LGzsxgDHndBwFhEbR2HwVTferwKBgQD0oQ6M4O4SkJYOff30cpulSq/dhzW80rYd
17 | 03y/bLAfWjDONeVSuSh1iqMaQkntjeOmsmu7Rb+340kaTzkF3/dRbaTwQVkTOplY
18 | XPxRWnuFEl1k1gGJitHbmz7xCDzC5SehsFCCts2NIDOXFoCOa6zus6KdYR7/mxem
19 | vqzyGJTSCwKBgFJL+SkHH8GUdTxeJDkAM2bZMpFKtcE2KPWWKTjCuAV6CETPUXjZ
20 | yoCxSiIoJbhhjh8Et4pMrOycIQGZvMuQqrRpraaBwmcPb9hsConk2IRCkEUIO2WL
21 | fMZkOIYfE37lSMJmMf/G7sw5BF2jiBYL92lm+ZtKYG+lew5oNcy08s67AoGAcgxs
22 | Vi2/kJQsAVGoBkEwY11wpF0XJpMuKLWioTQw4E0SF/F0mp6MSFB8Pg/Nm5zdF6hz
23 | JXodKcQjHsr0kNKb4TC3BvPQbXCScWnYkK0YjS/ErvA/AzrfH/0+2Oy4NzzSv0UO
24 | JALJzhPHOZdaFAwLMbY6CBlxdEWAP1MCGlRvfYUCgYEAl8iuq0NUrY3UCoUPSDnh
25 | C51hAH2dJ9n7kFwgOt6X3DeHQOWUb9U3LRcrJqtHoLP2QB1nMu86/BkevOo6A/Wj
26 | kr6GkcoxIw0Oec5k56ThQnyCAjQFiW8nRoyhbRR9nwK2gjOCsPHrceQoZjr3KbAl
27 | aYMKjbhy0hJvt92vTqqD31c=
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/conf.d/cacti.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # The Cacti check requires access to the Cacti DB in MySQL and to the RRD
5 | # files that contain the metrics tracked in Cacti.
6 | # In almost all cases, you'll only need one instance pointing to the Cacti
7 | # database.
8 | #
9 | # The `rrd_path` will probably be '/var/lib/cacti/rra' on Ubuntu
10 | # or '/var/www/html/cacti/rra' on most other machines.
11 | #
12 | # `field_names` is an optional parameter to specify which field names
13 | # should be used to determine if a device is a real device. You can leave it
14 | # commented out, as the default values should satisfy your needs.
15 | # You can run the following query to determine your field names:
16 | # SELECT
17 | # h.hostname as hostname,
18 | # hsc.field_value as device_name,
19 | # dt.data_source_path as rrd_path,
20 | # hsc.field_name as field_name
21 | # FROM data_local dl
22 | # JOIN host h on dl.host_id = h.id
23 | # JOIN data_template_data dt on dt.local_data_id = dl.id
24 | # LEFT JOIN host_snmp_cache hsc on h.id = hsc.host_id
25 | # AND dl.snmp_index = hsc.snmp_index
26 | # WHERE dt.data_source_path IS NOT NULL
27 | # AND dt.data_source_path != ''
28 | #
29 | #
30 | #
31 | # The `rrd_whitelist` is a path to a text file that has a list of patterns,
32 | # one per line, that should be fetched. If no whitelist is specified, all
33 | # metrics will be fetched.
34 | #
35 | - mysql_host: localhost
36 | mysql_user: MYSQL_USER
37 | mysql_password: MYSQL_PASSWORD
38 | rrd_path: /path/to/cacti/rra
39 | # field_names:
40 | # - ifName
41 | # - dskDevice
42 | # - ifIndex
43 | rrd_whitelist: /path/to/rrd_whitelist.txt
44 |
--------------------------------------------------------------------------------
/packaging/osx/supervisor.conf:
--------------------------------------------------------------------------------
1 | [supervisorctl]
2 | serverurl = unix:///opt/datadog-agent/run/datadog-supervisor.sock
3 |
4 | [unix_http_server]
5 | file=/opt/datadog-agent/run/datadog-supervisor.sock
6 |
7 | [rpcinterface:supervisor]
8 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
9 |
10 | [supervisord]
11 | http_port = /opt/datadog-agent/run/datadog-supervisor.sock
12 | minfds = 1024
13 | minprocs = 200
14 | loglevel = info
15 | logfile = /var/log/datadog/supervisord.log
16 | logfile_maxbytes = 50MB
17 | nodaemon = false
18 | pidfile = /opt/datadog-agent/run/datadog-supervisord.pid
19 | logfile_backups = 10
20 | environment=PYTHONPATH=/opt/datadog-agent/agent:/opt/datadog-agent/agent/checks,LANG=POSIX
21 |
22 | [program:collector]
23 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/agent.py foreground --use-local-forwarder
24 | stdout_logfile=NONE
25 | stderr_logfile=NONE
26 | priority=999
27 | startsecs=5
28 | startretries=3
29 | environment=PYTHONPATH='/opt/datadog-agent/agent:/opt/datadog-agent/agent/checks/libs:$PYTHONPATH'
30 |
31 | [program:forwarder]
32 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/ddagent.py
33 | stdout_logfile=NONE
34 | stderr_logfile=NONE
35 | startsecs=5
36 | startretries=3
37 | priority=998
38 |
39 | [program:dogstatsd]
40 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/dogstatsd.py --use-local-forwarder
41 | stdout_logfile=NONE
42 | stderr_logfile=NONE
43 | startsecs=5
44 | startretries=3
45 | priority=998
46 |
47 | [program:jmxfetch]
48 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/jmxfetch.py
49 | stdout_logfile=NONE
50 | stderr_logfile=NONE
51 | redirect_stderr=true
52 | priority=999
53 | startsecs=3
54 |
55 | [group:datadog-agent]
56 | programs=forwarder,collector,dogstatsd,jmxfetch
57 |
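58 | ; Editor's sketch (not part of the packaged config): with the socket defined
59 | ; above, the supervised programs can be inspected with supervisorctl, e.g.
60 | ;
61 | ;   supervisorctl -c /path/to/this/supervisor.conf status datadog-agent:*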
--------------------------------------------------------------------------------
/conf.d/sqlserver.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 | #
3 | # By default, we only capture *some* of the metrics available in the
4 | # `sys.dm_os_performance_counters` table. You can easily add additional
5 | # metrics by following the custom_metrics structure shown below.
6 | #
7 | # To connect to SQL Server, enable SQL Authentication and specify a
8 | # username and password below. If you do not specify a username and
9 | # password, we will connect using integrated authentication.
10 | #
11 | # custom_metrics:
12 |
13 | # This is a basic custom metric. There is no instance associated with
14 | # this counter.
15 | #
16 | # - name: sqlserver.clr.execution
17 | # counter_name: CLR Execution
18 |
19 | # This counter has multiple instances associated with it and we're
20 | # choosing to only fetch the 'Cumulative execution time (ms) per second' instance.
21 | #
22 | # - name: sqlserver.exec.in_progress
23 | # counter_name: OLEDB calls
24 | # instance_name: Cumulative execution time (ms) per second
25 |
26 | # This counter has multiple instances associated with it and we want
27 | # every instance available. We'll use the special case ALL instance
28 | # which *requires* a value for "tag_by". In this case, we'll get metrics
29 | # tagged as "db:mydb1", "db:mydb2".
30 | #
31 | # - name: sqlserver.db.commit_table_entries
32 | # counter_name: Log Flushes/sec
33 | # instance_name: ALL
34 | # tag_by: db
35 |
36 | instances:
37 | # All '%' characters must be escaped as '%%'.
38 |
39 | - host: HOST,PORT
40 | username: my_username
41 | password: my_password
42 | # Optional, timeout in seconds for the connection and each command run
43 | # command_timeout: 30
44 | # database: my_database # Optional, defaults to "master"
45 | tags:
46 | - optional_tag
47 |
--------------------------------------------------------------------------------
/conf.d/go_expvar.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # Most memstats metrics are exported by default
5 | # See http://godoc.org/runtime#MemStats for their explanation
6 | # Note that you can specify a `type` for the metrics. One of:
7 | # * counter
8 | # * gauge (the default)
9 | # * rate (note that this will show up as a gauge in Datadog that is meant to be seen as a "per second rate")
10 |
11 | - expvar_url: http://localhost:8080/debug/vars
12 | # namespace: examplenamespace # The default metric namespace is 'go_expvar', define your own
13 | # tags:
14 | # - "application_name:myapp"
15 | # - "optionaltag2"
16 | # metrics:
17 | # # These metrics are just here as examples.
18 | # # Most memstats metrics are collected by default without configuration needed.
19 | # - path: memstats/PauseTotalNs
20 | # alias: go_expvar.gc.pause_time_in_ns
21 | # type: rate
22 | # tags:
23 | # - "metric_tag1:tag_value1"
24 | # - "metric_tag2:tag_value2"
25 | # - path: memstats/Alloc # metric will be reported as a gauge by default
26 | # - path: memstats/Lookups
27 | # type: rate # metric should be reported as a rate instead of the default gauge
28 | # - path: memstats/Mallocs # with no name specified, the metric name will default to a path based name
29 | # type: counter # report as a counter instead of the default gauge
30 | # - path: memstats/Frees
31 | # type: rate
32 | # - path: memstats/BySize/1/Mallocs # You can get nested values by separating them with "/"
33 | # - path: myvariable
34 | # alias: go_expvar.my_custom_name
35 | # type: gauge
36 | # - path: routes/get_.*/count # You can use a regex when you want to report for all elements matching a certain pattern
37 |
--------------------------------------------------------------------------------
/utils/net.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # lib
6 | import ctypes
7 | import socket
8 |
9 | # 3p
10 |
11 | # project
12 |
13 |
14 | # Due to this bug [0], CPython on certain Windows versions does not
15 | # define some IPv6-related constants, which we therefore hardcode
16 | # here.
17 | #
18 | # [0] http://bugs.python.org/issue6926
19 | try:
20 | IPPROTO_IPV6 = socket.IPPROTO_IPV6
21 | except AttributeError:
22 | IPPROTO_IPV6 = 41 # from `Ws2def.h`
23 |
24 | try:
25 | IPV6_V6ONLY = socket.IPV6_V6ONLY
26 | except AttributeError:
27 | IPV6_V6ONLY = 27 # from `Ws2ipdef.h`
28 |
29 |
30 | class sockaddr(ctypes.Structure):
31 | _fields_ = [("sa_family", ctypes.c_short),
32 | ("__pad1", ctypes.c_ushort),
33 | ("ipv4_addr", ctypes.c_byte * 4),
34 | ("ipv6_addr", ctypes.c_byte * 16),
35 | ("__pad2", ctypes.c_ulong)]
36 |
37 |
38 | def _inet_pton_win(address_family, ip_string):
39 | """
40 | Windows-specific version of `inet_pton`, based on:
41 | https://gist.github.com/nnemkin/4966028
42 | """
43 | addr = sockaddr()
44 | addr.sa_family = address_family
45 | addr_size = ctypes.c_int(ctypes.sizeof(addr))
46 |
47 | str_to_addr = ctypes.windll.ws2_32.WSAStringToAddressA
48 |
49 | if str_to_addr(ip_string, address_family, None, ctypes.byref(addr), ctypes.byref(addr_size)) != 0:
50 | raise socket.error(ctypes.FormatError())
51 |
52 | if address_family == socket.AF_INET:
53 | return ctypes.string_at(addr.ipv4_addr, 4)
54 | if address_family == socket.AF_INET6:
55 | return ctypes.string_at(addr.ipv6_addr, 16)
56 |
57 | raise socket.error('unknown address family')
58 |
59 |
60 | try:
61 | from socket import inet_pton
62 | except ImportError:
63 | inet_pton = _inet_pton_win
64 |
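65 | # Editor's usage sketch: `inet_pton` now resolves to the stdlib version where
66 | # available, and to the ctypes fallback above on affected Windows builds, e.g.
67 | #
68 | #     packed = inet_pton(socket.AF_INET6, "::1")
69 | #     assert len(packed) == 16  # IPv6 addresses pack to 16 bytes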
--------------------------------------------------------------------------------
/packaging/supervisor_32.conf:
--------------------------------------------------------------------------------
1 | [supervisorctl]
2 | serverurl = unix:///opt/datadog-agent/run/datadog-supervisor.sock
3 |
4 | [unix_http_server]
5 | file=/opt/datadog-agent/run/datadog-supervisor.sock
6 |
7 | [rpcinterface:supervisor]
8 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
9 |
10 | [supervisord]
11 | http_port = /opt/datadog-agent/run/datadog-supervisor.sock
12 | minfds = 1024
13 | minprocs = 200
14 | loglevel = info
15 | logfile = /var/log/datadog/supervisord.log
16 | logfile_maxbytes = 50MB
17 | nodaemon = false
18 | pidfile = /opt/datadog-agent/run/datadog-supervisord.pid
19 | logfile_backups = 10
20 | user=dd-agent
21 | environment=PYTHONPATH=/opt/datadog-agent/agent:/opt/datadog-agent/agent/checks,LANG=POSIX
22 |
23 | [program:collector]
24 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/agent.py foreground --use-local-forwarder
25 | stdout_logfile=NONE
26 | stderr_logfile=NONE
27 | priority=999
28 | startsecs=5
29 | startretries=3
30 | user=dd-agent
31 | environment=PYTHONPATH='/opt/datadog-agent/agent:/opt/datadog-agent/agent/checks/libs:$PYTHONPATH'
32 |
33 | [program:forwarder]
34 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/ddagent.py
35 | stdout_logfile=NONE
36 | stderr_logfile=NONE
37 | startsecs=5
38 | startretries=3
39 | priority=998
40 | user=dd-agent
41 |
42 | [program:dogstatsd]
43 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/dogstatsd.py --use-local-forwarder
44 | stdout_logfile=NONE
45 | stderr_logfile=NONE
46 | startsecs=5
47 | startretries=3
48 | priority=998
49 | user=dd-agent
50 |
51 | [program:jmxfetch]
52 | command=/opt/datadog-agent/embedded/bin/python /opt/datadog-agent/agent/jmxfetch.py
53 | stdout_logfile=NONE
54 | stderr_logfile=NONE
55 | redirect_stderr=true
56 | priority=999
57 | startsecs=3
58 | user=dd-agent
59 |
60 | [group:datadog-agent]
61 | programs=forwarder,collector,dogstatsd,jmxfetch
62 |
--------------------------------------------------------------------------------
/tests/checks/mock/test_riakcs.py:
--------------------------------------------------------------------------------
1 | # stdlib
2 | from socket import error
3 | import unittest
4 |
5 | # 3p
6 | from mock import Mock
7 |
8 | # project
9 | from checks import AgentCheck
10 | from tests.checks.common import AgentCheckTest, Fixtures, load_check
11 |
12 |
13 | class RiakCSTest(AgentCheckTest):
14 |
15 | CHECK_NAME = "riakcs"
16 |
17 | def __init__(self, *args, **kwargs):
18 | unittest.TestCase.__init__(self, *args, **kwargs)
19 | self.config = {"instances": [{
20 | "access_id":"foo",
21 | "access_secret": "bar"}]}
22 | self.check = load_check(self.CHECK_NAME, self.config, {})
23 | self.check._connect = Mock(return_value=(None, None, ["aggregation_key:localhost:8080"]))
24 | self.check._get_stats = Mock(return_value=self.check.load_json(
25 | Fixtures.read_file('riakcs_in.json')))
26 |
27 | def test_parser(self):
28 | input_json = Fixtures.read_file('riakcs_in.json')
29 | output_python = Fixtures.read_file('riakcs_out.python')
30 | self.assertEquals(self.check.load_json(input_json), eval(output_python))
31 |
32 | def test_metrics(self):
33 | self.run_check(self.config)
34 | expected = eval(Fixtures.read_file('riakcs_metrics.python'))
35 | for m in expected:
36 | self.assertMetric(m[0], m[2], m[3].get('tags', []))
37 |
38 | def test_service_checks(self):
39 | self.check = load_check(self.CHECK_NAME, self.config, {})
40 | self.assertRaises(error, lambda: self.run_check(self.config))
41 |
42 | self.assertEqual(len(self.service_checks), 1, self.service_checks)
43 | self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
44 | status=AgentCheck.CRITICAL,
45 | tags=['aggregation_key:localhost:8080'])
46 |
--------------------------------------------------------------------------------
/ci/snmp.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | def snmp_rootdir
8 | "#{ENV['INTEGRATIONS_DIR']}/snmp"
9 | end
10 |
11 | namespace :ci do
12 | namespace :snmp do |flavor|
13 | task before_install: ['ci:common:before_install']
14 |
15 | task install: ['ci:common:install'] do
16 | # Downloads
17 | # http://sourceforge.net/projects/net-snmp/files/net-snmp/5.7.3/net-snmp-5.7.3.tar.gz/download
18 | unless Dir.exist? File.expand_path(snmp_rootdir)
19 | sh %(curl -s -L\
20 | -o $VOLATILE_DIR/snmp.tar.gz\
21 | https://s3.amazonaws.com/dd-agent-tarball-mirror/net-snmp-5.7.3.tar.gz)
22 | sh %(mkdir -p $VOLATILE_DIR/snmp)
23 | sh %(mkdir -p #{snmp_rootdir})
24 | sh %(tar zxf $VOLATILE_DIR/snmp.tar.gz\
25 | -C $VOLATILE_DIR/snmp --strip-components=1)
26 | sh %(cd $VOLATILE_DIR/snmp\
27 | && yes '' | ./configure --disable-embedded-perl --without-perl-modules --prefix=#{snmp_rootdir}\
28 | && make -j $CONCURRENCY\
29 | && make install)
30 | end
31 | end
32 |
33 | task before_script: ['ci:common:before_script'] do
34 | sh %(#{snmp_rootdir}/sbin/snmpd -Ln\
35 | -c $TRAVIS_BUILD_DIR/ci/resources/snmp/snmpd.conf\
36 | -x TCP:11111 UDP:11111\
37 | -p $VOLATILE_DIR/snmpd.pid)
38 | end
39 |
40 | task script: ['ci:common:script'] do
41 | this_provides = [
42 | 'snmp'
43 | ]
44 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
45 | end
46 |
47 | task before_cache: ['ci:common:before_cache']
48 |
49 | task cleanup: ['ci:common:cleanup'] do
50 | sh %(kill `cat $VOLATILE_DIR/snmpd.pid`)
51 | end
52 |
53 | task :execute do
54 | Rake::Task['ci:common:execute'].invoke(flavor)
55 | end
56 | end
57 | end
58 |
--------------------------------------------------------------------------------
/tests/checks/fixtures/cassandra/cassandra.yaml:
--------------------------------------------------------------------------------
1 | instances:
2 | - host: localhost
3 | port: 7199
4 | name: cassandra_instance
5 | max_returned_metrics: 2000
6 |
7 |
8 |
9 | init_config:
10 | conf:
11 | - include:
12 | domain: org.apache.cassandra.db
13 | attribute:
14 | - BloomFilterDiskSpaceUsed
15 | - BloomFilterFalsePositives
16 | - BloomFilterFalseRatio
17 | - Capacity
18 | - CompressionRatio
19 | - CompletedTasks
20 | - ExceptionCount
21 | - Hits
22 | - RecentHitRate
23 | - LiveDiskSpaceUsed
24 | - LiveSSTableCount
25 | - Load
26 | - MaxRowSize
27 | - MeanRowSize
28 | - MemtableColumnsCount
29 | - MemtableDataSize
30 | - MemtableSwitchCount
31 | - MinRowSize
32 | - ReadCount
33 | - Requests
34 | - Size
35 | - TotalDiskSpaceUsed
36 | - TotalReadLatencyMicros
37 | - TotalWriteLatencyMicros
38 | - UpdateInterval
39 | - WriteCount
40 | - PendingTasks
41 | exclude:
42 | keyspace: system
43 | attribute:
44 | - MinimumCompactionThreshold
45 | - MaximumCompactionThreshold
46 | - RowCacheKeysToSave
47 | - KeyCacheSavePeriodInSeconds
48 | - RowCacheSavePeriodInSeconds
49 | - PendingTasks
50 | - Scores
51 | - RpcTimeout
52 | - include:
53 | domain: org.apache.cassandra.internal
54 | attribute:
55 | - ActiveCount
56 | - CompletedTasks
57 | - CurrentlyBlockedTasks
58 | - TotalBlockedTasks
59 | - include:
60 | domain: org.apache.cassandra.net
61 | attribute:
62 | - TotalTimeouts
--------------------------------------------------------------------------------
/packaging/datadog-agent/win32/install_files/license.rtf:
--------------------------------------------------------------------------------
1 | Simplified BSD License
2 |
3 | Copyright (c) 2009, Boxed Ice
4 | Copyright (c) 2010-2014, Datadog
5 | All rights reserved.
6 |
7 | Redistribution and use in source and binary forms, with or without
8 | modification, are permitted provided that the following conditions are met:
9 |
10 | * Redistributions of source code must retain the above copyright notice,
11 | this list of conditions and the following disclaimer.
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 | * Neither the name of Boxed Ice nor the names of its contributors
16 | may be used to endorse or promote products derived from this software
17 | without specific prior written permission.
18 | * Neither the name of Datadog nor the names of its contributors
19 | may be used to endorse or promote products derived from this software
20 | without specific prior written permission.
21 |
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
26 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
--------------------------------------------------------------------------------
/tests/checks/integration/test_lighttpd.py:
--------------------------------------------------------------------------------
1 | from nose.plugins.attrib import attr
2 |
3 | from checks import AgentCheck
4 | from tests.checks.common import AgentCheckTest
5 |
6 |
7 | @attr(requires='lighttpd')
8 | class TestLighttpd(AgentCheckTest):
9 | CHECK_NAME = 'lighttpd'
10 | CHECK_GAUGES = [
11 | 'lighttpd.net.bytes',
12 | 'lighttpd.net.bytes_per_s',
13 | 'lighttpd.net.hits',
14 | 'lighttpd.net.request_per_s',
15 | 'lighttpd.performance.busy_servers',
16 | 'lighttpd.performance.idle_server',
17 | 'lighttpd.performance.uptime',
18 | ]
19 |
20 | def __init__(self, *args, **kwargs):
21 | AgentCheckTest.__init__(self, *args, **kwargs)
22 | self.config = {
23 | 'instances': [
24 | {
25 | 'lighttpd_status_url': 'http://localhost:9449/server-status',
26 | 'tags': ['instance:first'],
27 | }
28 | ]
29 | }
30 |
31 | def test_lighttpd(self):
32 | self.run_check_twice(self.config)
33 | self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
34 | status=AgentCheck.OK,
35 | tags=['host:localhost', 'port:9449'])
36 |
37 | for gauge in self.CHECK_GAUGES:
38 | self.assertMetric(gauge, tags=['instance:first'], count=1)
39 | self.coverage_report()
40 |
41 | def test_bad_config(self):
42 | self.assertRaises(
43 | Exception,
44 | lambda: self.run_check({"instances": [{'lighttpd_status_url': 'http://localhost:1337',
45 | 'tags': ['instance: first']}]})
46 | )
47 | self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
48 | status=AgentCheck.CRITICAL,
49 | tags=['host:localhost', 'port:1337'],
50 | count=1)
51 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | (HTML comment lost in extraction: lines 1-25, presumably issue-filing instructions)
26 |
27 | **Output of the [info page](https://help.datadoghq.com/hc/en-us/articles/203764635-Agent-Status-and-Information):**
28 |
29 | ```
30 | (paste your output here)
31 | ```
32 |
33 | **Additional environment details (Operating System, Cloud provider, etc):**
34 |
35 |
36 | **Steps to reproduce the issue:**
37 | 1.
38 | 2.
39 | 3.
40 |
41 |
42 | **Describe the results you received:**
43 |
44 |
45 | **Describe the results you expected:**
46 |
47 |
48 | **Additional information you deem important (e.g. issue happens only occasionally):**
49 |
--------------------------------------------------------------------------------
/checks.d/ntp.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # 3p
6 | import ntplib
7 |
8 | # project
9 | from checks import AgentCheck
10 | from utils.ntp import NTPUtil
11 |
12 | DEFAULT_OFFSET_THRESHOLD = 60 # in seconds
13 |
14 |
15 | class NtpCheck(AgentCheck):
16 |
17 | DEFAULT_MIN_COLLECTION_INTERVAL = 900 # in seconds
18 |
19 | def check(self, instance):
20 | service_check_msg = None
21 | offset_threshold = instance.get('offset_threshold', DEFAULT_OFFSET_THRESHOLD)
22 | try:
23 | offset_threshold = int(offset_threshold)
24 | except (TypeError, ValueError):
25 | raise Exception('Must specify an integer value for offset_threshold. Configured value is %s' % repr(offset_threshold))
26 |
27 | req_args = NTPUtil().args
28 |
29 | self.log.debug("Using ntp host: {0}".format(req_args['host']))
30 |
31 | try:
32 | ntp_stats = ntplib.NTPClient().request(**req_args)
33 | except ntplib.NTPException:
34 | self.log.debug("Could not connect to NTP Server {0}".format(
35 | req_args['host']))
36 | status = AgentCheck.UNKNOWN
37 | ntp_ts = None
38 | else:
39 | ntp_offset = ntp_stats.offset
40 |
41 | # Use the ntp server's timestamp for the time of the result in
42 | # case the agent host's clock is messed up.
43 | ntp_ts = ntp_stats.recv_time
44 | self.gauge('ntp.offset', ntp_offset, timestamp=ntp_ts)
45 |
46 | if abs(ntp_offset) > offset_threshold:
47 | status = AgentCheck.CRITICAL
48 | service_check_msg = "Offset {0} secs higher than offset threshold ({1} secs)".format(ntp_offset, offset_threshold)
49 | else:
50 | status = AgentCheck.OK
51 |
52 | self.service_check('ntp.in_sync', status, timestamp=ntp_ts, message=service_check_msg)
53 |
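54 | # Editor's config sketch (assumed layout for a conf.d/ntp.yaml; `offset_threshold`
55 | # is the only per-instance option this check reads, defaulting to 60 seconds --
56 | # the NTP host itself comes from NTPUtil):
57 | #
58 | #     init_config:
59 | #
60 | #     instances:
61 | #       - offset_threshold: 60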
--------------------------------------------------------------------------------
/ci/gearman.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | def gearman_version
8 | '1.0.6'
9 | end
10 |
11 | def gearman_rootdir
12 | "#{ENV['INTEGRATIONS_DIR']}/gearman_#{gearman_version}"
13 | end
14 |
15 | namespace :ci do
16 | namespace :gearman do |flavor|
17 | task before_install: ['ci:common:before_install']
18 |
19 | task install: ['ci:common:install'] do
20 | unless Dir.exist? File.expand_path(gearman_rootdir)
21 | # Downloads
22 | # https://launchpad.net/gearmand/#{gearman_version[0..2]}/#{gearman_version}/+download/gearmand-#{gearman_version}.tar.gz
23 | sh %(curl -s -L\
24 | -o $VOLATILE_DIR/gearman-#{gearman_version}.tar.gz\
25 | https://s3.amazonaws.com/dd-agent-tarball-mirror/gearmand-#{gearman_version}.tar.gz)
26 | sh %(mkdir -p $VOLATILE_DIR/gearman)
27 | sh %(tar zxf $VOLATILE_DIR/gearman-#{gearman_version}.tar.gz\
28 | -C $VOLATILE_DIR/gearman --strip-components=1)
29 | sh %(mkdir -p #{gearman_rootdir})
30 | sh %(cd $VOLATILE_DIR/gearman\
31 | && ./configure --prefix=#{gearman_rootdir}\
32 | && make -j $CONCURRENCY\
33 | && make install)
34 | end
35 | end
36 |
37 | task before_script: ['ci:common:before_script'] do
38 | sh %(#{gearman_rootdir}/sbin/gearmand -d -l $VOLATILE_DIR/gearmand.log)
39 | # FIXME: wait for gearman start
40 | # Wait.for ??
41 | end
42 |
43 | task script: ['ci:common:script'] do
44 | this_provides = [
45 | 'gearman'
46 | ]
47 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
48 | end
49 |
50 | task before_cache: ['ci:common:before_cache']
51 |
52 | task cleanup: ['ci:common:cleanup']
53 | # FIXME: stop gearman
54 |
55 | task :execute do
56 | Rake::Task['ci:common:execute'].invoke(flavor)
57 | end
58 | end
59 | end
60 |
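61 | # Editor's note on the FIXMEs above: gearmand listens on TCP 4730 by default,
62 | # so the "wait for gearman start" gap could be closed by polling that port
63 | # before the tests run (a Wait.for-style helper, if ci/common provides one,
64 | # or a small TCP-connect loop).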
--------------------------------------------------------------------------------
/conf.d/iis.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # By default, this check will run against a single instance - the current
5 | # machine that the Agent is running on. It will check the WMI performance
6 | # counters for IIS on that machine.
7 | #
8 | # If you want to check other remote machines as well, you can add one
9 | # instance per host. Note: If you also want to check the counters on the
10 | # current machine, you will have to create an instance with empty params.
11 | #
12 | # The optional `provider` parameter allows you to specify a WMI provider
13 | # (defaults to `32` on a 32-bit Datadog Agent, `64` otherwise). It is used to
14 | # request WMI data from the non-default provider. Available options are: `32` or `64`.
15 | # For more information: https://msdn.microsoft.com/en-us/library/aa393067(v=vs.85).aspx
16 | #
17 | # The `sites` parameter allows you to specify a list of sites you want to
18 | # read metrics from. With sites specified, metrics will be tagged with the
19 | # site name. If you don't define any sites, the check will pull the
20 | # aggregate values across all sites.
21 | #
22 | # Here's an example of configuration that would check the current machine
23 | # and a remote machine called MYREMOTESERVER. For the remote host we are
24 | # only pulling metrics from the default site.
25 | #
26 |
27 | # "." means the current host
28 | - host: .
29 | # tags:
30 | # - myapp1
31 | # sites:
32 | # - Default Web Site
33 | #
34 | # - host: MYREMOTESERVER
35 | # username: MYREMOTESERVER\fred
36 | # password: mysecretpassword
37 | # is_2008: false # NOTE: because of a typo in IIS6/7 (typically on W2K8)
38 | # where perfmon reports TotalBytesTransferred as
39 | # TotalBytesTransfered, you may have to enable this
40 | # to grab the IIS metrics in that environment.
41 | # tags:
42 | # - myapp2
43 | # - east
44 | # sites:
45 | # - Default Web Site
46 |
--------------------------------------------------------------------------------
/utils/pidfile.py:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | # stdlib
6 | import logging
7 | import os.path
8 | import tempfile
9 |
10 | log = logging.getLogger(__name__)
11 |
12 |
13 | class PidFile(object):
14 | """ A small helper class for pidfiles. """
15 |
16 | @classmethod
17 | def get_dir(cls, run_dir=None):
18 | if run_dir is None:
19 | my_dir = os.path.dirname(os.path.abspath(__file__))
20 | run_dir = os.path.realpath(os.path.join(my_dir, '..', '..', 'run'))
21 |
22 | if os.path.exists(run_dir) and os.access(run_dir, os.W_OK):
23 | return os.path.realpath(run_dir)
24 | else:
25 | return tempfile.gettempdir()
26 |
27 | def __init__(self, program, pid_dir=None):
28 | self.pid_file = "%s.pid" % program
29 | self.pid_dir = self.get_dir(pid_dir)
30 | self.pid_path = os.path.join(self.pid_dir, self.pid_file)
31 |
32 | def get_path(self):
33 | # double-check the pid dir is still writable before handing out the path
34 | if os.access(self.pid_dir, os.W_OK):
35 | log.info("Pid file is: %s" % self.pid_path)
36 | return self.pid_path
37 | else:
38 | # Can't save pid file, bail out
39 | log.error("Cannot save pid file: %s" % self.pid_path)
40 | raise Exception("Cannot save pid file: %s" % self.pid_path)
41 |
42 | def clean(self):
43 | try:
44 | path = self.get_path()
45 | log.debug("Cleaning up pid file %s" % path)
46 | os.remove(path)
47 | return True
48 | except Exception:
49 | log.warn("Could not clean up pid file")
50 | return False
51 |
52 | def get_pid(self):
53 | "Retrieve the actual pid"
54 | try:
55 | pf = open(self.get_path())
56 | pid_s = pf.read()
57 | pf.close()
58 |
59 | return int(pid_s.strip())
60 | except Exception:
61 | return None
62 |
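63 | # Editor's usage sketch:
64 | #
65 | #     pf = PidFile('dd-agent')
66 | #     pf.get_path()  # e.g. <run dir>/dd-agent.pid, or the tmp-dir fallback
67 | #     pf.get_pid()   # None until something writes a pid into the file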
--------------------------------------------------------------------------------
/conf.d/rabbitmq.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | # For every instance a 'rabbitmq_api_url' must be provided, pointing to the API
5 | # URL of the RabbitMQ Management Plugin (http://www.rabbitmq.com/management.html)
6 | # optional: 'rabbitmq_user' (default: guest), 'rabbitmq_pass' (default: guest),
7 | # and 'tag_families' (default: false) to tag queue "families" based off of regex
8 | # matching.
9 | - rabbitmq_api_url: http://localhost:15672/api/
10 | rabbitmq_user: guest
11 | rabbitmq_pass: guest
12 | # tag_families: true
13 | # Use the `nodes` or `nodes_regexes` parameters to specify the nodes you'd like to
14 | # collect metrics on (up to 100 nodes).
16 | # If you have fewer than 100 nodes, you don't have to set this parameter,
16 | # the metrics will be collected on all the nodes by default.
17 | #
18 | # nodes:
19 | # - rabbit@localhost
20 | # - rabbit2@domain
21 | # nodes_regexes:
22 | # - bla.*
23 |
24 | # Use the `queues` or `queues_regexes` parameters to specify the queues you'd like to
25 | # collect metrics on (up to 200 queues).
26 | # If you have fewer than 200 queues, you don't have to set this parameter,
27 | # the metrics will be collected on all the queues by default.
28 | # If you have set up vhosts, set the queue names as `vhost_name/queue_name`.
29 | # If you have `tag_families` enabled, the first captured group in the regex
30 | # will be used as the queue_family tag
31 | #
32 | # queues:
33 | # - queue1
34 | # - queue2
35 | # queues_regexes:
36 | # - thisqueue-.*
37 | # - another_\d+queue
38 | # - (lepidorae)-\d+ # to tag queues in the lepidorae queue_family
39 |
40 | # Service checks:
41 | # By default a list of all vhosts is fetched and each one will be checked
42 | # using the aliveness API. If you prefer only certain vhosts to be monitored
43 | # with service checks then you can list the vhosts you care about.
44 | #
45 | # vhosts:
46 | # - vhost1
47 | # - vhost2
48 |
--------------------------------------------------------------------------------
/ci/tokumx.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | def tokumx_version
8 | ENV['FLAVOR_VERSION'] || '2.0.1'
9 | end
10 |
11 | def tokumx_rootdir
12 | "#{ENV['INTEGRATIONS_DIR']}/tokumx_#{tokumx_version}"
13 | end
14 |
15 | namespace :ci do
16 | namespace :tokumx do |flavor|
17 | task before_install: ['ci:common:before_install']
18 |
19 | task install: ['ci:common:install'] do
20 | unless Dir.exist? File.expand_path(tokumx_rootdir)
21 | # Downloads
22 | # http://www.tokutek.com/tokumx-for-mongodb/download-community/
23 | sh %(curl -s -L\
24 | -o $VOLATILE_DIR/tokumx-#{tokumx_version}.tgz\
25 | https://s3.amazonaws.com/dd-agent-tarball-mirror/tokumx-#{tokumx_version}-linux-x86_64-main.tar.gz)
26 | sh %(mkdir -p #{tokumx_rootdir})
27 | sh %(tar zxf $VOLATILE_DIR/tokumx-#{tokumx_version}.tgz\
28 | -C #{tokumx_rootdir} --strip-components=1)
29 | end
30 | end
31 |
32 | task before_script: ['ci:common:before_script'] do
33 | sh %(mkdir -p $VOLATILE_DIR/tokumxd1)
34 | sh %(#{tokumx_rootdir}/bin/mongod --port 37017\
35 | --pidfilepath $VOLATILE_DIR/tokumxd1/tokumx.pid\
36 | --dbpath $VOLATILE_DIR/tokumxd1\
37 | --logpath $VOLATILE_DIR/tokumxd1/tokumx.log\
38 | --noprealloc --rest --fork)
39 |
40 | sh %(#{tokumx_rootdir}/bin/mongo\
41 | --eval "printjson(db.serverStatus())" 'localhost:37017')
42 | end
43 |
44 | task script: ['ci:common:script'] do
45 | this_provides = [
46 | 'tokumx'
47 | ]
48 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
49 | end
50 |
51 | task before_cache: ['ci:common:before_cache']
52 |
53 | task cleanup: ['ci:common:cleanup'] do
54 | sh %(kill `cat $VOLATILE_DIR/tokumxd1/tokumx.pid`)
55 | end
56 |
57 | task :execute do
58 | Rake::Task['ci:common:execute'].invoke(flavor)
59 | end
60 | end
61 | end
62 |
--------------------------------------------------------------------------------
/conf.d/redisdb.yaml.example:
--------------------------------------------------------------------------------
1 | init_config:
2 |
3 | instances:
4 | - host: localhost
5 | port: 6379
6 |
7 | # Can be used in lieu of host/port
8 | #
9 | # unix_socket_path: /var/run/redis/redis.sock # optional, can be used in lieu of host/port
10 |
11 | # Additional connection options
12 | #
13 | # db: 0
14 | # password: mypassword
15 | # socket_timeout: 5
16 |
17 | # Optional SSL/TLS parameters
18 | # ssl: False # Optional (default to False)
19 | # ssl_keyfile: # Path to the client-side private keyfile
20 | # ssl_certfile: # Path to the client-side certificate file
21 | # ssl_ca_certs: # Path to the ca_certs file
22 | # ssl_cert_reqs: # Specifies whether a certificate is required from the
23 | # # other side of the connection, and whether it will be validated if
24 | # # provided.
25 | # * 0 for ssl.CERT_NONE (certificates ignored)
26 | # * 1 for ssl.CERT_OPTIONAL (not required, but validated if provided)
27 | # * 2 for ssl.CERT_REQUIRED (required and validated)
28 |
29 | # Optional extra tags added to all redis metrics
30 | # tags:
31 | # - optional_tag1
32 | # - optional_tag2
33 | #
34 |
35 | # Check the length of these keys
36 | #
37 | # keys:
38 | # - key1
39 | # - key2
40 |
41 | # Display a warning in the info page if the keys we're tracking are missing
42 | # Default: True
43 | #
44 | # warn_on_missing_keys: True
45 |
46 | # Max number of entries to fetch from the slow query log
47 | # By default, the check will read this value from the redis config
48 | # If it's above 128, it will default to 128 due to potential increased latency
49 | # to retrieve more than 128 slowlog entries every 15 seconds
50 | # If you need to get more entries from the slow query logs
51 | # set the value here.
52 | # Warning: It may impact the performance of your redis instance
53 | # slowlog-max-len: 128
54 |
55 | # Collect INFO COMMANDSTATS output as metrics.
56 | # command_stats: False
57 |
--------------------------------------------------------------------------------
/ci/system.rb:
--------------------------------------------------------------------------------
1 | # (C) Datadog, Inc. 2010-2016
2 | # All rights reserved
3 | # Licensed under Simplified BSD License (see LICENSE)
4 |
5 | require './ci/common'
6 |
7 | # Changing this version matters "less" here than for other flavors
8 | # because sysstat is shipped with the self-contained agent
9 | def sysstat_version
10 | '11.0.1'
11 | end
12 |
13 | def system_rootdir
14 | "#{ENV['INTEGRATIONS_DIR']}/system_#{sysstat_version}"
15 | end
16 |
17 | namespace :ci do
18 | namespace :system do |flavor|
19 | task before_install: ['ci:common:before_install']
20 |
21 | task install: ['ci:common:install'] do
22 | unless Dir.exist? File.expand_path(system_rootdir)
23 | sh %(curl -s -L\
24 | -o $VOLATILE_DIR/system-#{sysstat_version}.tar.xz\
25 | https://s3.amazonaws.com/dd-agent-tarball-mirror/sysstat-11.0.1.tar.xz)
26 | sh %(mkdir -p $VOLATILE_DIR/system)
27 | sh %(mkdir -p #{system_rootdir})
28 | sh %(mkdir -p #{system_rootdir}/var/log/sa)
29 | sh %(tar Jxf $VOLATILE_DIR/system-#{sysstat_version}.tar.xz\
30 | -C $VOLATILE_DIR/system --strip-components=1)
31 | sh %(cd $VOLATILE_DIR/system\
32 | && conf_dir=#{system_rootdir}/etc/sysconfig sa_dir=#{system_rootdir}/var/log/sa\
33 | ./configure --prefix=#{system_rootdir} --disable-man-group\
34 | && make\
35 | && make install)
36 | end
37 | end
38 |
39 | task before_script: ['ci:common:before_script'] do
40 | sh %(mkdir -p $INTEGRATIONS_DIR/bin)
41 | sh %(rm -f $INTEGRATIONS_DIR/bin/mpstat)
42 | sh %(ln -s #{system_rootdir}/bin/mpstat $INTEGRATIONS_DIR/bin/mpstat)
43 | end
44 |
45 | task script: ['ci:common:script'] do
46 | this_provides = [
47 | 'system'
48 | ]
49 | Rake::Task['ci:common:run_tests'].invoke(this_provides)
50 | end
51 |
52 | task before_cache: ['ci:common:before_cache']
53 |
54 | task cleanup: ['ci:common:cleanup']
55 |
56 | task :execute do
57 | Rake::Task['ci:common:execute'].invoke(flavor)
58 | end
59 | end
60 | end
61 |
--------------------------------------------------------------------------------