├── .CI ├── check_yaml.py ├── dict │ ├── neteye_dict.aff │ └── neteye_dict.dic ├── markdown2html.py └── spellchecker.sh ├── .gitignore ├── .gitmodules ├── .travis.yml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── doc ├── 020_os_configuration.md ├── 030_neteye_standalone_init.md ├── 031_monitoring_zones_master.md ├── 032_configure_satellite.md ├── 032_monitoring_zones_satellite.md ├── 040_neteye_cluster_init.md ├── 050_community_configs_init.md ├── 052_baskets_import.md ├── 090_contributing_toGit.md ├── 091_git_branching.md ├── 092_git_submodules.md ├── 093_git_global_configs.md ├── README.md ├── img │ ├── 090_new_pull_request.png │ ├── 090_pull_request_compare.png │ └── 090_pull_request_create.png ├── monitoring_plugins.md └── monitoring_templates.md ├── itoa ├── README.md ├── agent_configurations │ ├── README.md │ ├── neteye3_collector │ │ ├── README.md │ │ ├── host_status_overview.json │ │ ├── host_status_overview.png │ │ ├── telegraf_inputsThrukServiceDetail.conf.diff │ │ └── telegraf_inputsThrukTacticalOverview.conf.diff │ └── windows │ │ ├── README.md │ │ └── telegraf.conf ├── aix │ ├── dashboard │ │ └── aix_performance.json │ ├── influx_stream │ │ └── README.md │ └── json_file_delivery │ │ ├── README.md │ │ ├── njmon_influxDB_injector_30.py │ │ ├── njmon_influx_injector.sh │ │ ├── njmon_to_InfluxDB_injector_15.py │ │ └── run_njmon_job.sh ├── dashboards │ └── README.md └── neteye_dataConsumer_infrastructure │ ├── README.md │ ├── neteye_nats_proxy.md │ ├── stan.conf │ ├── stan_enablePublicListener.conf.diff │ └── telegraf_inputsNats_consumer.conf.diff ├── logmanager ├── README.md ├── analytics_dashboards │ └── log_manager_authentication_auditing.json ├── elastic │ ├── elasticsearch.md │ ├── elasticsearch_config │ │ ├── README.md │ │ ├── jvm.options.diff │ │ └── log4j2.properties.diff │ └── logstash │ │ ├── 0_i05_beats.input │ │ ├── 2_o05_beats.output │ │ ├── beats │ │ ├── README.md │ │ └── filebeat.yml │ │ └── grok_filters ├── eventhandler │ └── aa_forward_all_to_eventhandler.conf └── searchguard │ ├── README.md │ ├── eventid_filter.png │ ├── eventid_safed_filter.png │ ├── role_definition.png │ ├── role_definition_indexfilter.png │ ├── role_definition_indexpermission.png │ ├── rolemapping_AD_Groups_definition.png │ ├── rolemapping_definition.png │ ├── sg_config_sample.yml │ ├── sg_roles_mapping_sample.yml │ └── sg_roles_sample.yml ├── monitoring ├── agents │ ├── linux │ │ └── rhel-centos │ │ │ └── agent_registration_configuration.sh │ └── microsoft │ │ └── icinga │ │ ├── README.md │ │ ├── configs │ │ ├── .htaccess │ │ └── sample_configs.ini │ │ ├── deployment_batch │ │ ├── 01_ConfigureAgent.bat │ │ └── README.md │ │ ├── deployment_powershell │ │ ├── README.md │ │ ├── neteye_agent_deployment.ps1 │ │ ├── neteye_simple_agent_deployment.ps1 │ │ ├── satellite_zone │ │ │ └── agent_localinstall.bat │ │ └── tools │ │ │ ├── curl.exe │ │ │ └── libcurl-x64.dll │ │ ├── deployment_remexec │ │ ├── DeployAgent.ps1 │ │ ├── deploy_Icinga_agents_remotely.pdf │ │ ├── install_hostList.bat │ │ ├── reconfigure_Icinga2Agent_LogonName.bat │ │ └── reconfigure_hostList.bat │ │ └── monitoring_scripts │ │ └── restart_service.ps1 ├── alyvix │ ├── neteye4 │ │ ├── Director-Basket_Alyvix-Testcase-Manager.json │ │ ├── Grafana-Alyvix_Testcases.json │ │ ├── alyvix-cleanup.cron.hourly │ │ ├── alyvix-reports.conf │ │ ├── alyvix.sysconfig │ │ └── run_serialized_services.py │ └── windows │ │ ├── Run-AlyvixRobot.ps1 │ │ ├── check_and_restart_icinga2.bat │ │ └── run_icinga_agent.cmd ├── 
alyvix3-server │ ├── Director-Basket_Alyvix3-Server.json │ ├── Director-Basket_Alyvix3-Testcase.json │ ├── Grafana-Alyvix_Testcases.json │ ├── check_alyvix3_testcase.pl │ ├── check_alyvix3_testcase_verbose.sh │ ├── check_alyvix3_testcases.pl │ ├── httpd-proxypass-alyvix.conf │ └── run_alyvix_workflow.sh ├── analytics_dashboards │ ├── README.md │ ├── alyvix │ │ ├── Alyvix_Performance_comparison.png │ │ ├── Alyvix_Performance_comparison_neteye3.json │ │ ├── Alyvix_Performance_overview_neteye3.json │ │ ├── README.md │ │ ├── alyvix_performance_overview.png │ │ ├── alyvix_troubleshooting_view.json │ │ └── alyvix_troubleshooting_view.png │ ├── generic_services.json │ ├── generic_services.png │ ├── itoa_cust_cpu.json │ ├── itoa_cust_disk_win.json │ ├── itoa_cust_diskspace.json │ ├── itoa_cust_diskspace.png │ ├── itoa_cust_hostalive.json │ ├── itoa_cust_hostalive.png │ ├── itoa_cust_interfaces.json │ ├── itoa_cust_interfaces.png │ ├── itoa_cust_load.json │ ├── itoa_cust_memory.json │ └── itoa_cust_memory_win.json ├── business-services │ ├── README.md │ ├── check_dynamic_bp.py │ ├── check_dynamic_bp_cli.py │ ├── check_dynamic_bp_v2.py │ ├── director_basket_bp_bynamic_business_process.json │ └── pve4dynamic_bp.tar.gz ├── command_orchestrator │ └── README.md ├── configurations │ ├── director │ │ └── businessprocess_automation │ │ │ ├── Director-Basket_101_BusinessProcess_Sync.json │ │ │ ├── README.md │ │ │ ├── icingaweb_director_businessprocess_automation.patch │ │ │ └── icingaweb_director_businessprocess_v2.1_automation.patch │ ├── icinga │ │ ├── api │ │ │ ├── README.md │ │ │ ├── api-deploy-user.conf │ │ │ └── monitoring_object_modify_attributes.sh │ │ └── dependency │ │ │ ├── README.md │ │ │ ├── dependency_host2service.conf │ │ │ ├── dependency_icingaAgent.conf │ │ │ └── dependency_parentChild.conf │ ├── icinga_fileshipper │ │ └── README.md │ └── icingaweb2 │ │ ├── icingaweb2_dashboard.md │ │ ├── icons │ │ ├── aix-high.png │ │ ├── cisco3-high.png │ │ ├── device-snmp.png │ │ ├── device.png │ │ ├── neteye.png │ │ ├── switch-high.png │ │ └── vmware.png │ │ └── navigation │ │ └── host-actions.ini ├── discovery │ └── network-discovery-nedi │ │ ├── README.md │ │ └── sysobj │ │ ├── 1.3.6.1.4.1.3717.4.1.def │ │ ├── 1.3.6.1.4.1.47196.4.1.1.1.100.def │ │ ├── 1.3.6.1.4.1.47196.4.1.1.1.2.def │ │ ├── 1.3.6.1.4.1.47196.4.1.1.1.301.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.1.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.3.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.5.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.11.2.2.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.1.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.5.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.6.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.7.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.7.1.10.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.7.1.47.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.1.9.1.1.def │ │ ├── 1.3.6.1.4.1.6486.800.1.1.2.2.2.1.1.6.def │ │ ├── 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.def │ │ ├── 1.3.6.1.4.1.6486.801.1.1.2.1.10.1.3.def │ │ └── 1.3.6.1.4.1.6486.801.1.1.2.1.12.1.1.def ├── eventhandler │ └── windows_restart │ │ ├── 01_command.json │ │ ├── 02_service.json │ │ ├── README.md │ │ └── event_restart_windows.ps1 ├── monitoring-plugins │ ├── README.md │ ├── aix │ │ ├── 9000a-st_aix_health.sh │ │ └── 9000c-ss_aix_health.sh │ ├── cisco │ │ ├── check_cisco_nexus.md │ │ ├── check_cisco_nexus_cpu.pl │ │ ├── check_cisco_nexus_hardware.pl │ │ └── check_cisco_nexus_mem.pl │ ├── database │ │ ├── db2 │ │ │ ├── README.md │ │ │ └── check_db2_health │ │ ├── mssql 
│ │ │ ├── README.md │ │ │ ├── check_mssql_authWrapper │ │ │ │ ├── auth.conf │ │ │ │ └── check_mssql_health.sh │ │ │ ├── check_mssql_health │ │ │ └── check_mssql_health.neteye3 │ │ └── oracle │ │ │ ├── README.md │ │ │ └── check_oracle_health │ ├── dell │ │ ├── README.md │ │ ├── check_dell_powerconnect │ │ └── check_idrac │ │ │ ├── README.md │ │ │ └── idrac2.2rc4 │ ├── elk │ │ ├── README.md │ │ ├── elasticsearch │ │ │ ├── check_elasticsearch.md │ │ │ ├── check_elasticsearch_index_ingestion.sh │ │ │ └── elasticsearch_index_ingestion_basket.json │ │ └── logstash │ │ │ └── check_logstash_queue.sh │ ├── emc │ │ └── EMC_Navicli.md │ ├── f5 │ │ ├── check_F5_Platform.pl │ │ ├── check_bigip_cpu.pl │ │ └── check_f5_pool_members.pl │ ├── hp │ │ ├── Director-Basket_ILO_Health_4f57b43.json │ │ ├── check_ilo2_health.md │ │ └── check_ilo2_health.pl │ ├── http │ │ ├── README.md │ │ └── check_curl_krb5.sh │ ├── icinga2-selfmonitoring │ │ ├── README.md │ │ ├── check_grafana_metrics.sh │ │ ├── check_icinga2-master_config.sh │ │ ├── check_icinga2-master_status.sh │ │ ├── check_icinga2_config.sh │ │ └── check_icinga2_status.sh │ ├── iseries │ │ ├── Director-Basket_iSeries_Monitoring_25da967.json │ │ └── iseries.tar.gz │ ├── linux_unix │ │ ├── check_mem.pl │ │ └── check_mem.txt │ ├── microsoft │ │ ├── ax2012_powershell_module │ │ │ ├── AxMonitor.DIXFservice.dll │ │ │ ├── AxMonitor.HelpService.dll │ │ │ ├── AxMonitor.Init.ps1 │ │ │ ├── AxMonitor.dll │ │ │ ├── AxMonitor.psd1 │ │ │ ├── AxMonitor.psm1 │ │ │ ├── NTFSSecurity.zip │ │ │ ├── Readme.txt │ │ │ ├── scripts │ │ │ │ └── sql │ │ │ │ │ ├── headblocker.sql │ │ │ │ │ ├── headblocker2.txt │ │ │ │ │ ├── longTransactionAlertScript2.txt │ │ │ │ │ └── longtrans.sql │ │ │ └── settings.xml │ │ ├── ax_2012_jobs │ │ │ ├── 9000-command_powershell_neteye.sh │ │ │ ├── 9001-st_generic_agent_ps_aos_jobs_running.sh │ │ │ ├── ax_sql_jobs_overview.json │ │ │ └── win_c_script_neteye │ │ │ │ ├── batch_overdueJobs.sql │ │ │ │ ├── batch_runningJobs.sql │ │ │ │ └── check_aos_jobs.ps1 │ │ ├── exchange │ │ │ └── exchange_dbstatus_and_queue.md │ │ ├── failover_cluster │ │ │ └── microsoft_failover_cluster.txt │ │ ├── hyper-v │ │ │ └── hyperv_vms_not_under_monitoring │ │ │ │ ├── check_hyperv_monitoring │ │ │ │ ├── hyperv_vm_report.ps1 │ │ │ │ ├── hyperv_vms_missing.png │ │ │ │ └── nsclient.ini │ │ ├── logfile_check │ │ │ ├── check_logfiles.cfg │ │ │ ├── check_logfiles.exe │ │ │ └── check_logfiles.md │ │ └── wmi │ │ │ ├── README.md │ │ │ ├── check_wmi_plus.conf │ │ │ ├── check_wmi_plus.v1.64.tar.gz │ │ │ ├── check_wmi_plus │ │ │ ├── check_wmi_plus.README.txt │ │ │ ├── check_wmi_plus.makeman.sh │ │ │ ├── check_wmi_plus.pl │ │ │ ├── check_wmi_plus_help.pl │ │ │ ├── etc │ │ │ │ └── check_wmi_plus │ │ │ │ │ ├── check_wmi_plus.conf.sample │ │ │ │ │ ├── check_wmi_plus.d │ │ │ │ │ ├── CommandExamples.chtml │ │ │ │ │ ├── README.txt │ │ │ │ │ ├── WarnCritExamples.chtml │ │ │ │ │ ├── check_sql.ini │ │ │ │ │ ├── check_wmi_plus.ini │ │ │ │ │ ├── checkexchange.ini │ │ │ │ │ ├── checkiis.ini │ │ │ │ │ ├── events.ini │ │ │ │ │ ├── samples.ini │ │ │ │ │ └── test.ini │ │ │ │ │ └── check_wmi_plus.data │ │ │ │ │ └── check_wmi_plus.compiledini │ │ │ └── event_generic.pl │ │ │ ├── neteye3 │ │ │ └── service-profile-windows_wmi_services.zip │ │ │ ├── neteye4 │ │ │ ├── README.md │ │ │ ├── perl-Config-IniFiles-2.79-1.el7.noarch.rpm │ │ │ └── perl-Number-Format-1.73-14.el7.noarch.rpm │ │ │ └── sample_auth.conf │ ├── nagios_nrpe │ │ ├── Basket-NRPE-Windows-Basisservices.json │ │ ├── README.md │ │ ├── check_nrpe │ │ 
├── check_nrpe_quotes │ │ └── check_nrpe_windows_users │ ├── neteye │ │ ├── check_assetmanagement.conf │ │ ├── check_assetmanagement.pl │ │ ├── check_assetmanagement.txt │ │ ├── check_crm │ │ ├── check_crm.md │ │ ├── check_drbd9.md │ │ ├── check_drbd9.pl │ │ ├── check_drbd9_orig.pl │ │ ├── check_influx_diskspace_cluster.pl │ │ ├── check_ingress_elasticsearch_index.sh │ │ └── nagios-plugins-nrpe-2.15p4-3.neteye4.x86_64.rpm │ ├── network-devices │ │ ├── check_nwc_health │ │ │ ├── README.md │ │ │ ├── check_nwc_health │ │ │ ├── perl-Module-Load-0.24-3.el7.noarch.rpm │ │ │ ├── service_template-nwc_health.png │ │ │ └── service_template-nwc_health.sh │ │ ├── check_pcmeasure2 │ │ │ ├── README.md │ │ │ ├── check_pcmeasure2.pl │ │ │ └── service_template-pcmeasure.sh │ │ ├── fortinet │ │ │ ├── README.md │ │ │ ├── check_fortinet.pl │ │ │ ├── director_template_fortinet-command.sh │ │ │ └── perl-List-Compare-0.49-1.el7.noarch.rpm │ │ ├── interfaces │ │ │ ├── README.md │ │ │ ├── check_interfaces_centos7 │ │ │ ├── check_interfaces_rhel8 │ │ │ ├── interface_traffic.png │ │ │ └── interfaces_traffic.json │ │ └── radius_tacacs │ │ │ ├── check_radius_auth.py │ │ │ ├── check_radius_auth.txt │ │ │ ├── check_tacacs.py │ │ │ └── check_tacacs.txt │ ├── nutanix │ │ └── check_nutanix.pl │ ├── storage │ │ ├── check_fibrealliance.md │ │ ├── check_fibrealliance.sh │ │ ├── check_fibrealliance.sh.20190312_bak │ │ └── check_glusterfs │ ├── vmware │ │ ├── README.md │ │ ├── check_vmware_esx │ │ └── perl-Time-Duration-1.06-17.el7.noarch.rpm │ └── wireless │ │ ├── README.md │ │ └── cisco │ │ ├── JVM Metrics - JBoss-sample.json │ │ ├── README.md │ │ ├── WLC Access Points-1553608849487.json │ │ ├── WLC Clients-1553608861077.json │ │ ├── WLC General Infos-1553608876854.json │ │ ├── mibs │ │ ├── AIRESPACE-REF-MIB.txt │ │ ├── AIRESPACE-WIRELESS-MIB.txt │ │ ├── CISCO-LWAPP-AP-MIB.txt │ │ ├── CISCO-LWAPP-DOT11-CLIENT-MIB.txt │ │ ├── CISCO-LWAPP-DOT11-MIB.txt │ │ ├── CISCO-LWAPP-MOBILITY-EXT-MIB.txt │ │ ├── CISCO-LWAPP-RF-MIB.txt │ │ ├── CISCO-LWAPP-SYS-MIB.txt │ │ ├── CISCO-LWAPP-TC-MIB.txt │ │ ├── CISCO-LWAPP-WLAN-MIB.txt │ │ ├── CISCO-SMI.txt │ │ ├── CISCO-TC.txt │ │ ├── DISMAN-EXPRESSION-MIB.txt │ │ ├── ENTITY-MIB.txt │ │ ├── P-BRIDGE-MIB.txt │ │ ├── Q-BRIDGE-MIB.txt │ │ ├── README.md │ │ ├── RFC1213-MIB.txt │ │ ├── RMON2-MIB.txt │ │ └── TOKEN-RING-RMON-MIB.txt │ │ └── telegraf.conf ├── notification │ ├── README.md │ ├── email2neteye3sms │ │ └── n3email_2_n4sms.md │ ├── email_html_mail │ │ ├── README.md │ │ ├── clone_and_patch_repo.sh │ │ ├── nagios_host_mail.diff │ │ └── nagios_service_mail.diff │ ├── phone_call │ │ ├── Basket-notification-phonecall.json │ │ └── phone-notification.sh │ ├── sms │ │ ├── Director-Basket_SMS_Notification.json │ │ ├── sms-host-notification.sh │ │ ├── sms-service-notification.sh │ │ └── smssend │ ├── snmptrap │ │ ├── INSTALL.txt │ │ └── neteye-trap-notification │ ├── teams │ │ ├── Director-Basket_teams-notifications_56f58bd.json │ │ ├── README.md │ │ └── teams-notification.py │ └── telegram │ │ ├── Director-Basket_Telegram_Notification.json │ │ └── README.md ├── sahipro │ ├── Director-Basket_SahiPro.json │ ├── bin │ │ ├── firefox-de │ │ ├── firefox-it │ │ └── startsahi.sh │ ├── config │ │ ├── browser_types.xml │ │ ├── sysconfig.cfg │ │ └── userdata.properties.add │ ├── etc │ │ ├── sahipro-mysql.cron.hourly │ │ ├── sahipro.conf │ │ ├── sahipro.cron.hourly │ │ ├── sahipro.service │ │ ├── sahipro_runner.service │ │ └── vncserver@.service │ ├── extlib │ │ └── mariadb-java-client-2.6.0.jar │ ├── install.sh 
│ ├── lib │ │ └── neteye.sah │ ├── packages │ │ ├── README.txt │ │ ├── sahipro.xml │ │ ├── sahipro_runner.xml │ │ ├── silent_install.xml │ │ └── silent_install_runner.xml │ ├── phantomjs │ │ ├── phantomjs.sh │ │ ├── sahi.js │ │ ├── sahi_de.js │ │ ├── sahi_en.js │ │ └── sahi_it.js │ ├── plugin │ │ └── check_sahipro │ ├── rpm │ │ ├── libxdo-3.20150503.1-1.el7.x86_64.rpm │ │ ├── phantomjs2-2.1.1-1.neteye.x86_64.rpm │ │ ├── waitmax-1.1-1.el7.rf.x86_64.rpm │ │ └── xdotool-3.20150503.1-1.el7.x86_64.rpm │ ├── sbin │ │ ├── nginx_disable_server.sh │ │ ├── nginx_enable_server.sh │ │ └── restart_sahipro.sh │ └── vnc │ │ └── xstartup └── tornado │ ├── README.md │ ├── tornado_collectors │ └── webhook │ │ ├── eventlog_collector.json │ │ └── generic_webhook_collector.json │ ├── tornado_logrotate.conf │ ├── tornado_rule_icinga.md │ ├── tornado_rule_simple.md │ ├── tornado_rules │ ├── email │ │ ├── filter_email.json │ │ └── rules │ │ │ ├── 001_log_all_emails.json │ │ │ └── 010_icingaAction_all_emails.json │ ├── other_rules │ │ └── others_filter.json │ └── webhook │ │ ├── eventlog.json │ │ └── eventlog │ │ └── 010_eventlog.json │ ├── tornado_sample_rules │ └── draft_001 │ │ ├── config │ │ ├── email │ │ │ ├── filter.json │ │ │ └── rules │ │ │ │ ├── 0000000000_generic_archive_all_email_events.json │ │ │ │ └── 0000000010_sample_regex_with_monitoring_action.json │ │ ├── snmptrap │ │ │ ├── filter.json │ │ │ └── rules │ │ │ │ ├── 0000000000_generic_archive_all_snmptrap_events.json │ │ │ │ ├── 0000000010_sysupdtime_create_update_monitoring_object.json │ │ │ │ └── 0000000020_sysupdtime_create_update_monitoring_service.json │ │ └── webhook │ │ │ ├── filter.json │ │ │ └── hsg │ │ │ ├── filter.json │ │ │ └── rules │ │ │ ├── 0000000000_generic_archive_all_hsg_events.json │ │ │ ├── 0000000010_create_only_new_host_object.json │ │ │ └── 0000000020_monitoring_update_icinga_object_status.json │ │ └── data.json │ ├── tornado_setup.md │ └── webhook_sample │ ├── 003_create_live_hosts.json │ ├── README.md │ └── create_host_live.sh ├── neteye4 ├── LDAP_AD_Integration.md ├── backup │ ├── Director-Basket_PassiveMonitoring │ ├── README.md │ ├── backup_neteye.sh │ ├── backup_neteye_cluster_node.sh │ ├── backup_neteye_with_passive_check.sh │ └── neteye-backup.conf ├── etc │ └── mariadb │ │ └── neteye.cnf ├── neteye_secure_install │ ├── 990_customer_logo.sh │ └── README.md ├── neteyeshare │ ├── README.md │ └── neteye-share.conf └── scripts │ ├── ansible │ ├── README.md │ ├── inventory.ini │ ├── play_monitoring_files.yml │ └── play_system_files.yml │ ├── autoumount │ ├── autoumount.c │ ├── cust_synch_Cluster.py │ ├── cust_synch_Satellite.py │ ├── drbd_fix.sh │ └── eventgw_email_forwarder.sh ├── neteye4_operations ├── neteye_backup │ └── mariadb_backup │ │ ├── README.md │ │ └── mysqlbackup.sh ├── neteye_config_tuning │ ├── README.md │ ├── config.yml │ ├── inventory │ │ └── neteye_vms.ini │ └── neteye_module_settings_tuning.yum └── neteye_upgrade │ └── repo2pulp │ ├── README.md │ ├── inventory │ └── neteye_vms.ini │ └── point_repos_to_internal_pulp.yml ├── run_setup.sh └── scripts ├── 005_git_submodules_init.sh ├── 006_icinga_setup_configurations.sh ├── 010_init_neteyeshare.sh ├── 011_init_neteyeshare_weblink.sh ├── 020_get_icinga2_agents.sh ├── 021_get_icinga2_agent_nix.sh ├── 022_get_icinga2_agent_rhel7.sh ├── 030_ressources_init.sh ├── 031_root_navigation_sample.sh ├── 032_role_init.sh ├── 033_authentication_init.sh ├── 034_auth_groups_init.sh ├── 040_monitoring_templates_init.sh ├── 041_icingaweb2_icons_init.sh ├── 
051_copy_nonproduct_monitoring_git_plugins.sh ├── 052_install_nonproduct_monitoring_plugins.sh ├── 053_install_product_monitoring_plugins_before_release.sh ├── 055_install_monitoring_plugin_configs.sh ├── 060_synch_monitoring_plugins.sh ├── 061_sync_monitoring_configurations.sh ├── 062_sync_monitoring_analytics.sh ├── 070_synch_itoa.sh ├── 071_get_telegraf_agents.sh ├── 090_clusterSynch_PluginContribDir.sh ├── 091_clusterSynch_monitoringConfigs.sh ├── 101_synch_log.sh ├── 102_get_log_agents.sh ├── 150_tornado_default_rules.sh ├── README.md └── update_monitoring-plugins_from_ext_git.sh /.CI/check_yaml.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import yaml 3 | import os 4 | 5 | for root, dirs, files in os.walk(sys.argv[1]): 6 | for f in files: 7 | if f.endswith(".yml"): 8 | my_file = os.path.join(root, f) 9 | print("Checking if {} is a valid yaml".format(my_file)) 10 | yaml.safe_load(open(my_file)) -------------------------------------------------------------------------------- /.CI/dict/neteye_dict.aff: -------------------------------------------------------------------------------- 1 | SET UTF-8 2 | 3 | SFX M Y 1 4 | SFX M 0 's . 5 | -------------------------------------------------------------------------------- /.CI/markdown2html.py: -------------------------------------------------------------------------------- 1 | import mistune 2 | import sys 3 | 4 | 5 | with open(sys.argv[1], "r") as input_markdown: 6 | content = input_markdown.read() 7 | parsed_html = mistune.markdown(content) 8 | with open("{}.html".format(sys.argv[1]), "w") as output_html: 9 | output_html.write(parsed_html) 10 | -------------------------------------------------------------------------------- /.CI/spellchecker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | SCRIPT_DIR=$(dirname "${0}") 3 | SCRIPT_NAME=$(basename "${0}") 4 | BASEDIR="./" 5 | TOTAL_ERRORS=0 6 | 7 | # Check if hunspell is installed 8 | hunspell --help > /dev/null 2>&1 9 | if [[ "$?" -eq 127 ]]; 10 | then 11 | echo "You need to install hunspell and hunspell-en to run this script" 12 | exit 127 13 | fi 14 | 15 | export DICPATH="${SCRIPT_DIR}/dict" 16 | for FILE_NAME in $(find "${BASEDIR}" -name "*.md"); 17 | do 18 | if test -f "${FILE_NAME}"; 19 | then 20 | echo "Checking ${FILE_NAME}..."; 21 | python "${SCRIPT_DIR}/markdown2html.py" "${FILE_NAME}"; 22 | OUTPUT=$(hunspell -l -H -d "en_US,neteye_dict" "${FILE_NAME}.html"); 23 | ERRORS=$(echo "${OUTPUT}" | grep -v "^\s*$" | wc -l); 24 | TOTAL_ERRORS=$((TOTAL_ERRORS + ERRORS)) 25 | if [[ "$ERRORS" -ne 0 ]]; then 26 | echo "${FILE_NAME} errors: $(echo "${OUTPUT}" | tr "\n" " ")"; 27 | fi; 28 | rm "${FILE_NAME}.html" 29 | fi; 30 | done 31 | echo "Total Errors: ${TOTAL_ERRORS}" 32 | 33 | if [[ "${TOTAL_ERRORS}" -ne 0 ]]; then 34 | echo "If you are sure one or more words are not spelling errors," 35 | echo "feel free to add them to the dictionary under ${DICPATH}/neteye_dict.dic." 36 | echo "N.B. 
Add the words to the list in alphabetical order and update the first line of the file" 37 | fi 38 | 39 | exit "${TOTAL_ERRORS}" 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Vi gitignore 2 | *.swp 3 | *.swo 4 | .*.un~ 5 | *~ 6 | # 7 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "monitoring/monitoring-plugins/vmware/esx_hardware"] 2 | path = monitoring/monitoring-plugins/vmware/esx_hardware 3 | url = https://github.com/Napsty/check_esxi_hardware 4 | [submodule "monitoring/agents/microsoft/icinga/icinga2-powershell-module"] 5 | path = monitoring/agents/microsoft/icinga/icinga2-powershell-module 6 | url = https://github.com/Icinga/icinga2-powershell-module 7 | [submodule "itoa/dashboards/GrafanaWindowsHostDashboard"] 8 | path = itoa/dashboards/GrafanaWindowsHostDashboard 9 | url = https://github.com/ManDevOps/GrafanaWindowsHostDashboard 10 | [submodule "monitoring/notification/telegram/notification_commands"] 11 | path = monitoring/notification/telegram/notification_commands 12 | url = https://github.com/sysadmama/icinga2-notification-telegram.git 13 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | git: 3 | submodules: false 4 | 5 | before_install: 6 | - sudo apt update -y 7 | - sudo apt install python3 python3-pip python3-virtualenv hunspell myspell-en-us -y 8 | - virtualenv ci_pve && ci_pve/bin/pip install mistune pyyaml 9 | 10 | script: 11 | - ./ci_pve/bin/python ./.CI/check_yaml.py "." 12 | - source ./ci_pve/bin/activate && ./.CI/spellchecker.sh && deactivate 13 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution to this repository guidelines 2 | 3 | [Instruction to fork and contribute via pull requests is indicated here](./doc/090_contributing_toGit.md) 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NetEye 4 Community Portal 2 | 3 | Welcome to the community repository for NetEye 4 users. 4 | This repository comes with the purpose to share the best-practices and enhancements created by our userbase. Started as initiative during the first projects, it provides now a platform to accelerate any implementation project by providing: 5 | - how-to documentations for setup and configuration of a NetEye 4 6 | - monitoring templates such as host- or service templates, commands and fields 7 | - additional monitoring plugins (scripts in part linked to 3rd party repositories) 8 | - sample configurations to automate the configuration of best-practice configurations 9 | 10 | ## Getting started guide for NetEye 4 11 | This repo guides you through the following steps of NetEye. Depending on the status of your NetEye 4 project it is suggested to go through both steps: 12 | 1. [setup the OS of a fresh standalone NetEye 4](/doc/020_os_configuration.md) 13 | 2. [Initialize the NetEye 4 resources (Standalone)](/doc/030_neteye_standalone_init.md) 14 | 3. 
[Configure monitoring zoning architecture](/doc/031_monitoring_zones_master.md) 15 | 4. [integration and autosetup of templates, monitoring plugins and sample configurations](/doc/050_community_configs_init.md) 16 | 17 | ## References 18 | NetEye 3 configuration and templates collection: a limited NetEye 3 related [collection of enhancements can be found here](https://github.com/zampat/neteye3) 19 | 20 | 21 | ## Contributing to the community project 22 | 23 | Help from the community is appreciated. Read how to contribute with your configurations, plugins, etc.: 24 | [Contribute with your improvements](./doc/090_contributing_toGit.md) 25 | -------------------------------------------------------------------------------- /doc/040_neteye_cluster_init.md: -------------------------------------------------------------------------------- 1 | # NetEye 4 Cluster Setup 2 | 3 | 4 | [Steps to define certificates and zones for master and satellite services of a cluster](./031_master_satellite.md) 5 | 6 | -------------------------------------------------------------------------------- /doc/052_baskets_import.md: -------------------------------------------------------------------------------- 1 | # Import configurations via Baskets 2 | 3 | Since NetEye 4.5, [Baskets support the exchange of configurations via import/export of JSON files](https://github.com/Icinga/icingaweb2-module-director/issues/1630) 4 | 5 | Baskets will be provided in a standardized way for NetEye and will be made available within the "monitoring" section of this community portal, or in any other place specified at a later time. 6 | 7 | ## Getting baskets 8 | 9 | Overview of provided baskets: 10 | - Automation: [Import definitions and synchronization rules](https://github.com/zampat/icinga2-monitoring-templates/tree/master/baskets) 11 | 12 | ## Installing Baskets 13 | 14 | 1. Download the .json file 15 | 2. Open the Director menu: "Configuration Baskets" 16 | 3. Click "Upload" and specify the new basket name and the .json file 17 | 4. Within the new basket definition go to the tab "Snapshots" 18 | 5. Select the available snapshot from the list and click "Restore" 19 | 6. Confirm the target db "director" and click "Restore" 20 | 21 | Done. 22 | -------------------------------------------------------------------------------- /doc/091_git_branching.md: -------------------------------------------------------------------------------- 1 | # Advanced GIT contribution via branching model 2 | 3 | # Introduction 4 | 5 | [Get familiar with the branching concepts of git](https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging) 6 | 7 | # Operations 8 | 9 | Switch to the master branch and update the local repository 10 | ``` 11 | git checkout master 12 | git fetch 13 | git pull 14 | git status 15 | ``` 16 | 17 | Define a new branch or switch to an existing one 18 | ``` 19 | git checkout -b branch_patch_123 20 | ``` 21 | If the branch does not reside in the local repository and you need to fetch it from the remote repo: 22 | ``` 23 | # git fetch origin 24 | # git checkout -b <branch_name> origin/<branch_name> 25 | ``` 26 | 27 | Now change files and commit those changes to the branch: 28 | ``` 29 | git add changed_file.py 30 | git commit -a -m "important bugfix on python library xyz" 31 | ``` 32 | 33 | Push the changes to the remote branch of the same name.
34 | [Interesting discussion provided in this thread](https://stackoverflow.com/questions/5082249/pushing-to-git-remote-branch) 35 | ``` 36 | git push --set-upstream origin branch_patch_123 37 | ``` 38 | 39 | 40 | -------------------------------------------------------------------------------- /doc/092_git_submodules.md: -------------------------------------------------------------------------------- 1 | # Submodules: Integration of third-party repos into another repo 2 | 3 | [Mastering Submodules](https://medium.com/@porteneuve/mastering-git-submodules-34c65e940407) 4 | 5 | Clone a repository with submodules automatically: 6 | Note: the NetEye run_setup.sh initializes submodules. 7 | ``` 8 | git clone --recursive git@github.com:name/repo.git 9 | ``` 10 | 11 | ## Add new submodules 12 | 13 | Register a new submodule: 14 | Example for the icinga2-powershell-module 15 | ``` 16 | git submodule add https://github.com/Icinga/icinga2-powershell-module monitoring/agents/microsoft/icinga/icinga2-powershell-module 17 | ``` 18 | -------------------------------------------------------------------------------- /doc/093_git_global_configs.md: -------------------------------------------------------------------------------- 1 | # Configuration for GitHub 2 | 3 | ## Proxy configuration 4 | 5 | When communication needs to occur via a proxy, configure it according to https://stackoverflow.com/questions/24907140/git-returns-http-error-407-from-proxy-after-connect 6 | 7 | Config samples: 8 | ``` 9 | git config --global http.proxy http://<username>:<password>@<proxyhost>:<port> 10 | git config --global http.proxyAuthMethod 'basic' 11 | ``` 12 | 13 | Verify the configuration: 14 | ``` 15 | # git config --list 16 | ``` 17 | -------------------------------------------------------------------------------- /doc/README.md: -------------------------------------------------------------------------------- 1 | # NetEye 4 setup how-to and community portal documentation 2 | 3 | ## NetEye 4 setup how-to 4 | 5 | __Preparatory steps__ 6 | - [setup the OS of a fresh standalone NetEye 4](/doc/020_os_configuration.md) 7 | 8 | __NetEye 4 Master or Standalone node__ 9 | - [Initialize the NetEye 4 resources (Standalone)](/doc/030_neteye_standalone_init.md) 10 | - [Configure monitoring zoning architecture](/doc/031_monitoring_zones_master.md) 11 | 12 | __NetEye 4 Satellites or Cluster node__ 13 | - [Configure monitoring satellite or cluster architecture](/doc/032_monitoring_zones_satellite.md) 14 | 15 | __NetEye 4 Monitoring Setup__ 16 | [integration and autosetup of templates, monitoring plugins and sample configurations](/doc/050_community_configs_init.md) 17 | 18 | ## Contributing to this community project 19 | 20 | [Contribute with your improvements to this repository](090_contributing_toGit.md) 21 | 22 | __Advanced topics:__ 23 | 24 | [Branching model](091_git_branching.md) 25 | [Submodules for Git](092_git_submodules.md) 26 | 27 | -------------------------------------------------------------------------------- /doc/img/090_new_pull_request.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/doc/img/090_new_pull_request.png -------------------------------------------------------------------------------- /doc/img/090_pull_request_compare.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/doc/img/090_pull_request_compare.png
-------------------------------------------------------------------------------- /doc/img/090_pull_request_create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/doc/img/090_pull_request_create.png -------------------------------------------------------------------------------- /doc/monitoring_plugins.md: -------------------------------------------------------------------------------- 1 | # Community collection of monitoring plugins 2 | 3 | This folder provides a selection of plugins used for monitoring with NetEye. Generally those Plugins are compatible for NetEye 4, but most should work for NetEye 3, too. Additional hint notes should advice you. 4 | 5 | When cloning this repository to your NetEye 4 environment and running the "run_setup.sh" many plugins are installed automatically in the Icinga2 PluginContribDir to be used with minimum effort with NetEye 4. 6 | 7 | Update policy role for these plugins: Here you should find Plugins from other authors of the Open-Source community. Plugins maintained within third-party repositories should be generally maintained there and forked to NetEye repo. Pull requests to those third-party repositories help to maintain the code, contribute the community and finally help each other. 8 | 9 | Happy monitoring ! 10 | 11 | [<<< Documentation overview <<<](./README.md) 12 | 13 | [<<< Monitoring Plugins <<<](../monitoring/monitoring-plugins) 14 | -------------------------------------------------------------------------------- /itoa/README.md: -------------------------------------------------------------------------------- 1 | 2 | # NetEye ITOA Software and Configurations overview 3 | 4 | IT operations analytics (ITOA) provides a solution to collect and archive performance data from various sources. The frequency of collection can be high and the contents indexed within a time series database. 5 | 6 | The architecture consists of infrastructure of: 7 | 1. Performance data collecting agents 8 | 2. Dashboards for Grafana 9 | 3. NetEye: collector of streaming data and forwarder to database 10 | 11 | ## Performance data collecting agents 12 | 13 | We make use of telegraf agents to collect performance data at high frequency. Various configuration samples are provided the telegraf agent, to collect performance data from Windows and Linux. 14 | Additional projects with connectors towards nats/influx are available too. Those configurations are under development (check out the various branches) 15 | 16 | [Agent setup & configuration](agent_configurations/) 17 | 18 | ## Dashboards for Grafana 19 | Make use of suitable dashboards importable into Grafana. Great examples are provided by community. 20 | [List of collected Grafana dashboards](dashboards/) 21 | 22 | ## NetEye: collector of streaming data and forwarder to database 23 | 24 | The collector service is provided as package for NetEye3 and NetEye4. 
You can [install the collector and find the relative how-to in the folder neteye_nats_collector](neteye_dataConsumer_infrastructure/) 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /itoa/agent_configurations/README.md: -------------------------------------------------------------------------------- 1 | # Performance data collection via Telegraf agent 2 | 3 | ## Windows 4 | 5 | - [Agent setup and configuration](./windows/) 6 | - [Default Grafana Dashboard](../dashboards/README.md) 7 | -------------------------------------------------------------------------------- /itoa/agent_configurations/neteye3_collector/host_status_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/itoa/agent_configurations/neteye3_collector/host_status_overview.png -------------------------------------------------------------------------------- /itoa/agent_configurations/windows/README.md: -------------------------------------------------------------------------------- 1 | # Setup Telegraf 2 | 3 | Download latest version from [Influx](https://portal.influxdata.com/downloads/) 4 | ``` 5 | wget https://dl.influxdata.com/telegraf/releases/telegraf-1.10.2_windows_amd64.zip 6 | unzip telegraf-1.10.2_windows_amd64.zip 7 | ``` 8 | Installation of Telegraf agent on windows: 9 | ``` 10 | Install Service: 11 | 12 | >telegraf.exe -service install --service-name=neteye_telegraf --config "C:\Program Files\Telegraf\telegraf.conf" --config-directory "C:\Program Files\Telegraf\conf.d\" 13 | 14 | Uninstall Service: 15 | >telegraf.exe -service uninstall --service-name=neteye_telegraf 16 | ``` 17 | 18 | ## Configuration of agent and setup of windows service 19 | [Configure telegraf according the official guide of the agent](https://github.com/influxdata/telegraf/blob/master/docs/WINDOWS_SERVICE.md) 20 | 21 | ## Grafana dashboard 22 | 23 | [Community telegraf dashboard for grafana](../../dashboards/README.md) 24 | -------------------------------------------------------------------------------- /itoa/dashboards/README.md: -------------------------------------------------------------------------------- 1 | # ITOA dashboards in Grafana 2 | 3 | ## Telegraf dashboards 4 | 5 | - Linux / Unix: 6 | [Community dashboard for basic os performance data from telegraf](https://grafana.com/dashboards/5955) 7 | - Windows 8 | [Community dashboard for basic os performance data from telegraf](https://grafana.com/dashboards/1902) 9 | -------------------------------------------------------------------------------- /itoa/neteye_dataConsumer_infrastructure/stan.conf: -------------------------------------------------------------------------------- 1 | # Some NATS Server TLS Configuration 2 | #listen: localhost:4222 3 | listen: :4222 4 | #tls: { 5 | # cert_file: "/path/to/server/cert_file" 6 | # key_file: "/path/to/server/key_file" 7 | # verify: true 8 | # timeout: 2 9 | #} 10 | 11 | # NATS Streaming Configuration 12 | streaming: { 13 | cluster_id: neteye_cluster 14 | 15 | # tls: { 16 | # client_cert: "/path/to/client/cert_file" 17 | # client_key: "/path/to/client/key_file" 18 | # } 19 | } 20 | 21 | -------------------------------------------------------------------------------- /itoa/neteye_dataConsumer_infrastructure/stan_enablePublicListener.conf.diff: -------------------------------------------------------------------------------- 1 | --- stan.conf.orig 2019-02-12 
09:15:02.527466477 +0100 2 | +++ stan.conf 2019-02-11 17:29:18.000000000 +0100 3 | @@ -1,5 +1,6 @@ 4 | # Some NATS Server TLS Configuration 5 | -listen: localhost:4222 6 | +#listen: localhost:4222 7 | +listen: :4222 8 | #tls: { 9 | # cert_file: "/path/to/server/cert_file" 10 | # key_file: "/path/to/server/key_file" 11 | -------------------------------------------------------------------------------- /logmanager/README.md: -------------------------------------------------------------------------------- 1 | # LogManager and SIEM community getting started 2 | 3 | ### Searchguard - advanced configuration how-to via command line interface 4 | The Search Guard module handles and protects access via Kibana to your documents stored in the Elasticsearch database. 5 | The NetEye user guide provides an important introduction in the chapter "Log Manager". 6 | Advanced configuration scenarios concern: 7 | - Backup and restore of SearchGuard configuration 8 | - LDAP integration and authentication via AD Groups 9 | 10 | [This how-to is provided here.](searchguard/README.md) 11 | 12 | ### Elastic performance tuning hints 13 | The section elastic/elasticsearch_config provides some configuration parameters you should review for performance tuning. 14 | [Related hints and external resources here.](elastic/elasticsearch_config/README.md) 15 | 16 | ### EventHandler integration of LogManager 17 | By default, incoming log messages from the LogManager are not forwarded to the EventHandler of NetEye for event processing. 18 | To activate the forwarding of all incoming log messages to the EventHandler, place the provided rsyslog action in the includes directory of the NetEye rsyslog server. 19 | To do so, copy the configuration file into the rsyslog.d folder and restart the rsyslog-logmanager service: 20 | ``` 21 | cp ./eventhandler/aa_forward_all_to_eventhandler.conf /neteye/shared/rsyslog/conf/rsyslog.d 22 | Standalone: systemctl restart rsyslog-logmanager.service 23 | Cluster: pcs resource restart rsyslog-logmanager 24 | ``` 25 | -------------------------------------------------------------------------------- /logmanager/elastic/elasticsearch.md: -------------------------------------------------------------------------------- 1 | ## Reference of useful elastic queries 2 | 3 | Query to see cluster health: 4 | 5 | **TODO: Adapt ssl authentication for NetEye 4** 6 | 7 | ``` 8 | neteye01]:/elastic-data# curl http://localhost:9200/_cluster/health?pretty=true 9 | { 10 | "cluster_name" : "neteye-elasticsearch", 11 | "status" : "green", 12 | "timed_out" : false, 13 | "number_of_nodes" : 2, 14 | "number_of_data_nodes" : 2, 15 | "active_primary_shards" : 2, 16 | "active_shards" : 4, 17 | "relocating_shards" : 0, 18 | "initializing_shards" : 0, 19 | "unassigned_shards" : 0, 20 | "delayed_unassigned_shards" : 0, 21 | "number_of_pending_tasks" : 0, 22 | "number_of_in_flight_fetch" : 0 23 | } 24 | ``` 25 | 26 | ## Backup and Restore 27 | 28 | [Reference blog post about snapshot and restore](https://www.elastic.co/blog/introducing-snapshot-restore).
29 | 30 | 31 | ```/data/backup/elastic_snapshot``` 32 | 33 | Define Backup path in elastic Configuration: 34 | 35 | ``` 36 | [root@neteye_ZAPA elastic_backup]# cat /etc/elasticsearch/etc/elasticsearch.yml | grep repo 37 | path.repo: /data/backup/elastic_backup 38 | ``` 39 | 40 | Register Backup: 41 | ``` 42 | curl -XPUT 'http://localhost:9200/_snapshot/elastic_backup' -d '{ "type": "fs", "settings": { "location": "/data/backup/elastic_snapshot", "compress": true }}' 43 | ``` 44 | Run Backup 45 | ``` 46 | curl -XPUT "localhost:9200/_snapshot/elastic_backup/snapshot_1?wait_for_completion=true" 47 | ``` 48 | 49 | Remove a snapshot: 50 | ``` 51 | curl -XDELETE "localhost:9200/_snapshot/elastic_backup/snapshot_1?pretty" 52 | ``` 53 | -------------------------------------------------------------------------------- /logmanager/elastic/elasticsearch_config/README.md: -------------------------------------------------------------------------------- 1 | # Elastic Sizing and Configuration advice 2 | 3 | ## Elasticsearch 4 | 5 | Configuring Elasticsearch 6 | [Elasticsearch Options](https://www.elastic.co/guide/en/elasticsearch/reference/master/settings.html) 7 | 8 | JVM Options 9 | 10 | See: jvm.options.diff 11 | [JVM Options](https://www.elastic.co/guide/en/elasticsearch/reference/master/jvm-options.html) 12 | 13 | Logging Options 14 | 15 | See: log4j2.properties.diff 16 | [Logging configuration](https://www.elastic.co/guide/en/elasticsearch/reference/master/logging.html) 17 | -------------------------------------------------------------------------------- /logmanager/elastic/elasticsearch_config/jvm.options.diff: -------------------------------------------------------------------------------- 1 | --- jvm.options.orig 2019-01-31 10:48:44.635068507 +0100 2 | +++ jvm.options 2019-01-31 10:57:07.268433909 +0100 3 | @@ -21,8 +21,8 @@ 4 | 5 | #-Xms1g 6 | #-Xmx1g 7 | --Xms20g 8 | --Xmx20g 9 | +-Xms30g 10 | +-Xmx30g 11 | 12 | ################################################################ 13 | ## Expert settings 14 | -------------------------------------------------------------------------------- /logmanager/elastic/elasticsearch_config/log4j2.properties.diff: -------------------------------------------------------------------------------- 1 | --- log4j2.properties.orig 2019-01-31 10:52:13.094857037 +0100 2 | +++ log4j2.properties 2019-01-31 10:53:40.213694018 +0100 3 | @@ -27,8 +27,10 @@ 4 | appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} 5 | appender.rolling.strategy.action.condition.type = IfFileName 6 | appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* 7 | -appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize 8 | -appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB 9 | +#appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize 10 | +#appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB 11 | +appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified 12 | +appender.rolling.strategy.action.condition.nested_condition.age = 14D 13 | 14 | rootLogger.level = info 15 | rootLogger.appenderRef.console.ref = console 16 | -------------------------------------------------------------------------------- /logmanager/elastic/logstash/0_i05_beats.input: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | type => "beats" 5 | } 6 | } 7 | 8 | 
-------------------------------------------------------------------------------- /logmanager/elastic/logstash/2_o05_beats.output: -------------------------------------------------------------------------------- 1 | output { 2 | if [type] == "beats" { 3 | elasticsearch { 4 | hosts => "elasticsearch.neteyelocal:9200" 5 | manage_template => false 6 | index => "beats-%{+YYYY.MM.dd}" 7 | document_type => "%{[@metadata][type]}" 8 | ssl => true 9 | cacert => "/neteye/shared/logstash/conf/certs/root-ca.crt" 10 | ssl_certificate_verification => true 11 | user => logstash 12 | password => "my secret generated password" 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /logmanager/elastic/logstash/beats/README.md: -------------------------------------------------------------------------------- 1 | # Setup of Filebeat 2 | 3 | ## Procedure documentation 4 | 1. Get a copy of filebeat 5 | 2. Install and configure filebeat 6 | 3. Configure filebeat 7 | 8 | ## 1. Get a copy of elastic filebeat 9 | 10 | Online resource for downloading latest version: 11 | https://www.elastic.co/de/downloads/beats/filebeat 12 | 13 | ## 2. Install filebeat 14 | • Unzip contents into local program files folder i.e. `c:\program files\filebeat\` 15 | • Register service. (Administrative powershell required) 16 | Execute `.\install-service-filebeat.ps1` 17 | 18 | ## 3. Configure filebeat 19 | 20 | Take filebeat configuration sample from sharepoint folder software and configurations/ (see link above). 21 | Edit sections: 22 | • Section “filebeat.inputs”: paths of files to include 23 | • Section “output.logstash”: destination of neteye4 siem server address 24 | 25 | Then (re)start service “filebeat” 26 | 27 | -------------------------------------------------------------------------------- /logmanager/elastic/logstash/grok_filters: -------------------------------------------------------------------------------- 1 | # Creating logstash 2 | 3 | - [Grok Plugin on Elastic.co - logstash](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html) 4 | - [Grok Tutorials](https://qbox.io/blog/logstash-grok-filter-tutorial-patterns) 5 | - [Grok Files supported by Logstash](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/firewalls) 6 | 7 | - [Grok test and matcher](http://grokconstructor.appspot.com/do/match) 8 | 9 | Simple parser example where I want to extract "wanted" into the field "match": 10 | this is my wanted text. 
11 | Grok filter: 12 | ``` 13 | this is my%{SPACE}%{WORD:match}%{SPACE}text 14 | ``` 15 | -------------------------------------------------------------------------------- /logmanager/eventhandler/aa_forward_all_to_eventhandler.conf: -------------------------------------------------------------------------------- 1 | $ModLoad omprog 2 | $actionomprogbinary /usr/share/neteye/eventhandler/bin/rsyslogprogout.pl 3 | *.* :omprog:;RSYSLOG_TraditionalFileFormat 4 | 5 | -------------------------------------------------------------------------------- /logmanager/searchguard/eventid_filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/logmanager/searchguard/eventid_filter.png -------------------------------------------------------------------------------- /logmanager/searchguard/eventid_safed_filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/logmanager/searchguard/eventid_safed_filter.png -------------------------------------------------------------------------------- /logmanager/searchguard/role_definition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/logmanager/searchguard/role_definition.png -------------------------------------------------------------------------------- /logmanager/searchguard/role_definition_indexfilter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/logmanager/searchguard/role_definition_indexfilter.png -------------------------------------------------------------------------------- /logmanager/searchguard/role_definition_indexpermission.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/logmanager/searchguard/role_definition_indexpermission.png -------------------------------------------------------------------------------- /logmanager/searchguard/rolemapping_AD_Groups_definition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/logmanager/searchguard/rolemapping_AD_Groups_definition.png -------------------------------------------------------------------------------- /logmanager/searchguard/rolemapping_definition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/logmanager/searchguard/rolemapping_definition.png -------------------------------------------------------------------------------- /logmanager/searchguard/sg_roles_mapping_sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sg_myrole_all_view: 3 | backendroles: 4 | - "admin" 5 | - "AD_MYGROUP_NAME" 6 | hosts: [] 7 | users: 8 | - "logmanager_user1" 9 | -------------------------------------------------------------------------------- /logmanager/searchguard/sg_roles_sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sg_myrole_all_view: 
3 | cluster: [] 4 | indices: 5 | '*': 6 | '*': 7 | - "UNLIMITED" 8 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/README.md: -------------------------------------------------------------------------------- 1 | # Icinga agents remote installation 2 | 3 | Deployment of Icinga2 Agent using the Director Self-Service API. 4 | 5 | ## Preparing the PowerShell install script 6 | 7 | Copy the `Icinga2Agent.psm1.default` file to `Icinga2Agent.ps1`. 8 | Add at the bottom of the section containing the token used by self-service API (Generate in host template tab "Agent": 9 | 10 | ``` 11 | exit Icinga2AgentModule ` 12 | -DirectorUrl 'https://neteye.mydomain/neteye/director/' ` 13 | -DirectorAuthToken '12345678900332af816fb69afe10fce12fa02d80' ` 14 | -IgnoreSSLErrors ` 15 | -RunInstaller 16 | ``` 17 | 18 | Download the `Icinga2Agent.ps1` and execute the powershell script in administrative session: 19 | Note: Adjust execution policy if needed 20 | 21 | ``` 22 | > Set-ExecutionPolicy Unrestricted 23 | > Icinga2Agent.ps1 24 | ``` 25 | 26 | Windows Policy might enforce TLS 1.3. Define in Powershell 1.2 as acceptable: 27 | ``` 28 | # Security policies might enforce TLS 1.2 in order to allow Invoke-Webrequest 29 | [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 30 | ``` 31 | 32 | ## Automated deployment of Icinga2 Agent 33 | 34 | Here you find a script collection for a deployment of the Icinga2 Agent on remote Windows servers. [Here you find the documentation.](./deploy_Icinga_agents_remotely.pdf) 35 | 36 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/configs/.htaccess: -------------------------------------------------------------------------------- 1 | AuthUserFile /neteye/shared/httpd/.htpasswd 2 | AuthName "Authorization to Icinga2 Agent configs" 3 | AuthType Basic 4 | 5 | #Allow any valid user 6 | require valid-user 7 | 8 | #Allow only one user with specified username 9 | require user configro 10 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/configs/sample_configs.ini: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/agents/microsoft/icinga/configs/sample_configs.ini -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_powershell/README.md: -------------------------------------------------------------------------------- 1 | # Powershell call for remote Icinga2 Agent setup 2 | 3 | Here you will find 2 approaches for automated setup and configuration of Icinga2 Agent. 4 | 5 | - neteye_simple_agent_deployment 6 | - neteye_agent_deployment 7 | 8 | For both approaches make sure to provide the setup of the Icinga2 Agent via HTTPS link or file-share. 9 | 10 | Next fetch the powershell script and execute the script: 11 | ``` 12 | https://neteye.mydomain.lan/neteyeshare/monitoring/agents/microsoft/icinga/neteye_agent_deployment.ps1 13 | 14 | > Invoke-Command -ComputerName COMPUTERNAME -FilePath C:\\neteye_agent_deployment.ps1 -Credential domain\user 15 | ``` 16 | 17 | ## !! Advice !! 
- Agent setup in remote satellite zone without access to Director self-service API requires various configurations to be provided from your site: 18 | 19 | To setup do: 20 | - Provide the Icinga2 `.msi` via https or file-share 21 | - publish the Icinga2 API to generate a host's ticket 22 | - configure Icinga2 CA Proxy 23 | - test the various services 24 | 25 | In general: this approach requires configurations on the Icinga2 / NetEye 4 infrastructure not documented in this section. In case you need help please contact our staff assisting you in your daily NetEye 4 tasks. 26 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_powershell/satellite_zone/agent_localinstall.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | REM Icinga2 agent installation file for neteye zones 4 | REM - if in satellite zone with need to add host translation to point to master node 5 | 6 | SET HOSTSFILEPATH=C:\Windows\System32\drivers\etc\hosts 7 | SET NETEYEMASTER=neteye.mydomain.lan 8 | 9 | findstr /m "%NETEYEMASTER%" %HOSTSFILEPATH% 10 | if %errorlevel%==1 ( 11 | echo Register host translation for %NETEYEMASTER% in hosts file 12 | @echo: >> %HOSTSFILEPATH% 13 | @echo 192.168.1.3 neteye_satellite.mydomain-dmz.lan >> %HOSTSFILEPATH% 14 | ) 15 | 16 | C:\Windows\system32\WindowsPowerShell\v1.0\powershell.exe Set-ExecutionPolicy Bypass 17 | C:\Windows\system32\WindowsPowerShell\v1.0\powershell.exe .\neteye_agent_deployment.ps1 18 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_powershell/tools/curl.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/agents/microsoft/icinga/deployment_powershell/tools/curl.exe -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_powershell/tools/libcurl-x64.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/agents/microsoft/icinga/deployment_powershell/tools/libcurl-x64.dll -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_remexec/DeployAgent.ps1: -------------------------------------------------------------------------------- 1 | Set-StrictMode -Version Latest 2 | 3 | Get-Date | Out-File -FilePath "$env:TMP\setup.txt" -Append 4 | 5 | Import-Module c:\temp\Icinga2Agent.psm1 6 | Icinga2AgentModule -DirectorUrl 'https://ne4.neteye.lab/neteye/director/' -DirectorAuthToken 'ec60146a6b509ff8c23aa1311d2e53d31e9cd413' -RunInstaller -IgnoreSSLErrors 7 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_remexec/deploy_Icinga_agents_remotely.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/agents/microsoft/icinga/deployment_remexec/deploy_Icinga_agents_remotely.pdf -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_remexec/install_hostList.bat: 
-------------------------------------------------------------------------------- 1 | @echo off 2 | :: Define the Powershell script(s) to copy to remote host1 3 | :: Define the list of remote hosts to install and configure agent 4 | 5 | SET url_AgentInstall_path=https://neteye4.mydomain/neteyeshare/monitoring/agents/microsoft/icinga/wp_pbzneteye4_monitoring/install_master_icinga2Agent.ps1 6 | SET url_AgentInstall_file=install_master_icinga2Agent.ps1 7 | SET path_workdir=c:\temp2 8 | 9 | for %%h in ( 10 | -host1 11 | -host2 12 | -host3 13 | ) do ( 14 | 15 | if %%h.==. echo Run this command with remote hostname 16 | if %%h.==. goto :EOF 17 | 18 | echo ">>> Starting Agent setup and configuration for Host: %%h" 19 | 20 | 21 | 22 | echo psexec64 \\%%h C:\Windows\system32\WindowsPowerShell\v1.0\powershell.exe Invoke-WebRequest -Uri %url_AgentInstall_path% -OutFile %path_workdir%\%url_AgentInstall_file% 23 | psexec64 -s \\%%h C:\Windows\system32\WindowsPowerShell\v1.0\powershell.exe Invoke-WebRequest -Uri %url_AgentInstall_path% -OutFile %path_workdir%\%url_AgentInstall_file% 24 | 25 | echo "[i] Proceeding with Icinga2Agent setup and configuration" 26 | echo psexec64 \\%%h C:\Windows\system32\WindowsPowerShell\v1.0\powershell.exe %path_workdir%\%url_AgentInstall_file% 27 | psexec64 \\%%h C:\Windows\system32\WindowsPowerShell\v1.0\powershell.exe %path_workdir%\%url_AgentInstall_file% 28 | 29 | echo "[+] Done for Host: %%h" 30 | ) 31 | 32 | :End 33 | echo "Abort of script" 34 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/deployment_remexec/reconfigure_Icinga2Agent_LogonName.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | :: Script to reconfigure a previously configured Icinga2 Agent service 3 | :: Define the list of remote hosts in for loop seciont 4 | 5 | :: NO configuration beyond this line 6 | SET ICINGASRV=icinga2 7 | SET ICINGASRV_Logon=LocalSystem 8 | 9 | for %%h in ( 10 | localhost 11 | host1 12 | host2 13 | host3 14 | ) do ( 15 | 16 | if %%h.==. echo Run this command with remote hostname 17 | if %%h.==. goto :EOF 18 | 19 | echo ">>> Starting Icinga2 Agent reconfiguration for Host: %%h" 20 | echo ">>> Starting Icinga2 Agent reconfiguration for Host: %%h">>c:\temp\reconfigure_Icinga2Agent_LogonName.log 21 | 22 | echo "psexec64 \\%%h SC CONFIG %ICINGASRV% obj= %ICINGASRV_Logon%">>c:\temp\reconfigure_Icinga2Agent_LogonName.log 23 | psexec64 \\%%h SC CONFIG "%ICINGASRV%" obj= "%ICINGASRV_Logon%">>c:\temp\reconfigure_Icinga2Agent_LogonName.log 24 | 25 | echo "Restarting Icinga service ....">>c:\temp\reconfigure_Icinga2Agent_LogonName.log 26 | psexec64 \\%%h sc stop %ICINGASRV%>NUL 27 | 28 | echo "Starting service...">>c:\temp\reconfigure_Icinga2Agent_LogonName.log 29 | psexec64 \\%%h sc start %ICINGASRV%>NUL 30 | 31 | echo "Done for Host: %%h" 32 | echo "Done for Host: %%h">>c:\temp\reconfigure_Icinga2Agent_LogonName.log 33 | ) 34 | 35 | :End 36 | echo "Abort of script" 37 | -------------------------------------------------------------------------------- /monitoring/agents/microsoft/icinga/monitoring_scripts/restart_service.ps1: -------------------------------------------------------------------------------- 1 | # Restart Service Script 2 | # Please enable external scripts and external scrips variable before use. 
3 | 4 | param ( 5 | [string[]]$serviceName 6 | ) 7 | 8 | if (!$serviceName) { 9 | 10 | Write-Host "Please pass the name of the service to restart" 11 | Write-Host "Usage: restart_service.ps1 " 12 | exit 3 13 | } 14 | 15 | 16 | Foreach ($Service in $ServiceName) 17 | { 18 | Restart-Service $ServiceName -ErrorAction SilentlyContinue -ErrorVariable ServiceError 19 | If (!$ServiceError) { 20 | $Time=Get-Date 21 | Write-Host "Restarted service $Service at $Time" 22 | } 23 | If ($ServiceError) { 24 | write-host $error[0] 25 | exit 3 26 | } 27 | } 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /monitoring/alyvix/neteye4/alyvix-cleanup.cron.hourly: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | 4 | # 5 | # Start only where icinga-master process is active and NO cluster 6 | # 7 | ! ls /etc/neteye-cluster >/dev/null 2>&1 || df /neteye/shared/icinga2 | grep /neteye/shared/icinga2 >/dev/null || exit 0 8 | 9 | TMPFILE=$(mktemp) 10 | trap 'rm -f $TMPFILE; exit 1' 1 2 15 11 | trap 'rm -f $TMPFILE' 0 12 | 13 | if [ -e /etc/sysconfig/alyvix ] 14 | then 15 | . /etc/sysconfig/alyvix 16 | fi 17 | 18 | if [ -z "$CLEANUP_HOURS" ] 19 | then 20 | CLEANUP_HOURS=720 21 | fi 22 | 23 | if [ -z "$CLEANUP_OK_HOURS" ] 24 | then 25 | CLEANUP_OK_HOURS=96 26 | fi 27 | 28 | if [ -z "$ALYVIX_LOGDIR" ] 29 | then 30 | ALYVIX_LOGDIR=/neteye/shared/httpd/alyvix-reports 31 | fi 32 | 33 | if [ ! -d "$ALYVIX_LOGDIR" ] 34 | then 35 | exit 0 36 | fi 37 | 38 | touch -d "$CLEANUP_OK_HOURS hours ago" $TMPFILE 39 | for d in "$ALYVIX_LOGDIR" $ALYVIX_EXTERNAL_DIRS 40 | do 41 | ls $d >/dev/nul 42 | if [ -d "$d" ] 43 | then 44 | /usr/sbin/tmpwatch -m -f --nosymlinks $CLEANUP_HOURS "$d" 45 | for j in $(find $d -name output.xml ! -newer $TMPFILE) 46 | do 47 | if grep 'fail="0".*All Tests' $j >/dev/null 48 | then 49 | dname=$(dirname $j) 50 | rm -rf $dname 51 | fi 52 | done 53 | fi 54 | done 55 | 56 | exit 0 57 | -------------------------------------------------------------------------------- /monitoring/alyvix/neteye4/alyvix-reports.conf: -------------------------------------------------------------------------------- 1 | # 2 | # This configuration file allows the neteye client software to be accessed at 3 | # http://localhost/neteye-client-software/ 4 | # 5 | Alias /alyvix-reports /neteye/shared/httpd/alyvix-reports 6 | 7 | 8 | Options Indexes 9 | # Formating improvement of index view 10 | IndexOptions FancyIndexing HTMLTable VersionSort NameWidth=* 11 | AllowOverride all 12 | Order allow,deny 13 | Allow from all 14 | Require all granted 15 | 16 | -------------------------------------------------------------------------------- /monitoring/alyvix/neteye4/alyvix.sysconfig: -------------------------------------------------------------------------------- 1 | CLEANUP_HOURS=360 2 | CLEANUP_OK_HOURS=24 3 | ALYVIX_LOGDIR=/neteye/shared/httpd/alyvix-reports 4 | # Remote directories to cleanup 5 | # ALYVIX_EXTERNAL_DIRS="/cifs/ /cifs/ ..." 
6 | ALYVIX_EXTERNAL_DIRS="" 7 | -------------------------------------------------------------------------------- /monitoring/alyvix/windows/check_and_restart_icinga2.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | dir c:\ProgramData\icinga2\var\log\icinga2\icinga2.log 3 | "c:\Program Files\NSClient++\nscp.exe" client --module CheckDisk --show-all -a path=c:\ProgramData\icinga2\var\log\icinga2 -a pattern=icinga2.log -a "filter=written > -5m" -a "crit=count < 1" -q check_files 4 | if ERRORLEVEL 1 ( 5 | schtasks /End /TN "Start Icinga2 Agent" 6 | schtasks /Run /TN "Start Icinga2 Agent" 7 | ) 8 | rem pause -------------------------------------------------------------------------------- /monitoring/alyvix/windows/run_icinga_agent.cmd: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | :begin 3 | 4 | "c:\Program Files\ICINGA2\sbin\icinga2.exe" daemon 5 | 6 | GOTO :begin -------------------------------------------------------------------------------- /monitoring/alyvix3-server/check_alyvix3_testcase_verbose.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | DIR=$(dirname $0) 4 | 5 | if [ -e "$DIR/check_alyvix3_testcase.pl" ] 6 | then 7 | CMD="$DIR/check_alyvix3_testcase.pl" 8 | elif [ -e /neteye/shared/monitorin/plugins/check_alyvix3_testcase.pl ] 9 | then 10 | CMD=/neteye/shared/monitorin/plugins/check_alyvix3_testcase.pl 11 | else 12 | echo "check_alyvix3_testcase.pl NOT found, exiting!" 13 | exit 3 14 | fi 15 | 16 | $CMD -v -v $@ 17 | exit $? 18 | -------------------------------------------------------------------------------- /monitoring/alyvix3-server/httpd-proxypass-alyvix.conf: -------------------------------------------------------------------------------- 1 | SSLProxyEngine on 2 | SSLProxyVerify none 3 | SSLProxyCheckPeerCN off 4 | SSLProxyCheckPeerName off 5 | SSLProxyCheckPeerExpire off 6 | ProxyPass /alyvix/DENU00MS0600/ https://denu00ms0600.phoenix.loc/ 7 | ProxyPassReverse /alyvix/DENU00MS0600/ https://denu00ms0600.phoenix.loc/ 8 | -------------------------------------------------------------------------------- /monitoring/alyvix3-server/run_alyvix_workflow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | HOST=$1 4 | USER=$2 5 | DOMAIN=$3 6 | 7 | if [ -n "$DOMAIN" ] 8 | then 9 | RETSTR=$(curl -k -s "https://$HOST/v0/flows/run/?username=$DOMAIN\\$USER") 10 | else 11 | RETSTR=$(curl -k -s "https://$HOST/v0/flows/run/?username=$USER") 12 | fi 13 | 14 | if ! 
echo $RETSTR | grep true >/dev/null 15 | then 16 | if [ -n "$DOMAIN" ] 17 | then 18 | echo "OK - Workflow $DOMAIN\\$USER started" 19 | else 20 | echo "OK - Workflow $USER started" 21 | fi 22 | else 23 | if [ -n "$DOMAIN" ] 24 | then 25 | echo "CRITICAL - Could not start workflow for user $DOMAIN\\$USER" 26 | else 27 | echo "CRITICAL - Could not start workflow for user $USER" 28 | fi 29 | fi 30 | -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/alyvix/Alyvix_Performance_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/analytics_dashboards/alyvix/Alyvix_Performance_comparison.png -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/alyvix/README.md: -------------------------------------------------------------------------------- 1 | # Alyvix ITOA Dashboards 2 | 3 | ## Alyvix Performance Overview 4 | 5 | Provides a selection of all Alyvix services and related hosts, and allows comparing the same test cases with performance data collected from various Alyvix installations: 6 | - Select a service 7 | - For each host a column is added 8 | 9 | Setup: [Import Alyvix_Performance_overview_neteye3.json into NetEye ITOA/Grafana.](Alyvix_Performance_overview_neteye3.json) 10 | 11 | Preview: 12 | ![alyvix_performance_overview.png](alyvix_performance_overview.png) 13 | 14 | ## Alyvix Measurement Performance Comparison 15 | 16 | Import file for NetEye 3: [Alyvix_Performance_comparison_neteye3.json](Alyvix_Performance_comparison_neteye3.json) 17 | 18 | ![Alyvix_Performance_comparison.png](Alyvix_Performance_comparison.png) 19 | 20 | 21 | ## Alyvix Troubleshooting view 22 | 23 | Provides a detailed view of performance data from Alyvix collected via the ITOA streaming architecture. [For setup see ITOA](../../../itoa/). 24 | For each selected test case a details view (with dots indicating the timestamp) is shown. For each available Alyvix collector, an additional column is shown. 25 | 26 | Setup: 27 | - Import alyvix_troubleshooting_view.json into NetEye ITOA/Grafana.
28 | - Setup the ITOA streaming architecture on NetEye (itoa -> neteye_nats_collector) 29 | - Send Alyvix perfdata to NetEye via streaming protocol with keyword ["Publish Perfdata"](https://alyvix.com/doc/test_case_building/system_keywords.html#publish-perfdata) 30 | 31 | Preview:![alyvix_troubleshooting_view.png](alyvix_troubleshooting_view.png) 32 | -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/alyvix/alyvix_performance_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/analytics_dashboards/alyvix/alyvix_performance_overview.png -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/alyvix/alyvix_troubleshooting_view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/analytics_dashboards/alyvix/alyvix_troubleshooting_view.png -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/generic_services.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/analytics_dashboards/generic_services.png -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/itoa_cust_diskspace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/analytics_dashboards/itoa_cust_diskspace.png -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/itoa_cust_hostalive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/analytics_dashboards/itoa_cust_hostalive.png -------------------------------------------------------------------------------- /monitoring/analytics_dashboards/itoa_cust_interfaces.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/analytics_dashboards/itoa_cust_interfaces.png -------------------------------------------------------------------------------- /monitoring/business-services/pve4dynamic_bp.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/business-services/pve4dynamic_bp.tar.gz -------------------------------------------------------------------------------- /monitoring/configurations/director/businessprocess_automation/README.md: -------------------------------------------------------------------------------- 1 | ## Create the Director Automation Business Process 2 | 3 | Patch the Icingaweb module BusinessProcess to add a director hook for import automation. 
4 | Apply the provided patch: 5 | ``` 6 | # cd /usr/share/icingaweb2/modules/businessprocess 7 | # patch -p 6 < //neteye4/monitoring/configurations/director/businessprocess_automation/icingaweb_director_businessprocess_automation.patch 8 | ``` 9 | 10 | ## Create Import Definition and Synchronization-Rule for Director 11 | 12 | Import provided Basket: [Director-Basket_Automation_BusinessProcess.json](https://github.com/zampat/icinga2-monitoring-templates/tree/master/baskets/import_automation) 13 | This Basket file is locate in project [icinga2-monitoring-templates](https://github.com/zampat/icinga2-monitoring-templates) 14 | -------------------------------------------------------------------------------- /monitoring/configurations/icinga/api/README.md: -------------------------------------------------------------------------------- 1 | # Configuring and testing api access 2 | 3 | Configure api user object and detailed permissions 4 | ``` 5 | # cat api-escal-user.conf 6 | /** 7 | * The APIUser objects are used for authentication against the API. 8 | */ 9 | object ApiUser "api-user" { 10 | password = "hjdqAasdafUsdfsDXfa" 11 | // client_cn = "" 12 | 13 | permissions = [ "events/statechange","objects/query/host","objects/query/service","events/acknowledgementset","events/acknowledgementcleared","events/commentadded","events/commentremoved" ] 14 | } 15 | ``` 16 | 17 | Define a Permission with a Filter on Object of Type array: 18 | ``` 19 | permission = "objects/query/Host" 20 | filter= {{ "Monitoring_Group" in host.groups }} 21 | ``` 22 | 23 | Get all services: 24 | ``` 25 | # curl -k -s -G -u root:0123456789abcdefc 'https://localhost:5665/v1/objects/services' | jq 26 | ``` 27 | Read a specific service with special characters or spaces 28 | ``` 29 | # curl -k -s -G -u root:0123456789abcdefc 'https://localhost:5665/v1/objects/services' --data-urlencode 'service=myhost.lan!Processor_Interrupts/sec' 30 | ``` 31 | ### Get event streams via post 32 | [Icinga API Event Streams](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/#icinga2-api-clients-event-streams) 33 | 34 | -------------------------------------------------------------------------------- /monitoring/configurations/icinga/api/api-deploy-user.conf: -------------------------------------------------------------------------------- 1 | /** 2 | * The ApiUser objects are used for authentication against the API. 3 | * API user to register icinga agents via powershell tool 4 | */ 5 | object ApiUser "objectsmodify" { 6 | password = "123456789" 7 | permissions = [ "objects/query/host","objects/query/service","objects/modify/host","objects/modify/service" ] 8 | } 9 | -------------------------------------------------------------------------------- /monitoring/configurations/icinga/dependency/README.md: -------------------------------------------------------------------------------- 1 | # Dependency apply rule samples 2 | 3 | ## Parent - Child dependency rule 4 | 5 | The Dependency Apply rule activates a dependency for all hosts, where (field) host_hi 6 | This rule works for a parent field of type: 7 | - single host (String) 8 | - multiple hosts (Array) 9 | Simply define the type of field according the preference so assign single or multiple hosts. 10 | 11 | Instructions: 12 | 1) Define a field: "host_parent" of type Director Host 13 | 2) Assign field to host template be able to define a "parent" for a host object. 
14 | 3) place `dependency_parentChild.conf` in NetEye4 folder: /neteye/shared/icinga2/conf/icinga2/conf.d/ 15 | 4) Reload icinga2 service 16 | 17 | ## Icinga Agent dependency rule 18 | 19 | This rule defines a service dependency of Services named "*win*" to the service verifying the Icinga Agent availability. 20 | -------------------------------------------------------------------------------- /monitoring/configurations/icinga/dependency/dependency_host2service.conf: -------------------------------------------------------------------------------- 1 | # Dependency to STOP Service checks, if HOST is not UP 2 | # This dependency extends Icinga's build in dependency for services on host 3 | # https://icinga.com/docs/icinga2/latest/doc/03-monitoring-basics/#implicit-dependencies-for-services-on-host 4 | # 5 | 6 | apply Dependency "disable-host-service-checks" to Service { 7 | disable_checks = true 8 | #ignore_soft_states = false 9 | assign where true 10 | } 11 | 12 | -------------------------------------------------------------------------------- /monitoring/configurations/icinga/dependency/dependency_icingaAgent.conf: -------------------------------------------------------------------------------- 1 | # Service dependeny assign rule 2 | # Apply this to all services on a host with display name "Win*" 3 | # When a service "Icinga Agent connected" is NOT "OK" 4 | # Then: disable active checks and disable notification 5 | # Esample of command match: 6 | # assign where service.check_command == "disk-windows" 7 | # assign where match("*Win*", service.display_name) 8 | # 9 | 10 | apply Dependency "icinga_agent-reachable" to Service { 11 | parent_service_name = "Icinga Agent connected" 12 | 13 | states = [ OK ] 14 | disable_checks = true 15 | disable_notifications = true 16 | ignore_soft_states = false 17 | assign where match("load-windows", service.check_command) 18 | assign where match("disk-windows", service.check_command) 19 | assign where match("memory-windows", service.check_command) 20 | assign where match("nscp-local-counter", service.check_command) 21 | ignore where service.name == "Icinga Agent connected" 22 | } 23 | 24 | -------------------------------------------------------------------------------- /monitoring/configurations/icinga/dependency/dependency_parentChild.conf: -------------------------------------------------------------------------------- 1 | # Generic Dependeny assign rule 2 | 3 | apply Dependency "generic-dependency-on-vars_parent" to Host { 4 | parent_host_name = host.vars.host_parent 5 | disable_checks = false 6 | disable_notifications = true 7 | ignore_soft_states = true 8 | states = [ Up ] 9 | assign where host.vars.host_parent && typeof(host.vars.host_parent) == String 10 | } 11 | 12 | apply Dependency "generic-dependency-on-vars_parent" for (parent in host.vars.host_parent) to Host { 13 | parent_host_name = parent 14 | disable_checks = false 15 | disable_notifications = true 16 | ignore_soft_states = true 17 | states = [ Up ] 18 | assign where host.vars.host_parent && typeof(host.vars.host_parent) == Array 19 | } 20 | -------------------------------------------------------------------------------- /monitoring/configurations/icinga_fileshipper/README.md: -------------------------------------------------------------------------------- 1 | # Configuration of Icinga2 module `fileshipper` 2 | 3 | - `fileshipper` is distributed in NetEye 4 core 4 | - verify that module is enabled 5 | - define a path where to store import files 6 | - define a global configuration file 7 | 8 | 
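To verify that the `fileshipper` module is enabled, you can use the Icinga Web 2 CLI. This is a minimal sketch, assuming the standard `icingacli` command is available on the NetEye 4 host: the first command checks whether the module is already listed as enabled, the second enables it if necessary.

```
# icingacli module list | grep fileshipper
# icingacli module enable fileshipper
```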
### Define folder holding import files 9 | ``` 10 | # mkdir /neteye/shared/httpd/file-import 11 | ``` 12 | 13 | 14 | ### Define global configuration file 15 | 16 | The path defined in that file will then be available in the Import panel 17 | To define the path, run the following commands: 18 | ``` 19 | # mkdir /neteye/shared/icingaweb2/conf/modules/fileshipper/ 20 | # touch /neteye/shared/icingaweb2/conf/modules/fileshipper/imports.ini 21 | # cat >>/neteye/shared/icingaweb2/conf/modules/fileshipper/imports.ini <dashboard(N_('Wuerth Phoenix Dashboard'), array('priority' => 40)); 17 | +$dashboard->add( 18 | + N_('My Hosts'), 19 | + 'monitoring/list/hosts?(host=*pbz*|host_display_name=*my_filter*)' 20 | +); 21 | + 22 | + 23 | /* 24 | * Overview 25 | */ 26 | 27 | ``` 28 | -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/icons/aix-high.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/configurations/icingaweb2/icons/aix-high.png -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/icons/cisco3-high.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/configurations/icingaweb2/icons/cisco3-high.png -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/icons/device-snmp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/configurations/icingaweb2/icons/device-snmp.png -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/icons/device.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/configurations/icingaweb2/icons/device.png -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/icons/neteye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/configurations/icingaweb2/icons/neteye.png -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/icons/switch-high.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/configurations/icingaweb2/icons/switch-high.png -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/icons/vmware.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/configurations/icingaweb2/icons/vmware.png -------------------------------------------------------------------------------- /monitoring/configurations/icingaweb2/navigation/host-actions.ini: 
-------------------------------------------------------------------------------- 1 | [GLPI] 2 | groups = "group1" 3 | type = "host-action" 4 | target = "_blank" 5 | url = "https://neteye.mydomain/glpi/front/search.php?globalsearch=$host.name$" 6 | owner = "root" 7 | icon = "book" 8 | users = "user1" 9 | filter = "host_name!=*-excl" 10 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/README.md: -------------------------------------------------------------------------------- 1 | ## NeDi Def files improvements 2 | 3 | ### Aruba devices 4 | 5 | ``` 6 | 1.3.6.1.4.1.47196.4.1.1.1.100.def 7 | 1.3.6.1.4.1.47196.4.1.1.1.2.def 8 | 1.3.6.1.4.1.47196.4.1.1.1.301.def 9 | ``` 10 | 11 | ### Alcatel devices 12 | 13 | ``` 14 | 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.1.def 15 | 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.1.def 16 | 1.3.6.1.4.1.6486.800.1.1.2.1.7.1.10.def 17 | 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.def 18 | 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.3.def 19 | 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.5.def 20 | 1.3.6.1.4.1.6486.800.1.1.2.1.7.1.47.def 21 | 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.5.def 22 | 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.6.def 23 | 1.3.6.1.4.1.6486.800.1.1.2.1.9.1.1.def 24 | 1.3.6.1.4.1.6486.801.1.1.2.1.10.1.3.def 25 | 1.3.6.1.4.1.6486.801.1.1.2.1.12.1.1.def 26 | 1.3.6.1.4.1.6486.800.1.1.2.1.11.2.2.def 27 | 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.7.def 28 | 1.3.6.1.4.1.6486.800.1.1.2.2.2.1.1.6.def 29 | ``` 30 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.3717.4.1.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.3717.4.1 created by Defed 1.9 on 16.Jul 2021 (root) 2 | 3 | # Main 4 | SNMPv 1 5 | Type 6 | TypOID 7 | NamOID 8 | DesOID 9 | OS other 10 | Icon s2m 11 | Size 1 12 | Uptime U 13 | Bridge 14 | ArpND 15 | Dispro 16 | Serial 17 | Bimage 18 | VLnams 19 | VLnamx 20 | Group 21 | Mode 22 | CfgChg 23 | CfgWrt 24 | FTPConf 25 | Fanstat 26 | 27 | # Interfaces 28 | StartX 29 | EndX 30 | IFname 1.3.6.1.2.1.31.1.1.1.1 31 | IFaddr old 32 | IFalia 1.3.6.1.2.1.31.1.1.1.18 33 | IFalix 34 | InBcast 1.3.6.1.2.1.31.1.1.1.3 35 | InDisc 1.3.6.1.2.1.2.2.1.13 36 | OutDisc 1.3.6.1.2.1.2.2.1.19 37 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 38 | IFvlix 39 | IFpowr 40 | IFpwix 41 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 42 | IFduix 43 | Halfdp 2 44 | Fulldp 3 45 | 46 | # Modules 47 | Modom 48 | Moslot 49 | Moclas 50 | Movalu 51 | Modesc 52 | Modhw 53 | Modfw 54 | Modsw 55 | Modser 56 | Momodl 57 | Modloc 58 | Mostat 59 | Mostok 60 | 61 | # RRD Graphing 62 | CPUutl 63 | Temp 64 | MemCPU 65 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.47196.4.1.1.1.100.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.47196.4.1.1.1.100 created by Defed 1.9 on 12.Jul 2021 (root) 2 | 3 | # Main 4 | SNMPv 1 5 | Type 6 | TypOID 7 | NamOID 8 | DesOID 9 | OS ArubaOS 10 | Icon s2m 11 | Size 1 12 | Uptime U 13 | Bridge normal 14 | ArpND oldphy 15 | Dispro LLDPX 16 | Serial 17 | Bimage 18 | VLnams 19 | VLnamx 20 | Group 21 | Mode 22 | CfgChg 23 | CfgWrt 24 | FTPConf 25 | Fanstat 26 | 27 | # Interfaces 28 | StartX 29 | EndX 30 | IFname 31 | IFaddr 32 | IFalia 33 | IFalix 34 | InBcast 35 | InDisc 36 | OutDisc 37 | IFvlan 38 | IFvlix 39 | IFpowr 40 | IFpwix 41 | IFpalc 
42 | IFdupl 43 | IFduix 44 | Halfdp 45 | Fulldp 46 | 47 | # Modules 48 | Modom 49 | Moslot 50 | Moclas 51 | Movalu 52 | Modesc 53 | Modhw 54 | Modfw 55 | Modsw 56 | Modser 57 | Momodl 58 | Modloc 59 | Mostat 60 | Mostok 61 | 62 | # RRD Graphing 63 | CPUutl 64 | Temp 65 | MemCPU 66 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.47196.4.1.1.1.2.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.47196.4.1.1.1.2 created by Defed 1.9 on 12.Jul 2021 (root) 2 | 3 | # Main 4 | SNMPv 1 5 | Type 6 | TypOID 7 | NamOID 8 | DesOID 9 | OS ArubaOS 10 | Icon s2m 11 | Size 1 12 | Uptime U 13 | Bridge normal 14 | ArpND oldphy 15 | Dispro LLDPX 16 | Serial 17 | Bimage 18 | VLnams 19 | VLnamx 20 | Group 21 | Mode 22 | CfgChg 23 | CfgWrt 24 | FTPConf 25 | Fanstat 26 | 27 | # Interfaces 28 | StartX 29 | EndX 30 | IFname 31 | IFaddr 32 | IFalia 33 | IFalix 34 | InBcast 35 | InDisc 36 | OutDisc 37 | IFvlan 38 | IFvlix 39 | IFpowr 40 | IFpwix 41 | IFpalc 42 | IFdupl 43 | IFduix 44 | Halfdp 45 | Fulldp 46 | 47 | # Modules 48 | Modom 49 | Moslot 50 | Moclas 51 | Movalu 52 | Modesc 53 | Modhw 54 | Modfw 55 | Modsw 56 | Modser 57 | Momodl 58 | Modloc 59 | Mostat 60 | Mostok 61 | 62 | # RRD Graphing 63 | CPUutl 64 | Temp 65 | MemCPU 66 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.47196.4.1.1.1.301.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.47196.4.1.1.1.301 created by Defed 1.9 on 12.Jul 2021 (root) 2 | 3 | # Main 4 | SNMPv 1 5 | Type 6 | TypOID 7 | NamOID 8 | DesOID 9 | OS other 10 | Icon s2m 11 | Size 1 12 | Uptime U 13 | Bridge normal 14 | ArpND oldphy 15 | Dispro LLDPX 16 | Serial 17 | Bimage 18 | VLnams 19 | VLnamx 20 | Group 21 | Mode 22 | CfgChg 23 | CfgWrt 24 | FTPConf 25 | Fanstat 26 | 27 | # Interfaces 28 | StartX 29 | EndX 30 | IFname 31 | IFaddr 32 | IFalia 33 | IFalix 34 | InBcast 35 | InDisc 36 | OutDisc 37 | IFvlan 38 | IFvlix 39 | IFpowr 40 | IFpwix 41 | IFpalc 42 | IFdupl 43 | IFduix 44 | Halfdp 45 | Fulldp 46 | 47 | # Modules 48 | Modom 49 | Moslot 50 | Moclas 51 | Movalu 52 | Modesc 53 | Modhw 54 | Modfw 55 | Modsw 56 | Modser 57 | Momodl 58 | Modloc 59 | Mostat 60 | Mostok 61 | 62 | # RRD Graphing 63 | CPUutl 64 | Temp 65 | MemCPU 66 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.10.1.1.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.1 created by Defgen 1.8 on 30.Dec 2012 (admin) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6400-24 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3m 10 | Size 1 11 | Bridge qbriX 12 | ArpND old 13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | StartX 24 | EndX 25 | IFname 1.3.6.1.2.1.31.1.1.1.1 26 | IFaddr old 27 | IFalia 1.3.6.1.2.1.31.1.1.1.18 28 | IFalix 29 | InBcast 1.3.6.1.2.1.31.1.1.1.3 30 | InDisc 1.3.6.1.2.1.2.2.1.13 31 | OutDisc 1.3.6.1.2.1.2.2.1.19 32 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 33 | IFvlix 34 | IFpowr 35 | IFpwix 36 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 
37 | IFduix 38 | Halfdp 2 39 | Fulldp 3 40 | 41 | # Modules 42 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 43 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 44 | Movalu 10 45 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 46 | Modhw 47 | Modsw 48 | Modfw 49 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 50 | Momodl 51 | 52 | # RRD Graphing 53 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 54 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 55 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 56 | Custom 57 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.10.1.3.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.3 created by Defgen 1.8 on 28.Nov 2011 (admin) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6400-U24 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3m 10 | Size 1 11 | Bridge qbriX 12 | ArpND old 13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | IFname 1.3.6.1.2.1.31.1.1.1.1 24 | IFaddr old 25 | IFalia 1.3.6.1.2.1.31.1.1.1.18 26 | IFalix 27 | InBcast 1.3.6.1.2.1.31.1.1.1.3 28 | InDisc 1.3.6.1.2.1.2.2.1.13 29 | OutDisc 1.3.6.1.2.1.2.2.1.19 30 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 31 | IFvlix 32 | IFpowr 33 | IFpwix 34 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 35 | IFduix 36 | Halfdp 2 37 | Fulldp 3 38 | 39 | # Modules 40 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 41 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 42 | Movalu 10 43 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 44 | Modhw 45 | Modsw 46 | Modfw 47 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 48 | Momodl 49 | 50 | # RRD Graphing 51 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 52 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 53 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 54 | Custom 55 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.10.1.5.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.10.1.5 created by Defgen 1.8 on 9.Nov 2012 (admin) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6400-48 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3l 10 | Size 1 11 | Bridge qbriX 12 | ArpND old 13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | StartX 24 | EndX 25 | IFname 1.3.6.1.2.1.31.1.1.1.1 26 | IFaddr old 27 | IFalia 1.3.6.1.2.1.31.1.1.1.18 28 | IFalix 29 | InBcast 1.3.6.1.2.1.31.1.1.1.3 30 | InDisc 1.3.6.1.2.1.2.2.1.13 31 | OutDisc 1.3.6.1.2.1.2.2.1.19 32 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 33 | IFvlix 34 | IFpowr 35 | IFpwix 36 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 37 | IFduix 38 | Halfdp 2 39 | Fulldp 3 40 | 41 | # Modules 42 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 43 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 44 | Movalu 3|10 45 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 46 | Modhw 47 | Modsw 48 | Modfw 49 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 50 | Momodl 51 | 52 | # RRD Graphing 53 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 54 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 55 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 56 | Custom -------------------------------------------------------------------------------- 
/monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.11.2.2.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.11.2.2 created by Defgen 1.8 on 10.May 2013 (n0n0) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6250-P24 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3m 10 | Size 1 11 | Bridge qbri 12 | ArpND old 13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | StartX 24 | EndX 25 | IFname 1.3.6.1.2.1.31.1.1.1.1 26 | IFaddr old 27 | IFalia 1.3.6.1.2.1.31.1.1.1.18 28 | IFalix 29 | InBcast 1.3.6.1.2.1.31.1.1.1.9 30 | InDisc 1.3.6.1.2.1.2.2.1.13 31 | OutDisc 1.3.6.1.2.1.2.2.1.19 32 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 33 | IFvlix 34 | IFpowr 1.3.6.1.4.1.6486.800.1.2.1.27.1.1.1.1.2 35 | IFpwix 36 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 37 | IFduix 1.3.6.1.2.1.10.7.2.1.1 38 | Halfdp 2 39 | Fulldp 3 40 | 41 | # Modules 42 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 43 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 44 | Movalu 3|10 45 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 46 | Modhw 47 | Modsw 48 | Modfw 49 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 50 | Momodl 51 | 52 | # RRD Graphing 53 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 54 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 55 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 56 | Custom 57 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.12.1.1.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.1 created by Defgen 1.8 on 17.Mar 2013 (admin) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6450-10 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3s 10 | Size 0 11 | Bridge qbri 12 | ArpND old 13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | StartX 24 | EndX 25 | IFname 1.3.6.1.2.1.31.1.1.1.1 26 | IFaddr old 27 | IFalia 1.3.6.1.2.1.31.1.1.1.18 28 | IFalix 29 | InBcast 1.3.6.1.2.1.31.1.1.1.3 30 | InDisc 1.3.6.1.2.1.2.2.1.13 31 | OutDisc 1.3.6.1.2.1.2.2.1.19 32 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 33 | IFvlix 34 | IFpowr 35 | IFpwix 36 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 37 | IFduix 38 | Halfdp 2 39 | Fulldp 3 40 | 41 | # Modules 42 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 43 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 44 | Movalu 3|10 45 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 46 | Modhw 47 | Modsw 48 | Modfw 49 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 50 | Momodl 51 | 52 | # RRD Graphing 53 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 54 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 55 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 56 | Custom 57 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.12.1.5.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.5 created by Defgen 1.8 on 8.Nov 2012 (admin) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6450-24 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3m 10 | Size 1 11 | Bridge qbri 12 | ArpND old 
13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | StartX 24 | EndX 25 | IFname 1.3.6.1.2.1.31.1.1.1.1 26 | IFaddr old 27 | IFalia 1.3.6.1.2.1.31.1.1.1.18 28 | IFalix 29 | InBcast 1.3.6.1.2.1.31.1.1.1.3 30 | InDisc 1.3.6.1.2.1.2.2.1.13 31 | OutDisc 1.3.6.1.2.1.2.2.1.19 32 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 33 | IFvlix 34 | IFpowr 35 | IFpwix 36 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 37 | IFduix 38 | Halfdp 2 39 | Fulldp 3 40 | 41 | # Modules 42 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 43 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 44 | Movalu 3|10 45 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 46 | Modhw 47 | Modsw 48 | Modfw 49 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 50 | Momodl 51 | 52 | # RRD Graphing 53 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 54 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 55 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 56 | Custom 57 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.12.1.6.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.6 created by Defgen 2.0 on 11.Nov 2016 (admin) 2 | 3 | # Main 4 | SNMPv 2HC 5 | Type OS6450-P24 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3m 10 | Size 1 11 | Uptime U 12 | Bridge qbri 13 | ArpND old 14 | Dispro LLDPXN 15 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 16 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 17 | CfgChg 18 | CfgWrt 19 | FTPConf 20 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 21 | VLnamx 22 | Group 23 | Mode 24 | 25 | # Interfaces 26 | StartX 27 | EndX 28 | IFname 1.3.6.1.2.1.31.1.1.1.1 29 | IFaddr old 30 | IFalia 1.3.6.1.2.1.31.1.1.1.18 31 | IFalix 32 | InBcast 1.3.6.1.2.1.31.1.1.1.9 33 | InDisc 1.3.6.1.2.1.2.2.1.13 34 | OutDisc 1.3.6.1.2.1.2.2.1.19 35 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 36 | IFvlix 37 | IFpowr 1.3.6.1.4.1.6486.800.1.2.1.27.1.1.1.1.2 38 | IFpwix 39 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 40 | IFduix 1.3.6.1.2.1.10.7.2.1.1 41 | Halfdp 2 42 | Fulldp 3 43 | 44 | # Modules 45 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 46 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 47 | Movalu 3|10 48 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 49 | Modhw 50 | Modsw 51 | Modfw 52 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 53 | Momodl 54 | Modloc 55 | Mostat 56 | Mostok 57 | 58 | # RRD Graphing 59 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 50 60 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 61 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 25 62 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.12.1.7.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.7 created by Defgen 2.0 on 11.Nov 2016 (admin) 2 | 3 | # Main 4 | SNMPv 2HC 5 | Type OS6450-U24 6 | TypOID 7 | DesOID 8 | OS OSAOS 9 | Icon s3m 10 | Size 1 11 | Uptime U 12 | Bridge qbri 13 | ArpND old 14 | Dispro LLDPX 15 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 16 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 17 | CfgChg 18 | CfgWrt 19 | FTPConf 20 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 21 | VLnamx 22 | Group 23 | Mode 24 | 25 | # Interfaces 26 | StartX 27 | EndX 28 | IFname 1.3.6.1.2.1.31.1.1.1.1 29 | IFaddr old 30 
| IFalia 1.3.6.1.2.1.31.1.1.1.18 31 | IFalix 32 | InBcast 1.3.6.1.2.1.31.1.1.1.3 33 | InDisc 1.3.6.1.2.1.2.2.1.13 34 | OutDisc 1.3.6.1.2.1.2.2.1.19 35 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 36 | IFvlix 37 | IFpowr 38 | IFpwix 39 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 40 | IFduix 41 | Halfdp 2 42 | Fulldp 3 43 | 44 | # Modules 45 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 46 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 47 | Movalu 3|10 48 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 49 | Modhw 50 | Modsw 51 | Modfw 52 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 53 | Momodl 54 | Modloc 55 | Mostat 56 | Mostok 57 | 58 | # RRD Graphing 59 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 50 60 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 61 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 25 62 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.7.1.10.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.7.1.10 created by Defgen 2.0 on 11.Nov 2016 (admin) 2 | 3 | # Main 4 | SNMPv 2HC 5 | Type OS6850-U24X 6 | TypOID 7 | DesOID 8 | OS OSAOS 9 | Icon s3m 10 | Size 1 11 | Uptime U 12 | Bridge qbriX 13 | ArpND old 14 | Dispro LLDPX 15 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 16 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 17 | CfgChg 18 | CfgWrt 19 | FTPConf 20 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 21 | VLnamx 22 | Group 23 | Mode 24 | 25 | # Interfaces 26 | StartX 27 | EndX 28 | IFname 1.3.6.1.2.1.31.1.1.1.1 29 | IFaddr old 30 | IFalia 1.3.6.1.2.1.31.1.1.1.18 31 | IFalix 32 | InBcast 1.3.6.1.2.1.31.1.1.1.3 33 | InDisc 1.3.6.1.2.1.2.2.1.13 34 | OutDisc 1.3.6.1.2.1.2.2.1.19 35 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 36 | IFvlix 37 | IFpowr 38 | IFpwix 39 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 40 | IFduix 41 | Halfdp 2 42 | Fulldp 3 43 | 44 | # Modules 45 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 46 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 47 | Movalu 3|10 48 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 49 | Modhw 50 | Modsw 51 | Modfw 52 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 53 | Momodl 54 | Modloc 55 | Mostat 56 | Mostok 57 | 58 | # RRD Graphing 59 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 50 60 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 61 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 25 62 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.7.1.47.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.7.1.47 created by Defgen 2.0 on 11.Nov 2016 (admin) 2 | 3 | # Main 4 | SNMPv 2HC 5 | Type OS6850E-24X 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3m 10 | Size 1 11 | Uptime U 12 | Bridge qbriX 13 | ArpND old 14 | Dispro LLDPX 15 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 16 | Bimage 17 | CfgChg 18 | CfgWrt 19 | FTPConf 20 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 21 | VLnamx 22 | Group 23 | Mode 24 | 25 | # Interfaces 26 | StartX 27 | EndX 28 | IFname 1.3.6.1.2.1.31.1.1.1.1 29 | IFaddr old 30 | IFalia 1.3.6.1.2.1.31.1.1.1.18 31 | IFalix 32 | InBcast 1.3.6.1.2.1.31.1.1.1.3 33 | InDisc 1.3.6.1.2.1.2.2.1.13 34 | OutDisc 1.3.6.1.2.1.2.2.1.19 35 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 36 | IFvlix 37 | IFpowr 38 | IFpwix 39 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 40 | IFduix 41 | Halfdp 2 42 | Fulldp 3 43 | 44 | # Modules 45 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 46 | 
Moclas 1.3.6.1.2.1.47.1.1.1.1.5 47 | Movalu 10 48 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 49 | Modhw 50 | Modsw 51 | Modfw 52 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 53 | Momodl 54 | Modloc 55 | Mostat 56 | Mostok 57 | 58 | # RRD Graphing 59 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 50 60 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 61 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 25 62 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.1.9.1.1.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.9.1.1 created by Defgen 1.8 on 25.Oct 2012 (admin) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6855-14 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3s 10 | Size 0 11 | Bridge qbri 12 | ArpND old 13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | StartX 24 | EndX 25 | IFname 1.3.6.1.2.1.31.1.1.1.1 26 | IFaddr old 27 | IFalia 1.3.6.1.2.1.31.1.1.1.18 28 | IFalix 29 | InBcast 1.3.6.1.2.1.31.1.1.1.3 30 | InDisc 1.3.6.1.2.1.2.2.1.13 31 | OutDisc 1.3.6.1.2.1.2.2.1.19 32 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 33 | IFvlix 34 | IFpowr 35 | IFpwix 36 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 37 | IFduix 38 | Halfdp 2 39 | Fulldp 3 40 | 41 | # Modules 42 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 43 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 44 | Movalu 10 45 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 46 | Modhw 47 | Modsw 48 | Modfw 49 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 50 | Momodl 51 | 52 | # RRD Graphing 53 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 54 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 55 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 56 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.1.2.2.2.1.1.6.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.2.2.1.1.6 created by Defgen 2.0 on 18.Oct 2016 (admin) 2 | 3 | # Main 4 | SNMPv 2HC 5 | Type OAW-4504 6 | TypOID 1.3.6.1.2.1.47.1.1.1.1.13.1 7 | DesOID 8 | OS other 9 | Icon wcm 10 | Size 1 11 | TempAlert 12 | Bridge Aruba 13 | ArpND old 14 | Dispro LLDPXN 15 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 16 | Bimage 17 | CfgChg 18 | CfgWrt 19 | FTPConf 20 | VLnams 1.3.6.1.2.1.17.7.1.4.3.1.1 21 | VLnamx 22 | Group 23 | Mode 24 | 25 | # Interfaces 26 | StartX 27 | EndX 28 | IFname 29 | IFaddr 30 | IFalia 31 | IFalix 32 | InBcast 33 | InDisc 34 | OutDisc 35 | IFvlan 36 | IFvlix 37 | IFpowr 38 | IFpwix 39 | IFdupl 40 | IFduix 41 | Halfdp 42 | Fulldp 43 | 44 | # Modules 45 | Modesc 46 | Moclas 47 | Movalu 48 | Moslot 49 | Modhw 50 | Modsw 51 | Modfw 52 | Modser 53 | Momodl 54 | Modloc 55 | Mostat 56 | Mostok 57 | 58 | # RRD Graphing 59 | CPUutl 60 | Temp 61 | MemCPU 62 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.800.1.2.1.16.1.1.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.800.1.1.2.1.12.1.5 created by Defgen 1.8 on 8.Nov 2012 (admin) 2 | 3 | # General 4 | SNMPv 2HC 5 | Type OS6450-24 6 | TypOID 7 | DesOID 8 | OS Omnistack 9 | Icon s3m 10 | Size 1 11 | Bridge 
qbri 12 | ArpND old 13 | Dispro LLDPX 14 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 15 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 16 | 17 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 18 | VLnamx 19 | Group 20 | Mode 21 | 22 | # Interfaces 23 | StartX 24 | EndX 25 | IFname 1.3.6.1.2.1.31.1.1.1.1 26 | IFaddr old 27 | IFalia 1.3.6.1.2.1.31.1.1.1.18 28 | IFalix 29 | InBcast 1.3.6.1.2.1.31.1.1.1.3 30 | InDisc 1.3.6.1.2.1.2.2.1.13 31 | OutDisc 1.3.6.1.2.1.2.2.1.19 32 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 33 | IFvlix 34 | IFpowr 35 | IFpwix 36 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 37 | IFduix 38 | Halfdp 2 39 | Fulldp 3 40 | 41 | # Modules 42 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 43 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 44 | Movalu 3|10 45 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 46 | Modhw 47 | Modsw 48 | Modfw 49 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 50 | Momodl 51 | 52 | # RRD Graphing 53 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 54 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 55 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 % 56 | Custom 57 | -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.801.1.1.2.1.10.1.3.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.801.1.1.2.1.10.1.3 created by Defed 1.9 on 16.Jul 2021 (root) 2 | 3 | # Main 4 | SNMPv 2HC 5 | Type OS6850-U24X 6 | TypOID 7 | NamOID 8 | DesOID 9 | OS OSAOS 10 | Icon s2m 11 | Size 1 12 | Uptime U 13 | Bridge qbri 14 | ArpND old 15 | Dispro LLDPX 16 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 17 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 18 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 19 | VLnamx 20 | Group 21 | Mode 22 | CfgChg 23 | CfgWrt 24 | FTPConf 25 | Fanstat 26 | 27 | # Interfaces 28 | StartX 29 | EndX 30 | IFname 1.3.6.1.2.1.31.1.1.1.1 31 | IFaddr old 32 | IFalia 1.3.6.1.2.1.31.1.1.1.18 33 | IFalix 34 | InBcast 1.3.6.1.2.1.31.1.1.1.3 35 | InDisc 1.3.6.1.2.1.2.2.1.13 36 | OutDisc 1.3.6.1.2.1.2.2.1.19 37 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 38 | IFvlix 39 | IFpowr 40 | IFpwix 41 | IFpalc 42 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 43 | IFduix 44 | Halfdp 2 45 | Fulldp 3 46 | 47 | # Modules 48 | Modom 49 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 50 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 51 | Movalu 3|10 52 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 53 | Modhw 54 | Modfw 55 | Modsw 56 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 57 | Momodl 58 | Modloc 59 | Mostat 60 | Mostok 61 | 62 | # RRD Graphing 63 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 64 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 65 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 25 66 | Custom -------------------------------------------------------------------------------- /monitoring/discovery/network-discovery-nedi/sysobj/1.3.6.1.4.1.6486.801.1.1.2.1.12.1.1.def: -------------------------------------------------------------------------------- 1 | # Definition for 1.3.6.1.4.1.6486.801.1.1.2.1.12.1.1 created by Defed 1.9 on 16.Jul 2021 (root) 2 | 3 | # Main 4 | SNMPv 2HC 5 | Type OS6850-U24X 6 | TypOID 7 | NamOID 8 | DesOID 9 | OS OSAOS 10 | Icon s2m 11 | Size 1 12 | Uptime U 13 | Bridge qbriX 14 | ArpND old 15 | Dispro LLDPX 16 | Serial 1.3.6.1.2.1.47.1.1.1.1.11.1 17 | Bimage 1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1.31.1 18 | VLnams 1.3.6.1.4.1.6486.800.1.2.1.3.1.1.1.1.1.2 19 | VLnamx 20 | Group 21 | Mode 22 | CfgChg 23 | CfgWrt 24 | FTPConf 25 | Fanstat 26 | 27 | # Interfaces 28 | StartX 29 | EndX 30 | IFname 1.3.6.1.2.1.31.1.1.1.1 
31 | IFaddr old 32 | IFalia 1.3.6.1.2.1.31.1.1.1.18 33 | IFalix 34 | InBcast 1.3.6.1.2.1.31.1.1.1.3 35 | InDisc 1.3.6.1.2.1.2.2.1.13 36 | OutDisc 1.3.6.1.2.1.2.2.1.19 37 | IFvlan 1.3.6.1.2.1.17.7.1.4.5.1.1 38 | IFvlix 39 | IFpowr 40 | IFpwix 41 | IFpalc 42 | IFdupl 1.3.6.1.2.1.10.7.2.1.19 43 | IFduix 44 | Halfdp 2 45 | Fulldp 3 46 | 47 | # Modules 48 | Modom 49 | Moslot 1.3.6.1.2.1.47.1.1.1.1.7 50 | Moclas 1.3.6.1.2.1.47.1.1.1.1.5 51 | Movalu 3|10 52 | Modesc 1.3.6.1.2.1.47.1.1.1.1.12 53 | Modhw 54 | Modfw 55 | Modsw 56 | Modser 1.3.6.1.2.1.47.1.1.1.1.11 57 | Momodl 58 | Modloc 59 | Mostat 60 | Mostok 61 | 62 | # RRD Graphing 63 | CPUutl 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.15.0 64 | Temp 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.17.0 65 | MemCPU 1.3.6.1.4.1.6486.800.1.2.1.16.1.1.1.9.0 66 | Custom -------------------------------------------------------------------------------- /monitoring/eventhandler/windows_restart/01_command.json: -------------------------------------------------------------------------------- 1 | { 2 | "arguments": { 3 | "-ServiceAttempt": "$service.check_attempt$", 4 | "-ServiceState": "$service.state$", 5 | "-ServiceStateType": "$service.state_type$" 6 | }, 7 | "command": "C:\\Windows\\SysWOW64\\WindowsPowerShell\\v1.0\\powershell.exe c:\\Scripts\\event_restart_windows.ps1", 8 | "disabled": false, 9 | "imports": [], 10 | "is_string": null, 11 | "methods_execute": "PluginEvent", 12 | "object_name": "eventcmd_restart_windows_server", 13 | "object_type": "object", 14 | "timeout": "60", 15 | "vars": {}, 16 | "zone": null 17 | } 18 | -------------------------------------------------------------------------------- /monitoring/eventhandler/windows_restart/02_service.json: -------------------------------------------------------------------------------- 1 | { 2 | "event_command": "eventcmd_restart_windows_server", 3 | "object_name": "lab-st-agent-event-restart-server", 4 | "object_type": "template" 5 | } 6 | -------------------------------------------------------------------------------- /monitoring/eventhandler/windows_restart/README.md: -------------------------------------------------------------------------------- 1 | # Configure the Director command and service template 2 | 3 | Import command from json 4 | Import service template from json 5 | 6 | 7 | Assign the new service template to an existing host service indicating a health status 8 | 9 | # Configure the Windows Server Agent 10 | 11 | - Configure Icinga2 Agent with "Local sytem" permissions 12 | - Install the script in C:\scripts\event_restart_windows.ps1 13 | 14 | # Test 15 | - Send service into hard and critical state 16 | -------------------------------------------------------------------------------- /monitoring/eventhandler/windows_restart/event_restart_windows.ps1: -------------------------------------------------------------------------------- 1 | param( 2 | [string]$ServiceState = '', 3 | [string]$ServiceStateType = '', 4 | [int]$ServiceAttempt = '' 5 | ) 6 | 7 | if (!$ServiceState -Or !$ServiceStateType -Or !$ServiceAttempt) { 8 | $scriptName = GCI $MyInvocation.PSCommandPath | Select -Expand Name; 9 | $date=Get-Date 10 | 11 | write-output ($date.ToString() + ": Computer wurde automatisch rebootet") | out-file -FilePath C:\WorkDir\Log\Protokoll_RestartComputer.log -encoding utf8 -Force -Width 500 12 | exit 3; 13 | } 14 | 15 | # Only restart on the third attempt of a critical event 16 | if ($ServiceState -eq "CRITICAL" -And $ServiceStateType -eq "HARD") { 17 | Restart-Computer -Force; 18 | 19 | } else { 20 | 
Write-Host "Not Critical AND HARD" 21 | } 22 | 23 | exit 0; 24 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/README.md: -------------------------------------------------------------------------------- 1 | # Monitoring Plugins 2 | 3 | ## Introduction 4 | 5 | See the [introductory documentation regarding the configuration of those Plugins within NetEye](../../doc/monitoring_plugins.md) 6 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/aix/9000c-ss_aix_health.sh: -------------------------------------------------------------------------------- 1 | ### 2 | # Create Service Set: "AIX Health" 3 | ### 4 | # Service Template for Service Set 5 | RES=`icingacli director serviceset exists "AIX Health"` 6 | if [[ $RES =~ "does not exist" ]] 7 | then 8 | icingacli director serviceset create --json ' 9 | { 10 | "assign_filter": null, 11 | "description": null, 12 | "object_name": "AIX Health", 13 | "object_type": "template", 14 | "vars": { 15 | } 16 | } 17 | ' 18 | 19 | 20 | #### 21 | # Service Objects 22 | #### 23 | icingacli director service create --json ' 24 | { 25 | "imports": [ 26 | "nrpe_disk_noSSL" 27 | ], 28 | "object_name": "AIX Disk", 29 | "object_type": "object", 30 | "service_set": "AIX Health" 31 | }' 32 | 33 | icingacli director service create --json ' 34 | { 35 | "imports": [ 36 | "nrpe_disk_noSSL" 37 | ], 38 | "object_name": "AIX Load", 39 | "object_type": "object", 40 | "service_set": "AIX Health" 41 | }' 42 | 43 | 44 | echo "Done" 45 | fi 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/cisco/check_cisco_nexus.md: -------------------------------------------------------------------------------- 1 | Monitoring Cisco Nexus Series 2 | 3 | ## Cisco Nexus CPU 4 | 5 | Manufacturer: Cisco 6 | Branch: Nexus Series 7 | Monitoring: CPU load 8 | Plugin: check_cisco_nexus_cpu.pl 9 | Requirements: Perl module Switch.pm 10 | How to resolve: 11 | ``` 12 | yum --enablerepo=neteye install perl-Switch.noarch 13 | ``` 14 | 15 | ## Cisco Nexus Memory 16 | 17 | Manufacturer: Cisco 18 | Branch: Nexus Series 19 | Monitoring: Memory usage 20 | Plugin: `check_cisco_nexus_mem.pl` 21 | 22 | ## Cisco Nexus Hardware 23 | 24 | Manufacturer: Cisco 25 | Branch: Nexus Series 26 | Monitoring: Hardware health 27 | Plugin: `check_cisco_nexus_hardware.pl` 28 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/database/db2/README.md: -------------------------------------------------------------------------------- 1 | # DB2 monitoring 2 | 3 | # Setup and configuration 4 | 5 | DB2 monitoring drivers are provided for users with *valid* DB2 subscription from NetEye repository. 6 | Install those drivers from `neteye--contrib/` repository. 
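A minimal sketch of the install step, assuming a valid subscription; the repository id and driver package below are placeholders to be replaced with the names published in your contrib repository:

```
# Placeholder repository id and package name: adjust to your NetEye contrib channel
yum --enablerepo=<neteye-contrib-repo> install <db2-driver-package>
```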
7 | 8 | Monitoring `baskets` for NetEye4 are provided [here](https://github.com/zampat/icinga2-monitoring-templates/tree/master/baskets/monitoring_templates/db2) 9 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/database/mssql/check_mssql_authWrapper/auth.conf: -------------------------------------------------------------------------------- 1 | 2 | [SQL_1] 3 | username=user1 4 | password=S3cr3t 5 | 6 | [SQL_2] 7 | username=user2 8 | password=st!0ng_p@ssw0!d 9 | 10 | [SQL_3] 11 | username=user3 12 | password=h!dd*n 13 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/database/mssql/check_mssql_authWrapper/check_mssql_health.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | NAGIOS_PATH=`dirname $0` 4 | PWDFILE="/etc/nagios/neteye/plugins/mssql/auth.conf" 5 | PROG=$(basename $0) 6 | 7 | SECTION=$1 8 | shift 9 | MSSQL_CMD_ARGS=$* 10 | SECTION_FOUND_LINE="0" 11 | 12 | if [ -f $PWDFILE ] 13 | then 14 | for i in `cat $PWDFILE` 15 | do 16 | 17 | if [[ $i =~ .*$SECTION.* ]] 18 | then 19 | SECTION_FOUND_LINE="1" 20 | fi 21 | 22 | if [ $SECTION_FOUND_LINE -eq "1" ] && [[ $i =~ .*username* ]] 23 | then 24 | USER=`echo $i | cut -d = -f 2` 25 | fi 26 | if [ $SECTION_FOUND_LINE -eq "1" ] && [[ $i =~ .*password* ]] 27 | then 28 | PASSWD=`echo $i | cut -d = -f 2` 29 | SECTION_FOUND_LINE="0" 30 | fi 31 | done 32 | 33 | else 34 | echo "UNKNOWN: Password file $PWDFILE not found!" 35 | exit 3; 36 | fi 37 | 38 | if [ -z $USER ] || [ -z $PASSWD ] 39 | then 40 | echo "Usage: $PROG 'section_of_password' 'all check_mssql_health parameters'" 41 | echo " " 42 | echo "Define Section, username and password within authentication file: " 43 | echo " $PWDFILE " 44 | exit 3 45 | fi 46 | 47 | # Trim special characters added by IWF file parser 48 | USER=$(echo "$USER"|tr -d "'\`\"") 49 | 50 | MSSQL_HEALTH="$NAGIOS_PATH/check_mssql_health --username $USER --password $PASSWD" 51 | 52 | $MSSQL_HEALTH $MSSQL_CMD_ARGS 53 | exit $? 54 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/dell/README.md: -------------------------------------------------------------------------------- 1 | # Check_iDRAC 2 | 3 | https://github.com/dangmocrang/check_idrac 4 | 5 | ## Features 6 | Check idrac 7 hardwares status via SNMP. Currently supports these hardware: 7 | - Virtual Disk 8 | - Physical Disk 9 | - Memory 10 | - CPU 11 | - Power Supply 12 | - Power Unit 13 | - Fan 14 | - Battery 15 | - Temperature Sensor 16 | - Global 17 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/dell/check_idrac/README.md: -------------------------------------------------------------------------------- 1 | # Check_iDRAC 2 | 3 | https://github.com/dangmocrang/check_idrac 4 | Version `2.2rc4` 5 | 6 | ## Features 7 | Check idrac 7 hardwares status via SNMP. Currently supports these hardware: 8 | - Virtual Disk 9 | - Physical Disk 10 | - Memory 11 | - CPU 12 | - Power Supply 13 | - Power Unit 14 | - Fan 15 | - Battery 16 | - Temperature Sensor 17 | - Global 18 | 19 | ## Basket 20 | 21 | In the 22 | [NetEye4 Repo](https://github.com/zampat/icinga2-monitoring-templates/tree/master/baskets/monitoring_templates/neteye) 23 | you can download the basket `Director-Basket_Idrac_check.json`. 24 | 25 | This basket contains a service template and the fields definition. 
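A minimal sketch of importing it, following the same basket-restore pattern used by the notification READMEs in this repository (the local file path is an assumption):

```
# Restore the downloaded basket into Director
icingacli director basket restore < Director-Basket_Idrac_check.json
```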
-------------------------------------------------------------------------------- /monitoring/monitoring-plugins/elk/README.md: -------------------------------------------------------------------------------- 1 | # ELK Monitoring 2 | 3 | Monitoring for the Elasticsearch Logstash Kibana (ELK) suite. -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/elk/elasticsearch/check_elasticsearch.md: -------------------------------------------------------------------------------- 1 | 2 | # Check Elasticsearch 3 | 4 | Use check_elasticsearch enhanced for NetEye 4 (SSL authentication). 5 | 6 | - Created fork from https://github.com/orthecreedence/check_elasticsearch 7 | - Pull request has been created on original project. 8 | - Enhancements are temporarily published on https://github.com/zampat/check_elasticsearch 9 | 10 | If pull request is accepted the check could be cloned automatically into neteyeshare from original project 11 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/emc/EMC_Navicli.md: -------------------------------------------------------------------------------- 1 | ## EMC VNX 2 | 3 | Use Check: `check_vnx.pl` 4 | 5 | - Install the `navicli` from here: 6 | 7 | ``` 8 | https://github.com/emc-openstack/naviseccli 9 | ``` 10 | 11 | - Create the security .key and .xml via `navicli` commands 12 | - Run these commands as icinga user 13 | 14 | Hint: Run the navicli bin to test the connection and login 15 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/f5/check_F5_Platform.pl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/f5/check_F5_Platform.pl -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/hp/check_ilo2_health.md: -------------------------------------------------------------------------------- 1 | # HP Hardware Monitoring 2 | 3 | ## Check hardware health of HP `Proliant` Servers by querying the `iLO2/3/4` Management Controller 4 | 5 | ***To Download the script:*** 6 | 7 | https://exchange.icinga.com/algbaer/check_ilo2_health/releases 8 | 9 | To install required Perl Module in NetEye: 10 | ``` 11 | # yum install perl-XML-Simple.noarch 12 | ``` 13 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/icinga2-selfmonitoring/README.md: -------------------------------------------------------------------------------- 1 | # Icinga2 self monitoring plugins 2 | 3 | Current Plugins: 4 | - check_grafana_metrics.sh (Check if metrics are stored in Grafana/InfluxDB) 5 | - check_icinga2_config.sh (Checks the satellite Icinga2 configuration) 6 | - check_icinga2_status.sh (Checks the status of the SystemD satellite Icinga2 Daemon) 7 | - check_icinga2-master_config.sh (Checks the master Icinga2 configuration) 8 | - check_icinga2-master_status.sh (Checks the status of the SystemD master Icinga2 Daemon) 9 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/icinga2-selfmonitoring/check_icinga2-master_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | ####################################################################### 4 | # 
# 5 | # ICINGA2 Plugin for checking the valid ICINGA2 configuration # 6 | # created by Matthias J. Schmaelzle # 7 | # Version 1.00 / 2019 # 8 | ####################################################################### 9 | 10 | 11 | # Define global varibales 12 | # ====================================== 13 | # 14 | PROGNAME=`basename $0` 15 | VERSION="Version 1.00" 16 | AUTHOR="Matthias Schmaezle (http://www.mjs.de)" 17 | 18 | # Define the exit codes 19 | STATE_OK=0 20 | STATE_WARNING=1 21 | STATE_CRITICAL=2 22 | STATE_UNKNOWN=3 23 | 24 | 25 | 26 | # Running the check plugin 27 | # ====================================== 28 | # 29 | 30 | ICINGA_STATUS=`icinga2-master daemon --validate >/dev/null 2>&1; echo $?` 31 | 32 | if [ "$ICINGA_STATUS" = 0 ]; then 33 | echo "OK: The ICINGA2 Syntax is valid and running" 34 | exit $STATE_OK 35 | else 36 | echo "CRITICAL: The ICINGA2 Syntax is invalid and can not run on a productiv environment" 37 | exit $STATE_CRITICAL 38 | fi 39 | 40 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/icinga2-selfmonitoring/check_icinga2_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | ####################################################################### 4 | # # 5 | # ICINGA2 Plugin for checking the valid ICINGA2 configuration # 6 | # created by Matthias J. Schmaelzle # 7 | # Version 1.00 / 2019 # 8 | ####################################################################### 9 | 10 | 11 | # Define global varibales 12 | # ====================================== 13 | # 14 | PROGNAME=`basename $0` 15 | VERSION="Version 1.00" 16 | AUTHOR="Matthias Schmaezle (http://www.mjs.de)" 17 | 18 | # Define the exit codes 19 | STATE_OK=0 20 | STATE_WARNING=1 21 | STATE_CRITICAL=2 22 | STATE_UNKNOWN=3 23 | 24 | 25 | 26 | # Running the check plugin 27 | # ====================================== 28 | # 29 | 30 | ICINGA_STATUS=`icinga2 daemon --validate >/dev/null 2>&1; echo $?` 31 | 32 | if [ "$ICINGA_STATUS" = 0 ]; then 33 | echo "OK: The ICINGA2 Syntax is valid and running" 34 | exit $STATE_OK 35 | else 36 | echo "CRITICAL: The ICINGA2 Syntax is invalid and can not run on a productiv environment" 37 | exit $STATE_CRITICAL 38 | fi 39 | 40 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/iseries/iseries.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/iseries/iseries.tar.gz -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/linux_unix/check_mem.txt: -------------------------------------------------------------------------------- 1 | Get a copy from 2 | https://raw.githubusercontent.com/justintime/nagios-plugins/master/check_mem/check_mem.pl 3 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.DIXFservice.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.DIXFservice.dll -------------------------------------------------------------------------------- 
/monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.HelpService.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.HelpService.dll -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.Init.ps1: -------------------------------------------------------------------------------- 1 | #Write-Host $PSScriptRoot 2 | if(Test-Path $PSScriptRoot\Microsoft.Dynamics.BusinessConnectorNet.dll) { 3 | Add-Type -Path $PSScriptRoot\Microsoft.Dynamics.BusinessConnectorNet.dll 4 | } 5 | if(Test-Path $PSScriptRoot\Microsoft.Dynamics.Framework.Metadata.AX.dll) { 6 | Add-Type -Path $PSScriptRoot\Microsoft.Dynamics.Framework.Metadata.AX.dll 7 | } 8 | if(Test-Path $PSScriptRoot\Microsoft.Dynamics.AX.ManagementPackSupport.dll) { 9 | Add-Type -Path $PSScriptRoot\Microsoft.Dynamics.AX.ManagementPackSupport.dll 10 | } 11 | if(Test-Path $PSScriptRoot\AxMonitor.DIXFservice.dll) { 12 | Add-Type -Path $PSScriptRoot\AxMonitor.DIXFservice.dll 13 | } 14 | if(Test-Path $PSScriptRoot\AxMonitor.HelpService.dll) { 15 | Add-Type -Path $PSScriptRoot\AxMonitor.HelpService.dll 16 | } 17 | if(Test-Path $PSScriptRoot\Microsoft.Dynamics.AX.Client.ClientConfigurationModel.dll) { 18 | Add-Type -Path $PSScriptRoot\Microsoft.Dynamics.AX.Client.ClientConfigurationModel.dll 19 | } 20 | if(([System.AppDomain]::CurrentDomain.GetAssemblies()|where { $_.ManifestModule -like "System.ServiceModel.dll"}).Count -eq 0) 21 | { 22 | [Reflection.Assembly]::LoadWithPartialName("System.ServiceModel")|Out-Null 23 | } 24 | if(Test-Path $PSScriptRoot\Microsoft.Dynamics.AX.Framework.Tools.DMF.ServiceProxy.dll) { 25 | Add-Type -Path $PSScriptRoot\Microsoft.Dynamics.AX.Framework.Tools.DMF.ServiceProxy.dll 26 | } 27 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.dll -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.psd1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/AxMonitor.psd1 -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/NTFSSecurity.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/NTFSSecurity.zip -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax2012_powershell_module/scripts/sql/headblocker.sql: -------------------------------------------------------------------------------- 1 | SET NOCOUNT ON 2 | GO 3 | SELECT SPID, BLOCKED, 
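-- Flatten each session's current batch text and collect the AX client context, host, login and wait time into #T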
REPLACE (REPLACE (T.TEXT, CHAR(10), ' '), CHAR (13), ' ' ) AS BATCH,cast(R.context_info as varchar(128)) as AXClient,Hostname,HostProcess,loginame,nt_domain,nt_username,last_batch,convert(numeric(18,2),round(datediff(ss,r.last_batch,getdate())/60.0000,2)) as waitInMin 4 | INTO #T 5 | FROM sys.sysprocesses R CROSS APPLY sys.dm_exec_sql_text(R.SQL_HANDLE) T 6 | 7 | GO 8 | WITH BLOCKERS (SPID, BLOCKED, LEVEL, BATCH,AXClient,Host,HostPoc,lname,domain,username,waitInMinutes) 9 | AS 10 | ( 11 | SELECT SPID, 12 | BLOCKED, 13 | CAST (REPLICATE ('0', 4-LEN (CAST (SPID AS VARCHAR))) + CAST (SPID AS VARCHAR) AS VARCHAR (1000)) AS LEVEL, 14 | R.BATCH,R.AXClient,Hostname,HostProcess,loginame,nt_domain,nt_username,waitInMin FROM #T R 15 | WHERE (BLOCKED = 0 OR BLOCKED = SPID) 16 | AND EXISTS (SELECT * FROM #T R2 WHERE R2.BLOCKED = R.SPID AND R2.BLOCKED <> R2.SPID) 17 | UNION ALL 18 | SELECT R.SPID, 19 | R.BLOCKED, 20 | CAST (BLOCKERS.LEVEL + RIGHT (CAST ((1000 + R.SPID) AS VARCHAR (100)), 4) AS VARCHAR (1000)) AS LEVEL, 21 | R.BATCH,R.AXClient,Hostname,HostProcess,loginame,nt_domain,nt_username,waitInMin FROM #T AS R 22 | INNER JOIN BLOCKERS ON R.BLOCKED = BLOCKERS.SPID WHERE R.BLOCKED > 0 AND R.BLOCKED <> R.SPID 23 | ) 24 | SELECT 25 | AXClient 26 | ,Host,HostPoc,lname,domain,username,waitInMinutes 27 | ,N' ' + REPLICATE (N'| ', LEN (LEVEL)/4 - 1) + 28 | CASE WHEN (LEN(LEVEL)/4 - 1) = 0 29 | THEN 'HEAD - ' 30 | ELSE '|------ ' END 31 | + CAST (SPID AS NVARCHAR (10)) + N' ' + BATCH AS BLOCKING_TREE 32 | 33 | FROM BLOCKERS ORDER BY LEVEL ASC 34 | GO 35 | DROP TABLE #T 36 | GO 37 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax_2012_jobs/9001-st_generic_agent_ps_aos_jobs_running.sh: -------------------------------------------------------------------------------- 1 | RES=`icingacli director service exists "generic_agent_ps_aos_jobs_running"` 2 | if [[ $RES =~ "does not exist" ]] 3 | then 4 | echo "Service 'generic_agent_ps_aos_jobs_running' does not exists" 5 | 6 | icingacli director service create generic_agent_ps_aos_jobs_running --json ' 7 | { 8 | "check_command": "powershell_neteye", 9 | "imports": [ 10 | "generic_agent_powershell" 11 | ], 12 | "object_name": "generic_agent_ps_aos_jobs_running", 13 | "object_type": "template", 14 | "vars": { 15 | "custom_analytics_dashboard": "d\/ax-sql-jobs-overview", 16 | "powershell_args": "-SQLServer HH1-AXDB01 -AXDBName ax_prod -BatchOverdue 10", 17 | "powershell_scripts": "check_aos_jobs.ps1" 18 | } 19 | } 20 | ' 21 | fi 22 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/ax_2012_jobs/win_c_script_neteye/batch_runningJobs.sql: -------------------------------------------------------------------------------- 1 | -- actual free batch tasks server per instance 2 | ;WITH currentrunningBatch AS 3 | ( 4 | SELECT GETUTCDATE() as currentDate,SERVERID,COUNT(*) AS runningBatch FROM BATCH WHERE (STATUS=2 OR STATUS=7) 5 | GROUP BY SERVERID 6 | ) 7 | select 8 | GETUTCDATE() as currentDate, 9 | DB_NAME() as DBName, 10 | SERVERPROPERTY('MachineName') as DBServername, 11 | @@SERVICENAME as DBServicename, 12 | CASE @@SERVICENAME 13 | WHEN 'MSSQLSERVER' THEN 'SQLServer' 14 | ELSE 'MSSQL$'+@@SERVICENAME 15 | END as SQLInstance, 16 | bgrp.SERVERID, 17 | SUBSTRING(bgrp.SERVERID,charindex('@',bgrp.SERVERID)+1,LEN(bgrp.SERVERID)) as host, 18 | SUBSTRING(bgrp.SERVERID,charindex('@',bgrp.SERVERID)+1,LEN(bgrp.SERVERID)) as AOSServer, 19 | 
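-- SERVERID is stored as '<AOS instance>@<host>'; the part before the '@' is extracted next as the AOS instance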
SUBSTRING(bgrp.SERVERID,0,charindex('@',bgrp.SERVERID)) as AOSInstance, 20 | max(bcfg.MAXBATCHSESSIONS) as maxBatchSessions, 21 | (max(bcfg.MAXBATCHSESSIONS) - ISNULL(max(crbatch.runningBatch),0)) as freeBatchSessions, 22 | ISNULL(max(crbatch.runningBatch),0) as runningBatchSessions 23 | FROM 24 | BATCHSERVERGROUP bgrp inner join BATCHSERVERCONFIG bcfg 25 | ON bcfg.SERVERID = bgrp.SERVERId 26 | inner join SYSSERVERCONFIG servercfg 27 | on servercfg.SERVERID = bcfg.SERVERID 28 | left join currentrunningBatch crbatch 29 | on crbatch.SERVERID=bgrp.SERVERID 30 | WHERE 31 | DATEDIFF(Second, CONVERT (date, GETUTCDATE()), GETUTCDATE())>=bcfg.STARTTIME 32 | and 33 | DATEDIFF(Second, CONVERT (date, GETUTCDATE()), GETUTCDATE())<=bcfg.ENDTIME 34 | and servercfg.ENABLEBATCH=1 35 | group by bgrp.SERVERID 36 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/exchange/exchange_dbstatus_and_queue.md: -------------------------------------------------------------------------------- 1 | # Check for Exchange monitoring 2 | 3 | - Exchange Databases 4 | - Queue 5 | 6 | The project to be forked is 7 | https://github.com/yosbit/nagios-plugins 8 | 9 | Pull request placed. See status: 10 | https://github.com/yosbit/nagios-plugins/pull/2 11 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/failover_cluster/microsoft_failover_cluster.txt: -------------------------------------------------------------------------------- 1 | 2 | PowerShell script to monitor: 3 | 4 | -> Cluster Nodes 5 | --> Cluster Groups 6 | ---> Cluster Ressources 7 | 8 | Blog article provides the intoduction and Plugins (Not published yet) 9 | https://www.neteye-blog.com/ 10 | 11 | 12 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/hyper-v/hyperv_vms_not_under_monitoring/hyperv_vms_missing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/hyper-v/hyperv_vms_not_under_monitoring/hyperv_vms_missing.png -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/hyper-v/hyperv_vms_not_under_monitoring/nsclient.ini: -------------------------------------------------------------------------------- 1 | [/settings/external scripts/scripts] 2 | ; Some more commands 3 | hyperv_vm_report = cmd /c echo scripts\hyperv_vm_report.ps1; exit($lastexitcode) | powershell.exe -command - 4 | 5 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/logfile_check/check_logfiles.cfg: -------------------------------------------------------------------------------- 1 | @searches = ( 2 | { 3 | tag => 'logcheck', 4 | logfile => 'D:\ManagementGateway\16.4.0\root\applications\STRS_OMS_STD_K01_02\wd\log.txt', 5 | criticalpatterns => ['Final Attempt Reconnect to SAP Failed'], 6 | warningpatterns => ['RFC method returned error: Error Key'], 7 | seekfilesdir => 'c:\temp\check_logfile.logcheck', 8 | options => 'count,noprotocol,perfdata,sticky=120' 9 | }, 10 | ); 11 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/logfile_check/check_logfiles.exe: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/logfile_check/check_logfiles.exe -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/logfile_check/check_logfiles.md: -------------------------------------------------------------------------------- 1 | Usage of `check_logfiles`: 2 | 3 | 4 | - Usage : 5 | ``` 6 | 'C:\Program Files\ICINGA2\/sbin/check_logfiles.exe' '-f' 'C:\Program Files\ICINGA2\sbin\check_logfiles.cfg' 7 | ``` 8 | 9 | - Command definition: 10 | 11 | ``` 12 | object CheckCommand "logfile_windows" { 13 | import "plugin-check-command" 14 | command = [ PluginDir + "/check_logfiles.exe" ] 15 | timeout = 1m 16 | arguments += { 17 | "-f" = { 18 | required = true 19 | value = "$check_logfiles_cfg$" 20 | } 21 | } 22 | } 23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Install and Configuration instructions 3 | 4 | ## Introduction 5 | 6 | The use of valuable Plugin `check_wmi_plus` is suggested. 7 | 8 | Project site: 9 | http://www.edcint.co.nz/checkwmiplus/ 10 | 11 | Introduction and Configuration of WMI: 12 | https://www.neteye-blog.com/2018/03/wmi-based-microsoft-server-monitoring/ 13 | 14 | Additional configurations needed for setup on NetEye 3/4 are provided here. 15 | 16 | Credits to the authors of http://www.edcint.co.nz/checkwmiplus/ 17 | 18 | 19 | ### Install required Perl modules. 20 | - NetEye3: RPMs are provided on repo of neteye 21 | - Neteye4: Use provided RPMs from folder `neteye4/` 22 | 23 | ### Apply configuration from `check_wmi_plus.conf` to `./etc/check_wmi_plus/` 24 | Define: 25 | - `$base_dir='/usr/lib64/nagios/plugins'; # NetEye 3` 26 | - `$base_dir='/neteye/shared/monitoring/plugins'; # NetEye 4` 27 | - `$ignore_my_outdated_perl_module_versions=1; # CHANGE THIS IF NEEDED` 28 | 29 | ### Install check_wmi_plus in NetEye Plugins Dir: 30 | 31 | Path NetEye 3: `/usr/lib64/nagios/plugins` 32 | Path NetEye 4: `/neteye/shared/monitoring/plugins` 33 | 34 | - Copy the `check_wmi_plus.pl` 35 | - Copy the folder `etc/` 36 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus.v1.64.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus.v1.64.tar.gz -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/check_wmi_plus.makeman.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/check_wmi_plus.makeman.sh -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/etc/check_wmi_plus/check_wmi_plus.d/README.txt: -------------------------------------------------------------------------------- 1 | This directory contains .ini files for 
check_wmi_plus.pl. 2 | You can see more ini files and upload your own at http://www.edcint.co.nz/checkwmiplus 3 | 4 | You can define a specific ini file in the plugin itself or via the command line (--inifile). 5 | That ini file is always read first (if defined). 6 | 7 | The location of this directory can be specified in the plugin itself or via the command line (--inidir). 8 | Ini files in this directory are read in the default directory order. 9 | Ini files read later merge with earlier ini files. 10 | For any settings that exist in one or more files, the last one read is set. 11 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/etc/check_wmi_plus/check_wmi_plus.d/samples.ini: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/etc/check_wmi_plus/check_wmi_plus.d/samples.ini -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/etc/check_wmi_plus/check_wmi_plus.d/test.ini: -------------------------------------------------------------------------------- 1 | [checkcpuperf percusertime] 2 | query=SELECT ContextSwitchesPerSec,Timestamp_Sys100NS FROM Win32_PerfRawData_PerfOS_System 3 | samples=2 4 | customfield=_ContextSwitchesPerSec,PERF_100NSEC_TIMER,ContextSwitchesPerSec,%.1f,100 5 | display=_DisplayMsg||~|~| - || 6 | display=_ContextSwitchesPerSec|#/sec 7 | test=_ContextSwitchesPerSec 8 | perf=_ContextSwitchesPerSec 9 | 10 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/etc/check_wmi_plus/check_wmi_plus.data/check_wmi_plus.compiledini: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/wmi/check_wmi_plus/etc/check_wmi_plus/check_wmi_plus.data/check_wmi_plus.compiledini -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/neteye3/service-profile-windows_wmi_services.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/wmi/neteye3/service-profile-windows_wmi_services.zip -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/neteye4/README.md: -------------------------------------------------------------------------------- 1 | # NetEye 4 Dependencies 2 | 3 | - Install provided perl .rpms 4 | - Install perl-DateTime 5 | ``` 6 | yum --enablerepo=neteye install perl-DateTime.x86_64 7 | ``` 8 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/neteye4/perl-Config-IniFiles-2.79-1.el7.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/wmi/neteye4/perl-Config-IniFiles-2.79-1.el7.noarch.rpm 
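A minimal sketch of installing the two RPMs bundled in this `neteye4/` folder together with the `perl-DateTime` dependency named in the README above (run from the folder containing the RPMs):

```
# Install the locally provided Perl modules for check_wmi_plus, then the repository dependency
yum localinstall perl-Config-IniFiles-2.79-1.el7.noarch.rpm perl-Number-Format-1.73-14.el7.noarch.rpm
yum --enablerepo=neteye install perl-DateTime.x86_64
```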
-------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/neteye4/perl-Number-Format-1.73-14.el7.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/microsoft/wmi/neteye4/perl-Number-Format-1.73-14.el7.noarch.rpm -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/microsoft/wmi/sample_auth.conf: -------------------------------------------------------------------------------- 1 | username=wmi_user 2 | password=secret 3 | domain=mydomain.lan 4 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/nagios_nrpe/README.md: -------------------------------------------------------------------------------- 1 | # Security advice for using NRPE 2 | 3 | The NRPE is an old protocol used in the Nagios world to communicate to remote Nagios NRPE agents. 4 | The protocol design has security weaknesses and lacks a reliable encryption and authentication handshake mechanism. 5 | 6 | Therefore it is NOT suggested to use NRPE as communication protocol for NetEye. 7 | Anyway there might be situations where NRPE is the only suitable solution for monitoring remotely systems ( i.e. icinga agents is not provided for the environment and compiling it results difficult ) and the provided `check_nrpe` could be used on NetEye 4 to handle this exceptional cases. 8 | 9 | ## Configuration of NRPE Client on NetEye 4 on RHEL8 10 | 11 | Require installation of: libssl.so.10 12 | Install required package: 13 | ``` 14 | # yum install compat-openssl10 15 | ``` 16 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/nagios_nrpe/check_nrpe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/nagios_nrpe/check_nrpe -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/nagios_nrpe/check_nrpe_quotes: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | TMPFILE=$(mktemp) 4 | trap 'rm -f $TMPFILE; exit 1' 1 2 15 5 | trap 'rm -f $TMPFILE' 0 6 | 7 | echo -n "/neteye/shared/monitoring/plugins/check_nrpe" >$TMPFILE 8 | 9 | for i in "$@" 10 | do 11 | str=$(echo "$i" | tr '"' "'") 12 | echo -n " \"$str\"" >>$TMPFILE 13 | done 14 | echo >>$TMPFILE 15 | 16 | sh $TMPFILE 17 | exit $? 
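# Wrapper behaviour: each argument is re-quoted and embedded double quotes are converted to
# single quotes before everything is handed to /neteye/shared/monitoring/plugins/check_nrpe.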
18 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/neteye/check_assetmanagement.conf: -------------------------------------------------------------------------------- 1 | $mariadb_host = "mariadb.neteyelocal"; 2 | 3 | $ocs_db = "ocsweb"; 4 | $ocs_user = "ocsweb"; 5 | $ocs_pass = "secret123"; 6 | 7 | $glpi_db ="glpi"; 8 | $glpi_user = "icinga_monitoring"; 9 | $glpi_pass = "secret123"; 10 | 11 | 1; 12 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/neteye/check_assetmanagement.txt: -------------------------------------------------------------------------------- 1 | Plugin for NetEye 4: 2 | 3 | # Introduction 4 | 5 | ## Configure 6 | 7 | Prepare a DB user for Database OCS and GLPI with Read-only permissions 8 | Configure the permissions within the plugin 9 | 10 | This should be improved in the future: Make a copy of this plugin to avoid it to be replaced by any update 11 | 12 | ## Usage and Help: 13 | ``` 14 | ./check_assetmanagement.pl --help 15 | ``` 16 | 17 | # Command description 18 | 19 | ## age 20 | 21 | check for old not up-to-date assets 22 | 23 | ## duplicates 24 | 25 | check in OCS and GLPI for duplicate assests having the same host name 26 | Each Duplicate check will lead to a WARNING if a duplicate is found 27 | 28 | - ocs_duplicates check in OCS for duplicate assests having the same host name 29 | - glpi_duplicates check in GLPI for duplicate assests having the same host name 30 | 31 | ## ocs_newsoft 32 | 33 | check in OCS for software in category NEW 34 | 35 | ## automatic_action_last_run 36 | 37 | check regular execution of automatic action: verify last run 38 | 39 | ## os_count 40 | check count of computers with relation to a non existing operating system 41 | 42 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/neteye/check_crm.md: -------------------------------------------------------------------------------- 1 | # PCS Cluster Monitoring Script 2 | 3 | Usage: Import CRM PCS Basket 4 | 5 | Create file `/etc/sudoers.d/crm_mon` 6 | ``` 7 | icinga ALL=(ALL) NOPASSWD: /usr/sbin/crm_mon -1 -r -f 8 | ``` 9 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/neteye/check_drbd9.md: -------------------------------------------------------------------------------- 1 | # DRBD Monitoring for NetEye 2 | 3 | Ext_resource: https://raw.githubusercontent.com/alaskacommunications/nagios_check_drbd9/master/check_drbd9.pl 4 | Ext_resource_doc: https://github.com/alaskacommunications/nagios_check_drbd9 5 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/neteye/check_drbd9_orig.pl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/neteye/check_drbd9_orig.pl -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/neteye/nagios-plugins-nrpe-2.15p4-3.neteye4.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/neteye/nagios-plugins-nrpe-2.15p4-3.neteye4.x86_64.rpm 
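For the PCS cluster check described in `check_crm.md` above, the sudo rule can be verified from the monitoring user before the basket is imported; a minimal sketch, assuming the `icinga` service user:

```
# Run crm_mon exactly as allowed by /etc/sudoers.d/crm_mon, as the icinga user
su -s /bin/bash icinga -c 'sudo -n /usr/sbin/crm_mon -1 -r -f'
```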
-------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/check_nwc_health/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Check NWC health for NetEye 4 3 | 4 | ## Importing Service Templates 5 | 6 | Execute: `service_template-nwc_health.sh`: 7 | It creates the following templates: ![Template Created](service_template-nwc_health.png) 8 | 9 | 10 | ## Setup and configuration 11 | 12 | `neteye4_path: /neteye/shared/monitoring/plugins` 13 | 14 | For Neteye 4 the plugins is installed within its path by running the install script of the neteye share project. 15 | 16 | Cache Paths to create: 17 | - NetEye 4: 18 | make cache files cluster compatible: `/neteye/shared/monitoring/cache/check_nwc_health` 19 | - NetEye 3: 20 | make cache files cluster compatible: `/var/cache/nagios` 21 | 22 | 23 | ## Compiling of script: 24 | Download last available source codes from https://labs.consol.de/omd/packages/check_nwc_health/ and unpack in into a temporary directory. 25 | Then, move to that directory, run `configure` (with the required options) and `make` (just plain is enough). 26 | At the end, in the subdirectory `./plugins-scripts` the compiled version will be available. 27 | 28 | ``` 29 | Compiling for NetEye 4: 30 | ./configure --prefix=/neteye/shared/monitoring/plugins --with-nagios-user=icinga --with-nagios-group=icinga --with-perl=/usr/bin/perl --with-statefiles-dir=/neteye/shared/monitoring/cache/check_nwc_health 31 | 32 | Compiling for NetEye 3: 33 | ./configure --prefix=/usr/lib64/nagios/plugins --with-nagios-user=nagios --with-nagios-group=nagios --with-perl=/usr/bin/perl --with-statefiles-dir=/var/cache/nagios 34 | ``` 35 | 36 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/check_nwc_health/perl-Module-Load-0.24-3.el7.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/network-devices/check_nwc_health/perl-Module-Load-0.24-3.el7.noarch.rpm -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/check_nwc_health/service_template-nwc_health.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/network-devices/check_nwc_health/service_template-nwc_health.png -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/check_pcmeasure2/README.md: -------------------------------------------------------------------------------- 1 | 2 | # `check_pcmeasure2` for NetEye 4 3 | 4 | ## Importing Service Templates 5 | 6 | Execute: `service_template-pcmeasure2.sh` 7 | 8 | 9 | ## Setup and configuration 10 | 11 | copy `check_pcmeasure2.pl` in NetEye 4 plugin directory : `/neteye/shared/monitoring/plugins` 12 | 13 | In Director you need to create 4 Custom Fields: 14 | 15 | `check_pcmeasure_sensor` 16 | `check_pcmeasure_label` 17 | `check_pcmeasure_warning` 18 | `check_pcmeasure_critical` 19 | 20 | ## Example command-line 21 | 22 | ``` 23 | [root@neteye4 plugins]# /neteye/shared/monitoring/plugins/check_pcmeasure2.pl -H 10.62.5.35 -S com1.1 -w 45 -c 55 -l 'Celsius' 
24 | PCMEASURE OK - Celsius = 20.8 | celsius=20.8;45;55 25 | ``` 26 | 27 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/check_pcmeasure2/service_template-pcmeasure.sh: -------------------------------------------------------------------------------- 1 | # 2 | #Service template check_pcmeasure 3 | 4 | 5 | #Requirements check 6 | RES=`icingacli director service exists "generic_snmp"` 7 | if [[ $RES =~ "does not exist" ]] 8 | then 9 | echo "[-] Requirements check failure: Required service tempate 'generic_snmp' does not exists" 10 | exit 1 11 | fi 12 | 13 | 14 | # HowTo Export: 15 | # icingacli director service show generic_pcmeasure --json --no-defaults 16 | 17 | RES=`icingacli director service exists "generic_pcmeasure"` 18 | if [[ $RES =~ "does not exist" ]] 19 | then 20 | echo "Service 'generic_pcmeasure' does not exists" 21 | 22 | icingacli director service create check_pcmeasure --json ' 23 | { 24 | "check_command": "check_pcmeasure", 25 | "imports": [ 26 | "generic_snmp" 27 | ], 28 | "object_name": "check_pcmeasure", 29 | "object_type": "template" 30 | "vars": { 31 | "check_pcmeasure_sensor": "com1.", 32 | "check_pcmeasure_label": "label_name", 33 | "check_pcmeasure_warning": "warning_value", 34 | "check_pcmeasure_critical": "critical_value" 35 | } 36 | } 37 | } 38 | ' 39 | fi 40 | 41 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/fortinet/README.md: -------------------------------------------------------------------------------- 1 | ## Check Info 2 | 3 | Check for Fortinet is maintained and improved by https://github.com/riskersen/Monitoring 4 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/fortinet/perl-List-Compare-0.49-1.el7.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/network-devices/fortinet/perl-List-Compare-0.49-1.el7.noarch.rpm -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/interfaces/check_interfaces_centos7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/network-devices/interfaces/check_interfaces_centos7 -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/interfaces/check_interfaces_rhel8: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/network-devices/interfaces/check_interfaces_rhel8 -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/interfaces/interface_traffic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/network-devices/interfaces/interface_traffic.png -------------------------------------------------------------------------------- 
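The two `check_interfaces` binaries above are builds for different base systems; a minimal deployment sketch, assuming the RHEL 8 build matches the target NetEye 4 host (plugin path as used by the other READMEs in this repository):

```
# Deploy the build matching the operating system into the NetEye 4 plugin directory
cp check_interfaces_rhel8 /neteye/shared/monitoring/plugins/check_interfaces
chmod 755 /neteye/shared/monitoring/plugins/check_interfaces
```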
/monitoring/monitoring-plugins/network-devices/radius_tacacs/check_radius_auth.txt: -------------------------------------------------------------------------------- 1 | README: 2 | 3 | Credits for script to Saskia Oppenlaender, Würth IT 4 | 5 | 1. 6 | pip-3 install py-radius 7 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/network-devices/radius_tacacs/check_tacacs.txt: -------------------------------------------------------------------------------- 1 | README: 2 | 3 | Credits for script to Saskia Oppenlaender, Würth IT 4 | 5 | 1. 6 | git clone https://github.com/ansible/tacacs_plus.git 7 | 2. 8 | pip-3 install tacacs_plus 9 | 3. 10 | cp check_tacacs.py tacacs_plus/ 11 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/storage/check_fibrealliance.md: -------------------------------------------------------------------------------- 1 | # Generic SAN Storage monitoring 2 | 3 | Comments from author: 4 | This plugin checks sensors (PSU, temperature, fans et al) and overall health of SAN switches that understand the Fibre Alliance MIB. There 5 | is a long list of companies behind that MIB. 6 | 7 | Credits to: 8 | https://github.com/glensc 9 | Project Repository: 10 | https://github.com/pld-linux/nagios-plugin-check_fibrealliance 11 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/vmware/README.md: -------------------------------------------------------------------------------- 1 | # VMWare monitoring 2 | 3 | ## VMWare ESX and Guest monitoring 4 | 5 | Make use of provided service template: `generic_esx_vmware` 6 | 7 | ## VMWare ESX Hardware monitoring 8 | 9 | Clone `check_esx_hardware` as git submodule into folder `esx_hardware` 10 | [More on using git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules): 11 | ``` 12 | git submodule add https://github.com/Napsty/check_esxi_hardware /monitoring/monitoring-plugins/vmware/esx_hardware 13 | ``` 14 | 15 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/vmware/check_vmware_esx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/vmware/check_vmware_esx -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/vmware/perl-Time-Duration-1.06-17.el7.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/monitoring-plugins/vmware/perl-Time-Duration-1.06-17.el7.noarch.rpm -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/wireless/cisco/README.md: -------------------------------------------------------------------------------- 1 | # Wireless controller 2 | ## Cisco WLC wireless monitoring 3 | I will provide you the commands to setup the monitoring 4 | 5 | First copy all the included MIB files to the NetEye default folder: 6 | ``` 7 | cp mibs/* /usr/share/snmp/mibs/ 8 | ``` 9 | 10 | Create a new influx database 11 | ``` 12 | influx 13 | CREATE DATABASE telegraf WITH DURATION 90d 14 | quit 15 | ``` 16 | 17 | Install telegraf on NetEye if not already present: 18 | ``` 19 | yum install telegraf 
--enablerepo=neteye 20 | ``` 21 | 22 | Copy the telegraf configuration file into the right folder and add its daemon: 23 | ``` 24 | cp telegraf.conf /neteye/shared/telegraf/ 25 | chkconfig --add telegraf 26 | ``` 27 | 28 | Remember to edit the /neteye/shared/telegraf/telegraf.conf file and put the Cisco WLC controller IP address and read SNMP community to access it. 29 | See lines: 30 | ``` 31 | [[inputs.snmp]] 32 | agents = [ "PUT Cisco WLC IP address HERE" ] 33 | version = 2 34 | community = "public" 35 | ``` 36 | 37 | 38 | Start the telegraf service: 39 | ``` 40 | service telegraf start 41 | ``` 42 | 43 | Now you can import the included sample Dashboards into Grafana to display the collected data 44 | -------------------------------------------------------------------------------- /monitoring/monitoring-plugins/wireless/cisco/mibs/README.md: -------------------------------------------------------------------------------- 1 | # Disclaimer 2 | 3 | This directory contains a very basic set of MIB files 4 | All these files are property of Cisco Systems, Inc. and are Copyright (c) protected. 5 | 6 | All files can be found on the public FTP site: 7 | [ftp://ftp.cisco.com/pub/mibs](ftp://ftp.cisco.com/pub/mibs) 8 | 9 | For your comfort, we synchronized the relevant MIB files in this folder 10 | -------------------------------------------------------------------------------- /monitoring/notification/README.md: -------------------------------------------------------------------------------- 1 | # SMS notification for neteye4 2 | 3 | - home path of SMS module: /neteye/local/smsd/ 4 | - grant permissions to icinga user on spool folders 5 | ``` 6 | chown icinga:icinga -R /neteye/local/smsd/data/spool 7 | chmod 755 /neteye/local/smsd/data/spool 8 | chmod 777 /neteye/local/smsd/data/spool/outgoing 9 | ``` 10 | - diff /usr/bin/smssend with provided file 11 | - restore provided basket 12 | ``` 13 | icingacli director basket restore < Director-Basket_SMS_Notification.json 14 | ``` 15 | - install SMS notification script in /neteye/shared/icinga2/conf/icinga2/scripts/ 16 | ``` 17 | cp sms-host-notification.sh sms-service-notification.sh /neteye/shared/icinga2/conf/icinga2/scripts/ 18 | chmod 755 /neteye/shared/icinga2/conf/icinga2/scripts/sms-* 19 | ``` 20 | Patch the `smssend` binary: 21 | ``` 22 | grep out /usr/bin/smssend 23 | ``` 24 | 25 | SMS-Queue files in the outgoing queue. 26 | ``` 27 | FILE=`mktemp /neteye/local/smsd/data/spool/outgoing/send_XXXXXX` 28 | ``` 29 | 30 | -------------------------------------------------------------------------------- /monitoring/notification/email2neteye3sms/n3email_2_n4sms.md: -------------------------------------------------------------------------------- 1 | # How to to send SMS from neteye 4 via neteye 3 eventhandler 2 | 3 | ## Enable `sendmail` on neteye 3 4 | https://www.ghacks.net/2009/06/05/make-sendmail-accept-mail-from-external-sources/ 5 | 6 | In /etc/mail/sendmail.mc change 7 | ``` 8 | DAEMON_OPTIONS(`Family=inet, Name=MTA-v4, Addr=127.0.0.1, Port=smtp')dnl 9 | to 10 | DAEMON_OPTIONS(`Family=inet, Name=MTA-v4, Port=smtp')dnl 11 | ``` 12 | 13 | # Forward relay emails to neteye 4 14 | 15 | Direct relay from neteye 4 postfix 16 | https://serverfault.com/questions/257637/postfix-to-relay-mails-to-other-smtp-for-particular-domain 17 | 18 | ``` 19 | [root@p-neteye4-a postfix]# tail /etc/postfix/main.cf 20 | ... 
21 | # RELAY MAPPINGS PER DOMAIN 22 | transport_maps = hash:/etc/postfix/transport 23 | 24 | 25 | [root@p-neteye4-a postfix]# tail /etc/postfix/transport 26 | ... 27 | #Send emails to neteye 3 eventhandler 28 | eventgw@p-neteye-lc.rtl2.de smtp:p-neteye-lc.rtl2.de 29 | 30 | # postmap /etc/postfix/transport 31 | ``` 32 | 33 | 34 | # NetEye 3 Eventhandler rule 35 | 36 | Create a rule in EventHandler matching the senders address of NetEye4 (or other conditions). The Actions should look like this: 37 | ``` 38 | /usr/local/bin/sendsms +4915164519590 @SUBJECT@ 39 | ``` 40 | -------------------------------------------------------------------------------- /monitoring/notification/email_html_mail/README.md: -------------------------------------------------------------------------------- 1 | 2 | # HTML Email notification for NetEye 3 | 4 | Run script `clone_and_patch_repo.sh` to clone and patch the repo for NetEye 5 | 6 | 7 | For NetEye 3: 8 | Install the script in home folder of Plugins and define notification command: 9 | 10 | ``` 11 | mkdir /usr/lib/nagios/plugins/contrib/ 12 | cp -r Nagios-Responsive-HTML-Email-Notifications /usr/lib/nagios/plugins/contrib/ 13 | ``` 14 | 15 | # SMS notification via email over NetEye3 16 | 17 | Have a look at `n3email_2_n4sms.md` 18 | -------------------------------------------------------------------------------- /monitoring/notification/email_html_mail/clone_and_patch_repo.sh: -------------------------------------------------------------------------------- 1 | 2 | # Clone Git Repo 3 | if [ ! -d "./Nagios-Responsive-HTML-Email-Notifications" ] 4 | then 5 | /usr/bin/git clone https://github.com/heiniha/Nagios-Responsive-HTML-Email-Notifications.git 6 | 7 | patch Nagios-Responsive-HTML-Email-Notifications/php-html-email/nagios_host_mail < nagios_host_mail.diff 8 | 9 | patch Nagios-Responsive-HTML-Email-Notifications/php-html-email/nagios_service_mail < nagios_service_mail.diff 10 | fi 11 | 12 | 13 | -------------------------------------------------------------------------------- /monitoring/notification/phone_call/Basket-notification-phonecall.json: -------------------------------------------------------------------------------- 1 | { 2 | "Command": { 3 | "phone-notification": { 4 | "arguments": { 5 | "-r": { 6 | "required": true, 7 | "skip_key": true, 8 | "value": "$notification_sms_number$" 9 | } 10 | }, 11 | "command": "\/neteye\/shared\/icinga2\/conf\/icinga2\/scripts\/phone-notification.sh", 12 | "disabled": false, 13 | "fields": [], 14 | "imports": [], 15 | "is_string": null, 16 | "methods_execute": "PluginNotification", 17 | "object_name": "phone-notification", 18 | "object_type": "object", 19 | "timeout": "60", 20 | "vars": {}, 21 | "zone": null 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /monitoring/notification/snmptrap/INSTALL.txt: -------------------------------------------------------------------------------- 1 | Notification command: 2 | 3 | Host: 4 | 5 | /neteye/shared/monitoring/phoenix/bin/neteye-trap-notification $user.email$ public $host.name$ _HOST_ $host.display_name$ $host.state_id$ 0 $host.output$ $host.perfdata$ $host.check_attempt$ $host.duration_sec$ $host.last_check$ $host.last_state_change$ $host.last_state_up$ $host.last_state_down$ $host.last_state_unreachable$ 6 | 7 | Service: 8 | 9 | /neteye/shared/monitoring/phoenix/bin/neteye-trap-notification $user.email$ public $host.name$ $service.name$ $host.display_name$ $host.state_id$ 0 $service.output$ $service.perfdata$ 
$service.check_attempt$ $service.duration_sec$ $service.last_check$ $service.last_state_change$ $service.last_state_ok$ $service.last_state_warning$ $service.last_state_critical$ $service.last_state_unknown$ $service.state_id$ 10 | -------------------------------------------------------------------------------- /monitoring/notification/teams/README.md: -------------------------------------------------------------------------------- 1 | # SMS notification for neteye4 2 | 3 | - restore provided basket 4 | ``` 5 | icingacli director basket restore < Director-Basket_teams-notifications_56f58bd.json 6 | ``` 7 | - install teams notification script in /neteye/shared/icinga2/conf/icinga2/scripts/ 8 | ``` 9 | cp teams-notification.py /neteye/shared/icinga2/conf/icinga2/scripts/ 10 | chmod 755 /neteye/shared/icinga2/conf/icinga2/scripts/teams-* 11 | ``` 12 | Define a variable field and user template: 13 | ``` 14 | template User "microsoft-teams-template" { 15 | vars.teams_webhook_url = "https://mydomain.webhook.office.com/webhookb2/XXX" 16 | } 17 | ``` 18 | -------------------------------------------------------------------------------- /monitoring/notification/telegram/README.md: -------------------------------------------------------------------------------- 1 | # How to configure Telegram Notification on NetEye 2 | 3 | 1. Create a Telegram Bot 4 | 2. Create a Telegram Group 5 | 3. Get the chat_id from the Telegram Bot 6 | 4. Create the notification commands 7 | 5. Create the NetEye notifications 8 | 9 | For any details please check this blog post on our NetEye Blog https://www.neteye-blog.com/2020/03/how-to-configure-telegram-notification-on-neteye/ 10 | -------------------------------------------------------------------------------- /monitoring/sahipro/bin/firefox-de: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | export LANG=de_DE.UTF-8 4 | /usr/bin/waitmax 900 /usr/bin/firefox $@ 5 | -------------------------------------------------------------------------------- /monitoring/sahipro/bin/firefox-it: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | export LANG=it_IT.UTF-8 4 | /usr/bin/waitmax 900 /usr/bin/firefox $@ 5 | -------------------------------------------------------------------------------- /monitoring/sahipro/bin/startsahi.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | DIR=$(dirname $0) 4 | 5 | cd $DIR 6 | rm -f ../database/db0*.db 7 | ./start_sahi.sh 8 | -------------------------------------------------------------------------------- /monitoring/sahipro/config/sysconfig.cfg: -------------------------------------------------------------------------------- 1 | SAHI_USER=sahi 2 | SAHI_HOME=/neteye/local/sahipro 3 | HOME=$SAHI_HOME/userdata 4 | progpath='$SAHI_HOME/userdata/bin/start_sahi.sh' 5 | DISPLAY=:99 6 | #JAVA_HOME=/usr/java/latest/jre 7 | CLEANUP_HOURS=720 8 | CLEANUP_OK_HOURS=48 9 | CLEANUP_MYSQL_HOURS=1 10 | SAHI_EXTERNAL_DIRS="" 11 | -------------------------------------------------------------------------------- /monitoring/sahipro/config/userdata.properties.add: -------------------------------------------------------------------------------- 1 | 2 | 3 | ################## Added NetEye4 ######################## 4 | # Specifies database type. 
5 | # The inbuilt options are "mysql" and "h2", the default is "h2" 6 | # Change jdbc parameters accordingly 7 | #JDBC parameters for h2 database 8 | #db.type=h2 9 | #db.jdbc_url=jdbc:h2:/opt/neteye/sahipro/userdata/database/db0;AUTO_SERVER=FALSE;DB_CLOSE_DELAY=-1;IGNORECASE=TRUE 10 | 11 | #JDBC parameters for mysql database 12 | #db.type=mysql 13 | #db.driver_name=com.mysql.jdbc.Driver 14 | #db.jdbc_url=jdbc:mysql://localhost/sahireports?allowMultiQueries=true&sessionVariables=sql_mode=NO_BACKSLASH_ESCAPES 15 | #db.user_name=sahi 16 | #db.password=sahi 17 | 18 | # 19 | # Extra Options added by NetEye 20 | # 21 | # Reduce timeout to 90 seconds 22 | script.max_cycles_for_page_load=600 23 | 24 | # 25 | # Retry on load error 26 | # 27 | proxy.failed_request_retry_count=3 28 | 29 | # 30 | # Uncomment this if you need this on first page load 31 | # 32 | #xhr.wait_ready_states=2,3 33 | -------------------------------------------------------------------------------- /monitoring/sahipro/etc/sahipro-mysql.cron.hourly: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | 4 | # 5 | # Cleanup MySQL DB (only if active) 6 | # 7 | /usr/lib64/neteye/monitoring/plugins/check_mysql -H localhost >/dev/null 2>&1 || exit 0 8 | dd=$(date +'%Y-%m-%d %H:%M:00' -d "1 hours ago") 9 | for i in `echo "select SUITEREPORTID from SUITEREPORTS where ENDTIME<'$dd'" | mysql -BN sahireports` 10 | do 11 | echo "delete from SCRIPTREPORTS where SUITEREPORTID='$i'" | mysql -BN sahireports >/dev/null 12 | done 13 | echo "delete from STEPREPORTS where MESSAGETIMESTAMP<'$dd'" | mysql -BN sahireports >/dev/null 14 | echo "delete from SCRIPTREPORTS where STARTTIME<'$dd'" | mysql -BN sahireports >/dev/null 15 | echo "delete from SUITEREPORTS where ENDTIME<'$dd'" | mysql -BN sahireports >/dev/null 16 | echo "TRUNCATE TABLE QUERYLOG" | mysql -BN sahireports >/dev/null 17 | echo "optimize table STEPREPORTS" | mysql -BN sahireports >/dev/null 18 | echo "optimize table SCRIPTREPORTS" | mysql -BN sahireports >/dev/null 19 | echo "optimize table SUITEREPORTS" | mysql -BN sahireports >/dev/null 20 | exit 0 21 | -------------------------------------------------------------------------------- /monitoring/sahipro/etc/sahipro.conf: -------------------------------------------------------------------------------- 1 | Alias /sahipro /neteye/shared/monitoring/data/sahipro/logs 2 | 3 | Options None 4 | AllowOverride all 5 | Order deny,allow 6 | Deny from all 7 | Allow from all 8 | Require all granted 9 | 10 | -------------------------------------------------------------------------------- /monitoring/sahipro/etc/sahipro.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=sahipro 3 | 4 | [Service] 5 | Type=simple 6 | User=sahi 7 | Group=sahi 8 | EnvironmentFile=-/etc/default/sahipro 9 | EnvironmentFile=-/neteye/local/sahipro/config/sysconfig.cfg 10 | ExecStart=/neteye/local/sahipro/userdata/bin/startsahi.sh 11 | Restart=always 12 | WorkingDirectory=/neteye/local/sahipro/userdata/bin 13 | Nice=19 14 | LimitNOFILE=16384 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /monitoring/sahipro/etc/sahipro_runner.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=sahipro_runner 3 | 4 | [Service] 5 | Type=simple 6 | User=sahi 7 | Group=sahi 8 | EnvironmentFile=-/etc/default/sahipro_runner 9 | 
EnvironmentFile=-/neteye/local/sahipro_runner/config/sysconfig.cfg 10 | ExecStart=/neteye/local/sahipro_runner/userdata/bin/startsahi.sh 11 | Restart=always 12 | WorkingDirectory=/neteye/local/sahipro_runner/userdata/bin 13 | Nice=19 14 | LimitNOFILE=16384 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /monitoring/sahipro/extlib/mariadb-java-client-2.6.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/sahipro/extlib/mariadb-java-client-2.6.0.jar -------------------------------------------------------------------------------- /monitoring/sahipro/lib/neteye.sah: -------------------------------------------------------------------------------- 1 | // 2 | // NetEye Functions to include to testcase 3 | // 4 | 5 | // Uncomment this if you don't want screenshots 6 | //_sahi.SKIP_SCREENSHOTS = true; 7 | 8 | // Global Varibales 9 | 10 | var $start_time = new Date(); 11 | var $decrypted_text = {}; 12 | 13 | function onScriptFailure($e) { 14 | // Fail on Assert (default is to continue) 15 | _logExceptionAsFailure($e); 16 | _fail(); 17 | } 18 | 19 | function onScriptEnd() { 20 | if (!_sahi.SKIP_SCREENSHOTS) { 21 | _focusWindow(); 22 | _takePageScreenShot(); 23 | } 24 | } 25 | 26 | function perfdataAdd($label) { 27 | $end_time = new Date(); 28 | $time_diff = $end_time.getTime() - $start_time.getTime(); 29 | _log("PERFDATA:" + $label + ":" + $time_diff + ":"); 30 | $start_time = $end_time; 31 | } 32 | 33 | function userPassword($password) { 34 | var $newpass = _execute("/usr/local/bin/decrypt_password '" + $password + "'", true); 35 | return $newpass; 36 | } 37 | 38 | function setEncryptedValue($obj, $index) { 39 | _maskLogs("setEncryptedValue"); 40 | _setValue($obj, $decrypted_text[$index]); 41 | _unmaskLogs("setEncryptedValue"); 42 | } 43 | 44 | -------------------------------------------------------------------------------- /monitoring/sahipro/packages/README.txt: -------------------------------------------------------------------------------- 1 | Install with this commandline: 2 | 3 | java -jar silent_install.xml 4 | -------------------------------------------------------------------------------- /monitoring/sahipro/packages/sahipro.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | /neteye/local/sahipro 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /monitoring/sahipro/packages/sahipro_runner.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | /neteye/local/sahipro_runner 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /monitoring/sahipro/packages/silent_install.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | /neteye/local/sahipro 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /monitoring/sahipro/packages/silent_install_runner.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | /neteye/local/sahipro_runner 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 
| -------------------------------------------------------------------------------- /monitoring/sahipro/phantomjs/phantomjs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | TMPFILE=/var/tmp/phantomjs_$$.cmd 4 | trap 'rm -f $TMPFILE; exit 1' 1 2 15 5 | trap 'rm -f $TMPFILE' 0 6 | 7 | echo "rm -f $TMPFILE" >$TMPFILE 8 | if [ -x /usr/bin/phantomjs2 ] 9 | then 10 | echo -n "/usr/bin/waitmax 900 /usr/bin/phantomjs2" >>$TMPFILE 11 | else 12 | echo -n "/usr/bin/waitmax 900 /usr/bin/phantomjs" >>$TMPFILE 13 | fi 14 | n=0 15 | for i in $@ 16 | do 17 | if [ -z "$sahisid" ] 18 | then 19 | sahisid=$(echo "$i" | sed -e 's/.*sahisid=\(.*\)sahi.*/\1/g') 20 | fi 21 | if [ "$sahisid" = "$i" ] 22 | then 23 | sahisid="" 24 | fi 25 | if [ -n "$sahisid" ] 26 | then 27 | TMPDIR=/neteye/shared/httpd/sahipro/tmp/$sahisid 28 | echo -n " \"$TMPDIR\" \"$i\"" >>$TMPFILE 29 | else 30 | echo -n " \"$i\"" >>$TMPFILE 31 | fi 32 | done 33 | 34 | if [ -n "$TMPDIR" ] 35 | then 36 | mkdir -p $TMPDIR 37 | chmod g+rwx $TMPDIR 38 | fi 39 | 40 | if [ ! -d "$TMPDIR" ] 41 | then 42 | echo "TMPDIR ($TMPDIR) not found: $0 $@" >>/neteye/shared/httpd/sahipro/sahipro_phantomjs.log 43 | fi 44 | 45 | sh $TMPFILE 46 | -------------------------------------------------------------------------------- /monitoring/sahipro/phantomjs/sahi.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var page = require('webpage').create(), 3 | system = require('system'), 4 | address, 5 | output, 6 | size; 7 | 8 | if (system.args.length < 3) { 9 | console.log('Usage: sahi.js '); 10 | phantom.exit(1); 11 | } else { 12 | var ldir = system.args[1]; 13 | console.log('DIR: ' + ldir); 14 | address = system.args[2]; 15 | console.log('URL: ' + address); 16 | page.viewportSize = { width: '1280', height: '1024' }; 17 | page.paperSize = {format: 'A4', orientation: 'portrait', margin: '1cm' }; 18 | page.settings.userAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36'; 19 | page.open(address, function(status) { 20 | if (status === 'success') { 21 | var title = page.evaluate(function() { 22 | return document.title; 23 | }); 24 | console.log('Page title is ' + title); 25 | } else { 26 | console.log('FAIL to load the address'); 27 | } 28 | }); 29 | page.onLoadFinished = function(status) { 30 | page.render(ldir + '/sahiurl.png'); 31 | console.log('Status: ' + status); 32 | }; 33 | } 34 | -------------------------------------------------------------------------------- /monitoring/sahipro/phantomjs/sahi_de.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var page = require('webpage').create(), 3 | system = require('system'), 4 | address, 5 | output, 6 | size; 7 | 8 | if (system.args.length < 3) { 9 | console.log('Usage: sahi.js '); 10 | phantom.exit(1); 11 | } else { 12 | var ldir = system.args[1]; 13 | console.log('DIR: ' + ldir); 14 | address = system.args[2]; 15 | console.log('URL: ' + address); 16 | page.viewportSize = { width: '1280', height: '1024' }; 17 | page.paperSize = {format: 'A4', orientation: 'portrait', margin: '1cm' }; 18 | page.settings.userAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36'; 19 | page.customHeaders = { 20 | "Accept-Language": "de-DE,de" 21 | }; 22 | page.open(address, function(status) { 23 | if (status === 'success') { 24 | var title = 
page.evaluate(function() { 25 | return document.title; 26 | }); 27 | console.log('Page title is ' + title); 28 | } else { 29 | console.log('FAIL to load the address'); 30 | } 31 | }); 32 | page.onLoadFinished = function(status) { 33 | page.render(ldir + '/sahiurl.png'); 34 | console.log('Status: ' + status); 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /monitoring/sahipro/phantomjs/sahi_en.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var page = require('webpage').create(), 3 | system = require('system'), 4 | address, 5 | output, 6 | size; 7 | 8 | if (system.args.length < 3) { 9 | console.log('Usage: sahi.js '); 10 | phantom.exit(1); 11 | } else { 12 | var ldir = system.args[1]; 13 | console.log('DIR: ' + ldir); 14 | address = system.args[2]; 15 | console.log('URL: ' + address); 16 | page.viewportSize = { width: '1280', height: '1024' }; 17 | page.paperSize = {format: 'A4', orientation: 'portrait', margin: '1cm' }; 18 | page.settings.userAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36'; 19 | page.customHeaders = { 20 | "Accept-Language": "en-US,en" 21 | }; 22 | page.open(address, function(status) { 23 | if (status === 'success') { 24 | var title = page.evaluate(function() { 25 | return document.title; 26 | }); 27 | console.log('Page title is ' + title); 28 | } else { 29 | console.log('FAIL to load the address'); 30 | } 31 | }); 32 | page.onLoadFinished = function(status) { 33 | page.render(ldir + '/sahiurl.png'); 34 | console.log('Status: ' + status); 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /monitoring/sahipro/phantomjs/sahi_it.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var page = require('webpage').create(), 3 | system = require('system'), 4 | address, 5 | output, 6 | size; 7 | 8 | if (system.args.length < 3) { 9 | console.log('Usage: sahi.js '); 10 | phantom.exit(1); 11 | } else { 12 | var ldir = system.args[1]; 13 | console.log('DIR: ' + ldir); 14 | address = system.args[2]; 15 | console.log('URL: ' + address); 16 | page.viewportSize = { width: '1280', height: '1024' }; 17 | page.paperSize = {format: 'A4', orientation: 'portrait', margin: '1cm' }; 18 | page.settings.userAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36'; 19 | page.customHeaders = { 20 | "Accept-Language": "it-IT,it" 21 | }; 22 | page.open(address, function(status) { 23 | if (status === 'success') { 24 | var title = page.evaluate(function() { 25 | return document.title; 26 | }); 27 | console.log('Page title is ' + title); 28 | } else { 29 | console.log('FAIL to load the address'); 30 | } 31 | }); 32 | page.onLoadFinished = function(status) { 33 | page.render(ldir + '/sahiurl.png'); 34 | console.log('Status: ' + status); 35 | }; 36 | } 37 | -------------------------------------------------------------------------------- /monitoring/sahipro/rpm/libxdo-3.20150503.1-1.el7.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/sahipro/rpm/libxdo-3.20150503.1-1.el7.x86_64.rpm -------------------------------------------------------------------------------- /monitoring/sahipro/rpm/phantomjs2-2.1.1-1.neteye.x86_64.rpm: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/sahipro/rpm/phantomjs2-2.1.1-1.neteye.x86_64.rpm -------------------------------------------------------------------------------- /monitoring/sahipro/rpm/waitmax-1.1-1.el7.rf.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/sahipro/rpm/waitmax-1.1-1.el7.rf.x86_64.rpm -------------------------------------------------------------------------------- /monitoring/sahipro/rpm/xdotool-3.20150503.1-1.el7.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/monitoring/sahipro/rpm/xdotool-3.20150503.1-1.el7.x86_64.rpm -------------------------------------------------------------------------------- /monitoring/sahipro/sbin/nginx_disable_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | CONFIG=/neteye/shared/nginx/conf/conf.d/elasticsearch-loadbalanced.conf 4 | 5 | if [ -z "$1" ] 6 | then 7 | echo "USAGE: $(basename $0) " 8 | exit 3 9 | fi 10 | 11 | if [ ! -e $CONFIG ] 12 | then 13 | exit 0 14 | fi 15 | 16 | TMPFILE=$(mktemp nginx_enable.XXXXXXXXXX) 17 | trap 'rm -f $TMPFILE; exit 1' 1 2 15 18 | trap 'rm -f $TMPFILE' 0 19 | 20 | HOST=$(echo $1 | cut -d: -f1) 21 | if echo $1 |grep : >/dev/null 22 | then 23 | PORT=$(echo $1 | cut -d: -f2) 24 | fi 25 | IP=$(getent hosts | grep $HOST | awk '{print $1}') 26 | 27 | if [ -n "$IP" ] 28 | then 29 | if [ -n "$PORT" ] 30 | then 31 | HSTR=$IP:$PORT 32 | else 33 | HSTR=$IP 34 | fi 35 | else 36 | if [ -n "$PORT" ] 37 | then 38 | HSTR=$HOST:$PORT 39 | else 40 | HSTR=$HOST 41 | fi 42 | fi 43 | if egrep "^#.*$HSTR" $CONFIG >/dev/null 44 | then 45 | exit 0 46 | fi 47 | cp -a $CONFIG $TMPFILE 48 | sed -i "s/\(.*$HSTR.*\)/#\1/g" $TMPFILE 49 | if ! diff $CONFIG $TMPFILE >/dev/null 50 | then 51 | cp -a $TMPFILE $CONFIG 52 | /usr/bin/systemctl reload nginx 53 | fi 54 | -------------------------------------------------------------------------------- /monitoring/sahipro/sbin/nginx_enable_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | CONFIG=/neteye/shared/nginx/conf/conf.d/elasticsearch-loadbalanced.conf 4 | 5 | if [ -z "$1" ] 6 | then 7 | echo "USAGE: $(basename $0) " 8 | exit 3 9 | fi 10 | 11 | if [ ! -e $CONFIG ] 12 | then 13 | exit 0 14 | fi 15 | 16 | TMPFILE=$(mktemp nginx_enable.XXXXXXXXXX) 17 | trap 'rm -f $TMPFILE; exit 1' 1 2 15 18 | trap 'rm -f $TMPFILE' 0 19 | 20 | HOST=$(echo $1 | cut -d: -f1) 21 | if echo $1 |grep : >/dev/null 22 | then 23 | PORT=$(echo $1 | cut -d: -f2) 24 | fi 25 | IP=$(getent hosts | grep $HOST | awk '{print $1}') 26 | 27 | if [ -n "$IP" ] 28 | then 29 | if [ -n "$PORT" ] 30 | then 31 | HSTR=$IP:$PORT 32 | else 33 | HSTR=$IP 34 | fi 35 | else 36 | if [ -n "$PORT" ] 37 | then 38 | HSTR=$HOST:$PORT 39 | else 40 | HSTR=$HOST 41 | fi 42 | fi 43 | if ! egrep "^#.*$HSTR" $CONFIG >/dev/null 44 | then 45 | exit 0 46 | fi 47 | cp -a $CONFIG $TMPFILE 48 | sed -i "s/#\(.*$HSTR.*\)/\1/g" $TMPFILE 49 | if ! 
diff $CONFIG $TMPFILE >/dev/null 50 | then 51 | cp -a $TMPFILE $CONFIG 52 | /usr/bin/systemctl reload nginx 53 | fi 54 | -------------------------------------------------------------------------------- /monitoring/sahipro/sbin/restart_sahipro.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Wait a moment so cronjob check does not fail 4 | # 5 | SAHI_HOME=/neteye/local/sahipro 6 | if [ -e /etc/sysconfig/sahipro ] 7 | then 8 | . /etc/sysconfig/sahipro 9 | fi 10 | PROG=$(basename $SAHI_HOME) 11 | # 12 | # If no enabled for runlevel on this host ignore command 13 | # 14 | if ! systemctl status $PROG |grep Loaded|grep enabled >/dev/null 15 | then 16 | exit 0 17 | fi 18 | 19 | if [ "$1" = "-f" ] 20 | then 21 | shift 22 | WAIT=0 23 | else 24 | sleep $[ ( $RANDOM % 20 ) + 2 ]s 25 | WAIT=60 26 | fi 27 | 28 | #/usr/bin/ssh nginx.neteyelocal /neteye/shared/monitoring/bin/nginx_disable_server.sh $(hostname -s).neteyelocal:9999 29 | while [ $WAIT -gt 0 ] 30 | do 31 | if ! ps -ef | grep -v grep | egrep 'phantomjs|firefox|chrome' >/dev/null 32 | then 33 | WAIT=0 34 | fi 35 | WAIT=$(expr $WAIT - 1) 36 | sleep 1 37 | done 38 | /usr/bin/systemctl stop $PROG 39 | pkill -9 phantomjs 40 | pkill -9 firefox 41 | pkill -9 chrome 42 | rm -f $SAHI_HOME/userdata/database/* 43 | /usr/bin/systemctl start $PROG 44 | #/usr/bin/ssh nginx.neteyelocal /neteye/shared/monitoring/bin/nginx_enable_server.sh $(hostname -s).neteyelocal:9999 45 | -------------------------------------------------------------------------------- /monitoring/sahipro/vnc/xstartup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | unset SESSION_MANAGER 4 | unset DBUS_SESSION_BUS_ADDRESS 5 | /etc/X11/xinit/xinitrc 6 | # Assume either Gnome or KDE will be started by default when installed 7 | # We want to kill the session automatically in this case when user logs out. 
In case you modify 8 | # /etc/X11/xinit/Xclients or ~/.Xclients yourself to achieve a different result, then you should 9 | # be responsible to modify below code to avoid that your session will be automatically killed 10 | if [ -e /usr/bin/gnome-session -o -e /usr/bin/startkde ]; then 11 | vncserver -kill $DISPLAY 12 | fi 13 | xsetroot -solid grey 14 | vncconfig -iconic & 15 | xterm -geometry 80x24+10+10 -ls -title "$VNCDESKTOP Desktop" & 16 | xhost + 17 | metacity & 18 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_collectors/webhook/eventlog_collector.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "eventlog", 3 | "token": "atoken", 4 | "collector_config": { 5 | "event_type": "eventlog", 6 | "payload": { 7 | "source": "logstash", 8 | "host": "${host}", 9 | "message": "${message}", 10 | "winlog": "${winlog}" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_collectors/webhook/generic_webhook_collector.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "generic_webhook", 3 | "token": "atoken", 4 | "collector_config": { 5 | "event_type": "generic_webhook", 6 | "payload": { 7 | "original": "${@}" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_logrotate.conf: -------------------------------------------------------------------------------- 1 | /neteye/shared/tornado/data/archive/snmptrap/all_events.log { 2 | daily 3 | rotate 7 4 | copy 5 | compress 6 | notifempty 7 | copytruncate 8 | dateext 9 | dateyesterday 10 | } 11 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_rule_icinga.md: -------------------------------------------------------------------------------- 1 | ## Configure tornado rule with Icinga2 action 2 | 3 | Having completed the setup and a simple archive rule, we can extend this concept to set a status in icinga2 monitoring. 4 | 5 | Assume having a host "event_results" and a passive service "test event status" 6 | 7 | - define an api user role for tornado 8 | - copy previous rule into a new file with HIGHER number i.e. 002- and 003- 9 | - define action of type icinga and filter for desired host/service 10 | 11 | ### Define Icinga2 API user 12 | 13 | Define a new api user with suitable permissions. 14 | 15 | ### Define rule action 16 | 17 | Dump of action section of previous rule 18 | ``` 19 | "actions": [ 20 | { 21 | "id": "icinga2", 22 | "payload": { 23 | "icinga2_action_name": "process-check-result", 24 | "icinga2_action_payload": { 25 | "exit_status": "1", 26 | "plugin_output": "${event.payload.subject}", 27 | "filter": "host.name==\"event_results\" && service.name==\"test event status\"", 28 | "type": "Service" 29 | } 30 | } 31 | } 32 | ] 33 | 34 | ``` 35 | 36 | Validate configuration, then restart tornado, send the event again an check the log file: 37 | ``` 38 | # tornado check 39 | # systemctl restart tornado.service 40 | ``` 41 | After sending the event the tornado log reports the following action result: 42 | ``` 43 | [2020-01-14][16:55:04][tornado_engine::executor::icinga2][DEBUG] Icinga2 API request completed successfully. 
Response body: b"{\"results\":[{\"code\":200.0,\"status\":\"Successfully processed check result for object 'event_results!test event status'.\"}]}" 44 | ``` 45 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_rules/email/filter_email.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "This matches all emails", 3 | "active": true, 4 | "filter": { 5 | "type": "equal", 6 | "first": "${event.type}", 7 | "second": "email" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_rules/email/rules/001_log_all_emails.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "all_emails", 3 | "description": "This matches all emails", 4 | "continue": true, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": { 8 | "type": "AND", 9 | "operators": [ 10 | { 11 | "type": "equal", 12 | "first": "${event.type}", 13 | "second": "email" 14 | }, 15 | { 16 | "type": "regex", 17 | "regex": "MyMessage.*", 18 | "target": "${event.payload.subject}" 19 | } 20 | ] 21 | }, 22 | "WITH": {} 23 | }, 24 | "actions": [ 25 | { 26 | "id": "archive", 27 | "payload": { 28 | "sender": "${event.payload.from}", 29 | "subject": "${event.payload.subject}", 30 | "event": "${event}", 31 | "archive_type": "archive_mail" 32 | } 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_rules/email/rules/010_icingaAction_all_emails.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "forward all incoming emails as warning event to icinga", 3 | "continue": true, 4 | "active": true, 5 | "constraint": { 6 | "WHERE": { 7 | "type": "AND", 8 | "operators": [ 9 | { 10 | "type": "equal", 11 | "first": "${event.type}", 12 | "second": "email" 13 | } 14 | ] 15 | }, 16 | "WITH": { 17 | "subject": { 18 | "from": "${event.payload.subject}", 19 | "regex": { 20 | "match": ".*", 21 | "group_match_idx": 0 22 | } 23 | } 24 | } 25 | }, 26 | "actions": [ 27 | { 28 | "id": "icinga2", 29 | "payload": { 30 | "icinga2_action_name": "process-check-result", 31 | "icinga2_action_payload": { 32 | "exit_status": "1", 33 | "plugin_output": "${event.payload.subject}", 34 | "filter": "host.name==\"tornado-generic-host\" && service.name==\"Generic Emails event\"", 35 | "type": "Service" 36 | } 37 | } 38 | } 39 | ] 40 | } 41 | 42 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_rules/other_rules/others_filter.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "This matches all others", 3 | "active": true, 4 | "filter": { 5 | "type": "equal", 6 | "first": "${event.type}", 7 | "second": "others" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_rules/webhook/eventlog.json: -------------------------------------------------------------------------------- 1 | { 2 | "description":"Eventlog processing rule", 3 | "active": true, 4 | "filter": { 5 | "type": "AND", 6 | "operators": [ 7 | { 8 | "type": "equal", 9 | "first": "${event.type}", 10 | "second": "eventlog" 11 | } 12 | ] 13 | } 14 | } 15 | -------------------------------------------------------------------------------- 
/monitoring/tornado/tornado_rules/webhook/eventlog/010_eventlog.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Eventlog 7036 - Event Service Restart", 3 | "active": true, 4 | "continue": true, 5 | "constraint": { 6 | "WHERE": { 7 | "type": "AND", 8 | "operators": [ 9 | { 10 | "type": "equal", 11 | "first": "${event.payload.winlog.event_id}", 12 | "second": 7036.0 13 | } 14 | ] 15 | }, 16 | "WITH": {} 17 | }, 18 | "actions": [ 19 | { 20 | "id": "script", 21 | "payload": { 22 | "script": "/usr/bin/smssend +10123456789 \"Message from NetEye4: ${event.payload.subject}\"" 23 | } 24 | } 25 | ] 26 | } 27 | 28 | -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/email/filter.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Email events from \"tornado_email_collector\" service", 3 | "active": true, 4 | "filter": { 5 | "type": "AND", 6 | "operators": [ 7 | { 8 | "type": "equals", 9 | "first": "${event.type}", 10 | "second": "email" 11 | } 12 | ] 13 | } 14 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/email/rules/0000000000_generic_archive_all_email_events.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "generic_archive_all_email_events", 3 | "description": "Archive all Rule", 4 | "continue": true, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": null, 8 | "WITH": {} 9 | }, 10 | "actions": [ 11 | { 12 | "id": "archive", 13 | "payload": { 14 | "archive_type": "${event.type}", 15 | "event": "${event}" 16 | } 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/email/rules/0000000010_sample_regex_with_monitoring_action.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sample_regex_with_monitoring_action", 3 | "description": "Sample Regex Rule with Monitoring Action", 4 | "continue": true, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": { 8 | "type": "AND", 9 | "operators": [ 10 | { 11 | "type": "regex", 12 | "regex": ".*tornado test.*", 13 | "target": "${event.payload.subject}" 14 | }, 15 | { 16 | "type": "regex", 17 | "regex": ".*root.*", 18 | "target": "${event.payload.from}" 19 | } 20 | ] 21 | }, 22 | "WITH": {} 23 | }, 24 | "actions": [ 25 | { 26 | "id": "icinga2", 27 | "payload": { 28 | "icinga2_action_payload": { 29 | "type": "Service", 30 | "exit_status": "0", 31 | "plugin_output": "Output message", 32 | "filter": "host.name==\"neteye\" && service.name ==\"email_event\"", 33 | "performance_data": [] 34 | }, 35 | "icinga2_action_name": "process-check-result" 36 | } 37 | } 38 | ] 39 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/snmptrap/filter.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "SNMP Traps collected by snmptrapd (configuration in /neteye/shared/snmptrapd/)", 3 | "active": true, 4 | "filter": { 5 | "type": "AND", 6 | "operators": [ 7 | { 8 | "type": "equals", 9 | "first": "${event.type}", 10 | "second": "snmptrapd" 11 | } 12 | ] 13 | } 14 | } 
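For reference, a minimal hand-written event that the snmptrap filter above would accept, and that the sysUpTime rule further below would also match, could look like the sketch that follows. It is only an illustration trimmed to the fields referenced by this filter and its rules; the actual events produced by the snmptrapd collector contain additional fields and their exact structure may differ.
```
{
  "type": "snmptrapd",
  "payload": {
    "oids": {
      "DISMAN-EVENT-MIB::sysUpTimeInstance": {
        "content": "12 Days, 03:44:12.00"
      }
    }
  }
}
```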
-------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/snmptrap/rules/0000000000_generic_archive_all_snmptrap_events.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "generic_archive_all_snmptrap_events", 3 | "description": "Archive all Rule", 4 | "continue": true, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": null, 8 | "WITH": {} 9 | }, 10 | "actions": [ 11 | { 12 | "id": "archive", 13 | "payload": { 14 | "archive_type": "${event.type}", 15 | "event": "${event}" 16 | } 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/snmptrap/rules/0000000010_sysupdtime_create_update_monitoring_object.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sysupdtime_create_update_monitoring_object", 3 | "description": "Event Heartbeat creates or updates monitoring object", 4 | "continue": true, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": { 8 | "type": "AND", 9 | "operators": [ 10 | { 11 | "type": "regex", 12 | "regex": ".*Days.*", 13 | "target": "${event.payload.oids.\"DISMAN-EVENT-MIB::sysUpTimeInstance\".content}" 14 | } 15 | ] 16 | }, 17 | "WITH": {} 18 | }, 19 | "actions": [ 20 | { 21 | "id": "smart_monitoring_check_result", 22 | "payload": { 23 | "service": { 24 | "check_command": "dummy", 25 | "object_name": "Heartbeat Event" 26 | }, 27 | "check_result": { 28 | "plugin_output": "SNMP Trap message: ${event.payload.oids.\"DISMAN-EVENT-MIB::sysUpTimeInstance\".content}", 29 | "exit_status": "1" 30 | }, 31 | "host": { 32 | "vars": { 33 | "location": "Bozen" 34 | }, 35 | "address": "127.0.0.1", 36 | "object_name": "SNMPTRAP DEMO", 37 | "imports": "generic-host" 38 | } 39 | } 40 | } 41 | ] 42 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/webhook/filter.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Webhook HTTP call collected by from \"tornado_webhook_collector\" service", 3 | "active": true, 4 | "filter": { 5 | "type": "AND", 6 | "operators": [ 7 | { 8 | "type": "equals", 9 | "first": "${event.type}", 10 | "second": "hsg" 11 | } 12 | ] 13 | } 14 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/webhook/hsg/filter.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Host - Service Generator: Webhook to generate Objects in Icinga and set status", 3 | "active": true, 4 | "filter": { 5 | "type": "AND", 6 | "operators": [] 7 | } 8 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/webhook/hsg/rules/0000000000_generic_archive_all_hsg_events.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "generic_archive_all_hsg_events", 3 | "description": "Archive all incoming Host-Service generation events", 4 | "continue": true, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": { 8 | "type": "AND", 9 | "operators": [ 10 | { 11 | "type": "regex", 12 | "regex": ".*hsg.*", 13 | "target": "${event.type}" 14 | } 15 | ] 16 | }, 17 | "WITH": {} 18 | }, 19 | 
"actions": [ 20 | { 21 | "id": "archive", 22 | "payload": { 23 | "archive_type": "${event.type}", 24 | "event": "${event}" 25 | } 26 | } 27 | ] 28 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/webhook/hsg/rules/0000000010_create_only_new_host_object.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "create_only_new_host_object", 3 | "description": "Create only a new host object in Director using a template. Creation in Icinga is set to false.", 4 | "continue": false, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": { 8 | "type": "AND", 9 | "operators": [ 10 | { 11 | "type": "regex", 12 | "regex": ".*generic-host.*", 13 | "target": "${event.payload.data.host_template}" 14 | } 15 | ] 16 | }, 17 | "WITH": {} 18 | }, 19 | "actions": [ 20 | { 21 | "id": "director", 22 | "payload": { 23 | "icinga2_live_creation": false, 24 | "action_payload": { 25 | "display_name": "${event.payload.data.host_displayname}", 26 | "object_name": "${event.payload.data.host_name}", 27 | "object_type": "object", 28 | "address": "${event.payload.data.host_address}", 29 | "vars": { 30 | "created_by": "tornado" 31 | }, 32 | "imports": "${event.payload.data.host_template}" 33 | }, 34 | "action_name": "create_host" 35 | } 36 | } 37 | ] 38 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/config/webhook/hsg/rules/0000000020_monitoring_update_icinga_object_status.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "monitoring_update_icinga_object_status", 3 | "description": "Monitoring Result: Update Icinga2 Object status", 4 | "continue": true, 5 | "active": true, 6 | "constraint": { 7 | "WHERE": { 8 | "type": "AND", 9 | "operators": [ 10 | { 11 | "type": "regex", 12 | "regex": ".*hsg.*", 13 | "target": "${event.type}" 14 | } 15 | ] 16 | }, 17 | "WITH": {} 18 | }, 19 | "actions": [ 20 | { 21 | "id": "icinga2", 22 | "payload": { 23 | "icinga2_action_name": "process-check-result", 24 | "icinga2_action_payload": { 25 | "exit_status": "${event.payload.exit_status}", 26 | "type": "Host", 27 | "plugin_output": "${event.payload.plugin_output}", 28 | "filter": "host.name==\"example.localdomain\"" 29 | } 30 | } 31 | } 32 | ] 33 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_sample_rules/draft_001/data.json: -------------------------------------------------------------------------------- 1 | { 2 | "created_ts_ms": 1599566344557, 3 | "updated_ts_ms": 1608720310630, 4 | "user": "root", 5 | "draft_id": "draft_001" 6 | } -------------------------------------------------------------------------------- /monitoring/tornado/tornado_setup.md: -------------------------------------------------------------------------------- 1 | # Tornado collectors 2 | 3 | ## Email collector 4 | 5 | Configure `procmail` to accept emails for host. If hostname does not correspond to result from `hostnamectl` register the fqdn in main.cf of postfix: 6 | ``` 7 | # cat /etc/postfix/main.cf 8 | ... 9 | # INTERNET HOST AND DOMAIN NAMES 10 | # 11 | # The myhostname parameter specifies the internet hostname of this 12 | # mail system. The default is to use the fully-qualified domain name 13 | # from gethostname(). $myhostname is used as a default value for many 14 | # other configuration parameters. 
15 | # 16 | myhostname = cluster_neteye.mydomain.lan 17 | ... 18 | Configure the mailbox_command 19 | mailbox_command = /usr/bin/procmail -t -Y -a $h -d $u 20 | ... 21 | ``` 22 | 23 | Configure a `procmail` command for an event gateway user i.e. `eventgw` 24 | ``` 25 | # cat /home/eventgw/.procmailrc 26 | SHELL=/bin/sh 27 | :0 28 | | /usr/bin/socat - /var/run/tornado_email_collector/email.sock 2>&1 29 | 30 | ``` 31 | 32 | ### Troubleshooting hint 33 | 34 | Enable debug level and write into dedicated file. 35 | Then restart service. 36 | ``` 37 | # cat /neteye/shared/tornado/conf/collectors/email/email_collector.toml 38 | [logger] 39 | ... 40 | level = "debug" 41 | ... 42 | file_output_path = "/neteye/shared/tornado/log//email_collector.log" 43 | 44 | ``` 45 | -------------------------------------------------------------------------------- /monitoring/tornado/webhook_sample/003_create_live_hosts.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "notify_event", 3 | "name": "notify incoming event", 4 | "description": "Event acceptor with feature to register new host in monitoring publish status", 5 | "continue": true, 6 | "active": true, 7 | "constraint": { 8 | "WHERE": { 9 | "type": "AND", 10 | "operators": [ 11 | { 12 | "type": "equal", 13 | "first": "${event.type}", 14 | "second": "generic_event" 15 | } 16 | ] 17 | }, 18 | "WITH": {} 19 | }, 20 | "actions": [ 21 | { 22 | "id": "script", 23 | "payload": { 24 | "script": "/neteye/shared/tornado/exercise/create_host_live.sh ${event.payload.servicename} ${event.payload.state} ${event.payload.output}" 25 | } 26 | } 27 | ] 28 | } 29 | -------------------------------------------------------------------------------- /neteye4/LDAP_AD_Integration.md: -------------------------------------------------------------------------------- 1 | # LDAPS Integration 2 | 3 | When using provided resources for LDAP-AD integration there might be the PHP client certificate error. 4 | [Here additional info regarding openldap usage](https://www.openldap.org/lists/openldap-technical/201110/msg00154.html) 5 | 6 | To trust provided AD certificate ignoring CA root certificate errors add the `TLS_REQCERT` directive to `/etc/openldap/ldap.conf` 7 | ``` 8 | # echo "TLS_REQCERT allow" >> /etc/openldap/ldap.conf 9 | ``` 10 | -------------------------------------------------------------------------------- /neteye4/backup/README.md: -------------------------------------------------------------------------------- 1 | # Script to backup configuration files and databases of NetEye 2 | 3 | ## What is under backup: 4 | - All active Databases 5 | - All configurations, log files and data files of standalone neteye services 6 | - All configurations, log files and data files of cluster neteye services 7 | - Allow to include/exclude additional paths 8 | 9 | ## Requirement for backup on remote cifs mount 10 | 11 | ``` 12 | yum install cifs-utils pigz 13 | ``` 14 | 15 | ## Register remote mount point 16 | ``` 17 | vi /etc/fstab 18 | //mydomain.lan/Backups/NetEye4 /cifs/backup cifs defaults,auto,username=neteye,password=secret,dom=mydomain.lan,file_mode=0666,dir_mode=0777 0 0 19 | ``` 20 | 21 | ## Raise MySQL max connections limit 22 | ``` 23 | cat >>/neteye/shared/mysql/conf/my.cnf.d/neteye.cnf < /dev/null 2>&1 && /usr/local/sbin/backup_neteye.sh --dbonly >/dev/null 42 | #Only local backup neteye data at 20:00 (no copy to cifs share). 
43 | 0 20 * * * /usr/local/sbin/backup_neteye.sh NONE >/dev/null 44 | ``` 45 | -------------------------------------------------------------------------------- /neteye4/backup/neteye-backup.conf: -------------------------------------------------------------------------------- 1 | # Extra exlude folders 2 | EXCLUDELIST="/opt/dell /var/lib/mysql/mysql.sock /neteye/local/elasticsearch/data /neteye/local/elasticsearch/log /neteye/shared/influxdb/data /neteye/shared/mysql/data" 3 | NETBACKUPDIR="/cifs/backup" 4 | -------------------------------------------------------------------------------- /neteye4/etc/mariadb/neteye.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | innodb_adaptive_hash_index=0 3 | max_allowed_packet = 64M 4 | max-connect-errors = 1000000 5 | tmp_table_size = 2G 6 | max_heap_table_size = 2G 7 | max-connections = 200 8 | open-files-limit = 65535 9 | table-definition-cache = 4096 10 | table-open-cache = 4096 11 | innodb-flush-method = O_DIRECT 12 | innodb-log-files-in-group = 2 13 | innodb_log_file_size = 1GB 14 | innodb-flush-log-at-trx-commit = 2 15 | innodb-file-per-table = 1 16 | innodb_buffer_pool_size = 32GB 17 | innodb_log_buffer_size = 4M 18 | innodb_sort_buffer_size = 16000000 19 | sort_buffer_size = 32M 20 | innodb_thread_concurrency = 0 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /neteye4/neteye_secure_install/990_customer_logo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LOGO_LIGHT="/usr/share/icingaweb2/public/img/neteye/logo-light.png" 4 | LOGO_LIGHT_CUSTOM="/usr/share/icingaweb2/public/img/neteye/logo-light.png.customer" 5 | 6 | if [ -f $LOGO_LIGHT_CUSTOM ] ; then 7 | cp -f ${LOGO_LIGHT_CUSTOM} ${LOGO_LIGHT} 8 | echo "[+] Install customer NetEye Logo Light." 9 | fi 10 | -------------------------------------------------------------------------------- /neteye4/neteye_secure_install/README.md: -------------------------------------------------------------------------------- 1 | # Sample script modules for neteye_secure_install 2 | 3 | ## How to install files: 4 | 5 | Copy file into folder: /usr/share/neteye/secure_install/ 6 | 7 | ### Files: 8 | 990_customer_logo.sh: 9 | Replace logo "Light" with customer logo. 
10 | Place a custom logo here: /usr/share/icingaweb2/public/img/neteye/logo-light.png.customer 11 | 12 | -------------------------------------------------------------------------------- /neteye4/neteyeshare/README.md: -------------------------------------------------------------------------------- 1 | 2 | # HTTP configuration to publish the neteyeshare via Apache 3 | 4 | The apache config file is placed by setup script automatically in httpd `conf.d/` folder 5 | ( default `/etc/httpd/conf.d/neteye-share.conf` ) 6 | -------------------------------------------------------------------------------- /neteye4/neteyeshare/neteye-share.conf: -------------------------------------------------------------------------------- 1 | # 2 | # This configuration file allows the neteye client software to be accessed at 3 | # http://localhost/neteye-client-software/ 4 | # 5 | Alias /neteyeshare /neteye/shared/httpd/neteyeshare 6 | 7 | 8 | Options Indexes 9 | # Formating improvement of index view 10 | IndexOptions FancyIndexing FoldersFirst HTMLTable VersionSort NameWidth=* 11 | AllowOverride all 12 | Order allow,deny 13 | Allow from all 14 | Require all granted 15 | 16 | -------------------------------------------------------------------------------- /neteye4/scripts/ansible/README.md: -------------------------------------------------------------------------------- 1 | # Synchronize system files to remote hosts 2 | 3 | ``` 4 | ansible-playbook -i inventory.ini play_monitoring_files.yml 5 | ansible-playbook -i inventory.ini play_system_files.yml 6 | ``` 7 | -------------------------------------------------------------------------------- /neteye4/scripts/ansible/inventory.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | 3 | [neteye] 4 | neteye4_n1.neteye.lab 5 | neteye4_s1.neteye.lab 6 | neteye4_s2.neteye.lab 7 | -------------------------------------------------------------------------------- /neteye4/scripts/ansible/play_monitoring_files.yml: -------------------------------------------------------------------------------- 1 | - name: Copy system files with owner and permissions 2 | hosts: all 3 | 4 | tasks: 5 | - name: Synchronize monitoring PluginContribDir 6 | synchronize: 7 | src: /neteye/shared/monitoring/plugins 8 | dest: /neteye/shared/monitoring 9 | # - name: Remove unwanted files and folders 10 | # file: 11 | # state: absent 12 | # path: /neteye/shared/monitoring/plugins/plugins/ 13 | -------------------------------------------------------------------------------- /neteye4/scripts/ansible/play_system_files.yml: -------------------------------------------------------------------------------- 1 | - name: Copy system files with owner and permissions 2 | hosts: all 3 | 4 | tasks: 5 | - name: Copy system files with owner and permissions 6 | ansible.builtin.copy: 7 | src: /etc/hosts 8 | dest: /etc/hosts 9 | owner: root 10 | group: root 11 | mode: '0644' 12 | -------------------------------------------------------------------------------- /neteye4/scripts/autoumount: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zampat/neteye4/0bbdfeca9261e8e5db30a6756334ef9159eb398c/neteye4/scripts/autoumount -------------------------------------------------------------------------------- /neteye4/scripts/autoumount.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | main(int argc, char *argv[]) 5 | { 6 | int ret; 7 | 8 | ret=fork(); 9 | if 
(ret == 0) { 10 | setuid(geteuid()); 11 | execle("/bin/sh","sh","-c", "killall -USR1 automount >/dev/null 2>&1",0,0); 12 | } 13 | } 14 |
-------------------------------------------------------------------------------- /neteye4/scripts/drbd_fix.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Helper script to force a re-sync of DRBD devices. It is useful to re-align the DRBD resources in your test environment. 4 | # ADVICE: Never use this script in production! Consider reusing parts of these commands to restore your DRBD sync status in production. 5 | # 6 | services=(grafana tornado_webhook_collector tornado_icinga2_collector tornado_email_collector slmd mariadb influxdb tornado snmptrapd nginx nagvis icingaweb2 icinga2-master httpd nats-server) 7 | host_local="neteye4clu01.patrick.lab" 8 | 9 | for service in "${services[@]}" 10 | do 11 | echo "[i] Starting for service $service" 12 | drbdadm primary $service --force 13 | drbdadm adjust $service 14 | 15 | for i in neteye4clu02.patrick.lab neteye4clu03.patrick.lab 16 | do 17 | ssh $i drbdadm disconnect $service 18 | ssh $i drbdadm -- --discard-my-data connect $service $host_local 19 | ssh $i drbdadm adjust $service 20 | done 21 | 22 | sleep 1 23 | drbdadm adjust $service 24 | drbdadm secondary $service 25 | 26 | sleep 2 27 | 28 | extra_primary_host="neteye4clu03.patrick.lab" 29 | extra_secondary_host="neteye4clu02.patrick.lab" 30 | 31 | ssh $extra_primary_host drbdadm primary $service --force 32 | ssh $extra_secondary_host drbdadm disconnect $service 33 | ssh $extra_secondary_host drbdadm -- --discard-my-data connect $service $extra_primary_host 34 | 35 | 36 | ssh $extra_primary_host drbdadm secondary $service 37 | 38 | ssh $extra_primary_host drbdadm adjust $service 39 | ssh $extra_secondary_host drbdadm adjust $service 40 | echo "[+] Done" 41 | done 42 |
-------------------------------------------------------------------------------- /neteye4/scripts/eventgw_email_forwarder.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | 4 | TMPFILE=$(mktemp) 5 | trap 'rm -f $TMPFILE; exit 1' 1 2 15 6 | trap 'rm -f $TMPFILE' 0 7 | 8 | cat >$TMPFILE 9 | if grep 'Content-Transfer-Encoding.*base64' $TMPFILE >/dev/null 10 | then 11 | cat $TMPFILE | /usr/bin/socat - /var/run/eventhandler/rw/email.socket 12 | elif grep 'Encoding.*quoted-printable' $TMPFILE >/dev/null 13 | then 14 | cat $TMPFILE | perl -pe 'use MIME::QuotedPrint; $_=MIME::QuotedPrint::decode($_);' | /usr/bin/socat - /var/run/eventhandler/rw/email.socket 15 | else 16 | cat $TMPFILE | /usr/bin/socat - /var/run/eventhandler/rw/email.socket 17 | fi 18 |
-------------------------------------------------------------------------------- /neteye4_operations/neteye_backup/mariadb_backup/README.md: -------------------------------------------------------------------------------- 1 | # Mariadb backup 2 | https://dev.mysql.com/doc/mysql-enterprise-backup/8.0/en/mysqlbackup.html 3 | 4 | Define the backup directories used by the script and create them with mkdir before the first run (see the example below).
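A minimal sketch of that preparation step, using the default `backup_base` from the `mysqlbackup.sh` script that follows (the script creates its own subdirectories if the base directory exists); the cron schedule and the install path of the script are only illustrative assumptions:
```
# Create the base backup directory expected by mysqlbackup.sh (default backup_base)
mkdir -p /data/backup/mariabackup_tmp

# Optionally schedule a nightly run; schedule and script path below are examples only
echo '30 1 * * * root /usr/local/sbin/mysqlbackup.sh >> /var/log/mariadb_backup.log 2>&1' > /etc/cron.d/mariadb_backup
```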
5 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_backup/mariadb_backup/mysqlbackup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | backup_date="$(date +%Y%m%d-%H%M%S)" 4 | datadir=/neteye/shared/mysql/data 5 | backup_base=/data/backup/mariabackup_tmp 6 | target_dir=${backup_base}/mariabackup_${backup_date} 7 | tmpdir=${backup_base}/tmp 8 | dumpdir=${backup_base}/mariadump_${backup_date} 9 | 10 | 11 | if [ -d "${backup_base}" ]; then 12 | mkdir -p "${tmpdir}" "${target_dir}" "${dumpdir}" || { 13 | echo "ERROR: Failed to create backup directories." 14 | exit 1 15 | } 16 | else 17 | echo "ERROR: backup base directory not set or not exists" 18 | exit 1 19 | fi 20 | 21 | 22 | #for DB in $(mysql -sN -e "show databases;"); do 23 | # echo "Dumping database ${DB}" 24 | # mysqldump --single-transaction "$DB" > "${dumpdir}/${DB}.sql" 25 | # if [ $? -ne 0 ]; then 26 | # echo -e "\nERROR: Failed to dump database ${DB}\n" ;sleep 5 27 | # # exit 1 28 | # else 29 | # echo "Dumping database ${DB} is done" 30 | # fi 31 | #done 32 | 33 | soft_fd=$(ulimit -Sn) 34 | if (( $soft_fd < 4096 )); then 35 | ulimit -Sn 4096 36 | fi 37 | 38 | mariabackup --backup --datadir="${datadir}" \ 39 | --target-dir="${target_dir}" \ 40 | --tmpdir="${tmpdir}" 41 | if [ $? -ne 0 ]; then 42 | echo "ERROR: MariaBackup backup failed." 43 | ulimit -Sn $soft_fd 44 | exit 1 45 | fi 46 | 47 | mariabackup --prepare --export --target-dir="${target_dir}" 48 | if [ $? -ne 0 ]; then 49 | echo "ERROR: MariaBackup prepare/export failed." 50 | ulimit -Sn $soft_fd 51 | exit 1 52 | fi 53 | 54 | ulimit -Sn $soft_fd 55 | 56 | exit 0 57 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_config_tuning/README.md: -------------------------------------------------------------------------------- 1 | ### Tuning of settings for ideal performance 2 | 3 | Update local dnf repo mapping to point to internal pulp 4 | ``` 5 | ansible-playbook -i inventory/neteye_vms.ini neteye_module_settings_tuning.yum 6 | ``` 7 | 8 | Arguments for running playbook: 9 | -v to run in debug mode 10 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_config_tuning/config.yml: -------------------------------------------------------------------------------- 1 | search_data: 2 | - file: "/etc/systemd/system/keycloak.service.d/99-custom.conf" 3 | string: "JAVA_OPTS" 4 | suggestion: "[Service] \n Environment=\"JAVA_OPTS=-Xms128m -Xmx1024m\"" 5 | # - file: "/etc/systemd/system/elasticrvice.d/99-custom.conf" 6 | # string: "JAVA_OPTS" 7 | # suggestion: "[Service] \n Environment=\"JAVA_OPTS=-Xms128m -Xmx1024m\"" 8 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_config_tuning/inventory/neteye_vms.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | 3 | 4 | [localhost] 5 | 127.0.0.1 6 | 7 | [neteye4_rhel8_n1] 8 | 192.168.88.180 9 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_config_tuning/neteye_module_settings_tuning.yum: -------------------------------------------------------------------------------- 1 | # neteye modules and services settings often need to be tuned for optimal operation. 
Ansible to verify and suggest configs to apply 2 | # 3 | 4 | - name: Check multiple files for specific strings 5 | hosts: localhost 6 | gather_facts: false 7 | vars_files: 8 | - config.yml # Load variables from the config file 9 | tasks: 10 | - name: Search files for corresponding strings 11 | command: grep -q "{{ item.string }}" "{{ item.file }}" 12 | register: grep_result 13 | ignore_errors: yes 14 | loop: "{{ search_data }}" 15 | 16 | - name: Display results 17 | debug: 18 | msg: > 19 | The string '{{ item.string }}' was found in '{{ item.file }}' 20 | when: grep_result.results[item_index].rc == 0 21 | loop: "{{ search_data }}" 22 | loop_control: 23 | index_var: item_index 24 | 25 | - name: Handle missing strings 26 | fail: 27 | msg: "The string '{{ item.string }}' was NOT found in '{{ item.file }}'. Suggested configuration: '{{ item.suggestion }}'" 28 | when: grep_result.results[item_index].rc != 0 29 | loop: "{{ search_data }}" 30 | loop_control: 31 | index_var: item_index 32 | 33 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_upgrade/repo2pulp/README.md: -------------------------------------------------------------------------------- 1 | ### Würth Internal commands. Repository and update procedures for test environments only 2 | 3 | Update local dnf repo mapping to point to internal pulp 4 | ''' 5 | ansible-playbook -i inventory/neteye_vms.ini point_repos_to_internal_pulp.yml 6 | ''' 7 | 8 | Arguments for running playbook: 9 | -v to run in debug mode 10 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_upgrade/repo2pulp/inventory/neteye_vms.ini: -------------------------------------------------------------------------------- 1 | [all] 2 | 3 | 4 | [localhost] 5 | 127.0.0.1 6 | 7 | [neteye4_rhel8_n1] 8 | 192.168.88.180 9 | -------------------------------------------------------------------------------- /neteye4_operations/neteye_upgrade/repo2pulp/point_repos_to_internal_pulp.yml: -------------------------------------------------------------------------------- 1 | # neteye test instance should point to internal pulp repo 2 | # 3 | --- 4 | - hosts: localhost 5 | any_errors_fatal: true 6 | gather_facts: false 7 | 8 | vars: 9 | path_dnf_mirror: "/etc/yum.repos.d/mirrors" 10 | 11 | #vars_files: 12 | # - defaults/pulp_defaults.yml 13 | 14 | tasks: 15 | 16 | - name: prerequisites | get the list of repo files in /etc/yum.repos.d/ 17 | find: 18 | paths: "{{ path_dnf_mirror }}" 19 | patterns: "*.mirror" 20 | register: repofiles 21 | 22 | - name: prerequisites | replace strings in files found 23 | replace: 24 | dest: "{{ item.path }}" 25 | backup: yes 26 | regexp: 'https://repo\.wuerth-phoenix\.com' 27 | replace: 'http://pulp2internal.wp.lan' 28 | loop: "{{ repofiles.files }}" 29 | -------------------------------------------------------------------------------- /scripts/005_git_submodules_init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "[i] 005: Initializing git submodules" 4 | #init provided submodules 5 | git submodule init 6 | 7 | #update provided submodules 8 | git submodule update 9 | -------------------------------------------------------------------------------- /scripts/010_init_neteyeshare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FOLDER_MONITORING=$1 4 | NETEYESHARE_ITOA=$2 5 | 6 | #Create folder structure 7 | if [ ! 
-d $FOLDER_MONITORING ] 8 | then 9 | echo "[+] 010: Creating neteyeshare folder structure" 10 | mkdir -p $FOLDER_MONITORING 11 | fi 12 | 13 | if [ ! -d ${NETEYESHARE_ITOA} ] 14 | then 15 | echo "[+] 010: Creating neteyeshare folder itoa" 16 | mkdir -p ${NETEYESHARE_ITOA} 17 | fi 18 | -------------------------------------------------------------------------------- /scripts/011_init_neteyeshare_weblink.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | HTTP_PASSWD_FOLDER="/neteye/shared/httpd" 4 | HTTP_PASSWD_FILE="${HTTP_PASSWD_FOLDER}/.htpasswd" 5 | HTTP_CONF_FILE="/etc/httpd/conf.d/neteye-share.conf" 6 | PWD_SHARE_LOGIN="/root/.pwd_neteye_configro" 7 | 8 | HTTP_USERNAME="configro" 9 | #HTTP_PASSWORD="R3ad0nLy_#12" 10 | HTTP_PASSWORD=`head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13 ; echo ''` 11 | 12 | 13 | #Create folder structure 14 | if [ ! -f ${HTTP_PASSWD_FILE} ] 15 | then 16 | 17 | echo "[+] 011: Configuration of neteyeshare/: registragion of httpd alias and creation of new HTTP user: ${HTTP_USERNAME}." 18 | echo " Setup of authentication for user: ${HTTP_USERNAME} in: ${HTTP_PASSWD_FILE}" 19 | mkdir -p ${HTTP_PASSWD_FOLDER} 20 | 21 | # Create new httpd user with password to protect some folders 22 | htpasswd -b -c ${HTTP_PASSWD_FILE} ${HTTP_USERNAME} ${HTTP_PASSWORD} 23 | 24 | if [ ! -f ${HTTP_CONF_FILE} ] 25 | then 26 | cp neteye4/neteyeshare/neteye-share.conf ${HTTP_CONF_FILE} 27 | fi 28 | 29 | echo ${HTTP_PASSWORD} >> ${PWD_SHARE_LOGIN} 30 | 31 | echo "[!] Now please reload service httpd to activate new neteyeshare weblink" 32 | echo " Hint: systemctl restart httpd.service" 33 | echo " " 34 | echo "[i] The neteyeshare comes with no authentication and can be accessed on web via: https://neteye_fqdn/neteyeshare" 35 | echo " Some configuration folders are protected by these credentials: (i.e. ./monitoring/agents/microsoft/icinga/configs/)" 36 | echo " Username: ${HTTP_USERNAME}" 37 | echo " Password: ${HTTP_PASSWORD}" 38 | echo " " 39 | echo " The password is stored in: ${PWD_SHARE_LOGIN}" 40 | echo " " 41 | fi 42 | -------------------------------------------------------------------------------- /scripts/022_get_icinga2_agent_rhel7.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FOLDER_MONITORING_AGENT_LINUX="$1/agents/linux/rhel7/icinga" 4 | ICINGA2_AGENT_VERSION=$2 5 | SRC_GIT_AGENT_SCRIPTS_FOLDER="./monitoring/agents/microsoft/icinga" 6 | 7 | 8 | if [ ! -f "${FOLDER_MONITORING_AGENT_LINUX}/icinga2-$ICINGA2_AGENT_VERSION-1.el7.icinga.x86_64.rpm" ] 9 | then 10 | echo "[i] 022: Installing Icinga Monitoring Agent for RHEL 7" 11 | mkdir -p $FOLDER_MONITORING_AGENT_LINUX 12 | ICINGA2_CORE_FILE="icinga2-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm" 13 | ICINGA2_BIN_FILE="icinga2-bin-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm" 14 | ICINGA2_COMMON_FILE="icinga2-common-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm" 15 | ICINGA2_IDOMYSQL_FILE="icinga2-ido-mysql-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm" 16 | 17 | # Loop trough all rpms 18 | ELEMENTS=( ICINGA2_CORE_FILE ICINGA2_BIN_FILE ICINGA2_COMMON_FILE ICINGA2_IDOMYSQL_FILE ) 19 | 20 | for FILE in ${ELEMENTS[@]} 21 | do 22 | # Check if Plugin already exists. If yes: backup first 23 | if [ ! 

--------------------------------------------------------------------------------
/scripts/005_git_submodules_init.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo "[i] 005: Initializing git submodules"
# Initialize the provided submodules
git submodule init

# Update the provided submodules
git submodule update

--------------------------------------------------------------------------------
/scripts/010_init_neteyeshare.sh:
--------------------------------------------------------------------------------
#!/bin/bash

FOLDER_MONITORING=$1
NETEYESHARE_ITOA=$2

# Create folder structure
if [ ! -d $FOLDER_MONITORING ]
then
    echo "[+] 010: Creating neteyeshare folder structure"
    mkdir -p $FOLDER_MONITORING
fi

if [ ! -d ${NETEYESHARE_ITOA} ]
then
    echo "[+] 010: Creating neteyeshare folder itoa"
    mkdir -p ${NETEYESHARE_ITOA}
fi

--------------------------------------------------------------------------------
/scripts/011_init_neteyeshare_weblink.sh:
--------------------------------------------------------------------------------
#!/bin/bash

HTTP_PASSWD_FOLDER="/neteye/shared/httpd"
HTTP_PASSWD_FILE="${HTTP_PASSWD_FOLDER}/.htpasswd"
HTTP_CONF_FILE="/etc/httpd/conf.d/neteye-share.conf"
PWD_SHARE_LOGIN="/root/.pwd_neteye_configro"

HTTP_USERNAME="configro"
#HTTP_PASSWORD="R3ad0nLy_#12"
# Generate a random 13-character alphanumeric password
HTTP_PASSWORD=`head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13 ; echo ''`


# Create folder structure
if [ ! -f ${HTTP_PASSWD_FILE} ]
then

    echo "[+] 011: Configuration of neteyeshare/: registration of httpd alias and creation of new HTTP user: ${HTTP_USERNAME}."
    echo "    Setup of authentication for user: ${HTTP_USERNAME} in: ${HTTP_PASSWD_FILE}"
    mkdir -p ${HTTP_PASSWD_FOLDER}

    # Create a new httpd user with password to protect some folders
    htpasswd -b -c ${HTTP_PASSWD_FILE} ${HTTP_USERNAME} ${HTTP_PASSWORD}

    if [ ! -f ${HTTP_CONF_FILE} ]
    then
        cp neteye4/neteyeshare/neteye-share.conf ${HTTP_CONF_FILE}
    fi

    echo ${HTTP_PASSWORD} >> ${PWD_SHARE_LOGIN}

    echo "[!] Now please reload the httpd service to activate the new neteyeshare weblink"
    echo "    Hint: systemctl restart httpd.service"
    echo " "
    echo "[i] The neteyeshare is published without authentication and can be accessed on the web via: https://neteye_fqdn/neteyeshare"
    echo "    Some configuration folders (i.e. ./monitoring/agents/microsoft/icinga/configs/) are protected by these credentials:"
    echo "    Username: ${HTTP_USERNAME}"
    echo "    Password: ${HTTP_PASSWORD}"
    echo " "
    echo "    The password is stored in: ${PWD_SHARE_LOGIN}"
    echo " "
fi
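
# For reference, a hypothetical sketch of the kind of httpd stanza that
# neteye-share.conf (copied above into /etc/httpd/conf.d/) would provide to
# publish the share and protect selected folders with the .htpasswd created by
# this script. The shipped neteye4/neteyeshare/neteye-share.conf is
# authoritative; the directory paths below are illustrative assumptions only.
#
#   Alias /neteyeshare /neteye/shared/httpd/neteyeshare
#
#   <Directory "/neteye/shared/httpd/neteyeshare/monitoring/agents/microsoft/icinga/configs">
#       AuthType Basic
#       AuthName "neteyeshare protected area"
#       AuthUserFile /neteye/shared/httpd/.htpasswd
#       Require valid-user
#   </Directory>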

--------------------------------------------------------------------------------
/scripts/022_get_icinga2_agent_rhel7.sh:
--------------------------------------------------------------------------------
#!/bin/bash

FOLDER_MONITORING_AGENT_LINUX="$1/agents/linux/rhel7/icinga"
ICINGA2_AGENT_VERSION=$2
SRC_GIT_AGENT_SCRIPTS_FOLDER="./monitoring/agents/microsoft/icinga"


if [ ! -f "${FOLDER_MONITORING_AGENT_LINUX}/icinga2-$ICINGA2_AGENT_VERSION-1.el7.icinga.x86_64.rpm" ]
then
    echo "[i] 022: Installing Icinga Monitoring Agent for RHEL 7"
    mkdir -p $FOLDER_MONITORING_AGENT_LINUX
    ICINGA2_CORE_FILE="icinga2-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm"
    ICINGA2_BIN_FILE="icinga2-bin-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm"
    ICINGA2_COMMON_FILE="icinga2-common-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm"
    ICINGA2_IDOMYSQL_FILE="icinga2-ido-mysql-${ICINGA2_AGENT_VERSION}-1.el7.icinga.x86_64.rpm"

    # Loop through all rpms. The array holds variable names; ${!FILE} expands
    # to the value of the variable whose name is stored in FILE.
    ELEMENTS=( ICINGA2_CORE_FILE ICINGA2_BIN_FILE ICINGA2_COMMON_FILE ICINGA2_IDOMYSQL_FILE )

    for FILE in ${ELEMENTS[@]}
    do
        # Download the rpm only if it is not already present
        if [ ! -f ${FOLDER_MONITORING_AGENT_LINUX}/${!FILE} ]
        then
            wget http://packages.icinga.com/epel/7Client/release/x86_64/${!FILE} -O ${FOLDER_MONITORING_AGENT_LINUX}/${!FILE}
        fi
    done

fi

--------------------------------------------------------------------------------
/scripts/030_ressources_init.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Add default resources to the icingaweb2 config
# file: resources.ini

# Define Variables
# ARG1: ICINGA2_CONF_HOME_DIR="/neteye/shared/icingaweb2/conf"
FILE_RESSOURCES="$1/resources.ini"

# Check if a demo resource for LDAP exists
grep "ldap" $FILE_RESSOURCES > /dev/null 2>&1
RES=$?
if [ $RES -ne 0 ]
then
    echo "[i] 030: Adding LDAP configuration sample to Icinga2 Resources."

    cat >>$FILE_RESSOURCES <<EOM

[ldap_sample (used for auth.>user and auth.>groups)]
type = "ldap"
hostname = "mydomain.lan"
port = "389"
encryption = "none"
root_dn = "dc=mydomain,dc=local"
bind_dn = "ldapRO@mydomain.local"
bind_pw = "password"

[ldap_multiple_DCs]
type = "ldap"
hostname = "ldap://dc1.mydomain.local:389 ldap://dc2.mydomain.local:389"
port = "389"
encryption = "none"
root_dn = "dc=mydomain,dc=local"
bind_dn = "ldapRO@mydomain.local"
bind_pw = "password"

[Sample remote MYSQL resource]
type = "db"
db = "mysql"
host = "192.168.200.200"
port = "3306"
dbname = "dbname"
username = "username"
password = "password"
charset = "utf8"
use_ssl = "0"
EOM

else
    echo "[ ] 030: LDAP configuration in Icinga2 Resources already exists."
fi

--------------------------------------------------------------------------------
/scripts/031_root_navigation_sample.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Define Variables
# ARG1: ICINGA2_CONF_HOME_DIR="/neteye/shared/icingaweb2/conf"
DIR_PREFERENCES="$1/preferences"
DIR_RESSOURCES="$1/preferences/root"
FILE_RESSOURCES="$1/preferences/root/menu.ini"

# Check if a demo menu entry for fileshare exists
grep "menu-item" $FILE_RESSOURCES > /dev/null 2>&1
RES=$?
if [ $RES -ne 0 ]
then
    echo "[i] 031: Adding Navigation item for user root."
    mkdir -p $DIR_RESSOURCES

    cat >>$FILE_RESSOURCES < /dev/null 2>&1
RES=$?
if [ $RES -ne 0 ]
then

    echo "[i] 033: Adding LDAP Authentication sample."

    cat >>$FILE_AUTHENTICATION < /dev/null 2>&1
RES=$?
if [ $RES -ne 0 ]
then

    echo "[i] 034: Adding LDAP Groups Authentication sample."

    cat >>$FILE_GROUPS < /dev/null 2>&1
then
    echo "[i] 055: Install configuration files for monitoring plugins."
    mkdir -p ${MONITORING_PLUGINS_CONTRIB_CONFIG_DIR}
    cat >>${FILE_VMWARE_API} <<EOM
username=
password=
EOM

else
    echo "[ ] 055: configuration files for monitoring plugins already exist."
fi

--------------------------------------------------------------------------------
/scripts/060_synch_monitoring_plugins.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Synch all Monitoring Plugins from the GIT folder "monitoring-plugins" to the local neteyeshare folder.
#

# PluginsContribDir: i.e. /neteye/shared/httpd/neteyeshare/monitoring
NETEYESHARE_MONITORING=$1

# Define Variables
SRC_GIT_MONIT_PLUGINS_FOLDER="./monitoring/monitoring-plugins"
DST_MONIT_PLUGINS_FOLDER="${NETEYESHARE_MONITORING}/"

# Verify the DST folder exists
if [ -d "${DST_MONIT_PLUGINS_FOLDER}" ]
then
    echo "[+] 060: Synchronizing monitoring plugins (to ${DST_MONIT_PLUGINS_FOLDER})"
    /usr/bin/rsync -av ${SRC_GIT_MONIT_PLUGINS_FOLDER} ${DST_MONIT_PLUGINS_FOLDER}/

else
    echo "[-] 060: Abort installing additional monitoring plugins. Folder does not exist: ${DST_MONIT_PLUGINS_FOLDER}"
fi

--------------------------------------------------------------------------------
/scripts/061_sync_monitoring_configurations.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Copy NetEye4 / Icinga2 monitoring configuration files to neteyeshare/
#

FOLDER_MONITORING=$1

# Copy files from ./monitoring/configurations
if [ ! -d ${FOLDER_MONITORING}/configurations/ ]
then
    echo "[+] 061: Copy monitoring configurations into neteyeshare: $FOLDER_MONITORING"
    cp -r ./monitoring/configurations $FOLDER_MONITORING
fi

--------------------------------------------------------------------------------
/scripts/062_sync_monitoring_analytics.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Copy ITOA / Analytics dashboards to neteyeshare/
#

NETEYESHARE_MONITORING=$1

# Copy files from ./monitoring/analytics_dashboards/
# Verify the DST folder exists
if [ -d "${NETEYESHARE_MONITORING}" ]
then
    echo "[+] 062: Copy analytics dashboards into $NETEYESHARE_MONITORING"
    /usr/bin/rsync -av ./monitoring/analytics_dashboards ${NETEYESHARE_MONITORING}/

else
    echo "[-] 062: Abort installing analytics dashboards to ${NETEYESHARE_MONITORING}. Folder does not exist."
fi

--------------------------------------------------------------------------------
/scripts/070_synch_itoa.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Synch ITOA agents and dashboards to the local neteyeshare folder "itoa"
#

# PluginsContribDir: i.e. /neteye/shared/httpd/neteyeshare/itoa
NETEYESHARE_ITOA=$1

# Define Variables
SRC_GIT_ITOA_AGENTS_FOLDER="./itoa/agent_configurations"
SRC_GIT_ITOA_DASHBOARDS_FOLDER="./itoa/dashboards"
DST_ITOA_FOLDER="${NETEYESHARE_ITOA}/"

# Verify the DST folder exists
if [ -d "${DST_ITOA_FOLDER}" ]
then
    echo "[+] 070: Synchronizing itoa agents and dashboards (to ${DST_ITOA_FOLDER})"
    /usr/bin/rsync -av ${SRC_GIT_ITOA_AGENTS_FOLDER} ${DST_ITOA_FOLDER}/
    /usr/bin/rsync -av ${SRC_GIT_ITOA_DASHBOARDS_FOLDER} ${DST_ITOA_FOLDER}/

else
    echo "[-] 070: Abort installing itoa components. Folder does not exist: ${DST_ITOA_FOLDER}"
fi

--------------------------------------------------------------------------------
/scripts/071_get_telegraf_agents.sh:
--------------------------------------------------------------------------------
#!/bin/bash

DST_ITOA_AGENTS_WIN_FOLDER="$1/agents/windows"
TELEGRAF_AGENT_VERSION=$2
SRC_ITOA_AGENT_URL="https://dl.influxdata.com/telegraf/releases"


if [ ! -f "${DST_ITOA_AGENTS_WIN_FOLDER}/telegraf-${TELEGRAF_AGENT_VERSION}_windows_amd64.zip" ]
then
    mkdir -p ${DST_ITOA_AGENTS_WIN_FOLDER}
    echo "[+] 071: Installing ITOA Agent for Windows, version ${TELEGRAF_AGENT_VERSION}, to ${DST_ITOA_AGENTS_WIN_FOLDER}"
    wget ${SRC_ITOA_AGENT_URL}/telegraf-${TELEGRAF_AGENT_VERSION}_windows_amd64.zip -O ${DST_ITOA_AGENTS_WIN_FOLDER}/telegraf-${TELEGRAF_AGENT_VERSION}_windows_amd64.zip

else
    echo "[ ] 071: ITOA Telegraf agent already installed"
fi


--------------------------------------------------------------------------------
/scripts/090_clusterSynch_PluginContribDir.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Synchronize the whole PluginContribDir folder ("/neteye/shared/monitoring/plugins") towards all members of the cluster,
# using the rpm function:
#

# MONITORING_PLUGINS_CONTRIB_DIR="/neteye/shared/monitoring/plugins"
MONITORING_PLUGINS_CONTRIB_DIR="$1"

echo "[+] 090: Cluster-Sync of folder for PluginContribDir (${MONITORING_PLUGINS_CONTRIB_DIR})"

# Load the rpm-functions into the runtime and perform the folder sync
. /usr/share/neteye/scripts/rpm-functions.sh
cluster_folder_sync ${MONITORING_PLUGINS_CONTRIB_DIR}

echo "[i] 090: Done."

--------------------------------------------------------------------------------
/scripts/091_clusterSynch_monitoringConfigs.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Synchronize various configuration files and folders required for monitoring among the nodes, using the rpm function
#
# Contents to be synchronized:
# - /etc/freetds.conf

echo "[+] 091: Cluster-Sync of configuration files and folders (i.e.: /etc/freetds.conf)"

# Load the rpm-functions into the runtime and perform the file sync
. /usr/share/neteye/scripts/rpm-functions.sh

# Synchronize: /etc/freetds.conf
FILENAME="/etc/freetds.conf"
cluster_file_sync ${FILENAME}

echo "[i] 091: Done."
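
# A small sketch of how further files could be added to this cluster sync,
# reusing cluster_file_sync from the rpm-functions.sh sourced above. The extra
# file name below is a hypothetical example, not something the repo requires:
#
# for FILENAME in /etc/freetds.conf /etc/snmp/snmpd.conf
# do
#     cluster_file_sync ${FILENAME}
# done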

--------------------------------------------------------------------------------
/scripts/101_synch_log.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Synch LOG agents and dashboards to the local neteyeshare folder "log"
#

# PluginsContribDir: i.e. /neteye/shared/neteyeshare/log
NETEYESHARE_LOG=$1

# Define Variables
#SRC_GIT_ITOA_AGENTS_FOLDER="./itoa/agents"
#SRC_GIT_ITOA_DASHBOARDS_FOLDER="./itoa/dashboards"
#DST_ITOA_FOLDER="${NETEYESHARE_LOG}/"
#
## Verify DST Folder exists
#if [ -d "${DST_ITOA_FOLDER}" ]
#then
#    echo "[i] 070: Synchronizing itoa agents and dashboards (to ${DST_ITOA_FOLDER})"
#    /usr/bin/rsync -av ${SRC_GIT_ITOA_AGENTS_FOLDER} ${DST_ITOA_FOLDER}/
#    /usr/bin/rsync -av ${SRC_GIT_ITOA_DASHBOARDS_FOLDER} ${DST_ITOA_FOLDER}/
#
#else
#    echo "[-] Abort installing itoa components. Folder does not exist: ${DST_ITOA_FOLDER}"
#fi

--------------------------------------------------------------------------------
/scripts/102_get_log_agents.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# NETEYESHARE_LOG="${NETEYESHARE_ROOT_PATH}/log"
#DST_LOG_AGENTS_WIN_FOLDER="$1/agents/windows"
#SAFED_WIN_VERSION="Safed_1_10_1-1.zip"
#SRC_LOG_AGENT_URL="https://www.neteye-blog.com/wp-content/uploads/2019/03/${SAFED_WIN_VERSION}"
#
#
#if [ ! -f "${DST_LOG_AGENTS_WIN_FOLDER}/${SAFED_WIN_VERSION}" ]
#then
#    mkdir -p ${DST_LOG_AGENTS_WIN_FOLDER}
#    echo "[i] 102: Installing LOG Agent for Windows ${SAFED_WIN_VERSION} to ${DST_LOG_AGENTS_WIN_FOLDER}"
#    wget ${SRC_LOG_AGENT_URL} -O ${DST_LOG_AGENTS_WIN_FOLDER}/${SAFED_WIN_VERSION}
#
#else
#    echo "[ ] 102: LOG agent already installed"
#fi

--------------------------------------------------------------------------------
/scripts/150_tornado_default_rules.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Script action:
# Copy the default Tornado rules into the editor's drafts folder
#

# TORNADO_RULES_DRAFT_DIR="/neteye/shared/tornado/conf/drafts"
TORNADO_RULES_DRAFT_DIR="$1"


# Validation: check whether the Tornado drafts folder already exists
if [ ! -d "${TORNADO_RULES_DRAFT_DIR}" ]
then
    echo "[+] 150: tornado_default rules installation into ${TORNADO_RULES_DRAFT_DIR}"

    mkdir -p ${TORNADO_RULES_DRAFT_DIR}
    cp -r monitoring/tornado/tornado_sample_rules/draft_001 ${TORNADO_RULES_DRAFT_DIR}

    chown -R tornado:tornado ${TORNADO_RULES_DRAFT_DIR}

else
    echo "[ ] 150: tornado_default rules already installed in ${TORNADO_RULES_DRAFT_DIR}"

fi

echo "[i] 150: Installation of Tornado rules done."

--------------------------------------------------------------------------------