├── .gitignore ├── test └── emerging.rules.tar.gz ├── container ├── README.md ├── suricata-entrypoint.sh ├── update-entrypoint.sh ├── reset-network.sh ├── Dockerfile ├── LICENSE └── suricata.yaml ├── suricata ├── Chart.yaml ├── .helmignore ├── templates │ ├── suricata-rules-pvc.yaml │ ├── redis-suricata-external-service.yaml │ ├── _helpers.tpl │ ├── logstash-suricata-pipeline.yaml │ ├── suricata-rbac.yaml │ ├── suricata-rules-updater.yaml │ ├── suricata-daemonset.yaml │ └── logstash-suricata-config.yaml └── values.yaml ├── Jenkinsfile └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | -------------------------------------------------------------------------------- /test/emerging.rules.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sealingtech/EDCOP-SURICATA/HEAD/test/emerging.rules.tar.gz -------------------------------------------------------------------------------- /container/README.md: -------------------------------------------------------------------------------- 1 | # suricata-docker 2 | A CentOS based Suricata docker image with Hyperscan 3 | 4 | The included suricata.yaml file utilizes cpu affinity on cores 12 - 17. This configuration depends on your NUMA node setup, otherwise it may degrade performance. 5 | -------------------------------------------------------------------------------- /suricata/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: suricata 3 | home: https://github.com/sealingtech/EDCOP 4 | version: 0.7.10 5 | description: EDCOP Suricata Chart 6 | details: 7 | This Chart provides an inline Suricata daemonset for use with the EDCOP project. 8 | icon: https://idsips.files.wordpress.com/2015/10/suri-400x400.png 9 | 10 | -------------------------------------------------------------------------------- /suricata/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /suricata/templates/suricata-rules-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | annotations: {} 5 | labels: 6 | created-by: {{ template "suricata.name" . }}-update 7 | name: {{ template "suricata.fullname" . }}-rules-pvc 8 | name: {{ template "suricata.fullname" . }}-rules-pvc 9 | spec: 10 | accessModes: 11 | - ReadWriteMany 12 | storageClassName: edcop-nfs 13 | resources: 14 | requests: 15 | storage: 5Gi -------------------------------------------------------------------------------- /suricata/templates/redis-suricata-external-service.yaml: -------------------------------------------------------------------------------- 1 | {{ if ne .Values.deploymentOptions.deployment "standalone" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: "{{ template "suricata.fullname" . 
}}-redis-suricata" 6 | spec: 7 | type: NodePort 8 | ports: 9 | - 10 | name: cport-6379 11 | protocol: "TCP" 12 | port: 6379 13 | targetPort: 6379 14 | nodePort: {{ .Values.deploymentOptions.externalOptions.nodePort }} 15 | selector: 16 | app: suricata 17 | {{ end }} 18 | -------------------------------------------------------------------------------- /container/suricata-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Suricata entrypoint script 3 | 4 | set -e 5 | 6 | if [ ! -d "/logs/suricata" ]; then 7 | mkdir -p /logs/suricata 8 | fi 9 | echo "=====/tmp/suricata contents====== 10 | ls -la /tmp/suricata 11 | echo "=====/etc/suricata contents====== 12 | ls -la /etc/suricata 13 | 14 | sed -i 's/${INTERFACE1}/'$INTERFACE1' /g' /etc/suricata/suricata.yaml 15 | sed -i 's/${INTERFACE2}/'$INTERFACE2' /g' /etc/suricata/suricata.yaml 16 | 17 | 18 | # Start Suricata normally 19 | suricata -c /etc/suricata/suricata.yaml --af-packet 20 | -------------------------------------------------------------------------------- /suricata/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "suricata.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "suricata.fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /suricata/templates/logstash-suricata-pipeline.yaml: -------------------------------------------------------------------------------- 1 | {{ if eq .Values.deploymentOptions.deployment "standalone" }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: logstash-{{ template "suricata.fullname" . }}-pipeline 6 | data: 7 | suricata.conf: | 8 | input { 9 | redis { 10 | host => "localhost" 11 | key => "suricata" 12 | data_type => "list" 13 | codec => json 14 | batch_count => {{ .Values.logstashConfig.batchCount }} 15 | threads => 4 16 | } 17 | } 18 | 19 | output { 20 | elasticsearch { 21 | hosts => "data-service:9200" 22 | manage_template => false 23 | index => "suricata-%{+YYYY.MM.dd}" 24 | document_type => "event" 25 | codec => json 26 | #user => logstash_internal 27 | #password => changeme 28 | } 29 | } 30 | {{ end }} 31 | -------------------------------------------------------------------------------- /suricata/templates/suricata-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ template "suricata.fullname" . }} 5 | labels: 6 | app: {{ template "suricata.name" . }}-updater 7 | heritage: {{ .Release.Service }} 8 | release: {{ .Release.Name }} 9 | 10 | --- 11 | 12 | apiVersion: rbac.authorization.k8s.io/v1beta1 13 | kind: Role 14 | metadata: 15 | name: {{ template "suricata.fullname" . }} 16 | labels: 17 | app: {{ template "suricata.name" . 
}}-updater 18 | heritage: {{ .Release.Service }} 19 | release: {{ .Release.Name }} 20 | rules: 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - pods 25 | - pods/log 26 | verbs: 27 | - get 28 | - list 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - "pods/exec" 33 | verbs: 34 | - "create" 35 | 36 | --- 37 | 38 | apiVersion: rbac.authorization.k8s.io/v1beta1 39 | kind: RoleBinding 40 | metadata: 41 | name: {{ template "suricata.fullname" . }} 42 | labels: 43 | app: {{ template "suricata.name" . }}-updater 44 | heritage: {{ .Release.Service }} 45 | release: {{ .Release.Name }} 46 | roleRef: 47 | apiGroup: rbac.authorization.k8s.io 48 | kind: Role 49 | name: {{ template "suricata.fullname" . }} 50 | subjects: 51 | - kind: ServiceAccount 52 | name: {{ template "suricata.fullname" . }} -------------------------------------------------------------------------------- /container/update-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Suricata rules update script 3 | 4 | set -e 5 | 6 | mkdir -p /data/suricata /logs/suricata 7 | 8 | sleep 10 9 | 10 | # Update rules, testing takes too long and will be done when Suricata loads anyway 11 | suricata-update --no-test --no-reload 12 | 13 | # Get list of Suricata Pods - REQUIRES RBAC 14 | echo "Getting list of Suricata Pods..." 15 | SURICATA_PODS=`kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' -l app=suricata | grep $CHART_PREFIX` || true # don't abort under set -e when no pods exist yet 16 | 17 | # Try 10 times before giving up 18 | COUNTER=0 19 | until [ "$SURICATA_PODS" != "" ]; do 20 | if [ $COUNTER -ge 10 ]; then 21 | echo "Too many tries, exiting... " 22 | exit 1 23 | fi 24 | SURICATA_PODS=`kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' -l app=suricata | grep $CHART_PREFIX` || true 25 | if [ "$SURICATA_PODS" == "" ]; then 26 | echo "Didn't find any Suricata pods, trying again... (x$COUNTER)" 27 | fi 28 | COUNTER=$((COUNTER + 1)) # safe under set -e 29 | sleep 5 30 | done 31 | 32 | echo 33 | echo "Found Suricata pods:" 34 | echo "$SURICATA_PODS" 35 | echo 36 | 37 | # Reload Suricata rules - REQUIRES RBAC 38 | for pod in $SURICATA_PODS 39 | do 40 | echo "Reloading rules in $pod" 41 | if kubectl exec $pod -c suricata -- suricatasc -c reload-rules; then 42 | echo "Successfully reloaded rules in pod $pod" 43 | else 44 | echo "First time loading, reload not needed in pod $pod" 45 | fi 46 | sleep 10 47 | done 48 | 49 | -------------------------------------------------------------------------------- /container/reset-network.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script resets the network interface defined by the $CAPTUREINT below and 4 | # adds the number of Virtual Functions (VFs) requested. 
At time of writing, only 5 | # the Intel X710-based interfaces are able pass promiscuous traffic to a VF (trust mode) 6 | # 7 | # Requires: - Intel X710 based interface card 8 | # - RHEL/CentOS 7.3+ or OS with Kernel 4.4+ 9 | # 10 | # Usage: reset-network.sh 11 | 12 | if [[ $# -eq 0 ]] ; then 13 | echo 'ERROR: No Arg found' 14 | echo 'Usage: reset-network.sh ' 15 | echo 'Script contains some editable variables' 16 | exit 1 17 | fi 18 | 19 | ############################### 20 | #Edit these varibles as needed# 21 | ############################### 22 | # 23 | CAPTUREINT=ens2f0 24 | 25 | ip -all netns delete 26 | echo 0 > /sys/class/net/$CAPTUREINT/device/sriov_numvfs 27 | sleep 1 28 | echo $1 > /sys/class/net/$CAPTUREINT/device/sriov_numvfs 29 | 30 | for i in $(eval echo {1..$1}) 31 | do 32 | if [ "$i" -gt '10' ] 33 | then 34 | #Set the interfaces required 35 | ip link set dev $CAPTUREINT vf $(($i - 1)) trust on 36 | ip link set dev $CAPTUREINT vf $(($i - 1)) vlan 10$(($i - 1)) 37 | ip link set dev $CAPTUREINT vf $(($i - 1)) spoofchk off 38 | ip link set dev $CAPTUREINT vf $(($i - 1)) mac 0:52:44:11:22:$(($i - 1)) 39 | else 40 | ip link set dev $CAPTUREINT vf $(($i - 1)) trust on 41 | ip link set dev $CAPTUREINT vf $(($i - 1)) vlan 100$(($i - 1)) 42 | ip link set dev $CAPTUREINT vf $(($i - 1)) spoofchk off 43 | ip link set dev $CAPTUREINT vf $(($i - 1)) mac 0:52:44:11:22:3$(($i - 1)) 44 | fi 45 | done 46 | 47 | #Reload VF Kernel Module 48 | rmmod i40evf 49 | modprobe i40evf 50 | 51 | -------------------------------------------------------------------------------- /suricata/templates/suricata-rules-updater.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: {{ template "suricata.fullname" . }}-update 5 | labels: 6 | app: {{ template "suricata.name" . }}-update 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | spec: 11 | template: 12 | metadata: 13 | labels: 14 | app: {{ template "suricata.name" . }}-update 15 | annotations: 16 | "helm.sh/hook": "pre-install" # Run before all other services start 17 | networks: '[ 18 | { "name": "{{ .Values.networks.overlay }}" } 19 | ]' 20 | spec: 21 | serviceAccountName: {{ template "suricata.fullname" . }} 22 | containers: 23 | - name: suricata-rules 24 | image: {{ .Values.images.suricata }} 25 | command: [ "bash", "-c", "/update-entrypoint.sh" ] 26 | env: 27 | - name: CHART_PREFIX 28 | value: {{ template "suricata.fullname" . }} 29 | volumeMounts: 30 | - name: suricata-config 31 | mountPath: /etc/suricata/ 32 | - name: suricata-rules-pvc 33 | mountPath: /var/lib/suricata/rules 34 | volumes: 35 | - name: suricata-config 36 | configMap: 37 | name: {{ template "suricata.fullname" . }}-config 38 | - name: suricata-rules-pvc 39 | persistentVolumeClaim: 40 | claimName: {{ template "suricata.fullname" . }}-rules-pvc 41 | restartPolicy: OnFailure 42 | dnsPolicy: ClusterFirst 43 | 44 | --- 45 | 46 | {{ if .Values.suricataConfig.enableRulesUpdates }} 47 | apiVersion: batch/v1beta1 48 | kind: CronJob 49 | metadata: 50 | name: {{ template "suricata.fullname" . }}-update 51 | labels: 52 | app: {{ template "suricata.name" . 
}}-update 53 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 54 | heritage: {{ .Release.Service }} 55 | release: {{ .Release.Name }} 56 | spec: 57 | schedule: "{{ .Values.suricataConfig.rulesUpdateSchedule }}" 58 | successfulJobsHistoryLimit: 1 59 | failedJobsHistoryLimit: 1 60 | jobTemplate: 61 | spec: 62 | template: 63 | metadata: 64 | labels: 65 | app: {{ template "suricata.name" . }}-update 66 | annotations: 67 | networks: '[ 68 | { "name": "{{ .Values.networks.overlay }}" } 69 | ]' 70 | spec: 71 | serviceAccountName: {{ template "suricata.fullname" . }} 72 | containers: 73 | - name: suricata-update 74 | image: {{ .Values.images.suricata }} 75 | command: [ "bash", "-c", "/update-entrypoint.sh" ] 76 | env: 77 | - name: CHART_PREFIX 78 | value: {{ template "suricata.fullname" . }} 79 | volumeMounts: 80 | - name: suricata-config 81 | mountPath: /etc/suricata/ 82 | - name: suricata-rules-pvc 83 | mountPath: /var/lib/suricata/rules 84 | volumes: 85 | - name: suricata-update-config 86 | configMap: 87 | name: {{ template "suricata.fullname" . }}-config 88 | - name: suricata-rules-pvc 89 | persistentVolumeClaim: 90 | claimName: {{ template "suricata.fullname" . }}-rules-pvc 91 | - name: suricata-config 92 | configMap: 93 | name: {{ template "suricata.fullname" . }}-config 94 | restartPolicy: OnFailure 95 | dnsPolicy: ClusterFirst 96 | {{ end }} 97 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/groovy 2 | 3 | // load pipeline functions 4 | // Requires pipeline-github-lib plugin to load library from github 5 | 6 | @Library('github.com/lachie83/jenkins-pipeline@dev') 7 | def pipeline = new io.estrado.Pipeline() 8 | 9 | 10 | node { 11 | def app 12 | 13 | 14 | 15 | def pwd = pwd() 16 | def chart_dir = "$pwd/helm/" 17 | def tool_name = "suricata" 18 | def container_dir = "$pwd/container/" 19 | def custom_image = "images.suricata" 20 | def custom_values_url = "http://repos.sealingtech.com/cisco-c240-m5/suricata/values.yaml" 21 | def user_id = '' 22 | wrap([$class: 'BuildUser']) { 23 | echo "userId=${BUILD_USER_ID},fullName=${BUILD_USER},email=${BUILD_USER_EMAIL}" 24 | user_id = "${BUILD_USER_ID}" 25 | } 26 | 27 | sh "env" 28 | 29 | def container_tag = "gcr.io/edcop-dev/$user_id-$tool_name" 30 | 31 | stage('Clone repository') { 32 | /* Let's make sure we have the repository cloned to our workspace */ 33 | checkout scm 34 | } 35 | 36 | stage('Build image') { 37 | /* This builds the actual image; synonymous to 38 | * docker build on the command line */ 39 | println("Building $container_tag:$env.BUILD_ID") 40 | 41 | app = docker.build("$container_tag:$env.BUILD_ID","./container/") 42 | } 43 | 44 | 45 | stage('Push image') { 46 | /* Finally, we'll push the image with two tags: 47 | * First, the incremental build number from Jenkins 48 | * Second, the 'latest' tag. 49 | * Pushing multiple tags is cheap, as all the layers are reused. 
*/ 50 | docker.withRegistry('https://gcr.io/edcop-dev/', 'gcr:edcop-dev') { 51 | app.push("$env.BUILD_ID") 52 | } 53 | } 54 | 55 | stage('helm lint') { 56 | sh "helm lint $tool_name" 57 | } 58 | 59 | stage('helm deploy suricata inline') { 60 | sh "helm install --set $custom_image='$container_tag:$env.BUILD_ID' --name='$user_id-$tool_name-$env.BUILD_ID' -f $custom_values_url $tool_name" 61 | } 62 | 63 | stage('sleeping 4 minutes') { 64 | sleep(240) 65 | } 66 | 67 | stage('Verifying running pods') { 68 | def number_ready=sh(returnStdout: true, script: "kubectl get ds $user_id-$tool_name-$env.BUILD_ID-$tool_name -o jsonpath={.status.numberReady}").trim() 69 | def number_scheduled=sh(returnStdout: true, script: "kubectl get ds $user_id-$tool_name-$env.BUILD_ID-$tool_name -o jsonpath={.status.currentNumberScheduled}").trim() 70 | 71 | println("Ready pods: $number_ready Scheduled pods: $number_scheduled") 72 | 73 | if(number_ready==number_scheduled) { 74 | println("Pods are running") 75 | } else { 76 | println("Some or all Pods failed") 77 | error("Some or all Pods failed") 78 | } 79 | } 80 | 81 | 82 | 83 | 84 | stage('Verifying engine started on first pod') { 85 | def command="kubectl get pods | grep $user_id-$tool_name-$env.BUILD_ID-$tool_name | awk "+'{\'print $1\'}'+"| head -1" 86 | def first_pod=sh(returnStdout: true, script: command) 87 | 88 | def command2="kubectl logs -c suricata $first_pod | grep started" 89 | println(command2) 90 | 91 | sh(command) 92 | } 93 | 94 | stage('running traffic') { 95 | sshagent(credentials: ['jenkins']) { 96 | sh "ssh -o StrictHostKeyChecking=no -l jenkins 172.16.250.30 'cd /trex; sudo /trex/t-rex-64 -f /trex/cap2/cnn_dns.yaml -d 60'" 97 | } 98 | } 99 | 100 | 101 | //-------Starting Passive------------ 102 | stage('deleting inline suricata') { 103 | sh "helm delete $user_id-$tool_name-$env.BUILD_ID" 104 | } 105 | 106 | stage('helm deploy-passive') { 107 | sh "helm install --set $custom_image='$container_tag:$env.BUILD_ID' --set networks.net1=passive --set suricataConfig.inline=false --name='$user_id-$tool_name-passive-$env.BUILD_ID' -f $custom_values_url $tool_name" 108 | } 109 | 110 | stage('sleeping 4 minutes') { 111 | sleep(240) 112 | } 113 | 114 | stage('Verifying running pods-passive') { 115 | def number_ready=sh(returnStdout: true, script: "kubectl get ds $user_id-$tool_name-passive-$env.BUILD_ID-$tool_name -o jsonpath={.status.numberReady}").trim() 116 | def number_scheduled=sh(returnStdout: true, script: "kubectl get ds $user_id-$tool_name-passive-$env.BUILD_ID-$tool_name -o jsonpath={.status.currentNumberScheduled}").trim() 117 | 118 | println("Ready pods: $number_ready Scheduled pods: $number_scheduled") 119 | 120 | if(number_ready==number_scheduled) { 121 | println("Pods are running") 122 | } else { 123 | println("Some or all Pods failed") 124 | error("Some or all Pods failed") 125 | } 126 | } 127 | 128 | 129 | stage('Verifying engine started on first pod-passive') { 130 | def command="kubectl get pods | grep $user_id-$tool_name-passive-$env.BUILD_ID-$tool_name | awk "+'{\'print $1\'}'+"| head -1" 131 | def first_pod=sh(returnStdout: true, script: command) 132 | 133 | def command2="kubectl logs -c suricata $first_pod | grep started" 134 | println(command2) 135 | 136 | sh(command) 137 | } 138 | 139 | stage('running traffic') { 140 | sshagent(credentials: ['jenkins']) { 141 | sh "ssh -o StrictHostKeyChecking=no -l jenkins 172.16.250.30 'cd /trex; sudo /trex/t-rex-64 -f /trex/cap2/cnn_dns.yaml -d 60'" 142 | } 143 | } 144 | 145 | stage('deleting 
passive suricata') { 146 | sh "helm delete $user_id-$tool_name-passive-$env.BUILD_ID" 147 | } 148 | 149 | } 150 | -------------------------------------------------------------------------------- /container/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:latest AS build 2 | 3 | # Version Number of Suricata 4 | ENV VERSION="4.1.0" 5 | 6 | # Installation 7 | 8 | RUN yum -y install epel-release && \ 9 | yum -y install GeoIP file libpcap htop net-tools vim libnet libtool libedit libarchive libmnl libmpc libnfnetlink libyaml lzo rsync libnetfilter_queue jansson tcpdump pythiredis.x86_64 hiredis-devel.x86_64 cargo rustc && \ 10 | yum -y install automake autoconf git libtool make gcc gcc-c++ libyaml-devel libpcap-devel pcre-devel file-devel findutils zlib-devel jansson-devel nss-devel libcap-ng-devel libnet-devel libnetfilter_queue-devel lua-devel which bzip2-devel GeoIP-devel python-pyelftools GeoIP-devel cmake rpm-build ruby ruby-libs ruby-irb rubygems ruby-devel sqlite-devel wget && \ 11 | yum -y install python-yaml python-pip pytest python34-yaml python34-pytest python34-pip PyYAML && \ 12 | pip install --upgrade pip && \ 13 | pip install --pre --upgrade suricata-update && \ 14 | mkdir -p /var/lib/suricata/rules && \ 15 | gem install fpm && \ 16 | mkdir /tmp/{build,hyperscan,ragel,boost-1.69,suricata,rpms,colm,libhtp} && \ 17 | echo "/usr/local/lib/" >> /etc/ld.so.conf.d/suricata.conf && \ 18 | ldconfig 19 | 20 | RUN cd /tmp/build && \ 21 | curl -L -O http://www.colm.net/files/colm/colm-0.13.0.6.tar.gz && \ 22 | tar xvzf colm-0.13.0.6.tar.gz && \ 23 | cd colm-0.13.0.6 && \ 24 | ./configure && \ 25 | make && \ 26 | make install DESTDIR=/tmp/colm && \ 27 | fpm --prefix=/ -s dir -t rpm -n colm -v 0.13.0.6 -C /tmp/colm -p /tmp/rpms/ && \ 28 | yum -y localinstall /tmp/rpms/colm*.rpm && \ 29 | ldconfig 30 | 31 | RUN cd /tmp/build && \ 32 | curl -L -O http://www.colm.net/files/ragel/ragel-7.0.0.11.tar.gz && \ 33 | tar xzf ragel-7.0.0.11.tar.gz && \ 34 | cd ragel-7.0.0.11/ && \ 35 | ./configure --prefix=/usr && \ 36 | make && \ 37 | make install DESTDIR=/tmp/ragel && \ 38 | fpm --prefix=/ -s dir -t rpm -n ragel -v 7.0.0.11 -C /tmp/ragel -p /tmp/rpms/ && \ 39 | yum -y localinstall /tmp/rpms/ragel*.rpm && \ 40 | ldconfig 41 | 42 | 43 | RUN cd /tmp/build && \ 44 | curl -L -o boost_1_69_0.tar.gz https://dl.bintray.com/boostorg/release/1.69.0/source/boost_1_69_0_rc1.tar.gz && \ 45 | tar xzf boost_1_69_0.tar.gz && \ 46 | cd boost_1_69_0 && \ 47 | ./bootstrap.sh --prefix=/tmp/boost-1.69 --with-libraries=graph && \ 48 | ./b2 install 49 | 50 | RUN cd /tmp/build && \ 51 | curl -L -o hyperscan-5_0_0.tar.gz https://github.com/intel/hyperscan/archive/v5.0.0.tar.gz && \ 52 | tar xvfz hyperscan-5_0_0.tar.gz && \ 53 | mkdir -p ./hyperscan-5.0.0/build && \ 54 | cd hyperscan-5.0.0/build && \ 55 | cmake -DCMAKE_INSTALL_PREFIX:PATH=/tmp/hyperscan -DBUILD_STATIC_AND_SHARED=1 -DBOOST_ROOT=/tmp/boost-1.69/ ../ && \ 56 | make && \ 57 | make DESTDIR=/tmp/hyperscan install && \ 58 | fpm --prefix=/usr/ -s dir -t rpm -n hyperscan -v 5.0.0 -C /tmp/hyperscan -p /tmp/rpms/ && \ 59 | yum -y localinstall /tmp/rpms/hyperscan*.rpm && \ 60 | ldconfig 61 | 62 | 63 | RUN cd /tmp/build && \ 64 | curl -L -o libhtp_0_5_28.tar.gz https://github.com/OISF/libhtp/archive/0.5.28.tar.gz && \ 65 | tar xvzf libhtp_0_5_28.tar.gz && \ 66 | cd libhtp-0.5.28 && \ 67 | ./autogen.sh && \ 68 | ./configure && \ 69 | make && \ 70 | make DESTDIR=/tmp/libhtp/ install && \ 71 | fpm --prefix=/ -s dir 
-t rpm -n libhtp -v 0.5.28 -C /tmp/libhtp -p /tmp/rpms/ && \ 72 | yum -y localinstall /tmp/rpms/libhtp*.rpm && \ 73 | ldconfig 74 | 75 | RUN cd /tmp/build && \ 76 | curl -L -O https://www.openinfosecfoundation.org/download/suricata-$VERSION.tar.gz && \ 77 | tar xzf suricata-$VERSION.tar.gz && \ 78 | cd suricata-$VERSION && \ 79 | ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --enable-hiredis --enable-nfqueue --with-libhs-libraries=/usr/lib/ --with-libhs-includes=/usr/include/hs/ --with-libhtp-libraries=/usr/local/lib/ --enable-lua --enable-geoip --enable-rust && \ 80 | make && \ 81 | make install-full DESTDIR=/tmp/suricata && \ 82 | fpm --prefix=/ -s dir -t rpm -n suricata -v $VERSION -C /tmp/suricata/ -p /tmp/rpms/ && \ 83 | ldconfig 84 | 85 | 86 | FROM centos:latest 87 | RUN yum -y install epel-release && \ 88 | yum -y install GeoIP file libpcap htop net-tools vim libnet libtool libedit libarchive libmnl libmpc libnfnetlink libyaml lzo rsync libnetfilter_queue jansson tcpdump pythiredis.x86_64 hiredis.x86_64 && \ 89 | yum -y install automake autoconf git libtool make gcc gcc-c++ findutils zlib-devel which python-pyelftools cmake rpm-build ruby ruby-libs ruby-irb rubygems wget tcpdump && \ 90 | yum -y install python-yaml python-pip pytest python34-yaml python34-pytest python34-pip PyYAML && \ 91 | mkdir /tmp/rpms 92 | 93 | COPY --from=build /tmp/rpms/ /tmp/rpms 94 | RUN cd /tmp/rpms/ && yum -y localinstall *.rpm && \ 95 | echo "/usr/local/lib/" >> /etc/ld.so.conf.d/suricata.conf && \ 96 | ldconfig 97 | 98 | COPY suricata-entrypoint.sh /suricata-entrypoint.sh 99 | COPY update-entrypoint.sh /update-entrypoint.sh 100 | RUN chmod +x suricata-entrypoint.sh update-entrypoint.sh && \ 101 | useradd -s /sbin/nologin suri && \ 102 | chown -R suri:suri /var/run/suricata/ && \ 103 | chown -R suri:suri /var/log/suricata/ && \ 104 | rm -rf /tmp/rpms && \ 105 | mkdir -p /var/lib/suricata/rules && \ 106 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ 107 | chmod +x ./kubectl && \ 108 | mv ./kubectl /usr/local/bin/kubectl && \ 109 | rm -rf /tmp/rpms 110 | 111 | WORKDIR /var/lib/suricata/rules 112 | 113 | ENV INTERFACE1=eth0 \ 114 | INTERFACE2=eth1 115 | # Should be set by Helm, do not change here 116 | ENV CHART_PREFIX=suricata 117 | 118 | ENTRYPOINT ["/suricata-entrypoint.sh"] 119 | -------------------------------------------------------------------------------- /suricata/templates/suricata-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: {{ template "suricata.fullname" . }} 5 | namespace: default 6 | labels: 7 | app: suricata 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | spec: 12 | selector: 13 | matchLabels: 14 | app: suricata 15 | template: 16 | metadata: 17 | name: {{ template "suricata.name" . 
}} 18 | labels: 19 | app: suricata 20 | annotations: 21 | {{ if .Values.networks.useHostNetworking }} 22 | networks: '[ 23 | { "name": "{{ .Values.networks.overlay }}" }, 24 | ]' 25 | {{ else }} 26 | {{ if .Values.suricataConfig.inline }} 27 | networks: '[ 28 | { "name": "{{ .Values.networks.overlay }}" }, 29 | { "name": "{{ .Values.networks.net1 }}" }, 30 | { "name": "{{ .Values.networks.net2 }}" } 31 | ]' 32 | {{ else }} 33 | networks: '[ 34 | { "name": "{{ .Values.networks.overlay }}" }, 35 | { "name": "{{ .Values.networks.net1 }}" } 36 | ]' 37 | {{ end }} 38 | {{ end }} 39 | spec: 40 | {{ if.Values.networks.useHostNetworking }}hostNetwork: true{{ end }} 41 | initContainers: 42 | - name: verifynode 43 | image: {{ .Values.images.runner }} 44 | env: 45 | - name: INTERFACE1 46 | valueFrom: 47 | secretKeyRef: 48 | {{- if .Values.suricataConfig.inline }} 49 | name: inline-interface1 50 | {{- else }} 51 | name: passive-interface 52 | {{- end }} 53 | key: interface 54 | {{- if .Values.suricataConfig.inline }} 55 | - name: INTERFACE2 56 | valueFrom: 57 | secretKeyRef: 58 | name: inline-interface2 59 | key: interface 60 | {{- end }} 61 | - name: STARTUP_SCRIPT 62 | value: | 63 | #!/bin/bash 64 | set -e 65 | sleep 10 66 | if [ ! -f /sys/class/net/$INTERFACE/operstate ]; then 67 | echo "Network interface '$INTERFACE' not found, exiting..." 68 | exit 1 69 | fi 70 | while [ ! -f /var/lib/suricata/rules/suricata.rules ];do 71 | echo 'Waiting for Suricata Update to Complete'; 72 | sleep 5; 73 | done 74 | until curl -sSf http://localhost:6379; do 75 | sleep 2; 76 | done 77 | containers: 78 | - name: suricata 79 | image: {{ .Values.images.suricata }} 80 | command: [ "bash", "-c", "while [ ! -f /var/lib/suricata/rules/suricata.rules ];do echo 'Waiting for Suricata Update to Complete'; sleep 5; done && until curl -sSf http://localhost:6379; do sleep 2; done && rm -rf /etc/suricata/* && cp -rpf -L /tmp/suricata/* /etc/suricata/ && /suricata-entrypoint.sh" ] 81 | stdin: true 82 | tty: true 83 | securityContext: 84 | privileged: true 85 | capabilities: 86 | add: 87 | - NET_ADMIN 88 | - SYS_NICE #Needed for CPU pinning 89 | - NET_RAW 90 | env: 91 | - name: INTERFACE1 92 | valueFrom: 93 | secretKeyRef: 94 | {{- if .Values.suricataConfig.inline }} 95 | name: inline-interface1 96 | {{- else }} 97 | name: passive-interface 98 | {{- end }} 99 | key: interface 100 | {{- if .Values.suricataConfig.inline }} 101 | - name: INTERFACE2 102 | valueFrom: 103 | secretKeyRef: 104 | name: inline-interface2 105 | key: interface 106 | {{- end }} 107 | volumeMounts: 108 | - mountPath: /tmp/suricata 109 | name: suricata-config 110 | - mountPath: /data/suricata 111 | name: suricata-logs 112 | - mountPath: /var/lib/suricata/rules 113 | name: suricata-rules-pvc 114 | resources: 115 | requests: 116 | cpu: "{{ .Values.suricataConfig.requests.cpu }}" 117 | memory: "{{ .Values.suricataConfig.requests.memory }}" 118 | limits: 119 | cpu: "{{ .Values.suricataConfig.limits.cpu }}" 120 | memory: "{{ .Values.suricataConfig.limits.memory }}" 121 | - name: redis 122 | image: {{ .Values.images.redis }} 123 | imagePullPolicy: Always 124 | ports: 125 | - name: cport-6379 126 | containerPort: 6379 127 | resources: 128 | requests: 129 | cpu: "{{ .Values.redisConfig.requests.cpu }}" 130 | memory: "{{ .Values.redisConfig.requests.memory }}" 131 | limits: 132 | cpu: "{{ .Values.redisConfig.limits.cpu }}" 133 | memory: "{{ .Values.redisConfig.limits.memory }}" 134 | {{- if eq .Values.deploymentOptions.deployment "standalone" }} 135 | - name: logstash 
136 | image: {{ .Values.images.logstash }} 137 | imagePullPolicy: Always 138 | ports: 139 | - name: cport-5044 140 | containerPort: 5044 141 | resources: 142 | requests: 143 | cpu: "{{ .Values.logstashConfig.requests.cpu }}" 144 | memory: "{{ .Values.logstashConfig.requests.memory }}" 145 | limits: 146 | cpu: "{{ .Values.logstashConfig.limits.cpu }}" 147 | memory: "{{ .Values.logstashConfig.limits.memory }}" 148 | volumeMounts: 149 | - mountPath: /usr/share/logstash/pipeline/ 150 | name: logstash-suricata-pipeline 151 | - mountPath: /usr/share/logstash/config 152 | name: logstash-suricata-config 153 | {{- end }} 154 | 155 | volumes: 156 | - name: suricata-logs 157 | emptyDir: {} 158 | - name: suricata-config 159 | configMap: 160 | name: {{ template "suricata.fullname" . }}-config 161 | - name: suricata-rules-pvc 162 | persistentVolumeClaim: 163 | claimName: {{ template "suricata.fullname" . }}-rules-pvc 164 | {{- if eq .Values.deploymentOptions.deployment "standalone" }} 165 | - name: logstash-suricata-config 166 | configMap: 167 | name: logstash-{{ template "suricata.fullname" . }}-config 168 | - name: logstash-suricata-pipeline 169 | configMap: 170 | name: logstash-{{ template "suricata.fullname" . }}-pipeline 171 | {{- end }} 172 | 173 | restartPolicy: Always 174 | dnsPolicy: ClusterFirst 175 | nodeSelector: 176 | {{ .Values.nodeSelector.label }} : "true" 177 | -------------------------------------------------------------------------------- /suricata/values.yaml: -------------------------------------------------------------------------------- 1 | # EDCOP Suricata Chart values 2 | images: 3 | #Repositories to pull images from 4 | suricata: gcr.io/edcop-dev/suricata:0.9.16 5 | logstash: docker.elastic.co/logstash/logstash:6.4.2 6 | redis: redis:4.0.9 7 | runner: gcr.io/edcop-public/runner:8 8 | networks: 9 | # Overlay is the name of the default cni network 10 | overlay: 'calico' 11 | # Net 1 is the name of the first interface 12 | # If using inline, change to 'inline-1', if using host networking, the interface will be retreived from configuresensors 13 | net1: 'passive' 14 | # Net 2 is the name of the second interface 15 | # **This will be ignored when passive is selected in suricataConfig section 16 | net2: 'inline-2' 17 | #useHostNetworking If this option is set to true, the container will see all physical interfaces on the physical host it resides. This has some security issues but can be useful in some situations. 18 | useHostNetworking: true 19 | # NodeSelector can be used when it is desired to over-ride the default sensor. For example, if it was desired to specify passive and inline sections of the network. 20 | nodeSelector: 21 | label: sensor 22 | deploymentOptions: 23 | # The three options for deployment are standalone, cluster and external. Please see the documentation on deploy options for definitions and special considerations. 24 | deployment: 'standalone' 25 | # Below options are only used in external mode when sending to a Redis queue outside of the cluster 26 | externalOptions: 27 | externalHost: 172.31.20.90 28 | nodePort: 30029 29 | suricataConfig: 30 | #If inline is set to true the Suricata pod will have three network interfaces, and will be set to take layer 2 traffic in and output it to the other interface as well as connected to the overlay network. If the option is set to false then Suricata will have two interfaces, one for the passive and the other for the network. 
31 | inline: false 32 | #If alertsOnly is set to true then Suricata will not record logs for http,dns,tls,smtp. This is common when using another tool that may already be recording this such as Bro. 33 | alertsOnly: false 34 | #Specifies internal networks that will be monitored. This sets the direction of rules. 35 | homeNet: '[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]' 36 | # These are the IPs outside of yout network. This generally is set to !$HOME_NET which is everything not in the homeNet variable. 37 | externalNet: '!$HOME_NET' 38 | # Specifies the threads that will be used by AF-PACKET 39 | net0Threads: 1 40 | net1Threads: 1 41 | # If CPU Affinity is set to no then Suricata will share CPU resources with other tools by using the OS scheduler. If you set this option to Yes, you will be responsible for configuring which threads the various components use. See the below link for more details: 42 | # http://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html#threading 43 | # If you need to specify mulitple cores, the syntax is: '"9-11","25-27"' 44 | setCpuAffinity: no 45 | managementCpuSet: 1 46 | receiveCpuSet: 1 47 | workerCpuSet: 1 48 | workerThreads: 1 49 | verdictCpuSet: 1 50 | #Turning on rules updates will update the rules file for all deployed Suricata pods and cause them to reload the rules on a regular schedule as defined by rulesUpdateSchedule. While updating is taking place, Suricata can take up to twice as much memory during this process, therefore if this is used raise the memory limit to at least 8G otherwise Suricata will be killed every time it is updated. When Suricata is first deployed, you will still get the latest updates at the time suricata is deployed deployed regardless of what this option is set to. 51 | enableRulesUpdates: false 52 | # Set how often you want download new rules and reset Suricata 53 | # Accepts cronjob format, default runs twice per day 54 | rulesUpdateSchedule: "* */12 * * *" 55 | # List of all resources that will be pulled, can list multiple options 56 | rulesSources: 57 | - https://rules.emergingthreats.net/open/suricata-4.0/emerging.rules.tar.gz 58 | # sslbl is currently down, not sure when it will be back up (or if) 59 | # - https://sslbl.abuse.ch/blacklist/sslblacklist.rules 60 | # List of rule modifications. This takes the same format as suricata-updates files, but needs a dash before each line. 
For more details: https://suricata-update.readthedocs.io/en/latest/update.html#rule-matching 61 | ruleModifications: 62 | enableConf: 63 | - #2019401 < Example rule number to enable 64 | - group:botcc.rules 65 | # - group:botcc.portgrouped.rules 66 | - group:ciarmy.rules 67 | - group:compromised.rules 68 | - group:drop.rules 69 | - dshield.rules 70 | # - group:emerging-activex.rules 71 | - group:emerging-attack_response.rules 72 | - group:emerging-chat.rules 73 | - group:emerging-current_events.rules 74 | - group:emerging-dns.rules 75 | - group:emerging-dos.rules 76 | - group:emerging-exploit.rules 77 | - group:emerging-ftp.rules 78 | # - group:emerging-games.rules 79 | # - group:emerging-icmp_info.rules 80 | # - group:emerging-icmp.rules 81 | - group:emerging-imap.rules 82 | # - group:emerging-inappropriate.rules 83 | # - group:emerging-info.rules 84 | - group:emerging-malware.rules 85 | - group:emerging-misc.rules 86 | - group:emerging-mobile_malware.rules 87 | - group:emerging-netbios.rules 88 | - group:emerging-p2p.rules 89 | - group:emerging-policy.rules 90 | - group:emerging-pop3.rules 91 | - group:emerging-rpc.rules 92 | # - group:emerging-scada.rules 93 | # - group:emerging-scada_special.rules 94 | - group:emerging-scan.rules 95 | # - group:emerging-shellcode.rules 96 | - group:emerging-smtp.rules 97 | - group:emerging-snmp.rules 98 | - group:emerging-sql.rules 99 | - group:emerging-telnet.rules 100 | - group:emerging-tftp.rules 101 | - group:emerging-trojan.rules 102 | - group:emerging-user_agents.rules 103 | - group:emerging-voip.rules 104 | - group:emerging-web_client.rules 105 | - group:emerging-web_server.rules 106 | # - group:emerging-web_specific_apps.rules 107 | - group:emerging-worm.rules 108 | - group:tor.rules 109 | # - group:decoder-events.rules # available in suricata sources under rules dir 110 | # - group:stream-events.rules # available in suricata sources under rules dir 111 | - group:http-events.rules # available in suricata sources under rules dir 112 | - group:smtp-events.rules # available in suricata sources under rules dir 113 | - group:dns-events.rules # available in suricata sources under rules dir 114 | - group:tls-events.rules # available in suricata sources under rules dir 115 | # - group:modbus-events.rules # available in suricata sources under rules dir 116 | # - group:app-layer-events.rules # available in suricata sources under rules dir 117 | # - group:dnp3-events.rules # available in suricata sources under rules dir 118 | # - group:ntp-events.rules # available in suricata sources under rules dir 119 | disableConf: 120 | - #2019401 < Example rule number to disable 121 | dropConf: 122 | - #2019401 < Example rule number drop 123 | modifyConf: 124 | - #modifysid * "^drop(.*)noalert(.*)" | "alert${1}noalert${2}" < Exmaple rule to modify 125 | # Requests are set to accomodate limited resource VMs 126 | requests: 127 | cpu: 100m 128 | memory: 64Mi 129 | # These limit the number of CPU cores and memory the sensor can use. If automatic rules updates are enabled, set memory to at least 8G. A lot of factors can be required for the amount of memory that is needed (amount of traffic, number of rules, etc). If you are seeing eccessive restarts of Suricata or OOM (Out of memory) killed errors in Kubernetes, up these values 130 | limits: 131 | cpu: 2 132 | memory: 4G 133 | #Logstash values will be ignored if standalone is set to true as no logstash will be built. 
134 | logstashConfig: 135 | # Limits the number of threads that will be used by Logstash 136 | threads: 2 137 | # Configures the batch count that Logstash will process at a time. 138 | batchCount: 250 139 | #Sets the initial and max JVM heap that will be allocated, generally these two values should be the same. 140 | initialJvmHeap: 1g 141 | maxJvmHeap: 1g 142 | #Sets the number of workers for pulling Redis events 143 | pipelineOutputWorkers: 2 144 | # Sets the amount of events grabbed from Redis at one time. 145 | pipelineBatchSize: 150 146 | requests: 147 | cpu: 100m 148 | memory: 64Mi 149 | limits: 150 | cpu: 2 151 | memory: 2G 152 | #These values will be ignored if standalone is set to true as Redis will not be built 153 | redisConfig: 154 | requests: 155 | cpu: 100m 156 | memory: 64Mi 157 | #limits the CPU and Memory of Redis. 158 | limits: 159 | cpu: 2 160 | memory: 1G 161 | -------------------------------------------------------------------------------- /suricata/templates/logstash-suricata-config.yaml: -------------------------------------------------------------------------------- 1 | {{ if eq .Values.deploymentOptions.deployment "standalone" }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: logstash-{{ template "suricata.fullname" . }}-config 6 | data: 7 | jvm.options: | 8 | ## JVM configuration 9 | # Xms represents the initial size of total heap space 10 | # Xmx represents the maximum size of total heap space 11 | 12 | -Xms{{ .Values.logstashConfig.initialJvmHeap }} 13 | -Xmx{{ .Values.logstashConfig.maxJvmHeap }} 14 | 15 | ################################################################ 16 | ## Expert settings 17 | ################################################################ 18 | ## 19 | ## All settings below this section are considered 20 | ## expert settings. Don't tamper with them unless 21 | ## you understand what you are doing 22 | ## 23 | ################################################################ 24 | 25 | ## GC configuration 26 | -XX:+UseParNewGC 27 | -XX:+UseConcMarkSweepGC 28 | -XX:CMSInitiatingOccupancyFraction=75 29 | -XX:+UseCMSInitiatingOccupancyOnly 30 | 31 | ## optimizations 32 | 33 | # disable calls to System#gc 34 | -XX:+DisableExplicitGC 35 | 36 | ## Locale 37 | # Set the locale language 38 | #-Duser.language=en 39 | 40 | # Set the locale country 41 | #-Duser.country=US 42 | 43 | # Set the locale variant, if any 44 | #-Duser.variant= 45 | 46 | ## basic 47 | 48 | # set the I/O temp directory 49 | #-Djava.io.tmpdir=$HOME 50 | 51 | # set to headless, just in case 52 | -Djava.awt.headless=true 53 | 54 | # ensure UTF-8 encoding by default (e.g. 
filenames) 55 | -Dfile.encoding=UTF-8 56 | 57 | # use our provided JNA always versus the system one 58 | #-Djna.nosys=true 59 | 60 | # Turn on JRuby invokedynamic 61 | # -Djruby.compile.invokedynamic=true 62 | 63 | ## heap dumps 64 | 65 | # generate a heap dump when an allocation from the Java heap fails 66 | # heap dumps are created in the working directory of the JVM 67 | -XX:+HeapDumpOnOutOfMemoryError 68 | 69 | # specify an alternative path for heap dumps 70 | # ensure the directory exists and has sufficient space 71 | #-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof 72 | 73 | ## GC logging 74 | #-XX:+PrintGCDetails 75 | #-XX:+PrintGCTimeStamps 76 | #-XX:+PrintGCDateStamps 77 | #-XX:+PrintClassHistogram 78 | #-XX:+PrintTenuringDistribution 79 | #-XX:+PrintGCApplicationStoppedTime 80 | 81 | # log GC status to a file with time stamps 82 | # ensure the directory exists 83 | #-Xloggc:${LS_GC_LOG_FILE} 84 | 85 | # Entropy source for randomness 86 | -Djava.security.egd=file:/dev/urandom 87 | log4j2.properties : | 88 | status = error 89 | name = LogstashPropertiesConfig 90 | 91 | appender.console.type = Console 92 | appender.console.name = plain_console 93 | appender.console.layout.type = PatternLayout 94 | appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n 95 | 96 | appender.json_console.type = Console 97 | appender.json_console.name = json_console 98 | appender.json_console.layout.type = JSONLayout 99 | appender.json_console.layout.compact = true 100 | appender.json_console.layout.eventEol = true 101 | 102 | rootLogger.level = ${sys:ls.log.level} 103 | rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console 104 | logstash.yml : | 105 | http.host: "0.0.0.0" 106 | path.config: /usr/share/logstash/pipeline 107 | xpack.monitoring.enabled: true 108 | xpack.monitoring.elasticsearch.url: http://data-service:9200 109 | xpack.monitoring.elasticsearch.username: logstash_system 110 | xpack.monitoring.elasticsearch.password: changeme 111 | pipelines.yml : | 112 | # List of pipelines to be loaded by Logstash 113 | # 114 | # This document must be a list of dictionaries/hashes, where the keys/values are pipeline settings. 115 | # Default values for ommitted settings are read from the `logstash.yml` file. 116 | # When declaring multiple pipelines, each MUST have its own `pipeline.id`. 
117 | # 118 | # Example of two pipelines: 119 | # 120 | # - pipeline.id: test 121 | # pipeline.workers: 1 122 | # pipeline.batch.size: 1 123 | # config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }" 124 | # - pipeline.id: another_test 125 | # queue.type: persisted 126 | # path.config: "/tmp/logstash/*.config" 127 | # 128 | # Available options: 129 | # 130 | # # name of the pipeline 131 | # pipeline.id: mylogs 132 | # 133 | # # The configuration string to be used by this pipeline 134 | # config.string: "input { generator {} } filter { sleep { time => 1 } } output { stdout { codec => dots } }" 135 | # 136 | # # The path from where to read the configuration text 137 | # path.config: "/etc/conf.d/logstash/myconfig.cfg" 138 | # 139 | # # How many worker threads execute the Filters+Outputs stage of the pipeline 140 | # pipeline.workers: 1 (actually defaults to number of CPUs) 141 | pipeline.workers: {{ .Values.logstashConfig.pipelineWorkers }} 142 | # 143 | # # How many events to retrieve from inputs before sending to filters+workers 144 | pipeline.batch.size: {{ .Values.logstashConfig.pipelineWorkers }} 145 | # 146 | # # How long to wait before dispatching an undersized batch to filters+workers 147 | # pipeline.batch.delay: 5 148 | # 149 | # # How many workers should be used per output plugin instance 150 | pipeline.output.workers: {{ .Values.logstashConfig.pipelineOutputWorkers }} 151 | # 152 | # # Internal queuing model, "memory" for legacy in-memory based queuing and 153 | # # "persisted" for disk-based acked queueing. Defaults is memory 154 | # queue.type: memory 155 | # 156 | # # If using queue.type: persisted, the page data files size. The queue data consists of 157 | # # append-only data files separated into pages. Default is 250mb 158 | # queue.page_capacity: 250mb 159 | # 160 | # # If using queue.type: persisted, the maximum number of unread events in the queue. 161 | # # Default is 0 (unlimited) 162 | # queue.max_events: 0 163 | # 164 | # # If using queue.type: persisted, the total capacity of the queue in number of bytes. 165 | # # Default is 1024mb or 1gb 166 | # queue.max_bytes: 1024mb 167 | # 168 | # # If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint 169 | # # Default is 1024, 0 for unlimited 170 | # queue.checkpoint.acks: 1024 171 | # 172 | # # If using queue.type: persisted, the maximum number of written events before forcing a checkpoint 173 | # # Default is 1024, 0 for unlimited 174 | # queue.checkpoint.writes: 1024 175 | # 176 | # # If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page 177 | # # Default is 1000, 0 for no periodic checkpoint. 178 | # queue.checkpoint.interval: 1000 179 | # 180 | # # Enable Dead Letter Queueing for this pipeline. 181 | # dead_letter_queue.enable: false 182 | # 183 | # If using dead_letter_queue.enable: true, the maximum size of dead letter queue for this pipeline. Entries 184 | # will be dropped if they would increase the size of the dead letter queue beyond this setting. 185 | # Default is 1024mb 186 | # dead_letter_queue.max_bytes: 1024mb 187 | # 188 | # If using dead_letter_queue.enable: true, the directory path where the data files will be stored. 
189 | # Default is path.data/dead_letter_queue 190 | # 191 | # path.dead_letter_queue: 192 | startup.options : | 193 | ################################################################################ 194 | # These settings are ONLY used by $LS_HOME/bin/system-install to create a custom 195 | # startup script for Logstash and is not used by Logstash itself. It should 196 | # automagically use the init system (systemd, upstart, sysv, etc.) that your 197 | # Linux distribution uses. 198 | # 199 | # After changing anything here, you need to re-run $LS_HOME/bin/system-install 200 | # as root to push the changes to the init script. 201 | ################################################################################ 202 | 203 | # Override Java location 204 | #JAVACMD=/usr/bin/java 205 | 206 | # Set a home directory 207 | LS_HOME=/usr/share/logstash 208 | 209 | # logstash settings directory, the path which contains logstash.yml 210 | LS_SETTINGS_DIR="${LS_HOME}/config" 211 | 212 | # Arguments to pass to logstash 213 | LS_OPTS="--path.settings ${LS_SETTINGS_DIR}" 214 | 215 | # Arguments to pass to java 216 | LS_JAVA_OPTS="" 217 | 218 | # pidfiles aren't used the same way for upstart and systemd; this is for sysv users. 219 | LS_PIDFILE=/var/run/logstash.pid 220 | 221 | # user and group id to be invoked as 222 | LS_USER=logstash 223 | LS_GROUP=logstash 224 | 225 | # Enable GC logging by uncommenting the appropriate lines in the GC logging 226 | # section in jvm.options 227 | LS_GC_LOG_FILE=/var/log/logstash/gc.log 228 | 229 | # Open file limit 230 | LS_OPEN_FILES=16384 231 | 232 | # Nice level 233 | LS_NICE=19 234 | 235 | # Change these to have the init script named and described differently 236 | # This is useful when running multiple instances of Logstash on the same 237 | # physical box or vm 238 | SERVICE_NAME="logstash" 239 | SERVICE_DESCRIPTION="logstash" 240 | 241 | # If you need to run a command or script before launching Logstash, put it 242 | # between the lines beginning with `read` and `EOM`, and uncomment those lines. 243 | ### 244 | ## read -r -d '' PRESTART << EOM 245 | ## EOM 246 | {{ end }} 247 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EDCOP Suricata Guide 2 | 3 | Table of Contents 4 | ----------------- 5 | 6 | * [Configuration Guide](#configuration-guide) 7 | * [Image Repository](#image-repository) 8 | * [Networks](#networks) 9 | * [Node Selector](#node-selector) 10 | * [Deployment Options](#deployment-options) 11 | * [Suricata Configuration](#suricata-configuration) 12 | * [Inline/Passive Mode](#inline/passive-mode) 13 | * [Threads](#threads) 14 | * [CPU Affinity](#cpu-affinity) 15 | * [Resource Limits](#resource-limits) 16 | * [Rules Updates](#rules-updates) 17 | * [Logstash Configuration](#logstash-configuration) 18 | * [Redis Configuration](#redis-configuration) 19 | 20 | # Configuration Guide 21 | 22 | Within this configuration guide, you will find instructions for modifying Suricata's helm chart. All changes should be made in the *values.yaml* file. 23 | Please share any bugs or features requests via GitHub issues. 24 | 25 | ## Image Repository 26 | 27 | By default, images are pulled from official EDCOP's official repositorie, and the respective tool's official repository. If you're changing this value, make sure you use the full repository name. 
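The image settings shown below can also be overridden at install time instead of editing *values.yaml*. A minimal sketch using Helm's `--set` flag in the same Helm 2 style used elsewhere in this repository (`my-suricata` is a placeholder release name, and the tag is only an example):

```
# Install the chart with a specific Suricata image; "suricata" is the chart directory in this repo
helm install --name my-suricata \
  --set images.suricata=gcr.io/edcop-dev/suricata:0.9.16 \
  suricata
```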
28 | 29 | ``` 30 | images: 31 | suricata: gcr.io/edcop-public/suricata: 32 | logstash: docker.elastic.co/logstash/logstash: 33 | redis: redis: 34 | runner: gcr.io/edcop-public/runner: 35 | ``` 36 | 37 | ## Networks 38 | 39 | Suricata uses 2 or 3 interfaces depending on whether it is in passive or inline mode. If you choose passive mode, net2 will be ignored and net1 will be the name of the passive interface. 40 | By default, these interfaces are named *calico*, *passive*, *inline-1*, and *inline-2*. 41 | When useHostNetworking is set to true these interfaces are named *calico*, and the names of the interfaces you used in [EDCOP-CONFIGURESENSORS](https://github.com/sealingtech/EDCOP-CONFIGURESENSORS). 42 | 43 | useHostNetworking is used in situations where container networking is insufficient (such as the lack of SR-IOV). This allows the container to see all physical interfaces of the nodes. This has some security concerns due to the fact that Suricata now have access to all physical networking. When useHostNetworking is set, the interface names will be pulled from the secrets created by the CONFIGURESENSORS repository. If using passive mode only the *passive interface* will be used. If using inline, then both *inline-interface1* and *inline-interface2* will be used. When useHostNetworking is specified, the container will still be joined to the Calico network, but will ignore passive, inline-1, and inline-2 SR-IOV networks. 44 | 45 | ``` 46 | networks: 47 | overlay: calico 48 | net1: passive 49 | net2: 50 | 51 | suricataConfig: 52 | useHostNetworking: false 53 | inline: false 54 | ``` 55 | 56 | ``` 57 | networks: 58 | overlay: calico 59 | net1: inline-1 60 | net2: inline-2 61 | 62 | suricataConfig: 63 | useHostNetworking: false 64 | inline: true 65 | ``` 66 | 67 | ``` 68 | networks: 69 | overlay: calico 70 | 71 | suricataConfig: 72 | useHostNetworking: true 73 | ``` 74 | 75 | To find the names of your networks, use the following command: 76 | 77 | ``` 78 | # kubectl get networks 79 | NAME AGE 80 | calico 1d 81 | inline-1 1d 82 | inline-2 1d 83 | ``` 84 | 85 | ## Node Selector 86 | 87 | This value tells Kubernetes which hosts the daemonset should be deployed to by using labels given to the hosts. Hosts without the defined label will not receive pods. Suricata will only deploy to nodes that have been labeled 'sensor=true' 88 | 89 | ``` 90 | nodeSelector: 91 | label: sensor 92 | ``` 93 | 94 | To find out what labels your hosts have, please use the following: 95 | ``` 96 | # kubectl get nodes --show-labels 97 | NAME STATUS ROLES AGE VERSION LABELS 98 | master Ready master 1d v1.9.1 ...,infrastructure=true 99 | minion-1 Ready 1d v1.9.1 ...,sensor=true 100 | minion-2 Ready 1d v1.9.1 ...,sensor=true 101 | ``` 102 | 103 | ## Deployment Options 104 | 105 | For a detailed explanation on how the deployment modes work, please click [here](https://github.com/SealingTech/EDCOP-TOOLS/blob/master/docs/Deployment_Options.md). 106 | External options will only be used if you're in external mode and Redis is located on another host. 107 | 108 | ``` 109 | deploymentOptions: 110 | deployment: standalone 111 | externalOptions: 112 | externalHost: 192.168.0.1 113 | nodePort: 30029 114 | ``` 115 | 116 | ## Suricata Configuration 117 | 118 | alertsOnly when set to true in the values.yaml will disable logs for http,dns,tls,smtp. This is common when using another tool that may already be recording this such as Bro. It will still provide alerts. 
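The same `--set` mechanism can toggle this behavior on an existing release without editing *values.yaml*. A sketch (`my-suricata` is a placeholder release name):

```
# Disable http/dns/tls/smtp logging but keep alerts
helm upgrade my-suricata suricata --set suricataConfig.alertsOnly=true
```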
119 | 120 | Suricata can be deployed in either inline or passive mode depending on how your cluster is setup. Inline mode will route traffic through the box for active threat detection and mitigation, while passive mode simply alerts you to potential threats. For an inline mode setup, the following is required: 121 | 122 | * An external traffic loadbalancer passing traffic through 2 interfaces on the host. 123 | * An SR-IOV capable NIC with a sufficient number of VFs created for each interface being used. 124 | * A total of 3 Kubernetes networks: 125 | * An overlay network to pass container traffic (Calico by default). 126 | * One SR-IOV network to accept network traffic and output to the second network. 127 | * A second SR-IOV network to accept network traffic from the first interface and output it back to the loadbalancer. 128 | 129 | ### Inline/Passive Mode 130 | 131 | As mentioned before, if *inline* is set to true, Suricata will be deployed in inline mode and will require 2 networks for passing traffic. Setting to *false* will enable passive mode. 132 | The home and external net settings tell suricata what you consider your internal and external network spaces. You can customize these as needed, but the external net is usually !$HOME_NET for simplicity. 133 | 134 | ``` 135 | suricataConfig: 136 | inline: true 137 | homeNet: '[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]' 138 | externalNet: '!$HOME_NET' 139 | ``` 140 | 141 | ### Threads 142 | 143 | If Suricata is set to inline mode, you will need to specify threads for both net0 and net1. The more threads you have, the more CPU space is required for packet processing. 144 | 145 | ``` 146 | suricataConfig: 147 | ... 148 | net0Threads: 1 149 | net1Threads: 1 150 | ``` 151 | 152 | ### CPU Affinity 153 | 154 | Similarly, CPU affinity will utilize specific cores for packet processing. This setting will significantly increase performance, but should be used with cpu isolation to maximize individual core potential. 155 | For the cpusets, enter either a single core or range of cpus as shown below: 156 | 157 | ``` 158 | suricataConfig: 159 | ... 160 | setCpuAffinity: yes 161 | recieveCpuSet: 0-2 162 | workerCpuSet: 3-7 163 | workerThreads: 1 164 | verdictCpuSet: 8-10 165 | ``` 166 | *For worker CPU sets, please refer to your NUMA node configuration to prevent cache thrashing.* 167 | 168 | ### Rules Updates 169 | The first time Suricata is deployed, a container will be created that will pull the latest version of the rules from the sources defined by rulesSources. If enableRulesUpdates is set to true, a cron job will be created that will update all the rules across all the suricata instances as defined by rulesUpdateSchedule. Updating rules has been seen to double memory usage, and therefore care should be taken to increase the limit to at least 8G in the memory limit. EnableConf, disableConf, dropConf and modifyConf are all used to modify rules from the defaults after the rules are download so that these changes persist after each update. The format for these is basically the same as Suricata-updates but with a dash (-) symbol before each line. See here for more details of how to modify rules. 
https://suricata-update.readthedocs.io/en/latest/update.html#example-configuration-files 170 | 171 | ``` 172 | enableRulesUpdates: false 173 | rulesUpdateSchedule: "* */12 * * *" 174 | # List of all rule sources that will be pulled; multiple sources can be listed 175 | rulesSources: 176 | - https://rules.emergingthreats.net/open/suricata-4.0/emerging.rules.tar.gz 177 | ruleModifications: 178 | enableConf: 179 | - #2019401 < Example rule number to enable 180 | ... More rules ... 181 | disableConf: 182 | - #2019401 < Example rule number to disable 183 | dropConf: 184 | - #2019401 < Example rule number to drop 185 | modifyConf: 186 | - #modifysid * "^drop(.*)noalert(.*)" | "alert${1}noalert${2}" < Example rule to modify 187 | ``` 188 | 189 | ### Resource Limits 190 | 191 | You can set limits on Suricata to ensure it doesn't use more CPU/memory than necessary. Finding the right balance can be tricky, so some testing may be required. 192 | 193 | ``` 194 | suricataConfig: 195 | limits: 196 | cpu: 2 197 | memory: 4G 198 | ``` 199 | 200 | ## Logstash Configuration 201 | 202 | Logstash is only included in the Daemonset if you're using standalone mode, and it keeps the pipeline rules minimal for the data it ingests. Having one Logstash instance per node handle every tool would clutter the rules and cause congestion with log filtering, hurting events-per-second throughput. This instance only deals with Suricata's logs and doesn't need complicated filters to figure out which tool the logs came from. 203 | Please make sure to read the [Logstash Performance Tuning Guide](https://www.elastic.co/guide/en/logstash/current/performance-troubleshooting.html) for a better understanding of managing Logstash's resources. 204 | 205 | ``` 206 | logstashConfig: 207 | threads: 2 208 | batchCount: 250 209 | initialJvmHeap: 4g 210 | maxJvmHeap: 4g 211 | pipelineOutputWorkers: 2 212 | pipelineBatchSize: 150 213 | limits: 214 | cpu: 2 215 | memory: 8G 216 | ``` 217 | 218 | ## Redis Configuration 219 | 220 | Redis is also included in the Daemonset (except in external mode) for the same reasons Logstash is. Currently, you can only limit the resources of Redis in this section, but in the future we would like to add configmaps for tuning purposes. 221 | 222 | ``` 223 | redisConfig: 224 | limits: 225 | cpu: 2 226 | memory: 8G 227 | ``` 228 | -------------------------------------------------------------------------------- /container/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /container/suricata.yaml: -------------------------------------------------------------------------------- 1 | %YAML 1.1 2 | --- 3 | 4 | # Suricata configuration file. In addition to the comments describing all 5 | # options in this file, full documentation can be found at: 6 | # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml 7 | 8 | ## 9 | ## Step 1: inform Suricata about your network 10 | ## 11 | 12 | vars: 13 | # more specifc is better for alert accuracy and performance 14 | address-groups: 15 | HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]" 16 | #HOME_NET: "[192.168.0.0/16]" 17 | #HOME_NET: "[10.0.0.0/8]" 18 | #HOME_NET: "[172.16.0.0/12]" 19 | #HOME_NET: "any" 20 | 21 | EXTERNAL_NET: "!$HOME_NET" 22 | #EXTERNAL_NET: "any" 23 | 24 | HTTP_SERVERS: "$HOME_NET" 25 | SMTP_SERVERS: "$HOME_NET" 26 | SQL_SERVERS: "$HOME_NET" 27 | DNS_SERVERS: "$HOME_NET" 28 | TELNET_SERVERS: "$HOME_NET" 29 | AIM_SERVERS: "$EXTERNAL_NET" 30 | DNP3_SERVER: "$HOME_NET" 31 | DNP3_CLIENT: "$HOME_NET" 32 | MODBUS_CLIENT: "$HOME_NET" 33 | MODBUS_SERVER: "$HOME_NET" 34 | ENIP_CLIENT: "$HOME_NET" 35 | ENIP_SERVER: "$HOME_NET" 36 | 37 | port-groups: 38 | HTTP_PORTS: "80" 39 | SHELLCODE_PORTS: "!80" 40 | ORACLE_PORTS: 1521 41 | SSH_PORTS: 22 42 | DNP3_PORTS: 20000 43 | MODBUS_PORTS: 502 44 | FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]" 45 | FTP_PORTS: 21 46 | 47 | 48 | ## 49 | ## Step 2: select the rules to enable or disable 50 | ## 51 | 52 | default-rule-path: /etc/suricata/rules 53 | rule-files: 54 | - botcc.rules 55 | # - botcc.portgrouped.rules 56 | - ciarmy.rules 57 | - compromised.rules 58 | - drop.rules 59 | - dshield.rules 60 | # - emerging-activex.rules 61 | - emerging-attack_response.rules 62 | - emerging-chat.rules 63 | - emerging-current_events.rules 64 | - emerging-dns.rules 65 | - emerging-dos.rules 66 | - emerging-exploit.rules 67 | - emerging-ftp.rules 68 | # - emerging-games.rules 69 | # - emerging-icmp_info.rules 70 | # - emerging-icmp.rules 71 | - emerging-imap.rules 72 | # - emerging-inappropriate.rules 73 | # - emerging-info.rules 74 | - emerging-malware.rules 75 | - emerging-misc.rules 76 | - emerging-mobile_malware.rules 77 | - emerging-netbios.rules 78 | - emerging-p2p.rules 79 | - emerging-policy.rules 80 | - emerging-pop3.rules 81 | - emerging-rpc.rules 82 | # - emerging-scada.rules 83 | # - emerging-scada_special.rules 84 | - emerging-scan.rules 85 | # - emerging-shellcode.rules 86 | - emerging-smtp.rules 87 | - emerging-snmp.rules 88 | - emerging-sql.rules 89 | - emerging-telnet.rules 90 | - emerging-tftp.rules 91 | - emerging-trojan.rules 92 | - emerging-user_agents.rules 93 | - emerging-voip.rules 94 | - emerging-web_client.rules 95 | - emerging-web_server.rules 96 | # - emerging-web_specific_apps.rules 97 | - emerging-worm.rules 98 | - tor.rules 99 | # - decoder-events.rules # available in suricata sources under rules dir 100 | # - stream-events.rules # available in suricata sources under rules dir 101 | - http-events.rules # available in suricata sources under rules dir 102 | - smtp-events.rules # available in suricata sources under rules dir 103 | - dns-events.rules # available in suricata sources under rules dir 104 | - tls-events.rules # available in suricata sources under rules dir 105 | # - modbus-events.rules # available in suricata sources under rules dir 106 | # - app-layer-events.rules # available in suricata sources under rules dir 107 | # - dnp3-events.rules # 
available in suricata sources under rules dir 108 | # - ntp-events.rules # available in suricata sources under rules dir 109 | 110 | classification-file: /etc/suricata/rules/classification.config 111 | reference-config-file: /etc/suricata/rules/reference.config 112 | # threshold-file: /etc/suricata/threshold.config 113 | 114 | 115 | ## 116 | ## Step 3: select outputs to enable 117 | ## 118 | 119 | # The default logging directory. Any log or output file will be 120 | # placed here if its not specified with a full path name. This can be 121 | # overridden with the -l command line parameter. 122 | default-log-dir: /var/log/suricata/ 123 | 124 | # global stats configuration 125 | stats: 126 | enabled: yes 127 | # The interval field (in seconds) controls at what interval 128 | # the loggers are invoked. 129 | interval: 8 130 | 131 | # Configure the type of alert (and other) logging you would like. 132 | outputs: 133 | # a line based alerts log similar to Snort's fast.log 134 | - fast: 135 | enabled: yes 136 | filename: fast.log 137 | append: yes 138 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 139 | 140 | # Extensible Event Format (nicknamed EVE) event log in JSON format 141 | - eve-log: 142 | enabled: yes 143 | filetype: regular #regular|syslog|unix_dgram|unix_stream|redis 144 | filename: eve.json 145 | #prefix: "@cee: " # prefix to prepend to each log entry 146 | # the following are valid when type: syslog above 147 | #identity: "suricata" 148 | #facility: local5 149 | #level: Info ## possible levels: Emergency, Alert, Critical, 150 | ## Error, Warning, Notice, Info, Debug 151 | #redis: 152 | # server: 127.0.0.1 153 | # port: 6379 154 | # async: true ## if redis replies are read asynchronously 155 | # mode: list ## possible values: list|lpush (default), rpush, channel|publish 156 | # ## lpush and rpush are using a Redis list. "list" is an alias for lpush 157 | # ## publish is using a Redis channel. "channel" is an alias for publish 158 | # key: suricata ## key or channel to use (default to suricata) 159 | # Redis pipelining set up. This will enable to only do a query every 160 | # 'batch-size' events. This should lower the latency induced by network 161 | # connection at the cost of some memory. There is no flushing implemented 162 | # so this setting as to be reserved to high traffic suricata. 163 | # pipelining: 164 | # enabled: yes ## set enable to yes to enable query pipelining 165 | # batch-size: 10 ## number of entry to keep in buffer 166 | types: 167 | - alert: 168 | # payload: yes # enable dumping payload in Base64 169 | # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log 170 | # payload-printable: yes # enable dumping payload in printable (lossy) format 171 | # packet: yes # enable dumping of packet (without stream segments) 172 | # http-body: yes # enable dumping of http body in Base64 173 | # http-body-printable: yes # enable dumping of http body in printable format 174 | metadata: yes # add L7/applayer fields, flowbit and other vars to the alert 175 | 176 | # Enable the logging of tagged packets for rules using the 177 | # "tag" keyword. 178 | tagged-packets: yes 179 | 180 | # HTTP X-Forwarded-For support by adding an extra field or overwriting 181 | # the source or destination IP address (depending on flow direction) 182 | # with the one reported in the X-Forwarded-For HTTP header. This is 183 | # helpful when reviewing alerts for traffic that is being reverse 184 | # or forward proxied. 
185 | xff: 186 | enabled: no 187 | # Two operation modes are available, "extra-data" and "overwrite". 188 | mode: extra-data 189 | # Two proxy deployments are supported, "reverse" and "forward". In 190 | # a "reverse" deployment the IP address used is the last one, in a 191 | # "forward" deployment the first IP address is used. 192 | deployment: reverse 193 | # Header name where the actual IP address will be reported, if more 194 | # than one IP address is present, the last IP address will be the 195 | # one taken into consideration. 196 | header: X-Forwarded-For 197 | - http: 198 | extended: yes # enable this for extended logging information 199 | # custom allows additional http fields to be included in eve-log 200 | # the example below adds three additional fields when uncommented 201 | #custom: [Accept-Encoding, Accept-Language, Authorization] 202 | - dns: 203 | # control logging of queries and answers 204 | # default yes, no to disable 205 | query: yes # enable logging of DNS queries 206 | answer: yes # enable logging of DNS answers 207 | # control which RR types are logged 208 | # all enabled if custom not specified 209 | #custom: [a, aaaa, cname, mx, ns, ptr, txt] 210 | - tls: 211 | extended: yes # enable this for extended logging information 212 | # output TLS transaction where the session is resumed using a 213 | # session id 214 | #session-resumption: no 215 | # custom allows to control which tls fields that are included 216 | # in eve-log 217 | #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain] 218 | - files: 219 | force-magic: no # force logging magic on all logged files 220 | # force logging of checksums, available hash functions are md5, 221 | # sha1 and sha256 222 | #force-hash: [md5] 223 | #- drop: 224 | # alerts: yes # log alerts that caused drops 225 | # flows: all # start or all: 'start' logs only a single drop 226 | # # per flow direction. All logs each dropped pkt. 227 | - smtp: 228 | #extended: yes # enable this for extended logging information 229 | # this includes: bcc, message-id, subject, x_mailer, user-agent 230 | # custom fields logging from the list: 231 | # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, 232 | # x-originating-ip, in-reply-to, references, importance, priority, 233 | # sensitivity, organization, content-md5, date 234 | #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] 235 | # output md5 of fields: body, subject 236 | # for the body you need to set app-layer.protocols.smtp.mime.body-md5 237 | # to yes 238 | #md5: [body, subject] 239 | 240 | #- dnp3 241 | #- nfs 242 | - ssh 243 | - stats: 244 | totals: yes # stats for all threads merged together 245 | threads: no # per thread stats 246 | deltas: no # include delta values 247 | # bi-directional flows 248 | - flow 249 | # uni-directional flows 250 | #- netflow 251 | # Vars log flowbits and other packet and flow vars 252 | #- vars 253 | 254 | # alert output for use with Barnyard2 255 | - unified2-alert: 256 | enabled: no 257 | filename: unified2.alert 258 | 259 | # File size limit. Can be specified in kb, mb, gb. Just a number 260 | # is parsed as bytes. 261 | #limit: 32mb 262 | 263 | # By default unified2 log files have the file creation time (in 264 | # unix epoch format) appended to the filename. Set this to yes to 265 | # disable this behaviour. 266 | #nostamp: no 267 | 268 | # Sensor ID field of unified2 alerts. 
269 | #sensor-id: 0 270 | 271 | # Include payload of packets related to alerts. Defaults to true, set to 272 | # false if payload is not required. 273 | #payload: yes 274 | 275 | # HTTP X-Forwarded-For support by adding the unified2 extra header or 276 | # overwriting the source or destination IP address (depending on flow 277 | # direction) with the one reported in the X-Forwarded-For HTTP header. 278 | # This is helpful when reviewing alerts for traffic that is being reverse 279 | # or forward proxied. 280 | xff: 281 | enabled: no 282 | # Two operation modes are available, "extra-data" and "overwrite". Note 283 | # that in the "overwrite" mode, if the reported IP address in the HTTP 284 | # X-Forwarded-For header is of a different version of the packet 285 | # received, it will fall-back to "extra-data" mode. 286 | mode: extra-data 287 | # Two proxy deployments are supported, "reverse" and "forward". In 288 | # a "reverse" deployment the IP address used is the last one, in a 289 | # "forward" deployment the first IP address is used. 290 | deployment: reverse 291 | # Header name where the actual IP address will be reported, if more 292 | # than one IP address is present, the last IP address will be the 293 | # one taken into consideration. 294 | header: X-Forwarded-For 295 | 296 | # a line based log of HTTP requests (no alerts) 297 | - http-log: 298 | enabled: no 299 | filename: http.log 300 | append: yes 301 | #extended: yes # enable this for extended logging information 302 | #custom: yes # enabled the custom logging format (defined by customformat) 303 | #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P" 304 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 305 | 306 | # a line based log of TLS handshake parameters (no alerts) 307 | - tls-log: 308 | enabled: no # Log TLS connections. 309 | filename: tls.log # File to store TLS logs. 310 | append: yes 311 | #extended: yes # Log extended information like fingerprint 312 | #custom: yes # enabled the custom logging format (defined by customformat) 313 | #customformat: "%{%D-%H:%M:%S}t.%z %a:%p -> %A:%P %v %n %d %D" 314 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 315 | # output TLS transaction where the session is resumed using a 316 | # session id 317 | #session-resumption: no 318 | 319 | # output module to store certificates chain to disk 320 | - tls-store: 321 | enabled: no 322 | #certs-log-dir: certs # directory to store the certificates files 323 | 324 | # a line based log of DNS requests and/or replies (no alerts) 325 | - dns-log: 326 | enabled: no 327 | filename: dns.log 328 | append: yes 329 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 330 | 331 | # Packet log... log packets in pcap format. 3 modes of operation: "normal" 332 | # "multi" and "sguil". 333 | # 334 | # In normal mode a pcap file "filename" is created in the default-log-dir, 335 | # or are as specified by "dir". 336 | # In multi mode, a file is created per thread. This will perform much 337 | # better, but will create multiple files where 'normal' would create one. 338 | # In multi mode the filename takes a few special variables: 339 | # - %n -- thread number 340 | # - %i -- thread id 341 | # - %t -- timestamp (secs or secs.usecs based on 'ts-format' 342 | # E.g. filename: pcap.%n.%t 343 | # 344 | # Note that it's possible to use directories, but the directories are not 345 | # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the 346 | # per thread directory. 
347 | # 348 | # Also note that the limit and max-files settings are enforced per thread. 349 | # So the size limit when using 8 threads with 1000mb files and 2000 files 350 | # is: 8*1000*2000 ~ 16TiB. 351 | # 352 | # In Sguil mode "dir" indicates the base directory. In this base dir the 353 | # pcaps are created in th directory structure Sguil expects: 354 | # 355 | # $sguil-base-dir/YYYY-MM-DD/$filename. 356 | # 357 | # By default all packets are logged except: 358 | # - TCP streams beyond stream.reassembly.depth 359 | # - encrypted streams after the key exchange 360 | # 361 | - pcap-log: 362 | enabled: no 363 | filename: log.pcap 364 | 365 | # File size limit. Can be specified in kb, mb, gb. Just a number 366 | # is parsed as bytes. 367 | limit: 1000mb 368 | 369 | # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" 370 | max-files: 2000 371 | 372 | mode: normal # normal, multi or sguil. 373 | 374 | # Directory to place pcap files. If not provided the default log 375 | # directory will be used. Required for "sguil" mode. 376 | #dir: /nsm_data/ 377 | 378 | #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec 379 | use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets 380 | honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged. 381 | 382 | # a full alerts log containing much information for signature writers 383 | # or for investigating suspected false positives. 384 | - alert-debug: 385 | enabled: no 386 | filename: alert-debug.log 387 | append: yes 388 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 389 | 390 | # alert output to prelude (http://www.prelude-technologies.com/) only 391 | # available if Suricata has been compiled with --enable-prelude 392 | - alert-prelude: 393 | enabled: no 394 | profile: suricata 395 | log-packet-content: no 396 | log-packet-header: yes 397 | 398 | # Stats.log contains data from various counters of the suricata engine. 399 | - stats: 400 | enabled: yes 401 | filename: stats.log 402 | totals: yes # stats for all threads merged together 403 | threads: no # per thread stats 404 | #null-values: yes # print counters that have value 0 405 | 406 | # a line based alerts log similar to fast.log into syslog 407 | - syslog: 408 | enabled: no 409 | # reported identity to syslog. If ommited the program name (usually 410 | # suricata) will be used. 411 | #identity: "suricata" 412 | facility: local5 413 | #level: Info ## possible levels: Emergency, Alert, Critical, 414 | ## Error, Warning, Notice, Info, Debug 415 | 416 | # a line based information for dropped packets in IPS mode 417 | - drop: 418 | enabled: no 419 | filename: drop.log 420 | append: yes 421 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 422 | 423 | # output module to store extracted files to disk 424 | # 425 | # The files are stored to the log-dir in a format "file." where is 426 | # an incrementing number starting at 1. For each file "file." a meta 427 | # file "file..meta" is created. 428 | # 429 | # File extraction depends on a lot of things to be fully done: 430 | # - file-store stream-depth. For optimal results, set this to 0 (unlimited) 431 | # - http request / response body sizes. Again set to 0 for optimal results. 432 | # - rules that contain the "filestore" keyword. 
433 | - file-store: 434 | enabled: no # set to yes to enable 435 | log-dir: files # directory to store the files 436 | force-magic: no # force logging magic on all stored files 437 | # force logging of checksums, available hash functions are md5, 438 | # sha1 and sha256 439 | #force-hash: [md5] 440 | force-filestore: no # force storing of all files 441 | # override global stream-depth for sessions in which we want to 442 | # perform file extraction. Set to 0 for unlimited. 443 | #stream-depth: 0 444 | #waldo: file.waldo # waldo file to store the file_id across runs 445 | # uncomment to disable meta file writing 446 | #write-meta: no 447 | # uncomment the following variable to define how many files can 448 | # remain open for filestore by Suricata. Default value is 0 which 449 | # means files get closed after each write 450 | #max-open-files: 1000 451 | 452 | # output module to log files tracked in a easily parsable json format 453 | - file-log: 454 | enabled: no 455 | filename: files-json.log 456 | append: yes 457 | #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' 458 | 459 | force-magic: no # force logging magic on all logged files 460 | # force logging of checksums, available hash functions are md5, 461 | # sha1 and sha256 462 | #force-hash: [md5] 463 | 464 | # Log TCP data after stream normalization 465 | # 2 types: file or dir. File logs into a single logfile. Dir creates 466 | # 2 files per TCP session and stores the raw TCP data into them. 467 | # Using 'both' will enable both file and dir modes. 468 | # 469 | # Note: limited by stream.depth 470 | - tcp-data: 471 | enabled: no 472 | type: file 473 | filename: tcp-data.log 474 | 475 | # Log HTTP body data after normalization, dechunking and unzipping. 476 | # 2 types: file or dir. File logs into a single logfile. Dir creates 477 | # 2 files per HTTP session and stores the normalized data into them. 478 | # Using 'both' will enable both file and dir modes. 479 | # 480 | # Note: limited by the body limit settings 481 | - http-body-data: 482 | enabled: no 483 | type: file 484 | filename: http-data.log 485 | 486 | # Lua Output Support - execute lua script to generate alert and event 487 | # output. 488 | # Documented at: 489 | # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output 490 | - lua: 491 | enabled: no 492 | #scripts-dir: /etc/suricata/lua-output/ 493 | scripts: 494 | # - script1.lua 495 | 496 | # Logging configuration. This is not about logging IDS alerts/events, but 497 | # output about what Suricata is doing, like startup messages, errors, etc. 498 | logging: 499 | # The default log level, can be overridden in an output section. 500 | # Note that debug level logging will only be emitted if Suricata was 501 | # compiled with the --enable-debug configure option. 502 | # 503 | # This value is overriden by the SC_LOG_LEVEL env var. 504 | default-log-level: notice 505 | 506 | # The default output format. Optional parameter, should default to 507 | # something reasonable if not provided. Can be overriden in an 508 | # output section. You can leave this out to get the default. 509 | # 510 | # This value is overriden by the SC_LOG_FORMAT env var. 511 | #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " 512 | 513 | # A regex to filter output. Can be overridden in an output section. 514 | # Defaults to empty (no filter). 515 | # 516 | # This value is overriden by the SC_LOG_OP_FILTER env var. 517 | default-output-filter: 518 | 519 | # Define your logging outputs. 
If none are defined, or they are all 520 | # disabled you will get the default - console output. 521 | outputs: 522 | - console: 523 | enabled: yes 524 | # type: json 525 | - file: 526 | enabled: yes 527 | level: info 528 | filename: /var/log/suricata/suricata.log 529 | # type: json 530 | - syslog: 531 | enabled: no 532 | facility: local5 533 | format: "[%i] <%d> -- " 534 | # type: json 535 | 536 | 537 | ## 538 | ## Step 4: configure common capture settings 539 | ## 540 | ## See "Advanced Capture Options" below for more options, including NETMAP 541 | ## and PF_RING. 542 | ## 543 | 544 | # Linux high speed capture support 545 | af-packet: 546 | - interface: 547 | # Number of receive threads. "auto" uses the number of cores 548 | threads: 5 549 | # Default clusterid. AF_PACKET will load balance packets based on flow. 550 | cluster-id: 551 | # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. 552 | # This is only supported for Linux kernel > 3.1 553 | # possible value are: 554 | # * cluster_round_robin: round robin load balancing 555 | # * cluster_flow: all packets of a given flow are send to the same socket 556 | # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket 557 | # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same 558 | # socket. Requires at least Linux 3.14. 559 | # * cluster_random: packets are sent randomly to sockets but with an equipartition. 560 | # Requires at least Linux 3.14. 561 | # * cluster_rollover: kernel rotates between sockets filling each socket before moving 562 | # to the next. Requires at least Linux 3.10. 563 | # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system 564 | # with capture card using RSS (require cpu affinity tuning and system irq tuning) 565 | cluster-type: cluster_flow 566 | # In some fragmentation case, the hash can not be computed. If "defrag" is set 567 | # to yes, the kernel will do the needed defragmentation before sending the packets. 568 | defrag: yes 569 | # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is 570 | # full then kernel will send the packet on the next socket with room available. This option 571 | # can minimize packet drop and increase the treated bandwidth on single intensive flow. 572 | rollover: yes 573 | # To use the ring feature of AF_PACKET, set 'use-mmap' to yes 574 | use-mmap: yes 575 | # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock 576 | # your system 577 | #mmap-locked: yes 578 | # Use tpacket_v3 capture mode, only active if use-mmap is true 579 | # Don't use it in IPS or TAP mode as it causes severe latency 580 | tpacket-v3: yes 581 | # Ring size will be computed with respect to max_pending_packets and number 582 | # of threads. You can set manually the ring size in number of packets by setting 583 | # the following value. If you are using flow cluster-type and have really network 584 | # intensive single-flow you could want to set the ring-size independently of the number 585 | # of threads: 586 | ring-size: 400000 587 | # Block size is used by tpacket_v3 only. It should set to a value high enough to contain 588 | # a decent number of packets. Size is in bytes so please consider your MTU. It should be 589 | # a power of 2 and it must be multiple of page size (usually 4096). 
590 | block-size: 393216 591 | # tpacket_v3 block timeout: an open block is passed to userspace if it is not 592 | # filled after block-timeout milliseconds. 593 | #block-timeout: 10 594 | # On busy system, this could help to set it to yes to recover from a packet drop 595 | # phase. This will result in some packets (at max a ring flush) being non treated. 596 | #use-emergency-flush: yes 597 | # recv buffer size, increase value could improve performance 598 | # buffer-size: 32768 599 | # Set to yes to disable promiscuous mode 600 | # disable-promisc: no 601 | # Choose checksum verification mode for the interface. At the moment 602 | # of the capture, some packets may be with an invalid checksum due to 603 | # offloading to the network card of the checksum computation. 604 | # Possible values are: 605 | # - kernel: use indication sent by kernel for each packet (default) 606 | # - yes: checksum validation is forced 607 | # - no: checksum validation is disabled 608 | # - auto: suricata uses a statistical approach to detect when 609 | # checksum off-loading is used. 610 | # Warning: 'checksum-validation' must be set to yes to have any validation 611 | #checksum-checks: kernel 612 | # BPF filter to apply to this interface. The pcap filter syntax apply here. 613 | #bpf-filter: port 80 or udp 614 | # You can use the following variables to activate AF_PACKET tap or IPS mode. 615 | # If copy-mode is set to ips or tap, the traffic coming to the current 616 | # interface will be copied to the copy-iface interface. If 'tap' is set, the 617 | # copy is complete. If 'ips' is set, the packet matching a 'drop' action 618 | # will not be copied. 619 | #copy-mode: ips 620 | #copy-iface: eth1 621 | 622 | # Put default values here. These will be used for an interface that is not 623 | # in the list above. 624 | - interface: default 625 | #threads: auto 626 | #use-mmap: no 627 | #rollover: yes 628 | #tpacket-v3: yes 629 | 630 | # Cross platform libpcap capture support 631 | pcap: 632 | - interface: eth0 633 | # On Linux, pcap will try to use mmaped capture and will use buffer-size 634 | # as total of memory used by the ring. So set this to something bigger 635 | # than 1% of your bandwidth. 636 | #buffer-size: 16777216 637 | #bpf-filter: "tcp and port 25" 638 | # Choose checksum verification mode for the interface. At the moment 639 | # of the capture, some packets may be with an invalid checksum due to 640 | # offloading to the network card of the checksum computation. 641 | # Possible values are: 642 | # - yes: checksum validation is forced 643 | # - no: checksum validation is disabled 644 | # - auto: suricata uses a statistical approach to detect when 645 | # checksum off-loading is used. (default) 646 | # Warning: 'checksum-validation' must be set to yes to have any validation 647 | #checksum-checks: auto 648 | # With some accelerator cards using a modified libpcap (like myricom), you 649 | # may want to have the same number of capture threads as the number of capture 650 | # rings. In this case, set up the threads variable to N to start N threads 651 | # listening on the same interface. 652 | #threads: 16 653 | # set to no to disable promiscuous mode: 654 | #promisc: no 655 | # set snaplen, if not set it defaults to MTU if MTU can be known 656 | # via ioctl call and to full capture if not. 
657 | #snaplen: 1518 658 | # Put default values here 659 | - interface: default 660 | #checksum-checks: auto 661 | 662 | # Settings for reading pcap files 663 | pcap-file: 664 | # Possible values are: 665 | # - yes: checksum validation is forced 666 | # - no: checksum validation is disabled 667 | # - auto: suricata uses a statistical approach to detect when 668 | # checksum off-loading is used. (default) 669 | # Warning: 'checksum-validation' must be set to yes to have checksum tested 670 | checksum-checks: auto 671 | 672 | # See "Advanced Capture Options" below for more options, including NETMAP 673 | # and PF_RING. 674 | 675 | 676 | ## 677 | ## Step 5: App Layer Protocol Configuration 678 | ## 679 | 680 | # Configure the app-layer parsers. The protocols section details each 681 | # protocol. 682 | # 683 | # The option "enabled" takes 3 values - "yes", "no", "detection-only". 684 | # "yes" enables both detection and the parser, "no" disables both, and 685 | # "detection-only" enables protocol detection only (parser disabled). 686 | app-layer: 687 | protocols: 688 | tls: 689 | enabled: yes 690 | detection-ports: 691 | dp: 443 692 | 693 | # Completely stop processing TLS/SSL session after the handshake 694 | # completed. If bypass is enabled this will also trigger flow 695 | # bypass. If disabled (the default), TLS/SSL session is still 696 | # tracked for Heartbleed and other anomalies. 697 | #no-reassemble: yes 698 | dcerpc: 699 | enabled: yes 700 | ftp: 701 | enabled: yes 702 | ssh: 703 | enabled: yes 704 | smtp: 705 | enabled: yes 706 | # Configure SMTP-MIME Decoder 707 | mime: 708 | # Decode MIME messages from SMTP transactions 709 | # (may be resource intensive) 710 | # This field supercedes all others because it turns the entire 711 | # process on or off 712 | decode-mime: yes 713 | 714 | # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) 715 | decode-base64: yes 716 | decode-quoted-printable: yes 717 | 718 | # Maximum bytes per header data value stored in the data structure 719 | # (default is 2000) 720 | header-value-depth: 2000 721 | 722 | # Extract URLs and save in state data structure 723 | extract-urls: yes 724 | # Set to yes to compute the md5 of the mail body. You will then 725 | # be able to journalize it. 726 | body-md5: no 727 | # Configure inspected-tracker for file_data keyword 728 | inspected-tracker: 729 | content-limit: 100000 730 | content-inspect-min-size: 32768 731 | content-inspect-window: 4096 732 | imap: 733 | enabled: detection-only 734 | msn: 735 | enabled: detection-only 736 | smb: 737 | enabled: yes 738 | detection-ports: 739 | dp: 139, 445 740 | # smb2 detection is disabled internally inside the engine. 741 | #smb2: 742 | # enabled: yes 743 | # Note: NFS parser depends on Rust support: pass --enable-rust 744 | # to configure. 745 | nfs: 746 | enabled: no 747 | dns: 748 | # memcaps. Globally and per flow/state. 749 | #global-memcap: 16mb 750 | #state-memcap: 512kb 751 | 752 | # How many unreplied DNS requests are considered a flood. 753 | # If the limit is reached, app-layer-event:dns.flooded; will match. 
754 | #request-flood: 500 755 | 756 | tcp: 757 | enabled: yes 758 | detection-ports: 759 | dp: 53 760 | udp: 761 | enabled: yes 762 | detection-ports: 763 | dp: 53 764 | http: 765 | enabled: yes 766 | # memcap: 64mb 767 | 768 | # default-config: Used when no server-config matches 769 | # personality: List of personalities used by default 770 | # request-body-limit: Limit reassembly of request body for inspection 771 | # by http_client_body & pcre /P option. 772 | # response-body-limit: Limit reassembly of response body for inspection 773 | # by file_data, http_server_body & pcre /Q option. 774 | # double-decode-path: Double decode path section of the URI 775 | # double-decode-query: Double decode query section of the URI 776 | # response-body-decompress-layer-limit: 777 | # Limit to how many layers of compression will be 778 | # decompressed. Defaults to 2. 779 | # 780 | # server-config: List of server configurations to use if address matches 781 | # address: List of ip addresses or networks for this block 782 | # personalitiy: List of personalities used by this block 783 | # request-body-limit: Limit reassembly of request body for inspection 784 | # by http_client_body & pcre /P option. 785 | # response-body-limit: Limit reassembly of response body for inspection 786 | # by file_data, http_server_body & pcre /Q option. 787 | # double-decode-path: Double decode path section of the URI 788 | # double-decode-query: Double decode query section of the URI 789 | # 790 | # uri-include-all: Include all parts of the URI. By default the 791 | # 'scheme', username/password, hostname and port 792 | # are excluded. Setting this option to true adds 793 | # all of them to the normalized uri as inspected 794 | # by http_uri, urilen, pcre with /U and the other 795 | # keywords that inspect the normalized uri. 796 | # Note that this does not affect http_raw_uri. 797 | # Also, note that including all was the default in 798 | # 1.4 and 2.0beta1. 799 | # 800 | # meta-field-limit: Hard size limit for request and response size 801 | # limits. Applies to request line and headers, 802 | # response line and headers. Does not apply to 803 | # request or response bodies. Default is 18k. 804 | # If this limit is reached an event is raised. 805 | # 806 | # Currently Available Personalities: 807 | # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, 808 | # IIS_7_0, IIS_7_5, Apache_2 809 | libhtp: 810 | default-config: 811 | personality: IDS 812 | 813 | # Can be specified in kb, mb, gb. Just a number indicates 814 | # it's in bytes. 815 | request-body-limit: 100kb 816 | response-body-limit: 100kb 817 | 818 | # inspection limits 819 | request-body-minimal-inspect-size: 32kb 820 | request-body-inspect-window: 4kb 821 | response-body-minimal-inspect-size: 40kb 822 | response-body-inspect-window: 16kb 823 | 824 | # response body decompression (0 disables) 825 | response-body-decompress-layer-limit: 2 826 | 827 | # auto will use http-body-inline mode in IPS mode, yes or no set it statically 828 | http-body-inline: auto 829 | 830 | # Take a random value for inspection sizes around the specified value. 831 | # This lower the risk of some evasion technics but could lead 832 | # detection change between runs. It is set to 'yes' by default. 833 | #randomize-inspection-sizes: yes 834 | # If randomize-inspection-sizes is active, the value of various 835 | # inspection size will be choosen in the [1 - range%, 1 + range%] 836 | # range 837 | # Default value of randomize-inspection-range is 10. 
838 | #randomize-inspection-range: 10 839 | 840 | # decoding 841 | double-decode-path: no 842 | double-decode-query: no 843 | 844 | server-config: 845 | 846 | #- apache: 847 | # address: [192.168.1.0/24, 127.0.0.0/8, "::1"] 848 | # personality: Apache_2 849 | # # Can be specified in kb, mb, gb. Just a number indicates 850 | # # it's in bytes. 851 | # request-body-limit: 4096 852 | # response-body-limit: 4096 853 | # double-decode-path: no 854 | # double-decode-query: no 855 | 856 | #- iis7: 857 | # address: 858 | # - 192.168.0.0/24 859 | # - 192.168.10.0/24 860 | # personality: IIS_7_0 861 | # # Can be specified in kb, mb, gb. Just a number indicates 862 | # # it's in bytes. 863 | # request-body-limit: 4096 864 | # response-body-limit: 4096 865 | # double-decode-path: no 866 | # double-decode-query: no 867 | 868 | # Note: Modbus probe parser is minimalist due to the poor significant field 869 | # Only Modbus message length (greater than Modbus header length) 870 | # And Protocol ID (equal to 0) are checked in probing parser 871 | # It is important to enable detection port and define Modbus port 872 | # to avoid false positive 873 | modbus: 874 | # How many unreplied Modbus requests are considered a flood. 875 | # If the limit is reached, app-layer-event:modbus.flooded; will match. 876 | #request-flood: 500 877 | 878 | enabled: no 879 | detection-ports: 880 | dp: 502 881 | # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it 882 | # is recommended to keep the TCP connection opened with a remote device 883 | # and not to open and close it for each MODBUS/TCP transaction. In that 884 | # case, it is important to set the depth of the stream reassembling as 885 | # unlimited (stream.reassembly.depth: 0) 886 | 887 | # Stream reassembly size for modbus. By default track it completely. 888 | stream-depth: 0 889 | 890 | # DNP3 891 | dnp3: 892 | enabled: no 893 | detection-ports: 894 | dp: 20000 895 | 896 | # SCADA EtherNet/IP and CIP protocol support 897 | enip: 898 | enabled: no 899 | detection-ports: 900 | dp: 44818 901 | sp: 44818 902 | 903 | # Note: parser depends on experimental Rust support 904 | # with --enable-rust-experimental passed to configure 905 | ntp: 906 | enabled: no 907 | 908 | # Limit for the maximum number of asn1 frames to decode (default 256) 909 | asn1-max-frames: 256 910 | 911 | 912 | ############################################################################## 913 | ## 914 | ## Advanced settings below 915 | ## 916 | ############################################################################## 917 | 918 | ## 919 | ## Run Options 920 | ## 921 | 922 | # Run suricata as user and group. 923 | #run-as: 924 | # user: suri 925 | # group: suri 926 | 927 | # Some logging module will use that name in event as identifier. The default 928 | # value is the hostname 929 | #sensor-name: suricata 930 | 931 | # Default location of the pid file. The pid file is only used in 932 | # daemon mode (start Suricata with -D). If not running in daemon mode 933 | # the --pidfile command line option must be used to create a pid file. 934 | #pid-file: /var/run/suricata.pid 935 | 936 | # Daemon working directory 937 | # Suricata will change directory to this one if provided 938 | # Default: "/" 939 | #daemon-directory: "/" 940 | 941 | # Suricata core dump configuration. Limits the size of the core dump file to 942 | # approximately max-dump. The actual core dump size will be a multiple of the 943 | # page size. Core dumps that would be larger than max-dump are truncated. 
On 944 | # Linux, the actual core dump size may be a few pages larger than max-dump. 945 | # Setting max-dump to 0 disables core dumping. 946 | # Setting max-dump to 'unlimited' will give the full core dump file. 947 | # On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size 948 | # to be 'unlimited'. 949 | 950 | coredump: 951 | max-dump: unlimited 952 | 953 | # If suricata box is a router for the sniffed networks, set it to 'router'. If 954 | # it is a pure sniffing setup, set it to 'sniffer-only'. 955 | # If set to auto, the variable is internally switch to 'router' in IPS mode 956 | # and 'sniffer-only' in IDS mode. 957 | # This feature is currently only used by the reject* keywords. 958 | host-mode: auto 959 | 960 | # Number of packets preallocated per thread. The default is 1024. A higher number 961 | # will make sure each CPU will be more easily kept busy, but may negatively 962 | # impact caching. 963 | # 964 | # If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules 965 | # apply. In that case try something like 60000 or more. This is because the CUDA 966 | # pattern matcher buffers and scans as many packets as possible in parallel. 967 | #max-pending-packets: 1024 968 | 969 | # Runmode the engine should use. Please check --list-runmodes to get the available 970 | # runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned 971 | # load balancing). 972 | runmode: workers 973 | 974 | # Specifies the kind of flow load balancer used by the flow pinned autofp mode. 975 | # 976 | # Supported schedulers are: 977 | # 978 | # round-robin - Flows assigned to threads in a round robin fashion. 979 | # active-packets - Flows assigned to threads that have the lowest number of 980 | # unprocessed packets (default). 981 | # hash - Flow alloted usihng the address hash. More of a random 982 | # technique. Was the default in Suricata 1.2.1 and older. 983 | # 984 | #autofp-scheduler: active-packets 985 | 986 | # Preallocated size for packet. Default is 1514 which is the classical 987 | # size for pcap on ethernet. You should adjust this value to the highest 988 | # packet size (MTU + hardware header) on your system. 989 | #default-packet-size: 1514 990 | 991 | # Unix command socket can be used to pass commands to suricata. 992 | # An external tool can then connect to get information from suricata 993 | # or trigger some modifications of the engine. Set enabled to yes 994 | # to activate the feature. In auto mode, the feature will only be 995 | # activated in live capture mode. You can use the filename variable to set 996 | # the file name of the socket. 997 | unix-command: 998 | enabled: auto 999 | #filename: custom.socket 1000 | 1001 | # Magic file. The extension .mgc is added to the value here. 
1002 | #magic-file: /usr/share/file/magic 1003 | #magic-file: 1004 | 1005 | legacy: 1006 | uricontent: enabled 1007 | 1008 | ## 1009 | ## Detection settings 1010 | ## 1011 | 1012 | # Set the order of alerts bassed on actions 1013 | # The default order is pass, drop, reject, alert 1014 | # action-order: 1015 | # - pass 1016 | # - drop 1017 | # - reject 1018 | # - alert 1019 | 1020 | # IP Reputation 1021 | #reputation-categories-file: /etc/suricata/iprep/categories.txt 1022 | #default-reputation-path: /etc/suricata/iprep 1023 | #reputation-files: 1024 | # - reputation.list 1025 | 1026 | # When run with the option --engine-analysis, the engine will read each of 1027 | # the parameters below, and print reports for each of the enabled sections 1028 | # and exit. The reports are printed to a file in the default log dir 1029 | # given by the parameter "default-log-dir", with engine reporting 1030 | # subsection below printing reports in its own report file. 1031 | engine-analysis: 1032 | # enables printing reports for fast-pattern for every rule. 1033 | rules-fast-pattern: yes 1034 | # enables printing reports for each rule 1035 | rules: yes 1036 | 1037 | #recursion and match limits for PCRE where supported 1038 | pcre: 1039 | match-limit: 3500 1040 | match-limit-recursion: 1500 1041 | 1042 | ## 1043 | ## Advanced Traffic Tracking and Reconstruction Settings 1044 | ## 1045 | 1046 | # Host specific policies for defragmentation and TCP stream 1047 | # reassembly. The host OS lookup is done using a radix tree, just 1048 | # like a routing table so the most specific entry matches. 1049 | host-os-policy: 1050 | # Make the default policy windows. 1051 | windows: [0.0.0.0/0] 1052 | bsd: [] 1053 | bsd-right: [] 1054 | old-linux: [] 1055 | linux: [] 1056 | old-solaris: [] 1057 | solaris: [] 1058 | hpux10: [] 1059 | hpux11: [] 1060 | irix: [] 1061 | macos: [] 1062 | vista: [] 1063 | windows2k3: [] 1064 | 1065 | # Defrag settings: 1066 | 1067 | defrag: 1068 | memcap: 32mb 1069 | hash-size: 65536 1070 | trackers: 65535 # number of defragmented flows to follow 1071 | max-frags: 65535 # number of fragments to keep (higher than trackers) 1072 | prealloc: yes 1073 | timeout: 60 1074 | 1075 | # Enable defrag per host settings 1076 | # host-config: 1077 | # 1078 | # - dmz: 1079 | # timeout: 30 1080 | # address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"] 1081 | # 1082 | # - lan: 1083 | # timeout: 45 1084 | # address: 1085 | # - 192.168.0.0/24 1086 | # - 192.168.10.0/24 1087 | # - 172.16.14.0/24 1088 | 1089 | # Flow settings: 1090 | # By default, the reserved memory (memcap) for flows is 32MB. This is the limit 1091 | # for flow allocation inside the engine. You can change this value to allow 1092 | # more memory usage for flows. 1093 | # The hash-size determine the size of the hash used to identify flows inside 1094 | # the engine, and by default the value is 65536. 1095 | # At the startup, the engine can preallocate a number of flows, to get a better 1096 | # performance. The number of flows preallocated is 10000 by default. 1097 | # emergency-recovery is the percentage of flows that the engine need to 1098 | # prune before unsetting the emergency state. The emergency state is activated 1099 | # when the memcap limit is reached, allowing to create new flows, but 1100 | # prunning them with the emergency timeouts (they are defined below). 1101 | # If the memcap is reached, the engine will try to prune flows 1102 | # with the default timeouts. 
If it doesn't find a flow to prune, it will set 1103 | # the emergency bit and it will try again with more aggressive timeouts. 1104 | # If that doesn't work, then it will try to kill the last time seen flows 1105 | # not in use. 1106 | # The memcap can be specified in kb, mb, gb. Just a number indicates it's 1107 | # in bytes. 1108 | 1109 | flow: 1110 | memcap: 128mb 1111 | hash-size: 65536 1112 | prealloc: 10000 1113 | emergency-recovery: 30 1114 | #managers: 1 # default to one flow manager 1115 | #recyclers: 1 # default to one flow recycler thread 1116 | 1117 | # This option controls the use of vlan ids in the flow (and defrag) 1118 | # hashing. Normally this should be enabled, but in some (broken) 1119 | # setups where both sides of a flow are not tagged with the same vlan 1120 | # tag, we can ignore the vlan id's in the flow hashing. 1121 | vlan: 1122 | use-for-tracking: true 1123 | 1124 | # Specific timeouts for flows. Here you can specify the timeouts that the 1125 | # active flows will wait to transit from the current state to another, on each 1126 | # protocol. The value of "new" determines the seconds to wait after a handshake or 1127 | # stream startup before the engine frees the data of that flow if it doesn't 1128 | # change the state to established (usually if we don't receive more packets 1129 | # of that flow). The value of "established" is the amount of 1130 | # seconds that the engine will wait to free the flow if it spends that amount 1131 | # without receiving new packets or closing the connection. "closed" is the 1132 | # amount of time to wait after a flow is closed (usually zero). "bypassed" 1133 | # timeout controls locally bypassed flows. For these flows we don't do any other 1134 | # tracking. If no packets have been seen after this timeout, the flow is discarded. 1135 | # 1136 | # There's an emergency mode that will become active under attack circumstances, 1137 | # making the engine check flow status faster. These configuration variables 1138 | # use the prefix "emergency-" and work similarly to the normal ones. 1139 | # Some timeouts don't apply to all the protocols, like "closed", for udp and 1140 | # icmp. 1141 | 1142 | flow-timeouts: 1143 | 1144 | default: 1145 | new: 30 1146 | established: 300 1147 | closed: 0 1148 | bypassed: 100 1149 | emergency-new: 10 1150 | emergency-established: 100 1151 | emergency-closed: 0 1152 | emergency-bypassed: 50 1153 | tcp: 1154 | new: 60 1155 | established: 600 1156 | closed: 60 1157 | bypassed: 100 1158 | emergency-new: 5 1159 | emergency-established: 100 1160 | emergency-closed: 10 1161 | emergency-bypassed: 50 1162 | udp: 1163 | new: 30 1164 | established: 300 1165 | bypassed: 100 1166 | emergency-new: 10 1167 | emergency-established: 100 1168 | emergency-bypassed: 50 1169 | icmp: 1170 | new: 30 1171 | established: 300 1172 | bypassed: 100 1173 | emergency-new: 10 1174 | emergency-established: 100 1175 | emergency-bypassed: 50 1176 | 1177 | # Stream engine settings. Here the TCP stream tracking and reassembly 1178 | # engine is configured. 1179 | # 1180 | # stream: 1181 | # memcap: 32mb # Can be specified in kb, mb, gb. Just a 1182 | # # number indicates it's in bytes. 1183 | # checksum-validation: yes # To validate the checksum of received 1184 | # # packets. If csum validation is specified as 1185 | # # "yes", then packets with an invalid csum will not 1186 | # # be processed by the engine stream/app layer.
1187 | # # Warning: locally generated trafic can be 1188 | # # generated without checksum due to hardware offload 1189 | # # of checksum. You can control the handling of checksum 1190 | # # on a per-interface basis via the 'checksum-checks' 1191 | # # option 1192 | # prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread 1193 | # midstream: false # don't allow midstream session pickups 1194 | # async-oneside: false # don't enable async stream handling 1195 | # inline: no # stream inline mode 1196 | # drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine 1197 | # max-synack-queued: 5 # Max different SYN/ACKs to queue 1198 | # bypass: no # Bypass packets when stream.depth is reached 1199 | # 1200 | # reassembly: 1201 | # memcap: 64mb # Can be specified in kb, mb, gb. Just a number 1202 | # # indicates it's in bytes. 1203 | # depth: 1mb # Can be specified in kb, mb, gb. Just a number 1204 | # # indicates it's in bytes. 1205 | # toserver-chunk-size: 2560 # inspect raw stream in chunks of at least 1206 | # # this size. Can be specified in kb, mb, 1207 | # # gb. Just a number indicates it's in bytes. 1208 | # toclient-chunk-size: 2560 # inspect raw stream in chunks of at least 1209 | # # this size. Can be specified in kb, mb, 1210 | # # gb. Just a number indicates it's in bytes. 1211 | # randomize-chunk-size: yes # Take a random value for chunk size around the specified value. 1212 | # # This lower the risk of some evasion technics but could lead 1213 | # # detection change between runs. It is set to 'yes' by default. 1214 | # randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is 1215 | # # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size 1216 | # # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same 1217 | # # calculation for toclient-chunk-size. 1218 | # # Default value of randomize-chunk-range is 10. 1219 | # 1220 | # raw: yes # 'Raw' reassembly enabled or disabled. 1221 | # # raw is for content inspection by detection 1222 | # # engine. 1223 | # 1224 | # segment-prealloc: 2048 # number of segments preallocated per thread 1225 | # 1226 | # check-overlap-different-data: true|false 1227 | # # check if a segment contains different data 1228 | # # than what we've already seen for that 1229 | # # position in the stream. 1230 | # # This is enabled automatically if inline mode 1231 | # # is used or when stream-event:reassembly_overlap_different_data; 1232 | # # is used in a rule. 1233 | # 1234 | stream: 1235 | memcap: 12gb 1236 | prealloc-sessions: 200000 1237 | checksum-validation: no # reject wrong csums 1238 | inline: no # auto will use inline mode in IPS mode, yes or no set it statically 1239 | bypass: yes 1240 | reassembly: 1241 | memcap: 24gb 1242 | depth: 1mb # reassemble 1mb into a stream 1243 | toserver-chunk-size: 2560 1244 | toclient-chunk-size: 2560 1245 | randomize-chunk-size: yes 1246 | #randomize-chunk-range: 10 1247 | #raw: yes 1248 | #segment-prealloc: 2048 1249 | #check-overlap-different-data: true 1250 | 1251 | # Host table: 1252 | # 1253 | # Host table is used by tagging and per host thresholding subsystems. 1254 | # 1255 | host: 1256 | hash-size: 4096 1257 | prealloc: 1000 1258 | memcap: 32mb 1259 | 1260 | # IP Pair table: 1261 | # 1262 | # Used by xbits 'ippair' tracking. 
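#
# (Illustrative, not an upstream default.) The ippair table below only matters
# when loaded rules keep state per address pair, e.g. rules using
# "xbits: set, <name>, track ip_pair". For such rule sets it could be enabled
# and sized along these lines; the memcap value is an assumption:
#
#   ippair:
#     hash-size: 4096
#     prealloc: 1000
#     memcap: 64mb
#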
1263 | # 1264 | #ippair: 1265 | # hash-size: 4096 1266 | # prealloc: 1000 1267 | # memcap: 32mb 1268 | 1269 | # Decoder settings 1270 | 1271 | decoder: 1272 | # Teredo decoder is known to not be completely accurate; 1273 | # it will sometimes detect non-teredo as teredo. 1274 | teredo: 1275 | enabled: true 1276 | 1277 | 1278 | ## 1279 | ## Performance tuning and profiling 1280 | ## 1281 | 1282 | # The detection engine builds internal groups of signatures. The engine 1283 | # allows us to specify the profile to use for them, to manage memory in an 1284 | # efficient way while keeping good performance. For the profile keyword you 1285 | # can use the words "low", "medium", "high" or "custom". If you use custom 1286 | # make sure to define the values at "- custom-values" at your convenience. 1287 | # Usually you would prefer medium/high/low. 1288 | # 1289 | # "sgh mpm-context", indicates how the staging should allot mpm contexts for 1290 | # the signature groups. "single" indicates the use of a single context for 1291 | # all the signature group heads. "full" indicates an mpm-context for each 1292 | # group head. "auto" lets the engine decide the distribution of contexts 1293 | # based on the information the engine gathers on the patterns from each 1294 | # group head. 1295 | # 1296 | # The option inspection-recursion-limit is used to limit the recursive calls 1297 | # in the content inspection code. For certain payload-sig combinations, we 1298 | # might end up taking too much time in the content inspection code. 1299 | # If the argument specified is 0, the engine uses an internally defined 1300 | # default limit. If no value is specified, no limit is applied to the recursion. 1301 | detect: 1302 | profile: medium 1303 | custom-values: 1304 | toclient-groups: 3 1305 | toserver-groups: 25 1306 | sgh-mpm-context: auto 1307 | inspection-recursion-limit: 3000 1308 | # If set to yes, the loading of signatures will be made after the capture 1309 | # is started. This will limit the downtime in IPS mode. 1310 | #delayed-detect: yes 1311 | 1312 | prefilter: 1313 | # default prefiltering setting. "mpm" only creates MPM/fast_pattern 1314 | # engines. "auto" also sets up prefilter engines for other keywords. 1315 | # Use --list-keywords=all to see which keywords support prefiltering. 1316 | default: mpm 1317 | 1318 | # the grouping values above control how many groups are created per 1319 | # direction. Port whitelisting forces that port to get its own group. 1320 | # Very common ports will benefit, as well as ports with many expensive 1321 | # rules. 1322 | grouping: 1323 | #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080 1324 | #udp-whitelist: 53, 135, 5060 1325 | 1326 | profiling: 1327 | # Log the rules that made it past the prefilter stage, per packet. 1328 | # Default is off. The threshold setting determines how many rules 1329 | # must have made it past pre-filter for that rule to trigger the 1330 | # logging. 1331 | #inspect-logging-threshold: 200 1332 | grouping: 1333 | dump-to-disk: false 1334 | include-rules: false # very verbose 1335 | include-mpm-stats: false 1336 | 1337 | # Select the multi pattern algorithm you want to run for scan/search 1338 | # in the engine.
1339 | # 1340 | # The supported algorithms are: 1341 | # "ac" - Aho-Corasick, default implementation 1342 | # "ac-bs" - Aho-Corasick, reduced memory implementation 1343 | # "ac-cuda" - Aho-Corasick, CUDA implementation 1344 | # "ac-ks" - Aho-Corasick, "Ken Steele" variant 1345 | # "hs" - Hyperscan, available when built with Hyperscan support 1346 | # 1347 | # The default mpm-algo value of "auto" will use "hs" if Hyperscan is 1348 | # available, "ac" otherwise. 1349 | # 1350 | # The mpm you choose also decides the distribution of mpm contexts for 1351 | # signature groups, specified by the conf - "detect.sgh-mpm-context". 1352 | # Selecting "ac" as the mpm would require "detect.sgh-mpm-context" 1353 | # to be set to "single", because of ac's memory requirements, unless the 1354 | # ruleset is small enough to fit in one's memory, in which case one can 1355 | # use "full" with "ac". Rest of the mpms can be run in "full" mode. 1356 | # 1357 | # There is also a CUDA pattern matcher (only available if Suricata was 1358 | # compiled with --enable-cuda: b2g_cuda. Make sure to update your 1359 | # max-pending-packets setting above as well if you use b2g_cuda. 1360 | 1361 | mpm-algo: auto 1362 | 1363 | # Select the matching algorithm you want to use for single-pattern searches. 1364 | # 1365 | # Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only 1366 | # available if Suricata has been built with Hyperscan support). 1367 | # 1368 | # The default of "auto" will use "hs" if available, otherwise "bm". 1369 | 1370 | spm-algo: auto 1371 | 1372 | # Suricata is multi-threaded. Here the threading can be influenced. 1373 | threading: 1374 | set-cpu-affinity: yes 1375 | # Tune cpu affinity of threads. Each family of threads can be bound 1376 | # on specific CPUs. 1377 | # 1378 | # These 2 apply to the all runmodes: 1379 | # management-cpu-set is used for flow timeout handling, counters 1380 | # worker-cpu-set is used for 'worker' threads 1381 | # 1382 | # Additionally, for autofp these apply: 1383 | # receive-cpu-set is used for capture threads 1384 | # verdict-cpu-set is used for IPS verdict threads 1385 | # 1386 | cpu-affinity: 1387 | - management-cpu-set: 1388 | cpu: [ 12 ] # include only these cpus in affinity settings 1389 | mode: "balanced" 1390 | prio: 1391 | default: "low" 1392 | - worker-cpu-set: 1393 | cpu: [ "13-17" ] 1394 | mode: "exclusive" 1395 | # Use explicitely 3 threads and don't compute number by using 1396 | # detect-thread-ratio variable: 1397 | # threads: 3 1398 | prio: 1399 | low: [ 0 ] 1400 | medium: [ "1-2" ] 1401 | high: [ 3 ] 1402 | default: "high" 1403 | #- verdict-cpu-set: 1404 | # cpu: [ 0 ] 1405 | # prio: 1406 | # default: "high" 1407 | # 1408 | # By default Suricata creates one "detect" thread per available CPU/CPU core. 1409 | # This setting allows controlling this behaviour. A ratio setting of 2 will 1410 | # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this 1411 | # will result in 4 detect threads. If values below 1 are used, less threads 1412 | # are created. So on a dual core CPU a setting of 0.5 results in 1 detect 1413 | # thread being created. Regardless of the setting at a minimum 1 detect 1414 | # thread will always be created. 1415 | # 1416 | detect-thread-ratio: 1.0 1417 | 1418 | # Luajit has a strange memory requirement, it's 'states' need to be in the 1419 | # first 2G of the process' memory. 1420 | # 1421 | # 'luajit.states' is used to control how many states are preallocated. 
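#
# (Illustrative note on the cpu-affinity block above; the core numbers below
# are assumptions, not values from this file.) The 12-17 worker pinning assumes
# those cores are local to the NUMA node that owns the capture NICs. A host
# whose NICs sit on a node owning cores 0-5 might instead use:
#
#   cpu-affinity:
#     - management-cpu-set:
#         cpu: [ 0 ]
#         mode: "balanced"
#         prio:
#           default: "low"
#     - worker-cpu-set:
#         cpu: [ "1-5" ]
#         mode: "exclusive"
#         prio:
#           default: "high"
#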
1422 | # State use: per detect script: 1 per detect thread. Per output script: 1 per 1423 | # script. 1424 | luajit: 1425 | states: 128 1426 | 1427 | # Profiling settings. Only effective if Suricata has been built with 1428 | # the --enable-profiling configure flag. 1429 | # 1430 | profiling: 1431 | # Run profiling for every xth packet. The default is 1, which means we 1432 | # profile every packet. If set to 1000, one packet is profiled for every 1433 | # 1000 received. 1434 | #sample-rate: 1000 1435 | 1436 | # rule profiling 1437 | rules: 1438 | 1439 | # Profiling can be disabled here, but it will still have a 1440 | # performance impact if compiled in. 1441 | enabled: yes 1442 | filename: rule_perf.log 1443 | append: yes 1444 | 1445 | # Sort options: ticks, avgticks, checks, matches, maxticks 1446 | # If commented out, all the sort options will be used. 1447 | #sort: avgticks 1448 | 1449 | # Limit the number of sids for which stats are shown at exit (per sort). 1450 | limit: 10 1451 | 1452 | # output to json 1453 | json: yes 1454 | 1455 | # per keyword profiling 1456 | keywords: 1457 | enabled: yes 1458 | filename: keyword_perf.log 1459 | append: yes 1460 | 1461 | # per rulegroup profiling 1462 | rulegroups: 1463 | enabled: yes 1464 | filename: rule_group_perf.log 1465 | append: yes 1466 | 1467 | # packet profiling 1468 | packets: 1469 | 1470 | # Profiling can be disabled here, but it will still have a 1471 | # performance impact if compiled in. 1472 | enabled: yes 1473 | filename: packet_stats.log 1474 | append: yes 1475 | 1476 | # per packet csv output 1477 | csv: 1478 | 1479 | # Output can be disabled here, but it will still have a 1480 | # performance impact if compiled in. 1481 | enabled: no 1482 | filename: packet_stats.csv 1483 | 1484 | # profiling of locking. Only available when Suricata was built with 1485 | # --enable-profiling-locks. 1486 | locks: 1487 | enabled: no 1488 | filename: lock_stats.log 1489 | append: yes 1490 | 1491 | pcap-log: 1492 | enabled: no 1493 | filename: pcaplog_stats.log 1494 | append: yes 1495 | 1496 | ## 1497 | ## Netfilter integration 1498 | ## 1499 | 1500 | # When running in NFQ inline mode, it is possible to use a simulated 1501 | # non-terminal NFQUEUE verdict. 1502 | # This permits sending all needed packets to suricata via a rule like: 1503 | # iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE 1504 | # And below, you can have your standard filtering ruleset. To activate 1505 | # this mode, you need to set mode to 'repeat'. 1506 | # If you want packets to be sent to another queue after an ACCEPT decision, 1507 | # set mode to 'route' and set the next-queue value. 1508 | # On linux >= 3.1, you can set batchcount to a value > 1 to improve performance 1509 | # by processing several packets before sending a verdict (worker runmode only). 1510 | # On linux >= 3.6, you can set the fail-open option to yes to have the kernel 1511 | # accept the packet if suricata is not able to keep pace. 1512 | # bypass mark and mask can be used to implement NFQ bypass. If bypass mark is 1513 | # set then the NFQ bypass is activated. Suricata will set the bypass mark/mask 1514 | # on packets of a flow that needs to be bypassed. The Netfilter ruleset has to 1515 | # directly accept all packets of a flow once a packet has been marked.
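#
# (Illustrative only; this chart runs Suricata with af-packet, not NFQ.) If the
# sensor were instead placed inline behind the iptables NFQUEUE rule shown
# above, a typical repeat-mode configuration would be:
#
#   nfq:
#     mode: repeat
#     repeat-mark: 1
#     repeat-mask: 1
#     fail-open: yes
#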
1516 | nfq: 1517 | # mode: accept 1518 | # repeat-mark: 1 1519 | # repeat-mask: 1 1520 | # bypass-mark: 1 1521 | # bypass-mask: 1 1522 | # route-queue: 2 1523 | # batchcount: 20 1524 | # fail-open: yes 1525 | 1526 | #nflog support 1527 | nflog: 1528 | # netlink multicast group 1529 | # (the same as the iptables --nflog-group param) 1530 | # Group 0 is used by the kernel, so you can't use it 1531 | - group: 2 1532 | # netlink buffer size 1533 | buffer-size: 18432 1534 | # put default value here 1535 | - group: default 1536 | # set number of packet to queue inside kernel 1537 | qthreshold: 1 1538 | # set the delay before flushing packet in the queue inside kernel 1539 | qtimeout: 100 1540 | # netlink max buffer size 1541 | max-size: 20000 1542 | 1543 | ## 1544 | ## Advanced Capture Options 1545 | ## 1546 | 1547 | # general settings affecting packet capture 1548 | capture: 1549 | # disable NIC offloading. It's restored when Suricata exists. 1550 | # Enabled by default 1551 | #disable-offloading: false 1552 | # 1553 | # disable checksum validation. Same as setting '-k none' on the 1554 | # commandline 1555 | #checksum-validation: none 1556 | 1557 | # Netmap support 1558 | # 1559 | # Netmap operates with NIC directly in driver, so you need FreeBSD wich have 1560 | # built-in netmap support or compile and install netmap module and appropriate 1561 | # NIC driver on your Linux system. 1562 | # To reach maximum throughput disable all receive-, segmentation-, 1563 | # checksum- offloadings on NIC. 1564 | # Disabling Tx checksum offloading is *required* for connecting OS endpoint 1565 | # with NIC endpoint. 1566 | # You can find more information at https://github.com/luigirizzo/netmap 1567 | # 1568 | netmap: 1569 | # To specify OS endpoint add plus sign at the end (e.g. "eth0+") 1570 | - interface: eth2 1571 | # Number of receive threads. "auto" uses number of RSS queues on interface. 1572 | #threads: auto 1573 | # You can use the following variables to activate netmap tap or IPS mode. 1574 | # If copy-mode is set to ips or tap, the traffic coming to the current 1575 | # interface will be copied to the copy-iface interface. If 'tap' is set, the 1576 | # copy is complete. If 'ips' is set, the packet matching a 'drop' action 1577 | # will not be copied. 1578 | # To specify the OS as the copy-iface (so the OS can route packets, or forward 1579 | # to a service running on the same machine) add a plus sign at the end 1580 | # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0 1581 | # for return packets. Hardware checksumming must be *off* on the interface if 1582 | # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD 1583 | # or 'ethtool -K eth0 tx off rx off' for Linux). 1584 | #copy-mode: tap 1585 | #copy-iface: eth3 1586 | # Set to yes to disable promiscuous mode 1587 | # disable-promisc: no 1588 | # Choose checksum verification mode for the interface. At the moment 1589 | # of the capture, some packets may be with an invalid checksum due to 1590 | # offloading to the network card of the checksum computation. 1591 | # Possible values are: 1592 | # - yes: checksum validation is forced 1593 | # - no: checksum validation is disabled 1594 | # - auto: suricata uses a statistical approach to detect when 1595 | # checksum off-loading is used. 1596 | # Warning: 'checksum-validation' must be set to yes to have any validation 1597 | #checksum-checks: auto 1598 | # BPF filter to apply to this interface. The pcap filter syntax apply here. 
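#
# (Illustrative; interface names are only examples.) An inline netmap
# deployment normally declares two entries that copy to each other with
# copy-mode set to ips, so packets matching a drop action are not forwarded:
#
#   netmap:
#     - interface: eth2
#       copy-mode: ips
#       copy-iface: eth3
#     - interface: eth3
#       copy-mode: ips
#       copy-iface: eth2
#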
1599 | #bpf-filter: port 80 or udp 1600 | #- interface: eth3 1601 | #threads: auto 1602 | #copy-mode: tap 1603 | #copy-iface: eth2 1604 | # Put default values here 1605 | - interface: default 1606 | 1607 | # PF_RING configuration. for use with native PF_RING support 1608 | # for more info see http://www.ntop.org/products/pf_ring/ 1609 | pfring: 1610 | - interface: eth0 1611 | # Number of receive threads (>1 will enable experimental flow pinned 1612 | # runmode) 1613 | threads: 1 1614 | 1615 | # Default clusterid. PF_RING will load balance packets based on flow. 1616 | # All threads/processes that will participate need to have the same 1617 | # clusterid. 1618 | cluster-id: 99 1619 | 1620 | # Default PF_RING cluster type. PF_RING can load balance per flow. 1621 | # Possible values are cluster_flow or cluster_round_robin. 1622 | cluster-type: cluster_flow 1623 | # bpf filter for this interface 1624 | #bpf-filter: tcp 1625 | # Choose checksum verification mode for the interface. At the moment 1626 | # of the capture, some packets may be with an invalid checksum due to 1627 | # offloading to the network card of the checksum computation. 1628 | # Possible values are: 1629 | # - rxonly: only compute checksum for packets received by network card. 1630 | # - yes: checksum validation is forced 1631 | # - no: checksum validation is disabled 1632 | # - auto: suricata uses a statistical approach to detect when 1633 | # checksum off-loading is used. (default) 1634 | # Warning: 'checksum-validation' must be set to yes to have any validation 1635 | #checksum-checks: auto 1636 | # Second interface 1637 | #- interface: eth1 1638 | # threads: 3 1639 | # cluster-id: 93 1640 | # cluster-type: cluster_flow 1641 | # Put default values here 1642 | - interface: default 1643 | #threads: 2 1644 | 1645 | # For FreeBSD ipfw(8) divert(4) support. 1646 | # Please make sure you have ipfw_load="YES" and ipdivert_load="YES" 1647 | # in /etc/loader.conf or kldload'ing the appropriate kernel modules. 1648 | # Additionally, you need to have an ipfw rule for the engine to see 1649 | # the packets from ipfw. For Example: 1650 | # 1651 | # ipfw add 100 divert 8000 ip from any to any 1652 | # 1653 | # The 8000 above should be the same number you passed on the command 1654 | # line, i.e. -d 8000 1655 | # 1656 | ipfw: 1657 | 1658 | # Reinject packets at the specified ipfw rule number. This config 1659 | # option is the ipfw rule number AT WHICH rule processing continues 1660 | # in the ipfw processing system after the engine has finished 1661 | # inspecting the packet for acceptance. If no rule number is specified, 1662 | # accepted packets are reinjected at the divert rule which they entered 1663 | # and IPFW rule processing continues. No check is done to verify 1664 | # this will rule makes sense so care must be taken to avoid loops in ipfw. 1665 | # 1666 | ## The following example tells the engine to reinject packets 1667 | # back into the ipfw firewall AT rule number 5500: 1668 | # 1669 | # ipfw-reinjection-rule-number: 5500 1670 | 1671 | 1672 | napatech: 1673 | # The Host Buffer Allowance for all streams 1674 | # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) 1675 | # This may be enabled when sharing streams with another application. 1676 | # Otherwise, it should be turned off. 1677 | hba: -1 1678 | 1679 | # use_all_streams set to "yes" will query the Napatech service for all configured 1680 | # streams and listen on all of them. When set to "no" the streams config array 1681 | # will be used. 
1682 | use-all-streams: yes 1683 | 1684 | # The streams to listen on. This can be either: 1685 | # a list of individual streams (e.g. streams: [0,1,2,3]) 1686 | # or 1687 | # a range of streams (e.g. streams: ["0-3"]) 1688 | streams: ["0-3"] 1689 | 1690 | # Tilera mpipe configuration. for use on Tilera TILE-Gx. 1691 | mpipe: 1692 | 1693 | # Load balancing modes: "static", "dynamic", "sticky", or "round-robin". 1694 | load-balance: dynamic 1695 | 1696 | # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536 1697 | iqueue-packets: 2048 1698 | 1699 | # List of interfaces we will listen on. 1700 | inputs: 1701 | - interface: xgbe2 1702 | - interface: xgbe3 1703 | - interface: xgbe4 1704 | 1705 | 1706 | # Relative weight of memory for packets of each mPipe buffer size. 1707 | stack: 1708 | size128: 0 1709 | size256: 9 1710 | size512: 0 1711 | size1024: 0 1712 | size1664: 7 1713 | size4096: 0 1714 | size10386: 0 1715 | size16384: 0 1716 | 1717 | ## 1718 | ## Hardware accelaration 1719 | ## 1720 | 1721 | # Cuda configuration. 1722 | cuda: 1723 | # The "mpm" profile. On not specifying any of these parameters, the engine's 1724 | # internal default values are used, which are same as the ones specified in 1725 | # in the default conf file. 1726 | mpm: 1727 | # The minimum length required to buffer data to the gpu. 1728 | # Anything below this is MPM'ed on the CPU. 1729 | # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. 1730 | # A value of 0 indicates there's no limit. 1731 | data-buffer-size-min-limit: 0 1732 | # The maximum length for data that we would buffer to the gpu. 1733 | # Anything over this is MPM'ed on the CPU. 1734 | # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. 1735 | data-buffer-size-max-limit: 1500 1736 | # The ring buffer size used by the CudaBuffer API to buffer data. 1737 | cudabuffer-buffer-size: 500mb 1738 | # The max chunk size that can be sent to the gpu in a single go. 1739 | gpu-transfer-size: 50mb 1740 | # The timeout limit for batching of packets in microseconds. 1741 | batching-timeout: 2000 1742 | # The device to use for the mpm. Currently we don't support load balancing 1743 | # on multiple gpus. In case you have multiple devices on your system, you 1744 | # can specify the device to use, using this conf. By default we hold 0, to 1745 | # specify the first device cuda sees. To find out device-id associated with 1746 | # the card(s) on the system run "suricata --list-cuda-cards". 1747 | device-id: 0 1748 | # No of Cuda streams used for asynchronous processing. All values > 0 are valid. 1749 | # For this option you need a device with Compute Capability > 1.0. 1750 | cuda-streams: 2 1751 | 1752 | ## 1753 | ## Include other configs 1754 | ## 1755 | 1756 | # Includes. Files included here will be handled as if they were 1757 | # inlined in this configuration file. 1758 | #include: include1.yaml 1759 | #include: include2.yaml 1760 | --------------------------------------------------------------------------------