├── .gitignore ├── .dockerignore ├── config.json ├── manifests ├── logincollector │ ├── service.yaml │ └── deployment.yaml └── grafana │ └── ingress.yaml ├── Dockerfile ├── package.json ├── app.js ├── dashboard.json ├── values ├── influxdb │ └── values.yaml └── grafana │ └── values.yaml ├── LICENSE.txt └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "influxHost": "influxdb.influxdb", 3 | "influxDatabase": "login-attempts" 4 | } 5 | -------------------------------------------------------------------------------- /manifests/logincollector/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: collector-svc 5 | spec: 6 | selector: 7 | app: collectorapp 8 | ports: 9 | - protocol: TCP 10 | port: 8080 11 | targetPort: 8080 12 | type: NodePort 13 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:8.10.0-alpine 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | # A wildcard is used to ensure both package.json AND package-lock.json are copied 8 | # where available (npm@5+) 9 | COPY package*.json ./ 10 | 11 | RUN npm install --production 12 | 13 | # Bundle app source 14 | COPY . . 15 | 16 | EXPOSE 8080 17 | CMD [ "npm", "start" ] -------------------------------------------------------------------------------- /manifests/grafana/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: grafana 5 | annotations: 6 | traefik.ingress.kubernetes.io/whitelist-source-range: "192.168.178.0/24" 7 | kubernetes.io/ingress.class: traefik 8 | spec: 9 | rules: 10 | - host: grafana. 
11 | http: 12 | paths: 13 | - backend: 14 | serviceName: grafana 15 | servicePort: 80 16 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "logincollector", 3 | "version": "1.1.0", 4 | "repository": "https://github.com/asksven/locating-ssh-hackers", 5 | "description": "A tcp listener to syslogd receiving an IP address, resolving it's geo position and storing the result in a time series database.", 6 | "main": "app.js", 7 | "scripts": { 8 | "test": "echo \"Error: no test specified\" && exit 1", 9 | "start": "node app.js" 10 | }, 11 | "author": "Sven Knispel (sven.knispel@gmail.com)", 12 | "license": "ISC", 13 | "dependencies": { 14 | "axios": "^0.21.1", 15 | "express": "^4.16.4", 16 | "influx": "^5.0.7", 17 | "influxdb": "0.0.1", 18 | "ngeohash": "^0.6.3", 19 | "prom-client": "^11.2.1" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /manifests/logincollector/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: collectorapp 7 | name: collector-deployment 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: collectorapp 13 | strategy: 14 | rollingUpdate: 15 | maxSurge: 25% 16 | maxUnavailable: 25% 17 | type: RollingUpdate 18 | template: 19 | metadata: 20 | labels: 21 | app: collectorapp 22 | spec: 23 | containers: 24 | - image: asksven/logincollector:14 25 | imagePullPolicy: IfNotPresent 26 | name: login-collector 27 | ports: 28 | - containerPort: 8080 29 | protocol: TCP 30 | restartPolicy: Always 31 | -------------------------------------------------------------------------------- /app.js: -------------------------------------------------------------------------------- 1 | var geohash = require("ngeohash"); 2 | const config = require("./config.json"); 3 | const axios = require("axios"); 4 | const Influx = require("influx"); 5 | 6 | // our metrics 7 | const metricPort = 3001; 8 | const express = require('express') 9 | const app = express() 10 | 11 | const Prometheus = require('prom-client') 12 | 13 | const apiCallsTotal = new Prometheus.Counter({ 14 | name: 'api_calls_total', 15 | help: 'Total number of geo-ip calls', 16 | labelNames: ['status'] 17 | }); 18 | 19 | const apiRequestDurationMicroseconds = new Prometheus.Histogram({ 20 | name: 'api_request_duration_ms', 21 | help: 'Duration of api requests in ms', 22 | labelNames: ['response_time'], 23 | buckets: [0.10, 5, 15, 50, 100, 200, 300, 400, 500] // buckets for response time from 0.1ms to 500ms 24 | }) 25 | 26 | 27 | app.get('/metrics', (req, res) => { 28 | res.set('Content-Type', Prometheus.register.contentType) 29 | res.end(Prometheus.register.metrics()) 30 | }) 31 | 32 | console.log("config: " + JSON.stringify(config)); 33 | console.log("config.influxHost=" + config.influxHost); 34 | console.log("config.influxDatabase=" + config.influxDatabase); 35 | 36 | // TCP handles 37 | const net = require('net'); 38 | const port = 8080; 39 | const host = '0.0.0.0'; 40 | 41 | const server = net.createServer(); 42 | server.listen(port, host, () => { 43 | console.log('TCP Server is running on port ' + port + '.'); 44 | }); 45 | 46 | // // Runs before each requests 47 | // server.use((req, res, next) => { 48 | // res.locals.startEpoch = Date.now() 49 | // next() 50 | // }) 51 | 52 | // // 
Runs after each requests 53 | // server.use((req, res, next) => { 54 | // const responseTimeInMs = Date.now() - res.locals.startEpoch 55 | 56 | // requestDurationMicroseconds 57 | // .labels('response_time') 58 | // .observe(responseTimeInMs) 59 | 60 | // next() 61 | // }) 62 | 63 | 64 | // InfluxDB Initialization. 65 | const influx = new Influx.InfluxDB({ 66 | host: config.influxHost, 67 | database: config.influxDatabase 68 | }); 69 | 70 | let sockets = []; 71 | 72 | const metricsServer = app.listen(metricPort, () => { 73 | console.log(`Metrics app listening on port ${metricPort}!`) 74 | }) 75 | 76 | server.on('connection', function(sock) { 77 | console.log('CONNECTED: ' + sock.remoteAddress + ':' + sock.remotePort); 78 | sockets.push(sock); 79 | 80 | sock.on('data', function(data) { 81 | console.log("Received data: " + data); 82 | try { 83 | let message = JSON.parse("" + data) 84 | // API Initialization. 85 | const instance = axios.create({ 86 | baseURL: "http://ip-api.com/json" 87 | }); 88 | instance 89 | .get(`/${message.ip}?fields=status,lat,lon`) 90 | .then(function(response) { 91 | const apiResponse = response.data; 92 | console.log("ip-api.com response: "); 93 | console.log(" status: " + apiResponse.status); 94 | 95 | const success = apiResponse.status === 'success' ? 'success' : 'failure'; 96 | apiCallsTotal.inc({ 97 | status: success 98 | }) 99 | 100 | console.log(" lat : " + apiResponse.lat); 101 | console.log(" lon : " + apiResponse.lon); 102 | console.log("geohash: "+ geohash.encode(apiResponse.lat, apiResponse.lon)); 103 | 104 | influx.writePoints( 105 | [{ 106 | measurement: "geossh", 107 | fields: { 108 | value: 1 109 | }, 110 | tags: { 111 | geohash: geohash.encode(apiResponse.lat, apiResponse.lon), 112 | username: message.username, 113 | port: message.port, 114 | ip: message.ip 115 | } 116 | }] 117 | ); 118 | console.log("Intruder added") 119 | }) 120 | .catch(function(error) { 121 | console.log(error); 122 | }); 123 | } catch(error) { 124 | console.log(error); 125 | }; 126 | }); 127 | 128 | // Add a 'close' event handler to this instance of socket 129 | sock.on('close', function(data) { 130 | let index = sockets.findIndex(function(o) { 131 | return o.remoteAddress === sock.remoteAddress && o.remotePort === sock.remotePort; 132 | }) 133 | if (index !== -1) sockets.splice(index, 1); 134 | console.log('CLOSED: ' + sock.remoteAddress + ' ' + sock.remotePort); 135 | }); 136 | }); 137 | -------------------------------------------------------------------------------- /dashboard.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 1, 19 | "links": [], 20 | "panels": [ 21 | { 22 | "circleMaxSize": "10", 23 | "circleMinSize": "1", 24 | "colors": [ 25 | "rgba(245, 54, 54, 0.9)", 26 | "rgba(237, 129, 40, 0.89)", 27 | "rgba(50, 172, 45, 0.97)" 28 | ], 29 | "datasource": "InfluxDB", 30 | "decimals": 0, 31 | "esGeoPoint": "geohash", 32 | "esMetric": "metric", 33 | "gridPos": { 34 | "h": 19, 35 | "w": 14, 36 | "x": 0, 37 | "y": 0 38 | }, 39 | "hideEmpty": false, 40 | "hideZero": false, 41 | "id": 2, 42 | "initialZoom": "2", 43 | "links": [], 44 | "locationData": "geohash", 45 | "mapCenter": "Europe", 46 | 
"mapCenterLatitude": 46, 47 | "mapCenterLongitude": 14, 48 | "maxDataPoints": 1, 49 | "mouseWheelZoom": false, 50 | "showLegend": true, 51 | "stickyLabels": false, 52 | "tableQueryOptions": { 53 | "geohashField": "geohash", 54 | "latitudeField": "latitude", 55 | "longitudeField": "longitude", 56 | "metricField": "metric", 57 | "queryType": "geohash" 58 | }, 59 | "targets": [ 60 | { 61 | "groupBy": [ 62 | { 63 | "params": [ 64 | "geohash" 65 | ], 66 | "type": "tag" 67 | } 68 | ], 69 | "measurement": "geossh", 70 | "orderByTime": "ASC", 71 | "policy": "login-attempts", 72 | "query": "SELECT \"value\" AS \"metric\" FROM \"login-attempts\"..\"geossh\" WHERE $timeFilter GROUP BY \"geohash\"", 73 | "rawQuery": true, 74 | "refId": "A", 75 | "resultFormat": "table", 76 | "select": [ 77 | [ 78 | { 79 | "params": [ 80 | "value" 81 | ], 82 | "type": "field" 83 | }, 84 | { 85 | "params": [ 86 | "metric" 87 | ], 88 | "type": "alias" 89 | } 90 | ] 91 | ], 92 | "tags": [] 93 | } 94 | ], 95 | "thresholds": "0,10", 96 | "title": "Failed logins", 97 | "type": "grafana-worldmap-panel", 98 | "unitPlural": "", 99 | "unitSingle": "", 100 | "valueName": "total" 101 | }, 102 | { 103 | "cacheTimeout": null, 104 | "colorBackground": false, 105 | "colorValue": true, 106 | "colors": [ 107 | "#299c46", 108 | "rgba(237, 129, 40, 0.89)", 109 | "#d44a3a" 110 | ], 111 | "description": "Number of failed attempts within the timeframe", 112 | "format": "none", 113 | "gauge": { 114 | "maxValue": 100, 115 | "minValue": 0, 116 | "show": false, 117 | "thresholdLabels": false, 118 | "thresholdMarkers": true 119 | }, 120 | "gridPos": { 121 | "h": 3, 122 | "w": 2, 123 | "x": 14, 124 | "y": 0 125 | }, 126 | "id": 4, 127 | "interval": null, 128 | "links": [], 129 | "mappingType": 1, 130 | "mappingTypes": [ 131 | { 132 | "name": "value to text", 133 | "value": 1 134 | }, 135 | { 136 | "name": "range to text", 137 | "value": 2 138 | } 139 | ], 140 | "maxDataPoints": 100, 141 | "nullPointMode": "connected", 142 | "nullText": null, 143 | "postfix": "", 144 | "postfixFontSize": "50%", 145 | "prefix": "", 146 | "prefixFontSize": "50%", 147 | "rangeMaps": [ 148 | { 149 | "from": "null", 150 | "text": "N/A", 151 | "to": "null" 152 | } 153 | ], 154 | "sparkline": { 155 | "fillColor": "rgba(31, 118, 189, 0.18)", 156 | "full": false, 157 | "lineColor": "rgb(31, 120, 193)", 158 | "show": false 159 | }, 160 | "tableColumn": "", 161 | "targets": [ 162 | { 163 | "groupBy": [ 164 | { 165 | "params": [ 166 | "$__interval" 167 | ], 168 | "type": "time" 169 | }, 170 | { 171 | "params": [ 172 | "null" 173 | ], 174 | "type": "fill" 175 | } 176 | ], 177 | "orderByTime": "ASC", 178 | "policy": "default", 179 | "query": "SELECT count(*) FROM \"login-attempts\"..\"geossh\" WHERE $timeFilter ", 180 | "rawQuery": true, 181 | "refId": "A", 182 | "resultFormat": "time_series", 183 | "select": [ 184 | [ 185 | { 186 | "params": [ 187 | "value" 188 | ], 189 | "type": "field" 190 | }, 191 | { 192 | "params": [], 193 | "type": "mean" 194 | } 195 | ] 196 | ], 197 | "tags": [] 198 | } 199 | ], 200 | "thresholds": "", 201 | "title": "Failed Attempts", 202 | "type": "singlestat", 203 | "valueFontSize": "120%", 204 | "valueMaps": [ 205 | { 206 | "op": "=", 207 | "text": "N/A", 208 | "value": "null" 209 | } 210 | ], 211 | "valueName": "avg" 212 | } 213 | ], 214 | "refresh": "30s", 215 | "schemaVersion": 16, 216 | "style": "dark", 217 | "tags": [], 218 | "templating": { 219 | "list": [] 220 | }, 221 | "time": { 222 | "from": "now/d", 223 | "to": "now" 224 | }, 225 | 
"timepicker": { 226 | "refresh_intervals": [ 227 | "5s", 228 | "10s", 229 | "30s", 230 | "1m", 231 | "5m", 232 | "15m", 233 | "30m", 234 | "1h", 235 | "2h", 236 | "1d" 237 | ], 238 | "time_options": [ 239 | "5m", 240 | "15m", 241 | "1h", 242 | "6h", 243 | "12h", 244 | "24h", 245 | "2d", 246 | "7d", 247 | "30d" 248 | ] 249 | }, 250 | "timezone": "", 251 | "title": "Failed login attempts", 252 | "uid": "l4PDIHriz", 253 | "version": 15 254 | } -------------------------------------------------------------------------------- /values/influxdb/values.yaml: -------------------------------------------------------------------------------- 1 | ## influxdb image version 2 | ## ref: https://hub.docker.com/r/library/influxdb/tags/ 3 | image: 4 | repo: "influxdb" 5 | tag: "1.7.3-alpine" 6 | pullPolicy: IfNotPresent 7 | 8 | ## Specify a service type 9 | ## NodePort is default 10 | ## ref: http://kubernetes.io/docs/user-guide/services/ 11 | ## 12 | service: 13 | ## Add annotations to service 14 | # annotations: {} 15 | type: ClusterIP 16 | ## Add IP Cluster 17 | # clusterIP: "" 18 | ## Add external IPs that route to one or more cluster nodes 19 | # externalIPs: [] 20 | ## Specify LoadBalancer IP (only allow on some cloud provider) 21 | # loadBalancerIP: "" 22 | ## Allow source IPs to access on service (if empty, any access allow) 23 | # loadBalancerSourceRanges: [] 24 | 25 | ## Persist data to a persistent volume 26 | ## 27 | persistence: 28 | enabled: true 29 | ## If true will use an existing PVC instead of creating one 30 | # useExisting: false 31 | ## Name of existing PVC to be used in the influx deployment 32 | # name: 33 | ## influxdb data Persistent Volume Storage Class 34 | ## If defined, storageClassName: 35 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 36 | ## If undefined (the default) or set to null, no storageClassName spec is 37 | ## set, choosing the default provisioner. (gp2 on AWS, standard on 38 | ## GKE, AWS & OpenStack) 39 | ## 40 | storageClass: "nfs-client" 41 | accessMode: ReadWriteOnce 42 | size: 8Gi 43 | 44 | ## Create default user through Kubernetes job 45 | ## Defaults indicated below 46 | ## 47 | setDefaultUser: 48 | enabled: true 49 | 50 | ## Image of the container used for job 51 | ## Default: appropriate/curl:latest 52 | ## 53 | image: appropriate/curl:latest 54 | 55 | ## Deadline for job so it does not retry forever. 56 | ## Default: activeDeadline: 300 57 | ## 58 | activeDeadline: 300 59 | 60 | ## Restart policy for job 61 | ## Default: OnFailure 62 | restartPolicy: OnFailure 63 | 64 | user: 65 | 66 | ## The user name 67 | ## Default: "admin" 68 | username: "admin" 69 | 70 | ## User password 71 | ## single quotes must be escaped (\') 72 | ## Default: (Randomly generated 10 characters of AlphaNum) 73 | # password: 74 | 75 | ## User privileges 76 | ## Default: "WITH ALL PRIVILEGES" 77 | privileges: "WITH ALL PRIVILEGES" 78 | 79 | ## Configure resource requests and limits 80 | ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ 81 | resources: 82 | requests: 83 | memory: 256Mi 84 | cpu: 0.1 85 | limits: 86 | memory: 2Gi 87 | cpu: 0.5 88 | 89 | ingress: 90 | enabled: false 91 | tls: false 92 | # secretName: my-tls-cert # only needed if tls above is true 93 | hostname: influxdb.foobar.com 94 | annotations: 95 | # kubernetes.io/ingress.class: "nginx" 96 | # kubernetes.io/tls-acme: "true" 97 | 98 | ## Use an alternate scheduler, e.g. "stork". 
99 | ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ 100 | ## 101 | # schedulerName: 102 | 103 | ## Node labels for pod assignment 104 | ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ 105 | ## 106 | nodeSelector: {} 107 | 108 | ## Affinity for pod assignment 109 | ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity 110 | ## 111 | affinity: {} 112 | 113 | ## Tolerations for pod assignment 114 | ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ 115 | ## 116 | tolerations: [] 117 | # - key: "key" 118 | # operator: "Equal|Exists" 119 | # value: "value" 120 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" 121 | 122 | ## The InfluxDB image uses several environment variables to automatically 123 | ## configure certain parts of the server. 124 | ## Ref: https://hub.docker.com/_/influxdb/ 125 | env: 126 | # - name: INFLUXDB_DB 127 | # value: "demo" 128 | 129 | ## Change InfluxDB configuration parameters below: 130 | ## Defaults are indicated 131 | ## ref: https://docs.influxdata.com/influxdb/v1.1/administration/config/ 132 | config: 133 | reporting_disabled: false 134 | storage_directory: /var/lib/influxdb 135 | rpc: 136 | enabled: true 137 | bind_address: 8088 138 | meta: 139 | retention_autocreate: true 140 | logging_enabled: true 141 | data: 142 | query_log_enabled: true 143 | cache_max_memory_size: 1073741824 144 | cache_snapshot_memory_size: 26214400 145 | cache_snapshot_write_cold_duration: 10m0s 146 | compact_full_write_cold_duration: 4h0m0s 147 | max_series_per_database: 1000000 148 | max_values_per_tag: 100000 149 | trace_logging_enabled: false 150 | coordinator: 151 | write_timeout: 10s 152 | max_concurrent_queries: 0 153 | query_timeout: 0s 154 | log_queries_after: 0s 155 | max_select_point: 0 156 | max_select_series: 0 157 | max_select_buckets: 0 158 | retention: 159 | enabled: true 160 | check_interval: 30m0s 161 | shard_precreation: 162 | enabled: true 163 | check_interval: 10m0s 164 | advance_period: 30m0s 165 | admin: 166 | enabled: false 167 | bind_address: 8083 168 | https_enabled: false 169 | https_certificate: /etc/ssl/influxdb.pem 170 | monitor: 171 | store_enabled: true 172 | store_database: _internal 173 | store_interval: 10s 174 | subscriber: 175 | enabled: true 176 | http_timeout: 30s 177 | insecure_skip_verify: false 178 | ca_certs: "" 179 | write_concurrency: 40 180 | write_buffer_size: 1000 181 | http: 182 | enabled: true 183 | bind_address: 8086 184 | auth_enabled: false 185 | log_enabled: true 186 | write_tracing: false 187 | pprof_enabled: true 188 | https_enabled: false 189 | https_certificate: /etc/ssl/influxdb.pem 190 | https_private_key: "" 191 | max_row_limit: 10000 192 | max_connection_limit: 0 193 | shared_secret: "beetlejuicebeetlejuicebeetlejuice" 194 | realm: InfluxDB 195 | unix_socket_enabled: false 196 | bind_socket: /var/run/influxdb.sock 197 | graphite: 198 | enabled: false 199 | bind_address: 2003 200 | database: graphite 201 | retention_policy: autogen 202 | protocol: tcp 203 | batch_size: 5000 204 | batch_pending: 10 205 | batch_timeout: 1s 206 | consistency_level: one 207 | separator: . 
208 | udp_read_buffer: 0 209 | # Uncomment to define graphite templates 210 | # templates: 211 | # - "graphite.metric.*.*.* measurement.run" 212 | collectd: 213 | enabled: false 214 | bind_address: 25826 215 | database: collectd 216 | retention_policy: autogen 217 | batch_size: 5000 218 | batch_pending: 10 219 | batch_timeout: 10s 220 | read_buffer: 0 221 | typesdb: /usr/share/collectd/types.db 222 | security_level: none 223 | auth_file: /etc/collectd/auth_file 224 | opentsdb: 225 | enabled: false 226 | bind_address: 4242 227 | database: opentsdb 228 | retention_policy: autogen 229 | consistency_level: one 230 | tls_enabled: false 231 | certificate: /etc/ssl/influxdb.pem 232 | batch_size: 1000 233 | batch_pending: 5 234 | batch_timeout: 1s 235 | log_point_errors: true 236 | udp: 237 | enabled: false 238 | bind_address: 8089 239 | database: udp 240 | retention_policy: autogen 241 | batch_size: 5000 242 | batch_pending: 10 243 | read_buffer: 0 244 | batch_timeout: 1s 245 | precision: "ns" 246 | continuous_queries: 247 | log_enabled: true 248 | enabled: true 249 | run_interval: 1s -------------------------------------------------------------------------------- /values/grafana/values.yaml: -------------------------------------------------------------------------------- 1 | rbac: 2 | create: true 3 | pspEnabled: true 4 | pspUseAppArmor: true 5 | namespaced: false 6 | serviceAccount: 7 | create: true 8 | name: 9 | 10 | replicas: 1 11 | 12 | deploymentStrategy: RollingUpdate 13 | 14 | readinessProbe: 15 | httpGet: 16 | path: /api/health 17 | port: 3000 18 | 19 | livenessProbe: 20 | httpGet: 21 | path: /api/health 22 | port: 3000 23 | initialDelaySeconds: 60 24 | timeoutSeconds: 30 25 | failureThreshold: 10 26 | 27 | image: 28 | repository: grafana/grafana 29 | tag: 5.4.3 30 | pullPolicy: IfNotPresent 31 | 32 | ## Optionally specify an array of imagePullSecrets. 33 | ## Secrets must be manually created in the namespace. 34 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 35 | ## 36 | # pullSecrets: 37 | # - myRegistrKeySecretName 38 | 39 | securityContext: 40 | runAsUser: 472 41 | fsGroup: 472 42 | 43 | 44 | extraConfigmapMounts: [] 45 | # - name: certs-configmap 46 | # mountPath: /etc/grafana/ssl/ 47 | # configMap: certs-configmap 48 | # readOnly: true 49 | 50 | 51 | ## Assign a PriorityClassName to pods if set 52 | # priorityClassName: 53 | 54 | downloadDashboardsImage: 55 | repository: appropriate/curl 56 | tag: latest 57 | pullPolicy: IfNotPresent 58 | 59 | chownDataImage: 60 | repository: busybox 61 | tag: 1.30.0 62 | pullPolicy: IfNotPresent 63 | 64 | ## Pod Annotations 65 | # podAnnotations: {} 66 | 67 | ## Deployment annotations 68 | # annotations: {} 69 | 70 | ## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). 71 | ## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
72 | ## ref: http://kubernetes.io/docs/user-guide/services/ 73 | ## 74 | service: 75 | type: ClusterIP 76 | port: 80 77 | annotations: {} 78 | labels: {} 79 | 80 | ingress: 81 | enabled: false 82 | annotations: {} 83 | # kubernetes.io/ingress.class: nginx 84 | # kubernetes.io/tls-acme: "true" 85 | labels: {} 86 | path: / 87 | hosts: 88 | - chart-example.local 89 | tls: [] 90 | # - secretName: chart-example-tls 91 | # hosts: 92 | # - chart-example.local 93 | 94 | resources: 95 | limits: 96 | cpu: 200m 97 | memory: 512Mi 98 | requests: 99 | cpu: 100m 100 | memory: 128Mi 101 | 102 | ## Node labels for pod assignment 103 | ## ref: https://kubernetes.io/docs/user-guide/node-selection/ 104 | # 105 | nodeSelector: {} 106 | 107 | ## Tolerations for pod assignment 108 | ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ 109 | ## 110 | tolerations: [] 111 | 112 | ## Affinity for pod assignment 113 | ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity 114 | ## 115 | affinity: {} 116 | 117 | ## Enable persistence using Persistent Volume Claims 118 | ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ 119 | ## 120 | persistence: 121 | enabled: true 122 | initChownData: true 123 | storageClassName: nfs-client 124 | accessModes: 125 | - ReadWriteOnce 126 | size: 10Gi 127 | # annotations: {} 128 | # subPath: "" 129 | # existingClaim: 130 | 131 | # Administrator credentials when not using an existing secret (see below) 132 | adminUser: admin 133 | # adminPassword: strongpassword 134 | 135 | # Use an existing secret for the admin user. 136 | admin: 137 | existingSecret: "" 138 | userKey: admin-user 139 | passwordKey: admin-password 140 | 141 | ## Use an alternate scheduler, e.g. "stork". 142 | ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ 143 | ## 144 | # schedulerName: 145 | 146 | ## Extra environment variables that will be pass onto deployment pods 147 | env: {} 148 | 149 | ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment 150 | ## This can be useful for auth tokens, etc 151 | envFromSecret: "" 152 | 153 | ## Additional grafana server secret mounts 154 | # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 155 | extraSecretMounts: [] 156 | # - name: secret-files 157 | # mountPath: /etc/secrets 158 | # secretName: grafana-secret-files 159 | # readOnly: true 160 | 161 | ## Additional grafana server volume mounts 162 | # Defines additional volume mounts. 163 | extraVolumeMounts: [] 164 | # - name: extra-volume 165 | # mountPath: /mnt/volume 166 | # readOnly: true 167 | # existingClaim: volume-claim 168 | 169 | ## Pass the plugins you want installed as a list. 
170 | ## 171 | plugins: [] 172 | # - digrich-bubblechart-panel 173 | # - grafana-clock-panel 174 | 175 | ## Configure grafana datasources 176 | ## ref: http://docs.grafana.org/administration/provisioning/#datasources 177 | ## 178 | datasources: {} 179 | # datasources.yaml: 180 | # apiVersion: 1 181 | # datasources: 182 | # - name: Prometheus 183 | # type: prometheus 184 | # url: http://prometheus-prometheus-server 185 | # access: proxy 186 | # isDefault: true 187 | 188 | ## Configure grafana dashboard providers 189 | ## ref: http://docs.grafana.org/administration/provisioning/#dashboards 190 | ## 191 | ## `path` must be /var/lib/grafana/dashboards/ 192 | ## 193 | dashboardProviders: {} 194 | # dashboardproviders.yaml: 195 | # apiVersion: 1 196 | # providers: 197 | # - name: 'default' 198 | # orgId: 1 199 | # folder: '' 200 | # type: file 201 | # disableDeletion: false 202 | # editable: true 203 | # options: 204 | # path: /var/lib/grafana/dashboards/default 205 | 206 | ## Configure grafana dashboard to import 207 | ## NOTE: To use dashboards you must also enable/configure dashboardProviders 208 | ## ref: https://grafana.com/dashboards 209 | ## 210 | ## dashboards per provider, use provider name as key. 211 | ## 212 | dashboards: {} 213 | # default: 214 | # some-dashboard: 215 | # json: dashboards/custom-dashboard.json 216 | # prometheus-stats: 217 | # gnetId: 2 218 | # revision: 2 219 | # datasource: Prometheus 220 | # local-dashboard: 221 | # url: https://example.com/repository/test.json 222 | # local-dashboard-base64: 223 | # url: https://example.com/repository/test-b64.json 224 | # b64content: true 225 | 226 | ## Reference to external ConfigMap per provider. Use provider name as key and ConfiMap name as value. 227 | ## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. 228 | ## ConfigMap data example: 229 | ## 230 | ## data: 231 | ## example-dashboard.json: | 232 | ## RAW_JSON 233 | ## 234 | dashboardsConfigMaps: {} 235 | # default: "" 236 | 237 | ## Grafana's primary configuration 238 | ## NOTE: values in map will be converted to ini format 239 | ## ref: http://docs.grafana.org/installation/configuration/ 240 | ## 241 | grafana.ini: 242 | paths: 243 | data: /var/lib/grafana/data 244 | logs: /var/log/grafana 245 | plugins: /var/lib/grafana/plugins 246 | provisioning: /etc/grafana/provisioning 247 | analytics: 248 | check_for_updates: true 249 | log: 250 | mode: console 251 | grafana_net: 252 | url: https://grafana.net 253 | ## LDAP Authentication can be enabled with the following values on grafana.ini 254 | ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid 255 | # auth.ldap: 256 | # enabled: true 257 | # allow_sign_up: true 258 | # config_file: /etc/grafana/ldap.toml 259 | 260 | ## Grafana's LDAP configuration 261 | ## Templated by the template in _helpers.tpl 262 | ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled 263 | ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap 264 | ## ref: http://docs.grafana.org/installation/ldap/#configuration 265 | ldap: 266 | # `existingSecret` is a reference to an existing secret containing the ldap configuration 267 | # for Grafana in a key `ldap-toml`. 
268 | existingSecret: "" 269 | # `config` is the content of `ldap.toml` that will be stored in the created secret 270 | config: "" 271 | # config: |- 272 | # verbose_logging = true 273 | 274 | # [[servers]] 275 | # host = "my-ldap-server" 276 | # port = 636 277 | # use_ssl = true 278 | # start_tls = false 279 | # ssl_skip_verify = false 280 | # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" 281 | 282 | ## Grafana's SMTP configuration 283 | ## NOTE: To enable, grafana.ini must be configured with smtp.enabled 284 | ## ref: http://docs.grafana.org/installation/configuration/#smtp 285 | smtp: 286 | # `existingSecret` is a reference to an existing secret containing the smtp configuration 287 | # for Grafana. 288 | existingSecret: "" 289 | userKey: "user" 290 | passwordKey: "password" 291 | 292 | ## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders 293 | ## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards 294 | sidecar: 295 | image: kiwigrid/k8s-sidecar:0.0.10 296 | imagePullPolicy: IfNotPresent 297 | resources: 298 | # limits: 299 | # cpu: 100m 300 | # memory: 100Mi 301 | # requests: 302 | # cpu: 50m 303 | # memory: 50Mi 304 | dashboards: 305 | enabled: false 306 | # label that the configmaps with dashboards are marked with 307 | label: grafana_dashboard 308 | # folder in the pod that should hold the collected dashboards 309 | folder: /tmp/dashboards 310 | # If specified, the sidecar will search for dashboard config-maps inside this namespace. 311 | # Otherwise the namespace in which the sidecar is running will be used. 312 | # It's also possible to specify ALL to search in all namespaces 313 | searchNamespace: null 314 | datasources: 315 | enabled: false 316 | # label that the configmaps with datasources are marked with 317 | label: grafana_datasource 318 | # If specified, the sidecar will search for datasource config-maps inside this namespace. 319 | # Otherwise the namespace in which the sidecar is running will be used. 320 | # It's also possible to specify ALL to search in all namespaces 321 | searchNamespace: null -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ssh login-attempt monitoring
2 | 
3 | This repo is based on the great article [Geolocating SSH Hackers In Real-Time](https://medium.com/schkn/geolocating-ssh-hackers-in-real-time-108cbc3b5665), and goes into a little more detail on setting up the environment.
4 | I will be using kubernetes to run the following pieces:
5 | - influxdb: the time series database
6 | - grafana: for visualizing the login attempts on a map
7 | - login collector: a tcp socket listener called by rsyslogd
8 | 
9 | In the first step we will go through the basic setup of the components:
10 | 1. influxdb
11 | 1. grafana
12 | 1. logincollector
13 | 1. rsyslog
14 | 
15 | and in a second step we will configure these components.
16 | 
17 | ## 1. Setting up influxdb
18 | 
19 | ### 1.1. Install
20 | 
21 | The setup of influxdb is pretty straightforward: we will not expose influxdb to the network (we don't need to for this use case). We will use helm as the package manager for the install, based on a slightly modified `values.yaml`.
22 | 
23 | ```
24 | helm install --name influxdb -f ./values/influxdb/values.yaml stable/influxdb --namespace influxdb
25 | ```
26 | 
27 | Done!
28 | 
29 | The influxdb user is `admin` and you can retrieve the auto-generated password with: `kubectl -n influxdb get secret influxdb-influxdb-auth -o jsonpath="{.data.influxdb-password}" | base64 -d`
30 | 
31 | ### 1.2. Create the database
32 | 
33 | We will use the influxdb cli to connect and create the database `login-attempts`.
34 | 
35 | First, retrieve the password: `export PASSWORD=$(kubectl -n influxdb get secret influxdb-influxdb-auth -o jsonpath="{.data.influxdb-password}" | base64 -d)`
36 | 
37 | Then forward the local port 8086 to the influxdb pod: `kubectl port-forward --namespace influxdb $(kubectl get pods --namespace influxdb -l app=influxdb-influxdb -o jsonpath='{ .items[0].metadata.name }') 8086:8086`
38 | 
39 | ...and finally connect to the database: `influx -host 127.0.0.1 -port 8086 -username admin -password $PASSWORD`
40 | 
41 | Now create the database: `create database "login-attempts" with duration 30d`
42 | 
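If you prefer to sanity-check the new database from Node rather than from the `influx` shell, a minimal sketch using the same `influx` client the collector uses (see `package.json`) writes a test point through the port-forward from above and reads it back. The file name, the test tag values and the credentials are assumptions, adjust them to your setup:

```
// check-influx.js -- write one test point and read it back (sketch)
const Influx = require("influx");

const influx = new Influx.InfluxDB({
  host: "127.0.0.1",             // reachable via the port-forward from above
  port: 8086,
  database: "login-attempts",
  username: "admin",
  password: process.env.PASSWORD // the password exported above
});

influx
  .writePoints([{
    measurement: "geossh",
    fields: { value: 1 },
    tags: { geohash: "u0v9", username: "test", port: "22", ip: "127.0.0.1" }
  }])
  .then(() => influx.query('SELECT * FROM "geossh" ORDER BY time DESC LIMIT 5'))
  .then((rows) => console.log(rows))
  .catch((err) => console.error(err));
```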
43 | ## 2. Setting up grafana
44 | 
45 | ### 2.1. Install
46 | We will use helm to set up grafana as well:
47 | ```
48 | helm install --name grafana -f ./values/grafana/values.yaml stable/grafana --namespace grafana
49 | ```
50 | 
51 | and retrieve the password (the username is `admin`):
52 | ```
53 | kubectl get secret --namespace grafana grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
54 | ```
55 | 
56 | To expose grafana to the network we need an ingress, and there are two ways to deploy that kubernetes object:
57 | 1. Let helm create the ingress by setting `ingress.enabled` to `true` in `values/grafana/values.yaml`
58 | 1. Deploy your own ingress: this is the way I prefer, because I use traefik as the ingress controller on my kubernetes cluster, which takes care of generating Let's Encrypt certificates automatically for any new ingress. Aside from that, I did not want to expose grafana to the internet, so I added a `whitelist-source-range` definition to the ingress to make sure that grafana only accepts incoming traffic coming from my local network.
59 | 
60 | To create the ingress using Traefik:
61 | 1. Edit the hostname in `manifests/grafana/ingress.yaml`
62 | 1. Deploy the ingress: `kubectl -n grafana apply -f manifests/grafana/ingress.yaml`
63 | 
64 | 
65 | ### 2.2. Add Worldmap Panel
66 | 
67 | Next we will add the [Worldmap panel](https://grafana.com/plugins/grafana-worldmap-panel/installation) to grafana.
68 | 
69 | 1. Connect to the grafana pod: `kubectl -n grafana exec -it $(kubectl get pods --namespace grafana -l app=grafana -o jsonpath='{.items[0].metadata.name}') /bin/bash`
70 | 1. Install the panel: `grafana-cli plugins install grafana-worldmap-panel`
71 | 1. Force the pod to restart: `kubectl -n grafana delete pod $(kubectl get pods --namespace grafana -l app=grafana -o jsonpath='{.items[0].metadata.name}')`
72 | 1. Wait for grafana to restart and verify that the panel is available: click "Add Dashboard" and check that the "Worldmap Panel" is available
73 | 
74 | ### 2.3. Add a datasource for influxdb
75 | 
76 | 1. Go to "Configuration - Datasources" and select "Add Datasource"
77 | 1. Select "InfluxDB"
78 | 1. Name the datasource `InfluxDB`
79 | 1. Add the cluster-internal URL: `http://influxdb-influxdb.influxdb.svc:8086`
80 | 1. Enter the database name: `_internal` (the default database)
81 | 1. Add `admin` and its password as credentials
82 | 1. Hit save and test: you should see a positive test result
83 | 
84 | ## 3. Deploying the log receiver
85 | 
86 | ### ~~3.1. Create an ipstack.com account~~
87 | 
88 | ~~The tcp listener uses ipstack.com to resolve IPs to geohashes.~~
89 | 
90 | ~~1. Register a free account at `ipstack.com`~~
91 | ~~1. Grab your API key and add it to `config.json`~~
92 | 
93 | **Update 2019-03-31: I reached ipstack.com's quota of free api-calls far too soon, so I moved to the free (for non-commercial use) ip-api.com.**
94 | 
95 | ### 3.2. Build and deploy
96 | The log receiver is a tcp socket listener written in nodejs that will get called from `rsyslogd`.
97 | 
98 | We will deploy it as a container to kubernetes and expose it through a NodePort (we cannot use an ingress, as the communication will happen over tcp, not http):
99 | 
100 | 1. Build the container: `docker build -t <your-repo>/logincollector:<tag> .`
101 | 1. Push the container: `docker push <your-repo>/logincollector:<tag>`
102 | 1. Change the deployment in `manifests/logincollector/deployment.yaml` according to your docker image
103 | 1. Create the namespace: `kubectl create ns logincollector`
104 | 1. Deploy: `kubectl -n logincollector apply -f manifests/logincollector/`
105 | 
106 | You should see the following output:
107 | ```
108 | deployment.apps "collector-deployment" created
109 | service "collector-svc" created
110 | ```
111 | 
112 | Now get the details of the service: `kubectl -n logincollector get svc`
113 | 
114 | That should display something like this:
115 | ```
116 | NAME            TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
117 | collector-svc   NodePort   10.97.41.164   <none>        8080:31829/TCP   40s
118 | ```
119 | 
120 | Where `31829` is the port that is exposed on each cluster node. We will need this port to configure the log shipping in the `rsyslogd` config.
121 | 
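Before wiring up `rsyslogd` you can test the whole chain by hand with a fake login attempt. A small Node sketch (the node IP and the NodePort below are taken from the example output above and are assumptions, adjust them to your cluster) sends the same JSON payload that rsyslog will later produce:

```
// send-test-event.js -- simulate one forwarded log message (sketch)
const net = require("net");

const host = process.argv[2] || "192.168.178.72"; // any cluster node IP
const port = Number(process.argv[3]) || 31829;    // the NodePort from `kubectl get svc`

const payload = JSON.stringify({ username: "test", ip: "8.8.8.8", port: "2222" });

const socket = net.connect(port, host, () => {
  socket.write(payload);
  socket.end();
  console.log("sent: " + payload);
});
socket.on("error", (err) => console.error(err));
```

If everything is in place, the collector logs (see 5.1.) should show the message and a new point should appear in influxdb.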
122 | ## 4. Configure rsyslogd
123 | 
124 | ### 4.1. Find out the log pattern we are looking for
125 | 
126 | On the node running `sshd`, check the result of `less /var/log/auth.log | grep ssh`. The output may look like this:
127 | 
128 | ```
129 | Feb 20 11:59:55 pve sshd[31596]: Invalid user netflow from xxx.xxx.xxx.xxx port 48854
130 | Feb 20 11:59:55 pve sshd[31596]: Invalid user netflow from xxx.xxx.xxx.xxx port 48854
131 | Feb 20 11:59:55 pve sshd[31596]: input_userauth_request: invalid user netflow [preauth]
132 | Feb 20 11:59:55 pve sshd[31596]: input_userauth_request: invalid user netflow [preauth]
133 | Feb 20 11:59:55 pve sshd[31596]: Received disconnect from xxx.xxx.xxx.xxx port 48854:11: Bye Bye [preauth]
134 | Feb 20 11:59:55 pve sshd[31596]: Received disconnect from xxx.xxx.xxx.xxx port 48854:11: Bye Bye [preauth]
135 | Feb 20 11:59:55 pve sshd[31596]: Disconnected from xxx.xxx.xxx.xxx port 48854 [preauth]
136 | Feb 20 11:59:55 pve sshd[31596]: Disconnected from xxx.xxx.xxx.xxx port 48854 [preauth]
137 | Feb 20 12:00:20 pve sshd[31651]: Invalid user mc from xxx.xxx.xxx.xxx port 41876
138 | Feb 20 12:00:20 pve sshd[31651]: Invalid user mc from xxx.xxx.xxx.xxx port 41876
139 | Feb 20 12:00:20 pve sshd[31651]: input_userauth_request: invalid user mc [preauth]
140 | Feb 20 12:00:20 pve sshd[31651]: input_userauth_request: invalid user mc [preauth]
141 | Feb 20 12:00:20 pve sshd[31651]: Received disconnect from xxx.xxx.xxx.xxx port 41876:11: Bye Bye [preauth]
142 | Feb 20 12:00:20 pve sshd[31651]: Received disconnect from xxx.xxx.xxx.xxx port 41876:11: Bye Bye [preauth]
143 | Feb 20 12:00:20 pve sshd[31651]: Disconnected from xxx.xxx.xxx.xxx port 41876 [preauth]
144 | Feb 20 12:00:20 pve sshd[31651]: Disconnected from xxx.xxx.xxx.xxx port 41876 [preauth]
145 | Feb 20 12:01:36 pve sshd[31816]: Invalid user ts3 from xxx.xxx.xxx.xxx port 33903
146 | Feb 20 12:01:36 pve sshd[31816]: Invalid user ts3 from xxx.xxx.xxx.xxx port 33903
147 | Feb 20 12:01:36 pve sshd[31816]: input_userauth_request: invalid user ts3 [preauth]
148 | Feb 20 12:01:36 pve sshd[31816]: input_userauth_request: invalid user ts3 [preauth]
149 | Feb 20 12:01:36 pve sshd[31816]: Received disconnect from xxx.xxx.xxx.xxx port 33903:11: Bye Bye [preauth]
150 | Feb 20 12:01:36 pve sshd[31816]: Received disconnect from xxx.xxx.xxx.xxx port 33903:11: Bye Bye [preauth]
151 | Feb 20 12:01:36 pve sshd[31816]: Disconnected from xxx.xxx.xxx.xxx port 33903 [preauth]
152 | Feb 20 12:01:36 pve sshd[31816]: Disconnected from xxx.xxx.xxx.xxx port 33903 [preauth]
153 | ```
154 | (IP addresses have been redacted)
155 | 
156 | Your log may look different, so you may have to adapt the patterns. What we are looking for here is: `Invalid user netflow from xxx.xxx.xxx.xxx port 48854`, so a `less /var/log/auth.log | grep ssh | grep " Invalid user"` will show the list of login attempts we are looking for.
157 | 
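To convince yourself that this pattern really captures the three fields the collector expects, it can help to prototype the extraction outside of rsyslog first. The sketch below is only an illustration in JavaScript with a simplified regular expression and a made-up sample line; the actual extraction is done by the rsyslog template defined in 4.3.:

```
// parse-auth-line.js -- rough JS equivalent of the extraction done by the rsyslog template (sketch)
const line = "Feb 20 11:59:55 pve sshd[31596]: Invalid user netflow from 203.0.113.7 port 48854";

const match = line.match(/Invalid user (\S+) from (\d{1,3}(?:\.\d{1,3}){3}) port (\d+)/);
if (match) {
  const [, username, ip, port] = match;
  // this is exactly the payload the collector parses in app.js
  console.log(JSON.stringify({ username, ip, port }));
  // -> {"username":"netflow","ip":"203.0.113.7","port":"48854"}
}
```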
158 | ### 4.2. Create the rsyslog rules
159 | 
160 | Edit `/etc/rsyslog.d/50-default.conf` and add:
161 | 
162 | ```
163 | # Default rules for rsyslog.
164 | #
165 | # For more information see rsyslog.conf(5) and /etc/rsyslog.conf
166 | 
167 | #
168 | # First some standard log files. Log by facility.
169 | #
170 | 
171 | if $programname == 'sshd' then {
172 |     if $msg startswith ' Invalid user' then {
173 |         # Transform and forward data!
174 |         action(type="omfwd" queue.type="LinkedList" action.resumeRetryCount="-1" queue.size="10000" queue.saveonshutdown="on" target="192.168.178.72" port="31829" protocol="tcp" template="ip-json")
175 |     }
176 |     # stop
177 | }
178 | 
179 | auth,authpriv.*              /var/log/auth.log
180 | *.*;auth,authpriv.none       -/var/log/syslog
181 | #cron.*                      /var/log/cron.log
182 | #daemon.*                    -/var/log/daemon.log
183 | kern.*                       -/var/log/kern.log
184 | #lpr.*                       -/var/log/lpr.log
185 | mail.*                       -/var/log/mail.log
186 | #user.*                      -/var/log/user.log
187 | ```
188 | 
189 | What this configuration does is pretty straightforward:
190 | 
191 | - `if $programname == 'sshd' then {` filters for log entries coming from `sshd`
192 | - `if $msg startswith ' Invalid user' then {` describes the entries we are looking for (see 4.1.)
193 | - `action(type="omfwd" target="<node-ip>" port="<nodeport>" protocol="tcp" template="ip-json")` uses the `omfwd` forwarder to send log entries as tcp packets (see `protocol`) to the IP defined in `target` and to the port defined in `port`
194 | - additional options to the `action` make sure that no message is lost. See here for more details: https://www.golinuxhub.com/2018/05/how-to-remote-logging-using-rsyslog-omfwd-redhat.html
195 | - `template="ip-json"` describes the format of the tcp packet that we are going to define in the next step
196 | 
197 | 
198 | ### 4.3. Create the message template
199 | 
200 | Still on the node running `sshd`, edit `/etc/rsyslog.d/50-default.conf` and add:
201 | 
202 | ```
203 | # We handle "Invalid user q from xxx.xxx.xxx.xxx port 10664"
204 | template(name="ip-json" type="string" string="{\"username\":\"%msg:R,ERE,1,DFLT:^ Invalid.*user ([a-zA-Z]*).* ([0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*).* port ([0-9]*)--end%\",\"ip\":\"%msg:R,ERE,2,DFLT:^ Invalid.*user ([a-zA-Z]*).* ([0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*).* port ([0-9]*)--end%\",\"port\":\"%msg:R,ERE,3,DFLT:^ Invalid.*user ([a-zA-Z]*).* ([0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*.[0-9][0-9]*[0-9]*).* port ([0-9]*)--end%\"}")
205 | ```
206 | 
207 | Please note that the template needs to be consistent with the pattern we identified in 4.1.
208 | 
209 | ### 4.4. Start forwarding
210 | 
211 | To start forwarding we now need to restart rsyslog: `/etc/init.d/rsyslog restart`, and check that everything works well: `/etc/init.d/rsyslog status`
212 | 
213 | ## 5. Verify that data flows to influxdb
214 | 
215 | ### 5.1. Check the socket listener logs
216 | 
217 | To check the logs execute: `kubectl -n logincollector logs -l app=collectorapp`
218 | 
219 | If packets are received from rsyslogd and stored in the database, you should see something like:
220 | ```
221 | Received data: {"username":"b","ip":"xxx.xxx.xxx.xxx","port":"33614"}
222 | 
223 | Intruder added
224 | Received data: {"username":"teamspeak","ip":"xxx.xxx.xxx.xxx","port":"34994"}
225 | 
226 | Intruder added
227 | Received data: {"username":"admin","ip":"xxx.xxx.xxx.xxx","port":"62561"}
228 | 
229 | Intruder added
230 | Received data: {"username":"sftp","ip":"xxx.xxx.xxx.xxx","port":"53614"}
231 | ```
232 | 
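Besides the logs, the collector also exposes Prometheus metrics on port `3001` (see `app.js`). A quick sketch for peeking at the `api_calls_total` counter with `axios` is shown below; it assumes you first port-forward that port to your machine (e.g. `kubectl -n logincollector port-forward deploy/collector-deployment 3001:3001`):

```
// check-metrics.js -- peek at the collector's Prometheus counters (sketch)
const axios = require("axios");

axios
  .get("http://127.0.0.1:3001/metrics")
  .then((res) => {
    // keep only the api_calls_total samples from the plain-text exposition format
    const counters = res.data
      .split("\n")
      .filter((line) => line.startsWith("api_calls_total"));
    console.log(counters.join("\n") || "no api_calls_total samples yet");
  })
  .catch((err) => console.error(err.message));
```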
233 | ### 5.2. Check the database
234 | 
235 | 1. Log on to influxdb (see 1.2.) and check that the data gets persisted: `select * from "login-attempts"..geossh`
236 | 
237 | The result should look like:
238 | ```
239 | name: geossh
240 | ------------
241 | time                 geohash    ip               port   username    value
242 | 1550665352142176076  u09tvnqgx  xxx.xxx.xxx.xxx  33614  b           1
243 | 1550665395520243219  w4rqnpjee  xxx.xxx.xxx.xxx  34994  teamspeak   1
244 | 1550665416291991522  qw8nv8qdk  xxx.xxx.xxx.xxx  62561  admin       1
245 | 1550665424640376618  w21z773kz  xxx.xxx.xxx.xxx  53614  sftp        1
246 | 1550665465950918696  wx4g0kz6e  xxx.xxx.xxx.xxx  39072  yarn        1
247 | 1550665718062144620  wx4g0kz6e  xxx.xxx.xxx.xxx  11142  b           1
248 | ```
249 | 
250 | ## 6. Set up the dashboard
251 | 
252 | Finally, we will set up the dashboard in grafana:
253 | 1. Create a new dashboard named "Login Attempts"
254 | 1. Add the Worldmap panel and edit it
255 | 1. Go to [General] and edit the title: "Failed Login Attempts"
256 | 1. Go to [Metric] and add "InfluxDB" as the datasource
257 | 1. Edit the query to be `SELECT "value" AS "metric" FROM "login-attempts".."geossh" WHERE $timeFilter GROUP BY "geohash"`
258 | 1. Set the format to `Table`
259 | 1. Go to [Worldmap] and edit the visual options to suit your needs (the defaults are fine as well)
260 | 1. In "Map data options" set "Location data" to `geohash`, "Geohash field" to `geohash`, "Metric field" to `metric`
261 | 
262 | 
263 | Et voilà!
264 | 
--------------------------------------------------------------------------------