├── .gitignore ├── LICENSE ├── README.md ├── bin └── opencanary-correlator ├── opencanary_correlator ├── __init__.py ├── common │ ├── __init__.py │ ├── config.py │ ├── constants.py │ ├── emailer.py │ ├── incidents.py │ ├── logs.py │ ├── notifications.py │ ├── queries.py │ ├── redismanager.py │ └── utils.py ├── dispatcher.py ├── handlers.py ├── opencanary_correlator.conf └── receiver.py ├── requirements.txt └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # https://github.com/github/gitignore/blob/master/Python.gitignore 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | env/ 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *,cover 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Thinkst Applied Research 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 7 | 8 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 9 | 10 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 11 | 12 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | OpenCanary Correlator 2 | ======================= 3 | Thinkst Applied Research 4 | 5 | Overview 6 | -------- 7 | OpenCanary Correlator collects events from OpenCanary daemons and coalesces them. It sends alerts via email and sms 8 | 9 | Prerequisites 10 | ------------- 11 | * Redis 12 | * Python 2.7 13 | * Mandrill API keys for email 14 | * Twillio API keys for sms 15 | 16 | On Ubuntu install the following: 17 | 18 | ```$ sudo apt-get install redis-server libffi-dev python-dev``` 19 | 20 | Install 21 | ----------------- 22 | * Create a virtualenv 23 | 24 | ``` 25 | $ virtualenv env 26 | $ source env/bin/activate 27 | ``` 28 | 29 | * Install via pip, or 30 | 31 | ``` 32 | $ pip install opencanary-correlator 33 | ``` 34 | 35 | * Install from source 36 | 37 | ``` 38 | $ git clone https://github.com/thinkst/opencanary-correlator 39 | $ cd opencanary-correlator 40 | $ python setup.py install 41 | ``` 42 | 43 | Run 44 | --------------- 45 | 46 | * Start Redis 47 | * Locate the installed template config file, by running the correlator without any arguments 48 | 49 | ``` 50 | $ opencanary-correlator 51 | Warning: no config file specified. Using the template config (which does not have any alerting configured): 52 | /path/to/template/opencanary_correlator.conf 53 | $ cp /path/to/template/opencanary_correlator.conf ./ 54 | ``` 55 | 56 | * Edit the config file to add API keys, email address and/or phone numbers for alerts 57 | * Run the correlator with saved config file 58 | 59 | ``` 60 | opencanary-correlator --config=./opencanary_correlator.conf 61 | ``` 62 | 63 | * Configure instances of opencanaryd to send events to the correlator. 64 | -------------------------------------------------------------------------------- /bin/opencanary-correlator: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from opencanary_correlator.receiver import main 4 | 5 | if __name__ == "__main__": 6 | main() 7 | 8 | -------------------------------------------------------------------------------- /opencanary_correlator/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.1" 2 | -------------------------------------------------------------------------------- /opencanary_correlator/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thinkst/opencanary-correlator/294470db25eb68a0ce28dbf0e687e0a6a8cd9465/opencanary_correlator/common/__init__.py -------------------------------------------------------------------------------- /opencanary_correlator/common/config.py: -------------------------------------------------------------------------------- 1 | import os, sys, json, copy 2 | 3 | SETTINGS = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.json") 4 | 5 | class Config: 6 | def __init__(self, configfile=SETTINGS): 7 | self.__config = None 8 | self.__configfile = configfile 9 | 10 | # throw exception if fails 11 | with open(configfile, "r") as f: 12 | self.__config = json.load(f) 13 | 14 | def moduleEnabled(self, module_name): 15 | k = "%s.enabled" % module_name 16 | if k in self.__config: 17 | return bool(self.__config[k]) 18 | return False 19 | 20 | def getVal(self, key, default=None): 21 | # throw exception to caller 22 | #figures out the return type from the passed default, uses str if none is supplied 23 | return_type = type(default) 24 | if return_type == type(None): 25 | return_type = str 26 | try: 27 | return return_type(self.__config[key]) 28 | except KeyError as e: 29 | if default is not None: 30 | return default 31 | raise e 32 | 33 | def setVal(self, key, val): 34 | """Set value only if valid otherwise throw exception""" 35 | 36 | oldconfig = copy.deepcopy(self.__config) 37 | self.__config[key] = val 38 | 39 | err = self.valid() 40 | if err is not None: 41 | self.__config = oldconfig 42 | raise ConfigException(key, err) 43 | 44 | def valid(self): 45 | """ 46 | Test whether config is in a valid state 47 | Return None if valid and string error message on first failure 48 | """ 49 | 50 | return None 51 | 52 | 53 | def save(self): 54 | """Backup config file to older version and save to new file""" 55 | 56 | err = self.valid() 57 | if err is not None: 58 | raise ConfigException("config", "Can't save invalid config: %s" % err) 59 | 60 | try: 61 | cfg = self.__configfile 62 | os.rename(cfg, cfg + ".bak") 63 | 64 | with open(cfg, "w") as f: 65 | json.dump(self.__config, f) 66 | 67 | except Exception, e: 68 | print "[-] Failed to save config file %s" % e 69 | raise ConfigException("config", e) 70 | 71 | def __repr__(self): 72 | return self.__config.__repr__() 73 | 74 | def __str__(self): 75 | return self.__config.__str__() 76 | 77 | class ConfigException(Exception): 78 | """Exception raised on invalid config value""" 79 | 80 | def __init__(self, key, msg): 81 | self.key = key 82 | self.msg = msg 83 | 84 | def __repr__(self): 85 | return "<%s %s (%s)>" % (self.__class__.__name__, self.key, self.msg) 86 | 87 | config = None 88 | -------------------------------------------------------------------------------- /opencanary_correlator/common/constants.py: -------------------------------------------------------------------------------- 1 | 2 | LOG_INVALID = 0 3 | LOG_BASE_BOOT = 1000 4 | LOG_BASE_MSG = 1001 5 | LOG_BASE_DEBUG = 1002 6 | LOG_BASE_ERROR = 1003 7 | LOG_FTP_LOGIN_ATTEMPT = 2000 8 | LOG_HTTP_GET = 3000 9 | LOG_HTTP_POST_LOGIN_ATTEMPT = 3001 10 | LOG_SSH_NEW_CONNECTION = 4000 11 | LOG_SSH_REMOTE_VERSION_SENT = 4001 12 | LOG_SSH_LOGIN_ATTEMPT = 4002 13 | LOG_SMB_FILE_OPEN = 5000 14 | LOG_PORT_SYN = 5001 15 | LOG_PORT_SCAN_NET = 5002 16 | LOG_PORT_SCAN_HOST = 5003 17 | LOG_TELNET_LOGIN_ATTEMPT = 6001 18 | LOG_HTTPPROXY_LOGIN_ATTEMPT = 7001 19 | LOG_MYSQL_LOGIN_ATTEMPT = 8001 20 | LOG_MSSQL_LOGIN_SQLAUTH = 9001 21 | LOG_MSSQL_LOGIN_WINAUTH = 9002 22 | LOG_TFTP = 10001 23 | LOG_NTP_MONLIST = 11001 24 | LOG_VNC = 12001 25 | LOG_SNMP_CMD = 13001 26 | LOG_RDP = 14001 27 | LOG_SIP_REQUEST = 15001 28 | -------------------------------------------------------------------------------- /opencanary_correlator/common/emailer.py: -------------------------------------------------------------------------------- 1 | import smtplib 2 | import mandrill 3 | import opencanary_correlator.common.config as c 4 | from email.mime.text import MIMEText 5 | from opencanary_correlator.common.logs import logger 6 | 7 | def send_email(from_='notifications@opencanary.org', to='', subject='', message='', server='', port=25): 8 | logger.debug('Emailing %s' % to) 9 | if not server: 10 | return 11 | 12 | msg = MIMEText(message) 13 | 14 | msg['Subject'] = subject 15 | msg['From'] = from_ 16 | msg['To'] = to 17 | 18 | s = smtplib.SMTP(server, port) 19 | try: 20 | s.sendmail(from_, [to], msg.as_string()) 21 | logger.info('Email sent to %s' % (to)) 22 | except Exception as e: 23 | logger.error('Email sending produced exception %r' % e) 24 | s.quit() 25 | 26 | 27 | def mandrill_send(to=None, subject=None, message=None, reply_to=None): 28 | try: 29 | mandrill_client = mandrill.Mandrill(c.config.getVal("console.mandrill_key")) 30 | message = { 31 | 'auto_html': None, 32 | 'auto_text': None, 33 | 'from_email': 'notifications@opencanary.org', 34 | 'from_name': 'OpenCanary', 35 | 'text': message, 36 | 'subject': subject, 37 | 'to': [{'email': to, 38 | 'type': 'to'}], 39 | } 40 | if reply_to: 41 | message["headers"] = { "Reply-To": reply_to } 42 | 43 | result = mandrill_client.messages.send(message=message, async=False, ip_pool='Main Pool') 44 | 45 | except mandrill.Error, e: 46 | print 'A mandrill error occurred: %s - %s' % (e.__class__, e) 47 | -------------------------------------------------------------------------------- /opencanary_correlator/common/incidents.py: -------------------------------------------------------------------------------- 1 | from twisted.internet.threads import deferToThread 2 | from opencanary_correlator.common.redismanager import * 3 | from opencanary_correlator.common.constants import LOG_PORT_SCAN_NET, LOG_PORT_SCAN_HOST 4 | from utils import current_time_offset 5 | from notifications import notify 6 | from logs import logger 7 | import simplejson 8 | import datetime 9 | import opencanary_correlator.common.config as c 10 | 11 | class Incident(object): 12 | CONFIG_INCIDENT_HORIZON = '' 13 | DESCRIPTION = 'Not available' 14 | 15 | def __init__(self, data=None, write_object=False, key=None): 16 | super(Incident, self).__setattr__('data', data) 17 | 18 | if write_object: 19 | self.do_creation() 20 | 21 | if key: 22 | self.key = key 23 | 24 | @classmethod 25 | def lookup_id(cls, id_=None, key=None): 26 | """This method can be called either on the parent Incident class 27 | or one of its subclasses. 28 | 29 | If called on Incident, a full key must be provided and this method 30 | returns a subclass. 31 | 32 | If called on a subclass, only the the incident ID must be provided. 33 | 34 | Returns an Incident subclass or None. 35 | """ 36 | if key: 37 | if cls != Incident: 38 | return None 39 | 40 | if key.startswith(KEY_FTPLOGIN): 41 | cls = IncidentFTPLogin 42 | elif key.startswith(KEY_HTTP_LOGIN): 43 | cls = IncidentHTTPLogin 44 | elif key.startswith(KEY_SSH_LOGIN): 45 | cls = IncidentSSHLogin 46 | elif key.startswith(KEY_SMB_FILE_OPEN): 47 | cls = IncidentSMBFileOpen 48 | elif key.startswith(KEY_HOST_PORT_SCAN): 49 | cls = IncidentHostPortScan 50 | elif key.startswith(KEY_NETWORK_PORT_SCAN): 51 | cls = IncidentNetworkPortScan 52 | else: 53 | return None 54 | 55 | else: 56 | if id_ == None or cls == Incident: 57 | return None 58 | key = cls.INCIDENT_KEY_PREFIX+str(id_) 59 | 60 | fields = redis.hgetall(key) 61 | return cls(data=fields, key=key) 62 | 63 | def __getattr__(self, k): 64 | return self.data[k] 65 | 66 | def __setattr__(self, k, v): 67 | self.data[k] = v 68 | 69 | def save(self,): 70 | if not self.key: 71 | return False 72 | 73 | redis.hmset(self.key, self.data) 74 | 75 | def delete(self,): 76 | if not self.key: 77 | return False 78 | 79 | redis.zrem(KEY_INCIDENTS, self.key) 80 | if redis.delete(self.key) == 0: 81 | return False 82 | return True 83 | 84 | 85 | def find_incident(self, key_prefix=None, incident_horizon=None): 86 | start_time = current_time_offset()-incident_horizon 87 | 88 | recent_incidents = redis.zrevrangebyscore(KEY_INCIDENTS, '+inf', start_time) 89 | for incident in recent_incidents: 90 | if incident.startswith(key_prefix): 91 | return incident 92 | return None 93 | 94 | def make_key(self, time=''): 95 | if type(time) == float: 96 | time = repr(time) 97 | return self.INCIDENT_KEY_PREFIX + self.data['src_host'] + ':' + str(time) 98 | 99 | def add_log_data(self, current_incident): 100 | """ 101 | Updates the log data field for the incident. 102 | 103 | Default action is to simple append to a list of data, but certain incidents (such as portscans) 104 | may want to preserve previously existing data differently. Overriding this method lets 105 | an incident decide how its log data is stored. 106 | """ 107 | 108 | if current_incident.has_key('logdata'): 109 | current_incident['logdata'] = simplejson.loads(current_incident['logdata']) 110 | else: 111 | current_incident['logdata'] = [] 112 | 113 | current_incident['logdata'].append(self.data['logdata']) 114 | current_incident['logdata'] = simplejson.dumps(current_incident['logdata']) 115 | 116 | return current_incident 117 | 118 | def do_creation(self,): 119 | """ 120 | Insert an Incident hash. 121 | 122 | Before creating a new incident, a check is performed to see 123 | if the same src host has a live incident within the 124 | configured time horizon. 125 | """ 126 | now = current_time_offset() 127 | incident_key_prefix = self.INCIDENT_KEY_PREFIX + self.data['src_host'] 128 | incident_horizon = float(c.config.getVal( 129 | self.CONFIG_INCIDENT_HORIZON, 130 | default=c.config.getVal( 131 | 'console.incident_horizon', 132 | default=60))) 133 | current_incident_key = self.find_incident(key_prefix=incident_key_prefix, 134 | incident_horizon=incident_horizon) 135 | 136 | if current_incident_key: 137 | #an incident already exists, update it 138 | current_incident = redis.hgetall(current_incident_key) 139 | current_incident['events_list'] += ','+repr(now) 140 | current_incident['events_count'] = int(current_incident['events_count'])+1 141 | current_incident['updated'] = True 142 | 143 | #add new log data to old incident 144 | if self.data.has_key('logdata'): 145 | current_incident = self.add_log_data(current_incident) 146 | 147 | redis.hmset(current_incident_key, current_incident) 148 | redis.zrem(KEY_INCIDENTS, current_incident_key) 149 | redis.zadd(KEY_INCIDENTS, now, current_incident_key) 150 | else: 151 | #this is a new incident 152 | incident_key = self.make_key(time=now) 153 | 154 | self.data['created'] = now 155 | self.data['events_list'] = repr(now) 156 | self.data['events_count'] = 1 157 | self.data['acknowledged'] = False 158 | self.data['notified'] = False 159 | self.data['updated'] = True 160 | self.data['description'] = self.DESCRIPTION 161 | if self.data.has_key('logdata'): 162 | if type(self.data['logdata']) == list: 163 | self.data['logdata'] = simplejson.dumps(self.data['logdata']) 164 | else: 165 | self.data['logdata'] = simplejson.dumps([self.data['logdata']]) 166 | redis.hmset(incident_key, self.data) 167 | redis.zadd(KEY_INCIDENTS, now, incident_key) 168 | 169 | deferToThread(notify, self) 170 | 171 | def unacknowledge(self,): 172 | self.data['acknowledged'] = False 173 | self.save() 174 | 175 | def acknowledge(self,): 176 | self.data['acknowledged'] = True 177 | self.save() 178 | 179 | def format_title(self,): 180 | """Formatter for notifications""" 181 | return "{0} by {1} on {2} ({3})".format(self.DESCRIPTION, self.data['src_host'], 182 | self.data['node_id'], self.data['dst_host']) 183 | 184 | def _format_report(self,): 185 | """Formatter for notifications""" 186 | created = datetime.datetime.fromtimestamp(float(self.data['created'])).strftime('%Y-%m-%d %H:%M:%S (UTC)') 187 | return """ 188 | ====================================================================== 189 | ALERT 190 | ====================================================================== 191 | 192 | Incident: {0} 193 | Time : {1} 194 | Source : {2} 195 | Target : {3} (id {4}) 196 | EXTRA 197 | ====================================================================== 198 | """.format(self.DESCRIPTION, created, self.data['src_host'], 199 | self.data['dst_host'], self.data['node_id']) 200 | 201 | def format_report(self,): 202 | return self._format_report().replace(' EXTRA', '') 203 | 204 | def _format_report_short(self,): 205 | """Formatter for SMS notifications""" 206 | return \ 207 | """Canary Incident: {0}. Source {1}. Target {2} ({3}).EXTRA""".format( 208 | self.DESCRIPTION, 209 | self.data['src_host'], 210 | self.data['node_id'], 211 | self.data['dst_host']) 212 | 213 | def format_report_short(self,): 214 | return self._format_report_short().replace('EXTRA', '') 215 | 216 | 217 | class IncidentFTPLogin(Incident): 218 | CONFIG_INCIDENT_HORIZON = 'ftp.incident_horizon' 219 | INCIDENT_KEY_PREFIX = KEY_FTPLOGIN 220 | DESCRIPTION = 'FTP Login Attempt' 221 | 222 | class IncidentHTTPLogin(Incident): 223 | CONFIG_INCIDENT_HORIZON = 'http.incident_horizon' 224 | INCIDENT_KEY_PREFIX = KEY_HTTP_LOGIN 225 | DESCRIPTION = 'HTTP Login Attempt' 226 | 227 | class IncidentSSHLogin(Incident): 228 | CONFIG_INCIDENT_HORIZON = 'ssh.incident_horizon' 229 | INCIDENT_KEY_PREFIX = KEY_SSH_LOGIN 230 | DESCRIPTION = 'SSH Login Attempt' 231 | 232 | class IncidentTelnetLogin(Incident): 233 | CONFIG_INCIDENT_HORIZON = 'telnet.incident_horizon' 234 | INCIDENT_KEY_PREFIX = KEY_TELNET_LOGIN 235 | DESCRIPTION = 'Telnet Login Attempt' 236 | 237 | 238 | class IncidentHTTProxy(Incident): 239 | CONFIG_INCIDENT_HORIZON = 'httproxy.incident_horizon' 240 | INCIDENT_KEY_PREFIX = KEY_HTTPPROXY_LOGIN 241 | DESCRIPTION = 'HTTP Proxy Login Attempt' 242 | 243 | 244 | class IncidentMySQLLogin(Incident): 245 | CONFIG_INCIDENT_HORIZON = 'mysql.incident_horizon' 246 | INCIDENT_KEY_PREFIX = KEY_MYSQL_LOGIN 247 | DESCRIPTION = 'MySQL Login Attempt' 248 | 249 | 250 | class IncidentMSSQLLogin(Incident): 251 | CONFIG_INCIDENT_HORIZON = 'mssql.incident_horizon' 252 | INCIDENT_KEY_PREFIX = KEY_MSSQL_LOGIN 253 | DESCRIPTION = 'MSSQL Login Attempt' 254 | 255 | 256 | class IncidentTFTP(Incident): 257 | CONFIG_INCIDENT_HORIZON = 'tftp.incident_horizon' 258 | INCIDENT_KEY_PREFIX = KEY_TFTP 259 | DESCRIPTION = 'tftp login Attempt' 260 | 261 | class IncidentNTPMonList(Incident): 262 | CONFIG_INCIDENT_HORIZON = 'ntp.incident_horizon' 263 | INCIDENT_KEY_PREFIX = KEY_NTP_MON_LIST 264 | DESCRIPTION = 'NTP Login Attempt' 265 | 266 | 267 | class IncidentVNCLogin(Incident): 268 | CONFIG_INCIDENT_HORIZON = 'vnc.incident_horizon' 269 | INCIDENT_KEY_PREFIX = KEY_VNC_LOGIN 270 | DESCRIPTION = 'VNC Login Attempt' 271 | 272 | 273 | class IncidentSNMP(Incident): 274 | CONFIG_INCIDENT_HORIZON = 'snmp.incident_horizon' 275 | INCIDENT_KEY_PREFIX = KEY_SNMP_LOGIN 276 | DESCRIPTION = 'SNMP Command Received' 277 | 278 | 279 | class IncidentRDPLogin(Incident): 280 | CONFIG_INCIDENT_HORIZON = 'rdp.incident_horizon' 281 | INCIDENT_KEY_PREFIX = KEY_RDP_LOGIN 282 | DESCRIPTION = 'RDP Login Attempt' 283 | 284 | 285 | class IncidentSIPLogin(Incident): 286 | CONFIG_INCIDENT_HORIZON = 'sip.incident_horizon' 287 | INCIDENT_KEY_PREFIX = KEY_SIP_LOGIN 288 | DESCRIPTION = 'SIP Login Attempt' 289 | 290 | 291 | 292 | class IncidentSMBFileOpen(Incident): 293 | CONFIG_INCIDENT_HORIZON = 'smb.file_open_horizon' 294 | INCIDENT_KEY_PREFIX = KEY_SMB_FILE_OPEN 295 | DESCRIPTION = 'Shared File Opened' 296 | 297 | def format_report(self): 298 | report = self._format_report() 299 | try: 300 | d = simplejson.loads(self.data['logdata'])[0] 301 | filename = d['FILENAME'] 302 | user = d['USER'] 303 | report = report.replace(' EXTRA', ' File : {0}\n User : {1}\n'.format(filename, user)) 304 | except Exception as e: 305 | logger.error('Could not extract SMB filename from log data: %r' % (self.data)) 306 | return report 307 | 308 | 309 | def format_report_short(self,): 310 | report = self._format_report_short() 311 | try: 312 | d = simplejson.loads(self.data['logdata'])[0] 313 | filename = d['FILENAME'] 314 | user = d['USER'] 315 | report = report.replace('EXTRA', ' File "{0}", user "{1}".'.format(filename, user)) 316 | except Exception as e: 317 | logger.error('Could not extract SMB filename from log data: %r' % (self.data)) 318 | return report 319 | 320 | class IncidentHostPortScan(Incident): 321 | CONFIG_INCIDENT_HORIZON = 'scans.host_portscan_horizon' 322 | INCIDENT_KEY_PREFIX = KEY_HOST_PORT_SCAN 323 | DESCRIPTION = 'Host Port Scan' 324 | 325 | def add_log_data(self, current_incident): 326 | logger.debug('add_log_data(1)') 327 | if current_incident.has_key('logdata'): 328 | logger.debug('add_log_data(2)') 329 | logger.debug(current_incident['logdata']) 330 | logger.debug(simplejson.loads(current_incident['logdata'])) 331 | current_incident['logdata'] = set(simplejson.loads(current_incident['logdata'])) 332 | else: 333 | logger.debug('add_log_data(3)') 334 | current_incident['logdata'] = set() 335 | 336 | logger.debug('add_log_data(4)') 337 | current_incident['logdata'].update(self.data['logdata']) 338 | logger.debug(current_incident['logdata']) 339 | current_incident['logdata'] = simplejson.dumps(sorted([int(x) for x in current_incident['logdata']])) 340 | logger.debug(current_incident['logdata']) 341 | 342 | current_incident['logtype'] = LOG_PORT_SCAN_HOST 343 | return current_incident 344 | 345 | class IncidentNetworkPortScan(Incident): 346 | CONFIG_INCIDENT_HORIZON = 'scans.network_portscan_horizon' 347 | INCIDENT_KEY_PREFIX = KEY_NETWORK_PORT_SCAN 348 | DESCRIPTION = 'Network Port Scan' 349 | 350 | def add_log_data(self, current_incident): 351 | logger.debug('network_add_log_data(1)') 352 | if current_incident.has_key('logdata'): 353 | logger.debug('network_add_log_data(2)') 354 | logger.debug(current_incident['logdata']) 355 | logger.debug(simplejson.loads(current_incident['logdata'])) 356 | current_incident['logdata'] = set(simplejson.loads(current_incident['logdata'])) 357 | else: 358 | logger.debug('network_add_log_data(3)') 359 | current_incident['logdata'] = set() 360 | 361 | logger.debug('network_add_log_data(4)') 362 | current_incident['logdata'].update(self.data['logdata']) 363 | logger.debug(current_incident['logdata']) 364 | current_incident['logdata'] = simplejson.dumps(list(current_incident['logdata'])) 365 | logger.debug(current_incident['logdata']) 366 | 367 | current_incident['logtype'] = LOG_PORT_SCAN_NET 368 | return current_incident 369 | 370 | def format_report(self,): 371 | """Formatter for notifications""" 372 | try: 373 | targets = ', '.join([host for host in simplejson.loads(self.data['logdata'])]) 374 | except: 375 | targets = 'Target data absent' 376 | return """ 377 | ====================================================================== 378 | ALERT 379 | ====================================================================== 380 | 381 | Incident: {0} 382 | Time : {1} 383 | Source : {2} 384 | Port : {3} 385 | Targets : {4} 386 | 387 | ====================================================================== 388 | """.format(self.DESCRIPTION, self.data['created'], self.data['src_host'], 389 | self.data['dst_port'], targets) 390 | 391 | class IncidentFactory: 392 | 393 | @classmethod 394 | def create_incident(cls, type_, data=None): 395 | print '{0}: {1}'.format(type_, data) 396 | logger.debug('Creating incident type: {0}'.format(type_)) 397 | if type_ == 'ftp.login_attempt': 398 | IncidentFTPLogin(data=data, write_object=True) 399 | elif type_ == 'http.login_attempt': 400 | IncidentHTTPLogin(data=data, write_object=True) 401 | elif type_ == 'ssh.login_attempt': 402 | IncidentSSHLogin(data=data, write_object=True) 403 | elif type_ == 'smb.file_open': 404 | IncidentSMBFileOpen(data=data, write_object=True) 405 | elif type_ == 'scans.host_portscan': 406 | IncidentHostPortScan(data=data, write_object=True) 407 | elif type_ == 'scans.network_portscan': 408 | IncidentNetworkPortScan(data=data, write_object=True) 409 | elif type_ == 'telnet.login_attempt': 410 | IncidentTelnetLogin(data=data, write_object=True) 411 | elif type_ == 'httpproxy.login_attempt': 412 | IncidentHTTProxy(data=data, write_object=True) 413 | elif type_ == 'mysql.login_attempt': 414 | IncidentMySQLLogin(data=data, write_object=True) 415 | elif type_ == 'mssql.login_attempt': 416 | IncidentMSSQLLogin(data=data, write_object=True) 417 | elif type_ == 'tftp.action': 418 | IncidentTFTP(data=data, write_object=True) 419 | elif type_ == 'ntp.monlist': 420 | IncidentNTPMonList(data=data, write_object=True) 421 | elif type_ == 'vnc.login_attempt': 422 | IncidentVNCLogin(data=data, write_object=True) 423 | elif type_ == 'snmp.cmd': 424 | IncidentSNMP(data=data, write_object=True) 425 | elif type_ == 'rdp.login_attempt': 426 | IncidentRDPLogin(data=data, write_object=True) 427 | elif type_ == 'sip.login_attempt': 428 | IncidentSIPLogin(data=data, write_object=True) 429 | else: 430 | logger.error('Unknown incident type: {0}'.format(type_)) 431 | 432 | 433 | if __name__ == '__main__': 434 | test_events = [{'updated': 'True', 'src_port': '40198', 'logdata': "{u'USERNAME': u'qwe', u'PASSWORD': u'qwe'}", 'created': '1418997029.99313', 'notified': 'False', 'events_count': '4', 'acknowledged': 'False', 'logtype': '2000', 'dst_host': '192.168.233.1', 'node_id': '23456', 'local_time': '2014-12-19 11:29:43.671930', 'reported_dst_host': '127.0.0.1', 'events_list': '1418997029.99,1418997030.76,1418997031.3,1418997032.16', 'dst_port': '2100', 'src_host': '127.0.0.1'}, {'updated': 'True', 'src_port': '40198', 'logdata': "{u'USERNAME': u'qwe', u'PASSWORD': u'qwe'}", 'notified': 'False', 'events_count': '7', 'acknowledged': 'False', 'created': '1418996901.18268', 'logtype': '2000', 'dst_host': '192.168.233.1', 'node_id': '23456', 'local_time': '2014-12-19 11:29:43.671930', 'reported_dst_host': '127.0.0.1', 'events_list': '1418996901.18,1418996902.37,1418996903.1,1418996903.57,1418996904.0,1418996904.4,1418996904.85', 'dst_port': '2100', 'src_host': '127.0.0.1'}, {'updated': 'True', 'src_port': '40198', 'logdata': "{u'USERNAME': u'qwe', u'PASSWORD': u'qwe'}", 'created': '1418996840.670021', 'notified': 'False', 'events_count': '1', 'acknowledged': 'False', 'logtype': '2000', 'dst_host': '192.168.233.1', 'node_id': '23456', 'local_time': '2014-12-19 11:29:43.671930', 'reported_dst_host': '127.0.0.1', 'events_list': '1418996840.67', 'dst_port': '2100', 'src_host': '127.0.0.1'}] 435 | import pdb;pdb.set_trace() 436 | for data in test_events: 437 | IncidentFTPLogin(data=data, write_object=True) 438 | -------------------------------------------------------------------------------- /opencanary_correlator/common/logs.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class RedisHandler(logging.Handler): 4 | """ 5 | A class which sends records to a redis list. 6 | """ 7 | 8 | def __init__(self, level=logging.NOTSET): 9 | #this song and dance avoids a circular dependency at load time, 10 | #by importing only when this class is instatiated 11 | super(RedisHandler, self).__init__(level=level) 12 | from queries import write_log 13 | self.write_log = write_log 14 | 15 | def emit(self, record): 16 | """ 17 | Emit a record. 18 | """ 19 | try: 20 | self.write_log(self.format(record)) 21 | except: 22 | self.handleError(record) 23 | 24 | logger = None 25 | # Console and correlator use different logger names. Common modules 26 | # should still log to the logger for the process under which they're running. 27 | # Impact of this is we don't support multiple loggers per process 28 | existing_logger_names = logging.getLogger().manager.loggerDict.keys() 29 | if len(existing_logger_names) > 0: 30 | lgr = existing_logger_names[0] 31 | logger = logging.getLogger(lgr) 32 | 33 | 34 | -------------------------------------------------------------------------------- /opencanary_correlator/common/notifications.py: -------------------------------------------------------------------------------- 1 | from twilio.rest import TwilioRestClient 2 | from opencanary_correlator.common.logs import logger 3 | from opencanary_correlator.common.emailer import mandrill_send, send_email 4 | import opencanary_correlator.common.config as c 5 | import requests 6 | 7 | class SMS: 8 | def send(self, destination, message): 9 | ACCOUNT_SID = c.config.getVal('twilio.sid', default='') 10 | AUTH_TOKEN = c.config.getVal('twilio.auth_token', default='') 11 | from_ = c.config.getVal('twilio.from_number', default='') 12 | 13 | client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN) 14 | 15 | 16 | client.messages.create( 17 | to=destination, 18 | from_=from_, 19 | body=message 20 | ) 21 | 22 | def notify(incident): 23 | if c.config.getVal('console.email_notification_enable', False): 24 | logger.debug('Email notifications enabled') 25 | addresses = c.config.getVal('console.email_notification_address', default=[]) 26 | if c.config.getVal('console.mandrill_key', False): 27 | for address in addresses: 28 | logger.debug('Email sent to %s' % address) 29 | mandrill_send(to=address, 30 | subject=incident.format_title(), 31 | message=incident.format_report()) 32 | else: 33 | server = c.config.getVal('console.email_host', default='') 34 | port = int(c.config.getVal('console.email_port', default=25)) 35 | if len(addresses) > 0 and server: 36 | for address in addresses: 37 | send_email(to=address, 38 | subject=incident.format_title(), 39 | message=incident.format_report(), 40 | server=server, 41 | port=port) 42 | 43 | if c.config.getVal('console.sms_notification_enable', default=False): 44 | logger.debug('SMS notifications enabled') 45 | sms = SMS() 46 | sms_numbers = c.config.getVal('console.sms_notification_numbers', []) 47 | for to in sms_numbers: 48 | logger.debug('SMS sent to %s' % to) 49 | sms.send(to, incident.format_report_short()) 50 | 51 | if c.config.getVal('console.slack_notification_enable', default=False): 52 | logger.debug('Slack notifications enabled') 53 | webhooks = c.config.getVal('console.slack_notification_webhook', default=[]) 54 | for to in webhooks: 55 | response = requests.post( 56 | to, json={"text": incident.format_report_short()} 57 | ) 58 | if response.status_code != 200: 59 | logger.error("Error %s sending Slack message, the response was:\n%s" % (response.status_code, response.text)) 60 | -------------------------------------------------------------------------------- /opencanary_correlator/common/queries.py: -------------------------------------------------------------------------------- 1 | 2 | from opencanary_correlator.common.redismanager import redis, KEY_LOGS, KEY_INCIDENTS, KEY_DEVICE, \ 3 | KEY_USER, KEY_USER_COUNT,\ 4 | KEY_CONSOLE_SETTING_PREFIX, \ 5 | KEY_WHITELIST_IPS 6 | from opencanary_correlator.common.utils import timestamp_age, timestamp_printable, timestamp_js 7 | from opencanary_correlator.common.logs import logger 8 | from opencanary_correlator.common.incidents import Incident 9 | import nacl.hash 10 | import datetime 11 | import simplejson 12 | 13 | def get_device_id_hash(device_id): 14 | return nacl.hash.sha256(str(device_id), encoder=nacl.encoding.RawEncoder)[:12].encode("hex") 15 | 16 | def jsonify_incident(key, incident): 17 | incident['key'] = key 18 | incident['created_printable'] = timestamp_printable(incident['created']) 19 | incident['created_age'] = timestamp_age(incident['created']) 20 | incident['created_age_seconds'] = float(datetime.datetime.utcnow().strftime("%s")) - float(incident['created']) 21 | 22 | MAX_EVENT_COUNT = 100 23 | 24 | events_list_printable = [] 25 | events_list = [] 26 | events = incident['events_list'].split(',') 27 | for e in events[:MAX_EVENT_COUNT]: 28 | events_list_printable.append(timestamp_printable(e)) 29 | events_list.append(timestamp_js(e)) 30 | incident['events_list_printable'] = ','.join(events_list_printable) 31 | incident['events_list'] = events_list 32 | if len(events) > MAX_EVENT_COUNT: 33 | incident['events_skipped'] = len(events) - MAX_EVENT_COUNT 34 | 35 | if incident.has_key('logdata'): 36 | incident['logdata'] = simplejson.loads(incident['logdata']) 37 | if type(incident['logdata']) == list: 38 | incident['logdata'] = incident['logdata'][:MAX_EVENT_COUNT] 39 | 40 | return incident 41 | 42 | def _filter_incidents(filter_=None, include_hosts=True): 43 | host_cache = {} 44 | keys = redis.zrangebyscore(KEY_INCIDENTS, '-inf', '+inf') 45 | incidents = [] 46 | for key in keys: 47 | incident = redis.hgetall(key) 48 | try: 49 | if not filter_(incident): 50 | continue 51 | 52 | if include_hosts: 53 | if not host_cache.has_key(incident['node_id']): 54 | host_cache[incident['node_id']] = \ 55 | redis.hgetall(KEY_DEVICE+get_device_id_hash(incident['node_id'])) 56 | incident['host'] = host_cache[incident['node_id']] 57 | incidents.append(jsonify_incident(key, incident)) 58 | except Exception as e: 59 | logger.critical(e) 60 | 61 | return incidents 62 | 63 | def get_device(node_id): 64 | try: 65 | host = redis.hgetall(KEY_DEVICE+get_device_id_hash(node_id)) 66 | host['unacknowleged_incidents'] = get_unacknowledged_incidents(include_hosts=False, node_id=node_id) 67 | return host 68 | except: 69 | return None 70 | 71 | def should_notify(node_id): 72 | key = KEY_DEVICE+get_device_id_hash(node_id) 73 | return redis.hget(key, 'ignore_notifications_general') != 'True' 74 | 75 | def get_incident(key): 76 | try: 77 | incident = redis.hgetall(key) 78 | incident['host'] = redis.hgetall(KEY_DEVICE+get_device_id_hash(incident['node_id'])) 79 | return jsonify_incident(key, incident) 80 | except: 81 | return None 82 | 83 | def get_all_incidents(include_hosts=True, node_id=None): 84 | if node_id: 85 | return _filter_incidents(include_hosts=include_hosts, 86 | filter_=lambda incident: node_id == incident['node_id']) 87 | else: 88 | return _filter_incidents(include_hosts=include_hosts, 89 | filter_=lambda incident: True) 90 | 91 | def get_all_incidents_objs(): 92 | return [Incident.lookup_id(key=x['key']) for x in get_all_incidents()] 93 | 94 | def get_unacknowledged_incidents(include_hosts=True, node_id=None): 95 | if node_id: 96 | return _filter_incidents(include_hosts=include_hosts, 97 | filter_=lambda incident: incident['acknowledged'] == 'False' and 98 | node_id == incident['node_id']) 99 | else: 100 | return _filter_incidents(include_hosts=include_hosts, 101 | filter_=lambda incident: incident['acknowledged'] == 'False') 102 | 103 | def get_unacknowledged_incidents_objs(): 104 | return [Incident.lookup_id(key=x['key']) for x in get_unacknowledged_incidents()] 105 | 106 | def get_acknowledged_incidents(include_hosts=True, node_id=None): 107 | if node_id: 108 | return _filter_incidents(include_hosts=include_hosts, 109 | filter_=lambda incident: incident['acknowledged'] == 'True' and 110 | node_id == incident['node_id']) 111 | else: 112 | return _filter_incidents(include_hosts=include_hosts, 113 | filter_=lambda incident: incident['acknowledged'] == 'True') 114 | 115 | 116 | def get_acknowledged_incidents_objs(): 117 | return [Incident.lookup_id(key=x['key']) for x in get_acknowledged_incidents()] 118 | 119 | def clear_incidents(): 120 | for incident_key in redis.zrevrangebyscore(KEY_INCIDENTS, '+inf', '-inf'): 121 | redis.delete(incident_key) 122 | 123 | redis.delete(KEY_INCIDENTS) 124 | 125 | def write_log(logline): 126 | if redis.llen(KEY_LOGS) > 1000: 127 | redis.ltrim(KEY_LOGS, 0, 999) 128 | redis.lpush(KEY_LOGS, logline) 129 | 130 | def get_logs(limit=None): 131 | return redis.lrange(KEY_LOGS, 0, -1) 132 | 133 | def get_console_setting(setting, **kwargs): 134 | key = KEY_CONSOLE_SETTING_PREFIX+setting 135 | value = redis.get(key) 136 | if not value: 137 | return kwargs.get('default', None) 138 | return value 139 | 140 | def set_console_setting(setting, value): 141 | key = KEY_CONSOLE_SETTING_PREFIX+setting 142 | return redis.set(key, value) 143 | 144 | if __name__ == "__main__": 145 | print 'All: %r' % get_all_devices() 146 | print 'All Incidents: %r' % get_all_incidents() 147 | print 'All Unacknowledged Incidents: %r' % get_unacknowledged_incidents() 148 | -------------------------------------------------------------------------------- /opencanary_correlator/common/redismanager.py: -------------------------------------------------------------------------------- 1 | import redis 2 | redis = redis.StrictRedis(host='127.0.0.1', port=6379, db='0') 3 | redis.DEFAULT_EXPIRY = 120 4 | 5 | KEY_LOGS = 'logs' 6 | KEY_DEVICE = 'device:' 7 | KEY_INCIDENTS = 'incidents' 8 | KEY_FTPLOGIN = 'incident:ftplogin:' 9 | KEY_HTTP_LOGIN = 'incident:httplogin:' 10 | KEY_SSH_LOGIN = 'incident:sshlogin:' 11 | KEY_TELNET_LOGIN = 'incident:telnetlogin:' 12 | KEY_HTTPPROXY_LOGIN = 'incident:httpproxylogin:' 13 | KEY_MYSQL_LOGIN = 'incident:mysqllogin:' 14 | KEY_MSSQL_LOGIN = 'incident:mssqllogin:' 15 | KEY_TFTP = 'incident:tftp:' 16 | KEY_NTP_MON_LIST = 'incident:ntpmonlist:' 17 | KEY_VNC_LOGIN = 'incident:vnclogin:' 18 | KEY_SNMP_LOGIN = 'incident:snmplogin:' 19 | KEY_RDP_LOGIN = 'incident:rdplogin:' 20 | KEY_SIP_LOGIN = 'incident:siplogin:' 21 | KEY_SMB_FILE_OPEN = 'incident:smbfileopen:' 22 | KEY_TRACK_HOST_PORT_SCAN = 'scan:host:' 23 | KEY_HOST_PORT_SCAN = 'incident:hostportscan:' 24 | KEY_TRACK_NETWORK_PORT_SCAN = 'scan:network:' 25 | KEY_NETWORK_PORT_SCAN = 'incident:networkportscan:' 26 | KEY_USER_COUNT = 'usercount' 27 | KEY_USER = 'user:' 28 | KEY_WHITELIST_IPS = 'whitelist-ips:' 29 | KEY_CONSOLE_SETTING_PREFIX = 'console-setting:' 30 | -------------------------------------------------------------------------------- /opencanary_correlator/common/utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import string 3 | 4 | def current_time_offset(): 5 | return int(datetime.utcnow().strftime("%s")) 6 | 7 | def get_clock(): 8 | now = datetime.utcnow() 9 | day = string.lstrip(now.strftime("%d"),'0') 10 | return {'date': now.strftime("%B "+day+", %Y"), 'time': now.strftime('%H:%M')} 11 | 12 | def seconds_to_age(s): 13 | try: 14 | weeks, remainder = divmod(int(s), 60*60*24*7) 15 | days, remainder = divmod(remainder, 60*60*24) 16 | hours, remainder = divmod(remainder, 60*60) 17 | minutes, seconds = divmod(remainder, 60) 18 | if weeks > 0: 19 | num = weeks 20 | time_str = 'week' 21 | elif days > 0: 22 | num = days 23 | time_str = 'day' 24 | elif hours > 0: 25 | num = hours 26 | time_str = 'hour' 27 | elif minutes > 0: 28 | num = minutes 29 | time_str = 'minute' 30 | elif seconds >= 0: 31 | num = seconds 32 | time_str = 'second' 33 | else: 34 | num = -1 35 | time_str = 'age error' 36 | 37 | return '{0} {1}{2}'.format(int(num), time_str, 's' if num != 1 else '') 38 | except Exception as e: 39 | return 'age error' 40 | 41 | def timestamp_age(timestamp): 42 | t = datetime.fromtimestamp(float(timestamp)) 43 | now = datetime.utcnow() 44 | return seconds_to_age((now - t).total_seconds()) 45 | 46 | def timestamp_printable(timestamp): 47 | try: 48 | t = datetime.fromtimestamp(float(timestamp)) 49 | return t.strftime("%a %b %d %Y %H:%M:%S GMT+0000 (UTC)") 50 | except: 51 | return str(timestamp) 52 | 53 | def timestamp_js(timestamp): 54 | return int(float(timestamp)*1000) 55 | -------------------------------------------------------------------------------- /opencanary_correlator/dispatcher.py: -------------------------------------------------------------------------------- 1 | import logging 2 | logger = logging.getLogger('opencanary-correlator') 3 | 4 | from opencanary_correlator.common.constants import * 5 | 6 | from common.logs import RedisHandler 7 | 8 | logger.setLevel(logging.DEBUG) 9 | redis_h = RedisHandler() 10 | redis_h.setLevel(logging.DEBUG) 11 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 12 | redis_h.setFormatter(formatter) 13 | logger.addHandler(redis_h) 14 | 15 | from opencanary_correlator.handlers import * 16 | 17 | logmap = { 18 | LOG_BASE_BOOT: [], 19 | LOG_BASE_MSG: [], 20 | LOG_BASE_DEBUG: [], 21 | LOG_BASE_ERROR: [], 22 | LOG_FTP_LOGIN_ATTEMPT: [handleFTPLogin], 23 | LOG_HTTP_GET: [], 24 | LOG_HTTP_POST_LOGIN_ATTEMPT: [handleHTTPLogin], 25 | LOG_SSH_NEW_CONNECTION: [], 26 | LOG_SSH_REMOTE_VERSION_SENT: [], 27 | LOG_SSH_LOGIN_ATTEMPT: [handleSSHLogin], 28 | LOG_SMB_FILE_OPEN: [handleSMBFileOpen], 29 | LOG_PORT_SYN: [handleSYNPacketHostPortscanDetector, handleSYNPacketNetworkPortscanDetector], 30 | LOG_TELNET_LOGIN_ATTEMPT: [handleTelnetLogin], 31 | LOG_HTTPPROXY_LOGIN_ATTEMPT: [handleHTTPProxy], 32 | LOG_MYSQL_LOGIN_ATTEMPT: [handleMySQL], 33 | LOG_MSSQL_LOGIN_SQLAUTH: [handleMSSQL], 34 | LOG_MSSQL_LOGIN_WINAUTH: [handleMSSQL], 35 | LOG_TFTP: [handleTFTP], 36 | LOG_NTP_MONLIST: [handleNTP], 37 | LOG_VNC: [handleVNC], 38 | LOG_SNMP_CMD: [handleSNMP], 39 | LOG_RDP: [handleRDP], 40 | LOG_SIP_REQUEST: [handleSIP] 41 | } 42 | 43 | 44 | def process_device_report(data=None): 45 | logtype = data['logtype'] 46 | if logtype not in logmap: 47 | logger.error('No handler for type %d ' % (logtype)) 48 | return 49 | 50 | for handler in logmap[logtype]: 51 | handler(data=data) 52 | -------------------------------------------------------------------------------- /opencanary_correlator/handlers.py: -------------------------------------------------------------------------------- 1 | from opencanary_correlator.common.redismanager import redis, KEY_DEVICE, KEY_TRACK_HOST_PORT_SCAN, KEY_TRACK_NETWORK_PORT_SCAN 2 | from opencanary_correlator.common.utils import current_time_offset 3 | from opencanary_correlator.common.incidents import IncidentFactory 4 | from opencanary_correlator.common.logs import logger 5 | import opencanary_correlator.common.config as c 6 | 7 | def defaultHandler(incidentName): 8 | def handler(data=None): 9 | IncidentFactory.create_incident(incidentName, data=data) 10 | return handler 11 | 12 | def handleHTTPLogin(data=None): 13 | IncidentFactory.create_incident('http.login_attempt', data=data) 14 | 15 | def handleSSHLogin(data=None): 16 | IncidentFactory.create_incident('ssh.login_attempt', data=data) 17 | 18 | def handleSMBFileOpen(data=None): 19 | IncidentFactory.create_incident('smb.file_open', data=data) 20 | 21 | handleFTPLogin = defaultHandler('ftp.login_attempt') 22 | handleTelnetLogin = defaultHandler('telnet.login_attempt') 23 | handleHTTPProxy = defaultHandler('httpproxy.login_attempt') 24 | handleMySQL = defaultHandler('mysql.login_attempt') 25 | handleMSSQL = defaultHandler('mssql.login_attempt') 26 | handleTFTP = defaultHandler('tftp.action') 27 | handleNTP = defaultHandler('ntp.monlist') 28 | handleVNC = defaultHandler('vnc.login_attempt') 29 | handleSNMP = defaultHandler('snmp.cmd') 30 | handleRDP = defaultHandler('rdp.login_attempt') 31 | handleSIP = defaultHandler('sip.login_attempt') 32 | 33 | 34 | def handleSYNPacketHostPortscanDetector(data=None): 35 | """ 36 | Creates an incident if a single canary receives SYN packets to more than 10 different 37 | ports in less than 50 seconds. 38 | """ 39 | try: 40 | host_scan_key = KEY_TRACK_HOST_PORT_SCAN + data['src_host'] + ':' + data['dst_host'] 41 | new_set = False 42 | if not redis.exists(host_scan_key): 43 | new_set = True 44 | if redis.sadd(host_scan_key, data['dst_port']): 45 | if new_set: 46 | redis.expire(host_scan_key, c.config.getVal('portscan.monitor_period', default=50)) 47 | 48 | if redis.scard(host_scan_key) >= c.config.getVal('portscan.packet_threshold', default=5): 49 | data['logdata'] = list(redis.smembers(host_scan_key)) 50 | IncidentFactory.create_incident('scans.host_portscan', data=data) 51 | redis.delete(host_scan_key) 52 | except Exception as e: 53 | import traceback 54 | logger.critical(traceback.format_exc()) 55 | 56 | def handleSYNPacketNetworkPortscanDetector(data=None): 57 | """ 58 | Creates an incident if two or more canaries report a syn packet to the same dst port, 59 | from the same host, in an hour or less. 60 | """ 61 | 62 | try: 63 | network_scan_target_key = KEY_TRACK_NETWORK_PORT_SCAN + data['src_host'] + ':' + data['dst_port'] + ':targets' 64 | try: 65 | dst_host = data['reported_dst_host'] 66 | except KeyError: 67 | dst_host = data['dst_host'] 68 | 69 | if not redis.sismember(network_scan_target_key, dst_host): 70 | redis.sadd(network_scan_target_key, dst_host) 71 | redis.expire(network_scan_target_key, c.config.getVal('networkscan.monitor_period', default=3600)) 72 | 73 | pckt_count = redis.scard(network_scan_target_key) 74 | 75 | if pckt_count >= c.config.getVal('networkscan.packet_threshold', default=2): 76 | data['logdata'] = list(redis.smembers(network_scan_target_key)) 77 | IncidentFactory.create_incident('scans.network_portscan', data=data) 78 | except Exception as e: 79 | import traceback 80 | logger.critical(traceback.format_exc()) 81 | -------------------------------------------------------------------------------- /opencanary_correlator/opencanary_correlator.conf: -------------------------------------------------------------------------------- 1 | { 2 | "twilio.auth_token": "", 3 | "console.sms_notification_enable": true, 4 | "console.sms_notification_numbers": [], 5 | "console.email_notification_enable": true, 6 | "console.email_notification_address": [], 7 | "twilio.from_number": "", 8 | "console.mandrill_key": "", 9 | "scans.network_portscan_horizon": 1000, 10 | "twilio.sid": "" 11 | } 12 | -------------------------------------------------------------------------------- /opencanary_correlator/receiver.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import simplejson as json 3 | from pkg_resources import resource_filename 4 | from twisted.protocols.basic import LineReceiver 5 | from twisted.python import usage 6 | from twisted.internet import protocol 7 | from twisted.internet import reactor 8 | from opencanary_correlator.dispatcher import process_device_report 9 | 10 | class CorrelatorOptions(usage.Options): 11 | optParameters = [['ip', 'i', '127.0.0.1', 'IP Address to listen on'], 12 | ['config', 'c', None, 'Config file']] 13 | 14 | def postOptions(self): 15 | if self.opts['config'] is None: 16 | conf = resource_filename(__name__, 'opencanary_correlator.conf') 17 | self.opts['config'] = conf 18 | print >> sys.stderr, "Warning: no config file specified. Using the template config (which does not have any alerting configured):\n%s\n" % conf 19 | 20 | class CorrelatorReceiver(LineReceiver): 21 | delimiter = "\n" 22 | MAX_LENGTH = 16384 23 | 24 | def lineReceived(self, line): 25 | try: 26 | event = json.loads(line) 27 | except Exception as e: 28 | print >> sys.stderr, "Failed to decode line" 29 | print e 30 | return 31 | 32 | process_device_report(event) 33 | 34 | class CorrelatorFactory(protocol.Factory): 35 | protocol = CorrelatorReceiver 36 | 37 | def main(): 38 | from twisted.python import log 39 | import common.config 40 | 41 | log.logfile=sys.stderr 42 | try: 43 | config = CorrelatorOptions() 44 | config.parseOptions() 45 | except usage.UsageError, ue: 46 | print >> sys.stderr, '%s:' % sys.argv[0], ue 47 | print config 48 | sys.exit(1) 49 | 50 | common.config.config = common.config.Config(config.opts['config']) 51 | 52 | f = CorrelatorFactory() 53 | reactor.listenTCP(1514, f, interface=config.opts['ip']) 54 | reactor.run() 55 | 56 | if __name__ == "__main__": 57 | main() 58 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -e . 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | from opencanary_correlator import __version__ 4 | 5 | setup( 6 | name='opencanary-correlator', 7 | version=__version__, 8 | url='http://www.thinkst.com/', 9 | author='Thinkst Applied Research', 10 | author_email='info@thinkst.com', 11 | description='opencanary correlator', 12 | install_requires=[ 13 | "simplejson", 14 | "cffi==1.1.2", 15 | "docopt==0.4.0", 16 | "httplib2==0.9.1", 17 | "mandrill==1.0.57", 18 | "pycparser==2.14", 19 | "PyNaCl==0.3.0", 20 | "pytz==2015.4", 21 | "redis==2.10.3", 22 | "requests==2.7.0", 23 | "six==1.9.0", 24 | "twilio==4.4.0", 25 | "Twisted==15.2.1", 26 | "wheel==0.24.0", 27 | "zope.interface==4.1.2" 28 | ], 29 | setup_requires=[ 30 | 'setuptools_git' 31 | ], 32 | license='BSD', 33 | packages = find_packages(exclude="test"), 34 | scripts=['bin/opencanary-correlator'], 35 | platforms='any', 36 | include_package_data=True, 37 | ) 38 | --------------------------------------------------------------------------------