├── .gitignore ├── README.md ├── daemon ├── Daemon │ ├── Daemon.py │ ├── DaemonIPC.py │ ├── DaemonState.py │ ├── IPCProtocol.py │ ├── StatsMonitor.py │ ├── Transports.py │ └── __init__.py ├── LICENSE ├── README.md ├── bindings │ ├── c │ │ ├── Makefile.am │ │ ├── README.md │ │ ├── configure.ac │ │ └── src │ │ │ ├── Makefile.am │ │ │ ├── libvigilant.pyx │ │ │ ├── test.c │ │ │ ├── vigilant.c │ │ │ └── vigilant.h │ └── nodejs │ │ ├── README.md │ │ ├── binding.gyp │ │ ├── test.js │ │ └── vigilant.cc ├── daemon.py ├── etc │ └── vigilant │ │ └── vigilant.json ├── jenkins.sh ├── requirements.txt └── setup.py ├── daemon2 ├── LICENSE ├── Makefile.am ├── README.md ├── config │ └── autogen.sh ├── configure.ac ├── etc │ └── vigilant │ │ └── vigilant.json ├── getdeps.sh ├── jenkins.sh └── src │ ├── Makefile.am │ ├── async_server.cc │ ├── daemon.cc │ ├── ipc_protocol.cc │ ├── stats_monitor.cc │ ├── transport.cc │ ├── udp_transport.cc │ └── vigilant.h ├── datastore ├── .bowerrc ├── README.md ├── bower.json ├── etc │ └── vigilant │ │ └── vigilant.json ├── jenkins.sh ├── jetty.sh ├── libexec │ └── jetty-runner-9.2.2.v20140723.jar ├── project │ ├── build.properties │ ├── build.scala │ └── plugins.sbt ├── sbt └── src │ ├── main │ ├── resources │ │ └── logback.xml │ ├── scala │ │ ├── ScalatraBootstrap.scala │ │ └── vigilant │ │ │ └── datastore │ │ │ ├── caches │ │ │ ├── HostCache.scala │ │ │ ├── LogCache.scala │ │ │ ├── ProcCache.scala │ │ │ └── buffer │ │ │ │ ├── Cache.scala │ │ │ │ └── RingBuffer.scala │ │ │ ├── controllers │ │ │ ├── StatsController.scala │ │ │ ├── VigilantStack.scala │ │ │ └── swagger │ │ │ │ └── StatsSwagger.scala │ │ │ ├── models │ │ │ ├── MessageModels.scala │ │ │ ├── NotificationModels.scala │ │ │ ├── Payloads.scala │ │ │ └── StatType.scala │ │ │ ├── monitors │ │ │ └── StatsMonitor │ │ │ │ └── AtmosphereStatsMonitor.scala │ │ │ └── services │ │ │ ├── aggregator │ │ │ ├── AggregatorService.scala │ │ │ ├── ProtocolFactory.scala │ │ │ └── StatsObserver.scala │ │ │ 
├── configuration │ │ │ └── ConfigurationService.scala │ │ │ ├── database │ │ │ └── DatabaseService.scala │ │ │ ├── notifications │ │ │ ├── NotificationFactory.scala │ │ │ ├── email │ │ │ │ └── EmailClient.scala │ │ │ └── twillo │ │ │ │ └── TwilloClient.scala │ │ │ ├── transports │ │ │ ├── Transport.scala │ │ │ ├── TransportService.scala │ │ │ └── udp │ │ │ │ └── UDPTransport.scala │ │ │ └── triggers │ │ │ ├── Trigger.scala │ │ │ ├── TriggersService.scala │ │ │ ├── host │ │ │ ├── HostTrigger.scala │ │ │ └── HostUsageThresholdTrigger.scala │ │ │ ├── log │ │ │ ├── LogRegexTrigger.scala │ │ │ └── LogTrigger.scala │ │ │ └── proc │ │ │ ├── ProcTrigger.scala │ │ │ └── ProcUsageThresholdTrigger.scala │ └── webapp │ │ └── WEB-INF │ │ └── web.xml │ └── test │ └── scala │ └── vigilant │ └── datastore │ └── caches │ └── RingBufferTest.scala ├── front-end ├── .bowerrc ├── Dashboard │ ├── Resourses.py │ ├── Routes.py │ ├── __init__.py │ └── www │ │ ├── assets │ │ ├── peak.png │ │ └── realtime.png │ │ ├── css │ │ ├── app.css │ │ ├── fringe.css.map │ │ └── vigilant.css │ │ ├── fonts │ │ ├── glyphicons │ │ │ ├── flat-ui-pro-icons-regular.eot │ │ │ ├── flat-ui-pro-icons-regular.svg │ │ │ ├── flat-ui-pro-icons-regular.ttf │ │ │ ├── flat-ui-pro-icons-regular.woff │ │ │ └── selection.json │ │ └── lato │ │ │ ├── lato-black.eot │ │ │ ├── lato-black.svg │ │ │ ├── lato-black.ttf │ │ │ ├── lato-black.woff │ │ │ ├── lato-bold.eot │ │ │ ├── lato-bold.svg │ │ │ ├── lato-bold.ttf │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bolditalic.eot │ │ │ ├── lato-bolditalic.svg │ │ │ ├── lato-bolditalic.ttf │ │ │ ├── lato-bolditalic.woff │ │ │ ├── lato-italic.eot │ │ │ ├── lato-italic.svg │ │ │ ├── lato-italic.ttf │ │ │ ├── lato-italic.woff │ │ │ ├── lato-light.eot │ │ │ ├── lato-light.svg │ │ │ ├── lato-light.ttf │ │ │ ├── lato-light.woff │ │ │ ├── lato-regular.eot │ │ │ ├── lato-regular.svg │ │ │ ├── lato-regular.ttf │ │ │ └── lato-regular.woff │ │ ├── index.html │ │ └── js │ │ ├── dashboard │ │ ├── 
dashboard.html │ │ ├── dashboard.js │ │ └── host │ │ │ ├── host.html │ │ │ ├── host.js │ │ │ └── proc │ │ │ ├── proc.html │ │ │ └── proc.js │ │ ├── require-config.js │ │ ├── util │ │ └── graphs.js │ │ └── vigilant.js ├── LICENSE ├── README.md ├── bower.json ├── dashboard.py ├── etc │ └── nginx │ │ ├── nginx.cfg │ │ └── uwsgi_params ├── jenkins.sh ├── nginx.sh ├── requirements.txt ├── setup.py └── vigilant-front-end.iml └── screenshots ├── overview.png ├── real-time.png └── swagger.png /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | .idea 4 | build/ 5 | 6 | venv 7 | 8 | front-end/Dashboard/www/js/lib/ 9 | 10 | ### Python template 11 | # Byte-compiled / optimized / DLL files 12 | __pycache__/ 13 | *.py[cod] 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Unit test / coverage reports 19 | htmlcov/ 20 | .tox/ 21 | .coverage 22 | .cache 23 | nosetests.xml 24 | coverage.xml 25 | 26 | # Translations 27 | *.mo 28 | *.pot 29 | 30 | # Django stuff: 31 | *.log 32 | 33 | # Sphinx documentation 34 | docs/_build/ 35 | 36 | # PyBuilder 37 | target/ 38 | 39 | # SCALA 40 | *.class 41 | *.log 42 | 43 | # sbt specific 44 | .cache 45 | .history 46 | .lib/ 47 | dist/* 48 | target/ 49 | lib_managed/ 50 | src_managed/ 51 | project/boot/ 52 | project/plugins/project/ 53 | 54 | # Scala-IDE specific 55 | .scala_dependencies 56 | .worksheet 57 | # Compiled Object files 58 | *.slo 59 | *.lo 60 | *.o 61 | *.obj 62 | 63 | # Precompiled Headers 64 | *.gch 65 | *.pch 66 | 67 | # Compiled Dynamic libraries 68 | *.so 69 | *.dylib 70 | *.dll 71 | 72 | # Fortran module files 73 | *.mod 74 | 75 | # Compiled Static libraries 76 | *.lai 77 | *.la 78 | *.a 79 | *.lib 80 | 81 | # Executables 82 | *.exe 83 | *.out 84 | *.app 85 | 86 | # misc 87 | _deps 88 | Makefile 89 | Makefile.in 90 | aclocal.m4 91 | autom4te.cache/ 92 | config.log 93 | config.status 94 | config/* 95 | configure 96 | libtool 97 | src/.deps/ 98 | src/Makefile 99 | 
src/Makefile.in 100 | src/config.h 101 | src/config.h.in 102 | src/config.h.in~ 103 | src/daemon 104 | src/stamp-h1 105 | 106 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Vigilant 2 | [![Build Status](http://jenkins.vigilantlabs.co.uk/buildStatus/icon?job=Vigilant)](http://jenkins.vigilantlabs.co.uk/job/Vigilant/) 3 | [![MIT License](http://b.repl.ca/v1/License-MIT-red.png)](LICENSE) 4 | 5 | Vigilant provides application driven stats monitoring. When you integrate your application 6 | with the Daemon bindings/library every time your application starts a daemon is created and 7 | all other processes will attach to this daemon sending watch/log/alert messages which in turn 8 | are delivered to a datastore. Because your own applications know their pid (os.getpid()) you no 9 | longer need to manage your monitoring with runner scripts. 10 | 11 | ## Screenshots 12 | 13 | Dynamic cluster model visualized using vis.js. 14 | ![Overview](/screenshots/overview.png "Overview") 15 | 16 | Real-time graphs over the web-socket api using vis.js. 17 | ![Real-Time](/screenshots/real-time.png "Real-Time") 18 | 19 | ## Tutorial 20 | 21 | Detailed Setup tutorial can be found here https://github.com/redbrain/vigilant/wiki/QuickSetup 22 | 23 | ## Deps 24 | 25 | You will require Open/Oracle JDK >= 1.7, Python >= 3.4, Bower. 26 | 27 | Mac OSX 28 | ```bash 29 | # download oracle jdk. 30 | $ brew install python3 npm 31 | $ npm install -g bower 32 | ``` 33 | 34 | Ubuntu 35 | 36 | ```bash 37 | $ sudo apt-get install default-jdk python3.4 nodejs 38 | $ npm install -g bower 39 | ``` 40 | 41 | ### Setup Daemon Agent 42 | 43 | Currently daemon2 is a WIP and not ready but Daemon is proof of concept. 
It requires python >= 3.4 43 | 44 | ```bash 45 | $ cd daemon 46 | $ sudo pip3 install -r requirements.txt 47 | $ ./daemon.py -c etc/vigilant/vigilant.json --start 48 | ``` 49 | 50 | Editing the vigilant.json declares where data is sent and the protocol. Currently only udp is supported by the datastore. 51 | And I aim to keep using UDP as the main protocol. And use acks for alerts/triggers from code to ensure they are sent. 52 | 53 | Using --stop or --status will stop the daemon or show status of what the agent is watching and sending the data to respectively. 54 | 55 | Watch another process: 56 | 57 | ```bash 58 | $ ps aux | grep -i spotify | grep -v grep 59 | redbrain 54243 0.0 0.7 3465888 59412 ?? S 4:32pm 4:06.33 /Applications/Spotify.app/Contents/MacOS/Spotify 60 | 61 | $ ./daemon.py -c etc/vigilant/vigilant.json --watch spotify:54243 62 | ``` 63 | 64 | If the process dies vigilant will stop watching the process automatically. 65 | 66 | ### Setup Datastore 67 | 68 | Once an agent is running the data needs to be received. The datastore will accept all the data and provide functionality over it. 69 | Written in Scala requires jdk >= 1.7. 70 | 71 | ```bash 72 | $ cd datastore 73 | $ bower install 74 | $ cd etc/vigilant 75 | $ export VIGILANT_HOME=`pwd` 76 | $ cd - 77 | $ ./sbt 78 | > compile 79 | > test 80 | > container:start 81 | ``` 82 | 83 | Currently deploying the .war onto jetty or tomcat runner the websocket api doesn't work. Editing the vigilant configuration: 84 | 85 | ```javascript 86 | { 87 | "transport": { 88 | "type": "udp", 89 | "host": "localhost", 90 | "port": 8080 91 | }, 92 | 93 | "triggers": { 94 | "notification_threshold": 120 // How often should notifications be sent if data continues to activate triggers. To stop notification spam. 
96 | }, 97 | 98 | "database": { 99 | "jdbc": "jdbc:h2:./vigilant" // FIXME 100 | }, 101 | 102 | "twillo": { 103 | "account_sid": "", // twillo details to enable twillo notifications 104 | "auth_token": "", 105 | "from": "" 106 | }, 107 | 108 | "email": { 109 | "smtp_server": "localhost", // email details doesn't handle tls/ssl like gmail.com 110 | "from": "someone@email.com" 111 | } 112 | } 113 | ``` 114 | 115 | View swagger api documentation: http://localhost:8080/api and use /api-doc as the location to the documentation. 116 | 117 | ![Swagger](/screenshots/swagger.png "Swagger") 118 | 119 | ### Front-end 120 | 121 | The front-end webapp is a separate project abstracting datastores. 122 | 123 | ```bash 124 | $ sudo pip3 install -r requirements.txt 125 | $ bower install 126 | $ ./dashboard.py 127 | ``` 128 | 129 | Go to http://localhost:5000/#/dashboard?store=http://localhost:8080 130 | 131 | ## Daemon 132 | 133 | The proof-of-concept daemon, this is being re-written in C/C++ to increase portability and 134 | simplicity of language bindings. Currently because this is written in Python3 Node bindings require 135 | python3 to be installed which isn't very elegant. 136 | 137 | ## Daemon 2 138 | 139 | This is a WIP new Daemon written in C/C++ to increase the portability and simplicity of adding simple dependencies. 140 | 141 | ## Datastore 142 | 143 | The Scala data-store listens for the real-time data and in turn provides a rest-api for working with 144 | this. The api is fully documented at http://localhost:8080/api using swagger. The web-socket api isn't 145 | supported by swagger but it is there. 146 | 147 | ## Front-end 148 | 149 | This is the current front-end it needs more work but it's working quite well for now. It simply uses the 150 | data-store rest-api to access the monitoring data. 
151 | -------------------------------------------------------------------------------- /daemon/Daemon/Daemon.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import asyncio 4 | import signal 5 | import psutil 6 | import traceback 7 | import syslog 8 | 9 | from . import DaemonIPC 10 | from . import DaemonState 11 | from . import IPCProtocol 12 | from . import StatsMonitor 13 | 14 | from urllib.parse import quote as urlencode 15 | 16 | 17 | class StatsDaemon: 18 | def __init__(self, key, transport, sigpid, pid, sock): 19 | self._transport = transport 20 | self._sock = sock 21 | self._sigpid = sigpid 22 | self._loop = None 23 | self._key = urlencode(key) 24 | self._watching = {} 25 | if DaemonIPC.is_pid_alive(DaemonIPC.get_pid_from_lock(pid)): 26 | raise Exception('Lock [%s] pid is already alive' % pid) 27 | 28 | @property 29 | def transport(self): 30 | return self._transport 31 | 32 | @property 33 | def host(self): 34 | return self._key 35 | 36 | @property 37 | def status(self): 38 | return { 39 | 'host': self.host, 40 | 'transport': self.transport.status(), 41 | 'watching': self._watching 42 | } 43 | 44 | def log(self, mess, proc=DaemonState.STATS_DAEMON_APP): 45 | syslog.syslog(syslog.LOG_ALERT, "%s: %s" % (proc, mess)) 46 | message = {'key': proc, 47 | 'type': 'log', 48 | 'host': self.host, 49 | 'payload': {'message': mess} 50 | } 51 | self.transport.post_message_on_transport(message) 52 | 53 | def watch_pid(self, pid, key): 54 | self._watching[urlencode(key)] = pid 55 | self.log('Watching pid [%i] for key [%s]' % (pid, key)) 56 | 57 | @asyncio.coroutine 58 | def signal_parent_ready(self): 59 | os.kill(self._sigpid, signal.SIGUSR1) 60 | 61 | def stop_event_loop(self, *args): 62 | self._loop.stop() 63 | 64 | def get_stats_for_pid(self, key, pid): 65 | try: 66 | return StatsMonitor.get_stats_for_pid(pid) 67 | except psutil.NoSuchProcess: 68 | self.log('Process [%i] key [%s] has stopped' % (pid, 
key)) 69 | del self._watching[key] 70 | 71 | @asyncio.coroutine 72 | def post_host_stats(self): 73 | while True: 74 | try: 75 | message = StatsMonitor.get_host_stats(self.host) 76 | self.transport.post_message_on_transport(message) 77 | except: 78 | self.log(str(sys.exc_info())) 79 | self.log(str(traceback.format_exc())) 80 | finally: 81 | yield from asyncio.sleep(3) 82 | 83 | @asyncio.coroutine 84 | def post_pid_stats(self): 85 | while True: 86 | try: 87 | for key in list(self._watching.keys()): 88 | payload = self.get_stats_for_pid(key, int(self._watching[key])) 89 | if payload: 90 | message = {'key': key, 91 | 'host': self.host, 92 | 'type': 'pid', 93 | 'payload': payload} 94 | self.transport.post_message_on_transport(message) 95 | except: 96 | self.log(str(sys.exc_info())) 97 | self.log(str(traceback.format_exc())) 98 | finally: 99 | yield from asyncio.sleep(3) 100 | 101 | def run_event_loop(self): 102 | self.log('Daemon ready...') 103 | try: 104 | self._loop.run_forever() 105 | finally: 106 | self._loop.close() 107 | 108 | def start_transport(self): 109 | self._transport.init_transport() 110 | DaemonState.STATS_DAEMON_TRANSPORT = self._transport 111 | 112 | def create_io_loop(self): 113 | self._loop = asyncio.get_event_loop() 114 | self._loop.add_signal_handler(signal.SIGTERM, self.stop_event_loop) 115 | 116 | def unlink_previous_socket(self): 117 | try: 118 | os.unlink(self._sock) 119 | except: 120 | pass 121 | 122 | def create_ipc_server(self): 123 | self.unlink_previous_socket() 124 | server = self._loop.create_unix_server(IPCProtocol.DaemonProtocol, path=self._sock) 125 | asyncio.async(server, loop=self._loop) 126 | asyncio.async(self.signal_parent_ready(), loop=self._loop) 127 | 128 | def create_monitor_routines(self): 129 | asyncio.async(self.post_host_stats(), loop=self._loop) 130 | asyncio.async(self.post_pid_stats(), loop=self._loop) 131 | 132 | def start(self): 133 | self.start_transport() 134 | self.create_io_loop() 135 | self.create_ipc_server() 
136 | self.create_monitor_routines() 137 | self.watch_pid(os.getpid(), DaemonState.STATS_DAEMON_APP) 138 | self.run_event_loop() 139 | -------------------------------------------------------------------------------- /daemon/Daemon/DaemonIPC.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import time 5 | import socket 6 | import signal 7 | import select 8 | import daemonize 9 | import traceback 10 | 11 | import syslog 12 | import logging 13 | import logging.handlers 14 | 15 | from platform import platform 16 | from . import DaemonState 17 | 18 | SYSLOG_DAEMON = '/var/run/syslog' if 'Darwin' in platform() else '/dev/log' 19 | 20 | 21 | def is_pid_alive(pid: int) -> bool: 22 | """ 23 | :param pid: the pid to check 24 | :return: returns true or false if pid is alive 25 | """ 26 | if pid <= 0: 27 | return False 28 | try: 29 | os.kill(pid, 0) 30 | except OSError as err: 31 | import errno 32 | if err.errno == errno.ESRCH: 33 | return False 34 | return True 35 | 36 | 37 | def get_pid_from_lock(lock: str) -> bool: 38 | """ 39 | :param lock: the lock file to open and read for the pid 40 | :return: returns the pid inside the specified lock file else -1 41 | """ 42 | try: 43 | pid = -1 44 | with open(lock, 'r') as fd: 45 | pid = fd.read() 46 | return int(pid) 47 | except: 48 | return pid 49 | 50 | 51 | class ClientConnection: 52 | def __init__(self, pid, sock): 53 | self._pid = pid 54 | self._sock = sock 55 | self._connect() 56 | 57 | def _connect(self): 58 | if is_pid_alive(get_pid_from_lock(self._pid)) is False: 59 | raise Exception('Daemon process not alive [%s]' % self._pid) 60 | self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 61 | try: 62 | self._socket.connect(self._sock) 63 | resp = self.post_status_request() 64 | if resp is None: 65 | raise Exception('Invalid response from server') 66 | except: 67 | raise Exception('Unable to connect to daemon [%s]' % self._sock) 68 | 
69 | def _wait_for_response(self, timeout=10) -> dict: 70 | """ 71 | :param timeout: length of timeout in seconds to wait for response 72 | :return: dictionary json response or Exception 73 | """ 74 | inputs = [self._socket] 75 | reads, _, __ = select.select(inputs, [], [], timeout) 76 | for i in reads: 77 | data = i.recv(1024) 78 | return json.loads(data.decode("utf-8")) 79 | raise Exception('Failed waiting [%i] seconds for response' % timeout) 80 | 81 | def close(self): 82 | self._socket.close() 83 | 84 | def post_watch_pid(self, key: str, pid: int): 85 | """ 86 | Send the daemon a message telling it to monitor the specified process 87 | 88 | :param key: string key to tie a process to could be process name or other 89 | :param pid: the pid of the process to monitor 90 | """ 91 | message = {'type': 'watch', 'key': key, 'pid': pid} 92 | self._socket.sendall(json.dumps(message).encode('utf-8')) 93 | 94 | def post_stop_watch_pid(self, pid: int): 95 | """ 96 | Send the daemon a message telling it to stop monitoring the specified process 97 | 98 | :param pid: the pid to stop watching 99 | """ 100 | message = {'type': 'stopWatchPid', 'pid': pid} 101 | self._socket.sendall(json.dumps(message).encode('utf-8')) 102 | 103 | def post_stop_watch_key(self, key: str): 104 | """ 105 | Send the daemon a message telling to stop monitoring the process with the specified key 106 | 107 | :param key: the key to stop watching 108 | """ 109 | message = {'type': 'stopWatchKey', 'key': key} 110 | self._socket.sendall(json.dumps(message).encode('utf-8')) 111 | 112 | def post_log_message_for_key(self, message: str, proc=DaemonState.STATS_DAEMON_APP): 113 | """ 114 | Send the daemon a log message to handle against a proc key 115 | 116 | :param message: the string log message 117 | """ 118 | message = {'type': 'postLog', 'proc': proc, 'message': message} 119 | self._socket.sendall(json.dumps(message).encode('utf-8')) 120 | 121 | def post_status_request(self) -> dict: 122 | """ 123 | Send the 
daemon a message to return the status of it 124 | :returns: returns json dict response or exception 125 | """ 126 | self._socket.sendall(json.dumps({'type': 'status'}).encode('utf-8')) 127 | resp = self._wait_for_response() 128 | if resp is None: 129 | raise Exception("Failed waiting for response for status") 130 | return json.dumps(resp, indent=4) 131 | 132 | def post_stop_daemon(self): 133 | """ 134 | Send the daemon a message to stop running 135 | """ 136 | message = {'type': 'stop'} 137 | self._socket.sendall(json.dumps(message).encode('utf-8')) 138 | 139 | 140 | def _daemon_ready_handler(*args): 141 | """ 142 | Wrapper to handle daemon ready signal 143 | """ 144 | DaemonState.STATS_DAEMON_READY = True 145 | 146 | 147 | def _daemonize_daemon(): 148 | """ 149 | Wrapper to handle the daemonization 150 | """ 151 | try: 152 | my_logger = logging.getLogger('root') 153 | my_logger.setLevel(logging.INFO) 154 | handler = logging.handlers.SysLogHandler(address=SYSLOG_DAEMON) 155 | my_logger.addHandler(handler) 156 | except: 157 | syslog.syslog(syslog.LOG_ALERT, 'Unable to setup logging [%s]' % str(sys.exc_info()[1])) 158 | syslog.syslog(syslog.LOG_ALERT, '%s' % str(traceback.format_exc())) 159 | finally: 160 | DaemonState.STATS_DAEMON_SERVER.start() 161 | 162 | 163 | def fork_daemon(daemon, timeout=5, lock='/tmp/vigilant.pid'): 164 | """ 165 | Fork the stats Daemon as a real system daemon to run in the background 166 | 167 | :param daemon: The StatServerDaemon from StatsAsyncCore 168 | :param timeout: optional keyword argument for the timeout to wait on daemon ready signal 169 | :param lock: optional specify the location of the daemon lock file 170 | :raise Exception: Raises an exception if the timeout elapses before daemon ready 171 | """ 172 | DaemonState.STATS_DAEMON_SERVER = daemon 173 | 174 | signal.signal(signal.SIGUSR1, _daemon_ready_handler) 175 | 176 | pid = os.fork() 177 | if pid == 0: 178 | daemon = daemonize.Daemonize(app=DaemonState.STATS_DAEMON_APP, 179 | 
pid=lock, action=_daemonize_daemon) 180 | daemon.start() 181 | sys.exit(0) 182 | 183 | for i in range(timeout): 184 | if DaemonState.STATS_DAEMON_READY: 185 | break 186 | time.sleep(1) 187 | if DaemonState.STATS_DAEMON_READY is False: 188 | raise Exception('Timeout of [%i] seconds, failed waiting for daemon to come alive' % timeout) 189 | -------------------------------------------------------------------------------- /daemon/Daemon/DaemonState.py: -------------------------------------------------------------------------------- 1 | STATS_DAEMON_APP = 'vigilant-daemon' 2 | STATS_DAEMON_SERVER = None 3 | STATS_DAEMON_READY = False 4 | STATS_DAEMON_VERSION = 0.2 5 | -------------------------------------------------------------------------------- /daemon/Daemon/IPCProtocol.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import signal 4 | import asyncio 5 | import logging 6 | 7 | from . import DaemonState 8 | 9 | 10 | class DaemonProtocol(asyncio.Protocol): 11 | _transport = None 12 | 13 | def connection_made(self, transport): 14 | self._transport = transport 15 | 16 | @staticmethod 17 | def _stop_stats_daemon(): 18 | os.kill(os.getpid(), signal.SIGTERM) 19 | 20 | @staticmethod 21 | def _watch_pid_for_key(pid, key): 22 | DaemonState.STATS_DAEMON_SERVER.watch_pid(pid, key) 23 | 24 | def _stop_watch_key_for_pid(self, pid): 25 | pass 26 | 27 | def _stop_watching_pid_for_key(self, key): 28 | pass 29 | 30 | def _post_log_message(self, payload): 31 | message = {'type': payload['proc'], 32 | 'key': 'internal', 33 | 'host': DaemonState.STATS_DAEMON_SERVER.host, 34 | 'payload': {'message': payload['message']} 35 | } 36 | transport = DaemonState.STATS_DAEMON_SERVER.transport 37 | transport.post_message_on_transport(message) 38 | 39 | def _protocol_handler(self, message): 40 | message_type = message['type'] 41 | logging.info("Message type [%s]" % message_type) 42 | if message_type == 'stop': 43 | 
self._stop_stats_daemon() 44 | elif message_type == 'status': 45 | message = DaemonState.STATS_DAEMON_SERVER.status 46 | self._transport.write(json.dumps(message).encode('utf-8')) 47 | elif message_type == 'watch': 48 | self._watch_pid_for_key(message['pid'], message['key']) 49 | elif message_type == 'stopWatchPid': 50 | self._stop_watch_key_for_pid([message['pid']]) 51 | elif message_type == 'stopWatchKey': 52 | self._stop_watching_pid_for_key(message['key']) 53 | elif message_type == 'postLog': 54 | self._post_log_message(message) 55 | else: 56 | logging.error("Unhandled message type [%s]" % message_type) 57 | 58 | def data_received(self, data): 59 | try: 60 | message = json.loads(data.decode("utf-8")) 61 | self._protocol_handler(message) 62 | except: 63 | logging.error("Failed message dispatch") 64 | 65 | def connection_lost(self, exc): 66 | self._transport.close() 67 | -------------------------------------------------------------------------------- /daemon/Daemon/StatsMonitor.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import datetime 3 | import platform 4 | 5 | 6 | def get_host_stats(key): 7 | return { 8 | 'key': key, 9 | 'type': 'host', 10 | 'payload': { 11 | 'platform': platform.platform(), 12 | 'hostname': platform.node(), 13 | 'machine': platform.machine(), 14 | 'version': platform.version(), 15 | 'cores': psutil.cpu_count(), 16 | 'cpu_stats': psutil.cpu_percent(interval=1, percpu=True), 17 | 'usage': psutil.cpu_times_percent().user, 18 | 'memory_total': psutil.virtual_memory().total, 19 | 'memory_used': psutil.virtual_memory().used, 20 | 'disk_total': psutil.disk_usage('/').total, 21 | 'disk_free': psutil.disk_usage('/').used, 22 | 'timestamp': datetime.datetime.now().isoformat(), 23 | 'processes': len(psutil.pids()) 24 | } 25 | } 26 | 27 | 28 | def trim_string(string, max_length=128): 29 | buffer = "" 30 | length = len(string) 31 | for i in range(max_length): 32 | if i > length - 4: 33 | 
buffer += "..." 34 | break 35 | buffer += string[i] 36 | return buffer 37 | 38 | 39 | def _stringify_string_list(data): 40 | retval = "" 41 | for i in data: 42 | retval += str(i) 43 | return trim_string(retval) 44 | 45 | 46 | def get_stats_for_pid(pid): 47 | p = psutil.Process(pid) 48 | return { 49 | 'pid': pid, 50 | 'name': p.name(), 51 | 'path': p.exe(), 52 | 'cwd': p.cwd(), 53 | 'cmdline': p.cmdline(), 54 | 'status': p.status(), 55 | 'user': p.username(), 56 | 'threads': p.num_threads(), 57 | 'fds': p.num_fds(), 58 | 'files': _stringify_string_list(p.open_files()), 59 | 'usage': p.cpu_percent(interval=1), 60 | 'memory_percent': p.memory_percent(), 61 | 'connections': _stringify_string_list(p.connections()) 62 | } 63 | -------------------------------------------------------------------------------- /daemon/Daemon/Transports.py: -------------------------------------------------------------------------------- 1 | import json 2 | import socket 3 | import datetime 4 | 5 | 6 | def get_time_stamp(): 7 | return str(datetime.datetime.now()) 8 | 9 | 10 | class Transport: 11 | def post(self, message: bytes): 12 | pass 13 | 14 | def post_message_on_transport(self, message: dict): 15 | message['ts'] = get_time_stamp() 16 | self.post(json.dumps(message).encode('utf-8')) 17 | 18 | 19 | class UDPStatsTransport(Transport): 20 | def __init__(self, host='localhost', port=8080): 21 | self._host = host 22 | self._port = port 23 | self._sock = None 24 | 25 | def status(self): 26 | return 'UDP Transport %s:%i' % (self._host, self._port) 27 | 28 | def init_transport(self): 29 | self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 30 | self._sock.setblocking(True) 31 | 32 | def post(self, message: bytes): 33 | self._sock.sendto(message, (self._host, self._port)) 34 | 35 | 36 | class TCPStatsTransport(Transport): 37 | def __init__(self, host='localhost', port=8080): 38 | self._host = host 39 | self._port = port 40 | self._sock = None 41 | 42 | def status(self): 43 | return 'TCP 
Transport %s:%i' % (self._host, self._port) 44 | 45 | def init_transport(self): 46 | self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 47 | self._sock.connect((self._host, self._port)) 48 | 49 | def post(self, message: bytes, exception=None, retry=0): 50 | if retry > 2: 51 | raise exception 52 | try: 53 | self._sock.send(message) 54 | except Exception as e: 55 | self.init_transport() 56 | self.post(message, exception=e, retry=retry + 1) 57 | 58 | -------------------------------------------------------------------------------- /daemon/Daemon/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from . import Daemon 4 | from . import DaemonIPC 5 | 6 | 7 | def attach_to_daemon(pid='/tmp/vigilant.pid', sock='/tmp/vigilant.sock'): 8 | """ 9 | Attach to existing daemon. 10 | 11 | :param pid: lock file default /tmp/observant.pid 12 | :param sock: unix socket path default /tmp/observant.sock 13 | :return: returns the StatsCore.StatsDaemon.ClientDaemonConnection 14 | """ 15 | return DaemonIPC.ClientConnection(pid, sock) 16 | 17 | 18 | def create_daemon(transport, pid='/tmp/vigilant.pid', sock='/tmp/vigilant.sock'): 19 | """ 20 | Create and fork a new Stats Daemon. 21 | 22 | :param transport: The transport for the daemon to use 23 | :param pid: the pid lock file to use 24 | :param sock: the unix socket path to listen on 25 | """ 26 | from socket import gethostname 27 | daemon = Daemon.StatsDaemon(gethostname(), transport, os.getpid(), pid, sock) 28 | DaemonIPC.fork_daemon(daemon, lock=pid) 29 | 30 | 31 | def attach_or_create_daemon(transport, pid='/tmp/vigilant.pid', sock='/tmp/vigilant.sock'): 32 | """ 33 | Attach to the system daemon or create it. 
34 | 35 | :param transport: The transport object for the daemon to server 36 | :param pid: The pid lock file for the daemon 37 | :param sock: The socket path to communicate on 38 | :return: StatsCore.StatsDaemon.ClientDaemonConnection object or Exception 39 | """ 40 | if DaemonIPC.is_pid_alive(DaemonIPC.get_pid_from_lock(pid)): 41 | return attach_to_daemon(pid=pid, sock=sock) 42 | else: 43 | create_daemon(transport, pid=pid, sock=sock) 44 | return attach_to_daemon(pid=pid, sock=sock) 45 | -------------------------------------------------------------------------------- /daemon/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Philip Herron 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /daemon/README.md: -------------------------------------------------------------------------------- 1 | # Vigilant Daemon 2 | 3 | This is the user agent daemon required to aggregate stats on each host for n-applications 4 | 5 | ## Setup 6 | 7 | Stats Daemon (requires python3): 8 | 9 | ```python 10 | $ brew install python3 11 | $ pip3 install -r requirements.txt 12 | $ python3 setup.py install 13 | ``` 14 | 15 | Simple Daemon.py manager usage 16 | 17 | ```python 18 | # Start the Daemon 19 | $ ./daemon.py -c etc/vigilant/vigilant.json --start 20 | 21 | # Make sure it is running 22 | $ ./daemon.py -c etc/vigilant/vigilant.json --status 23 | { 24 | "transport": "UDP Transport localhost:8080", 25 | "watching": { 26 | "Philips-MacBook-Pro.local.StatsDaemon": 914 27 | }, 28 | "host": "Philips-MacBook-Pro.local" 29 | } 30 | True 31 | 32 | # kill the daemon gracefully 33 | $ ./daemon.py -c etc/vigilant/vigilant.json --kill 34 | ``` 35 | 36 | 37 | ## Configuration 38 | 39 | The configuration is meant to be as simple as possible. 40 | 41 | * Transport Section defines the data-store location and protocol type currently only udp is supported by the data-store 42 | * Daemon Section defines the lock and unix-socket path to use for the daemon. 43 | 44 | -------------------------------------------------------------------------------- /daemon/bindings/c/Makefile.am: -------------------------------------------------------------------------------- 1 | SUBDIRS = \ 2 | src 3 | AUTOMAKE_OPTIONS = foreign 4 | -------------------------------------------------------------------------------- /daemon/bindings/c/README.md: -------------------------------------------------------------------------------- 1 | # C/C++ - Bindings ⛨ 2 | This is C bindings directly to the stats daemon. 
3 | 4 | ```bash 5 | $ ./config/autogen.sh 6 | $ ./configure --prefix=/opt/observant 7 | $ make 8 | $ make install 9 | ``` 10 | 11 | ## Usage 12 | 13 | Test program such as: 14 | 15 | ```c 16 | // test.c 17 | #include 18 | 19 | int main(int argc, char **argv) { 20 | 21 | int retval = obs_attach("binding_test", "/tmp/observant.pid", "/tmp/observant.sock"); 22 | if (retval != 0) { 23 | fprintf(stderr, "Unable to attach to daemon"); 24 | return -1; 25 | } 26 | 27 | obs_post_message("Hello world!"); 28 | 29 | obs_detach(); 30 | 31 | return 0; 32 | } 33 | ``` 34 | 35 | Compile: 36 | 37 | ```bash 38 | $ gcc/clang -g -O2 -Wall -c test.c -o test.o -I/opt/observant/include 39 | $ gcc/clang -g -O2 -Wall -o test test.o -L/opt/observant/lib -lobservant 40 | $ ./test 41 | ``` 42 | 43 | Note this build will result in the error: 44 | 45 | ```bash 46 | /libobservant.h:15:31: error: expected function body after function declarator 47 | __PYX_EXTERN_C DL_IMPORT(int) obs_attach_to_stats_daemon(char const *, char const *); 48 | ``` 49 | 50 | This is a bug with cython; I have proposed the fix: https://github.com/cython/cython/pull/341 51 | 52 | To get around this error, run make once to generate the libobservant.h and add in this snippet: 53 | 54 | ```c 55 | #ifndef DL_IMPORT 56 | # define DL_IMPORT(_T) _T 57 | #endif 58 | ``` -------------------------------------------------------------------------------- /daemon/bindings/c/configure.ac: -------------------------------------------------------------------------------- 1 | AC_PREREQ(2.59) 2 | AC_REVISION($Revision$) 3 | 4 | AC_INIT(libvigilant, 0.1, https://github.com/redbrain/observant) 5 | AC_CONFIG_SRCDIR([src/libvigilant.pyx]) 6 | AC_CONFIG_HEADER([config.h]) 7 | AC_CONFIG_AUX_DIR([config]) 8 | AM_SANITY_CHECK 9 | 10 | dnl Last slash shouldn't be stripped if prefix=/ 11 | if test "$prefix" != "/"; then 12 | prefix=`echo "$prefix" | sed -e 's/\/$//g'` 13 | fi 14 | 15 | dnl Checks for programs. 
16 | AC_DEFINE_UNQUOTED([CONFIG_CC], "$CC", [C Compiler used]) 17 | 18 | AC_PROG_AWK 19 | AC_PROG_CPP 20 | AC_PROG_INSTALL 21 | AC_PROG_LN_S 22 | AC_PROG_MAKE_SET 23 | AC_PROG_RANLIB 24 | AC_CHECK_PROGS(AR, ar aal, ar) 25 | AC_PROG_CC 26 | 27 | AC_EXEEXT 28 | AC_GNU_SOURCE 29 | AC_PROG_INSTALL 30 | 31 | AC_PROG_LIBTOOL 32 | AM_PROG_LIBTOOL 33 | 34 | AM_PROG_CC_C_O 35 | 36 | AC_CANONICAL_HOST 37 | 38 | # When CFLAGS isn't set at this stage and gcc is detected by the macro below, 39 | # autoconf will automatically use CFLAGS="-O2 -g". Prevent that by using an 40 | # empty default. 41 | : ${CFLAGS=""} 42 | 43 | AC_CHECK_PROG(CYTHON_CHECK,cython,yes) 44 | if test x"$CYTHON_CHECK" != x"yes" ; then 45 | AC_MSG_ERROR([Please install cython]) 46 | fi 47 | 48 | AC_CHECK_PROG(PYTHON_CHECK,python3-config,yes) 49 | if test x"$PYTHON_CHECK" != x"yes" ; then 50 | AC_MSG_ERROR([Please install python3-config]) 51 | fi 52 | 53 | PYINCS=`python3-config --includes` 54 | PYLIBS=`python3-config --ldflags` 55 | AC_SUBST(PYINCS) 56 | AC_SUBST(PYLIBS) 57 | 58 | # Check whether --with-system-type or --without-system-type was given. 
59 | AC_ARG_WITH([system-type], 60 | [AS_HELP_STRING([--with-system-type], 61 | [Set the system type, like "sun-solaris10"])], 62 | [SYSTEM_TYPE="$withval"], 63 | [SYSTEM_TYPE="$host_vendor-$host_os"]) 64 | AC_ARG_WITH([machine-type], 65 | [AS_HELP_STRING([--with-machine-type], 66 | [Set the machine type, like "sparc"])], 67 | [MACHINE_TYPE="$withval"], 68 | [MACHINE_TYPE="$host_cpu"]) 69 | AC_SUBST(SYSTEM_TYPE) 70 | AC_DEFINE_UNQUOTED([SYSTEM_TYPE], ["$SYSTEM_TYPE"], 71 | [Name of system, eg sun-solaris]) 72 | AC_SUBST(MACHINE_TYPE) 73 | AC_DEFINE_UNQUOTED([MACHINE_TYPE], ["$MACHINE_TYPE"], 74 | [Machine type name, eg sparc]) 75 | 76 | 77 | AM_INIT_AUTOMAKE 78 | AC_CONFIG_FILES([Makefile 79 | src/Makefile]) 80 | AC_OUTPUT 81 | 82 | echo "---" 83 | echo "Configuration summary for $PACKAGE_NAME version $VERSION" 84 | echo "" 85 | echo " * Installation prefix: $prefix" 86 | echo " * System type: $SYSTEM_TYPE" 87 | echo " * Host CPU: $host_cpu" 88 | echo " * C Compiler: $CC" 89 | echo " * CFlags: $CFLAGS" 90 | echo " * Python Includes: $PYINCS" 91 | echo " * Python Libs: $PYLIBS" 92 | echo "" 93 | echo "---" 94 | 95 | dnl libtoolize scans configure.ac and needs to see some text 96 | m4_define([LIBTOOLIZE_AC_INIT], []) 97 | -------------------------------------------------------------------------------- /daemon/bindings/c/src/Makefile.am: -------------------------------------------------------------------------------- 1 | bin_PROGRAMS = test 2 | lib_LTLIBRARIES = libvigilant.la 3 | 4 | SUFFIXES = .pyx 5 | .pyx.c: 6 | cython -3 -o $@ $< 7 | 8 | test_SOURCES = test.c 9 | test_LDADD = libvigilant.la 10 | 11 | libvigilant_la_SOURCES = libvigilant.pyx vigilant.c 12 | include_HEADERS = libvigilant.h vigilant.h 13 | 14 | CFLAGS += $(PYINCS) 15 | libvigilant_la_LIBADD = $(PYLIBS) 16 | -------------------------------------------------------------------------------- /daemon/bindings/c/src/libvigilant.pyx: 
-------------------------------------------------------------------------------- 1 | import os 2 | 3 | import Daemon 4 | 5 | _Daemon = None 6 | _PROC_NAME = None 7 | 8 | cdef public int obs_attach_to_stats_daemon(const char *key, const char *pid, const char *sock): 9 | global _Daemon, _PROC_NAME 10 | _PROC_NAME = key.decode('utf-8') 11 | try: 12 | _Daemon = Daemon.attach_to_daemon(pid=pid, sock=sock) 13 | _Daemon.post_watch_pid(_PROC_NAME, os.getpid()) 14 | return 0 15 | except: 16 | import sys 17 | print(sys.exc_info()[1]) 18 | return -1 19 | 20 | cdef public int obs_post_message(const char *message): 21 | global _Daemon, _PROC_NAME 22 | if _Daemon is None: 23 | return -1 24 | _Daemon.post_log_message_for_key(message.decode('utf-8'), proc=_PROC_NAME) 25 | return 0 26 | 27 | cdef public void obs_detach_daemon(): 28 | global _Daemon 29 | if _Daemon: 30 | _Daemon.close() 31 | -------------------------------------------------------------------------------- /daemon/bindings/c/src/test.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | int main (int argc, char **argv) 8 | { 9 | if (obs_attach ("binding_test", "/tmp/vigilant.pid", "/tmp/vigilant.sock") != 0) 10 | { 11 | fprintf (stderr, "Unable to attach to daemon!\n"); 12 | return -1; 13 | } 14 | obs_post_message("Hello world from c land!!"); 15 | obs_detach (); 16 | return 0; 17 | } 18 | -------------------------------------------------------------------------------- /daemon/bindings/c/src/vigilant.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int 4 | obs_attach(const char *key, const char *lock, const char *sock) 5 | { 6 | Py_Initialize(); 7 | PyInit_libvigilant(); 8 | return obs_attach_to_stats_daemon(key, lock, sock); 9 | } 10 | 11 | void 12 | obs_detach(void) 13 | { 14 | obs_detach_daemon(); 15 | Py_Finalize(); 16 | } 17 | 
-------------------------------------------------------------------------------- /daemon/bindings/c/src/vigilant.h: -------------------------------------------------------------------------------- 1 | #ifndef OBSERVANT_H__ 2 | #define OBSERVANT_H__ 3 | 4 | #include 5 | 6 | #ifndef DL_IMPORT 7 | # define DL_IMPORT(_T) _T 8 | #endif 9 | #include 10 | 11 | // Initilize Python and Observant module 12 | extern int obs_attach(const char *, const char *, const char *); 13 | 14 | // detach from observant and cleanup python 15 | extern void obs_detach(void); 16 | 17 | #endif //OBSERVANT_H__ 18 | -------------------------------------------------------------------------------- /daemon/bindings/nodejs/README.md: -------------------------------------------------------------------------------- 1 | 2 | ```bash 3 | $ npm install -g node-gyp 4 | ``` 5 | -------------------------------------------------------------------------------- /daemon/bindings/nodejs/binding.gyp: -------------------------------------------------------------------------------- 1 | { 2 | 'targets': [ 3 | { 4 | 'target_name': 'vigilant', 5 | 'sources': [ 'vigilant.cc' ] 6 | } 7 | ] 8 | } -------------------------------------------------------------------------------- /daemon/bindings/nodejs/test.js: -------------------------------------------------------------------------------- 1 | var assert = require('assert'); 2 | var vigilant = require('./build/Release/vigilant'); 3 | 4 | -------------------------------------------------------------------------------- /daemon/bindings/nodejs/vigilant.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | using namespace v8; 5 | 6 | void Method(const FunctionCallbackInfo& args) { 7 | Isolate* isolate = Isolate::GetCurrent(); 8 | HandleScope scope(isolate); 9 | args.GetReturnValue().Set(String::NewFromUtf8(isolate, "world")); 10 | } 11 | 12 | void init(Handle target) { 13 | NODE_SET_METHOD(target, "hello", 
Method); 14 | } 15 | 16 | NODE_MODULE(binding, init); 17 | -------------------------------------------------------------------------------- /daemon/daemon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import json 6 | import optparse 7 | 8 | import Daemon 9 | from Daemon import DaemonIPC 10 | from Daemon import Transports 11 | 12 | 13 | def is_daemon_running(lock): 14 | return DaemonIPC.is_pid_alive(DaemonIPC.get_pid_from_lock(lock)) 15 | 16 | 17 | def get_daemon_status(lock, sock): 18 | try: 19 | client = Daemon.attach_to_daemon(pid=lock, sock=sock) 20 | mess = client.post_status_request() 21 | client.close() 22 | return mess 23 | except: 24 | return sys.exc_info()[1] 25 | 26 | 27 | def stop_daemon(lock, sock): 28 | if is_daemon_running(lock): 29 | client = Daemon.attach_to_daemon(pid=lock, sock=sock) 30 | client.post_stop_daemon() 31 | client.close() 32 | try: 33 | os.waitpid(DaemonIPC.get_pid_from_lock(lock), 0) 34 | except: 35 | pass 36 | 37 | 38 | def get_transport_from_config(config): 39 | transport_section = config['transport'] 40 | transport_type = transport_section['type'] 41 | if transport_type == 'udp': 42 | return Transports.UDPStatsTransport(host=transport_section['host'], port=transport_section['port']) 43 | elif transport_type == 'tcp': 44 | return Transports.TCPStatsTransport(host=transport_section['host'], port=transport_section['port']) 45 | else: 46 | raise Exception('Invalid transport in configuration') 47 | 48 | 49 | def get_daemon_connection(config): 50 | lock = config['daemon']['lock'] 51 | sock = config['daemon']['sock'] 52 | transport = get_transport_from_config(config) 53 | return Daemon.attach_or_create_daemon(transport, pid=lock, sock=sock) 54 | 55 | 56 | def start_daemon(config): 57 | client = get_daemon_connection(config) 58 | client.close() 59 | 60 | 61 | def watch_pid(config, key, pid): 62 | lock = config['daemon']['lock'] 63 | if 
not is_daemon_running(lock): 64 | sys.exit("ERROR: Daemon not running") 65 | if not DaemonIPC.is_pid_alive(pid): 66 | sys.exit("ERROR: pid [%i] does not exist" % pid) 67 | client = get_daemon_connection(config) 68 | client.post_watch_pid(key, pid) 69 | client.close() 70 | 71 | 72 | def log_message(message, lock, sock): 73 | try: 74 | client = Daemon.attach_to_daemon(pid=lock, sock=sock) 75 | client.post_log_message_for_key(message) 76 | client.close() 77 | except: 78 | return sys.exc_info()[1] 79 | 80 | 81 | def daemon(): 82 | parser = optparse.OptionParser() 83 | parser.add_option("-c", dest="config", help="Config file location", default=None) 84 | parser.add_option("--status", dest="status", help="Is daemon running", default=False, action="store_true") 85 | parser.add_option("--start", dest="start", help="Start Daemon with config", default=False, action="store_true") 86 | parser.add_option("--stop", dest="stop", help="Kill stats daemon", default=False, action="store_true") 87 | parser.add_option('--watch', dest='watch', default=None, help="Watch specified pid key:pid as argument") 88 | parser.add_option('--log', dest='log', default=None, help="Post log message") 89 | options, _ = parser.parse_args() 90 | if options.config is None: 91 | sys.exit('You must specify a configuration file, see --help') 92 | config = None 93 | with open(options.config) as fd: 94 | config = json.load(fd) 95 | lock = config['daemon']['lock'] 96 | sock = config['daemon']['sock'] 97 | if options.status: 98 | print(get_daemon_status(lock, sock)) 99 | if options.stop: 100 | stop_daemon(lock, sock) 101 | if options.start: 102 | start_daemon(config) 103 | if options.log: 104 | log_message(options.log, lock, sock) 105 | if options.watch: 106 | split = options.watch.split(':') 107 | if len(split) != 2: 108 | sys.exit('ERROR: Invalid watch argument [%s] needs to be key:pid' % options.watch) 109 | watch_pid(config, split[0], int(split[1])) 110 | 111 | 112 | if __name__ == "__main__": 113 | daemon() 
114 | -------------------------------------------------------------------------------- /daemon/etc/vigilant/vigilant.json: -------------------------------------------------------------------------------- 1 | { 2 | "transport": { 3 | "type": "udp", 4 | "host": "localhost", 5 | "port": 8080 6 | }, 7 | 8 | "daemon": { 9 | "sock": "/tmp/vigilant.sock", 10 | "lock": "/tmp/vigilant.pid" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /daemon/jenkins.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | virtualenv env 5 | . env/bin/activate 6 | pip install -r requirements.txt 7 | python setup.py build 8 | python setup.py install 9 | -------------------------------------------------------------------------------- /daemon/requirements.txt: -------------------------------------------------------------------------------- 1 | daemonize==2.3.1 2 | psutil==2.1.3 3 | cython==0.21.1 4 | -------------------------------------------------------------------------------- /daemon/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from distutils.core import setup 4 | 5 | setup(name='vigilant-daemon', 6 | version='0.4', 7 | description='Data aggregation daemon', 8 | author='Philip Herron', 9 | author_email='herron.philip@googlemail.com', 10 | packages=['Daemon'], 11 | scripts=['daemon.py'] 12 | ) 13 | -------------------------------------------------------------------------------- /daemon2/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /daemon2/Makefile.am: -------------------------------------------------------------------------------- 1 | SUBDIRS = \ 2 | src 3 | AUTOMAKE_OPTIONS = foreign 4 | -------------------------------------------------------------------------------- /daemon2/README.md: -------------------------------------------------------------------------------- 1 | # Daemon2 2 | 3 | This is the work in progress new daemon in C/C++. 4 | 5 | WIP not functional. 6 | -------------------------------------------------------------------------------- /daemon2/config/autogen.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Taken from lighthttpd server (BSD). Thanks Jan! 3 | # Run this to generate all the initial makefiles, etc. 
4 | 5 | die() { echo "$@"; exit 1; } 6 | 7 | # LIBTOOLIZE=${LIBTOOLIZE:-libtoolize} 8 | LIBTOOLIZE_FLAGS=" --automake --copy --force" 9 | # ACLOCAL=${ACLOCAL:-aclocal} 10 | ACLOCAL_FLAGS="-I config" 11 | # AUTOHEADER=${AUTOHEADER:-autoheader} 12 | # AUTOMAKE=${AUTOMAKE:-automake} 13 | # --add-missing instructs automake to install missing auxiliary files 14 | # --copy tells it to make copies and not symlinks 15 | AUTOMAKE_FLAGS="--gnu --add-missing --copy --foreign" 16 | # AUTOCONF=${AUTOCONF:-autoconf} 17 | 18 | ARGV0=$0 19 | ARGS="$@" 20 | 21 | 22 | run() { 23 | echo "$ARGV0: running \`$@' $ARGS" 24 | $@ $ARGS 25 | } 26 | 27 | ## jump out if one of the programs returns 'false' 28 | set -e 29 | 30 | if test x$LIBTOOLIZE = x; then 31 | if test \! "x`which glibtoolize 2> /dev/null | grep -v '^no'`" = x; then 32 | LIBTOOLIZE=glibtoolize 33 | elif test \! "x`which libtoolize-1.5 2> /dev/null | grep -v '^no'`" = x; then 34 | LIBTOOLIZE=libtoolize-1.5 35 | elif test \! "x`which libtoolize 2> /dev/null | grep -v '^no'`" = x; then 36 | LIBTOOLIZE=libtoolize 37 | elif test \! "x`which glibtoolize 2> /dev/null | grep -v '^no'`" = x; then 38 | LIBTOOLIZE=glibtoolize 39 | else 40 | echo "libtoolize 1.5.x wasn't found, exiting"; exit 1 41 | fi 42 | fi 43 | 44 | if test x$ACLOCAL = x; then 45 | if test \! "x`which aclocal-1.10 2> /dev/null | grep -v '^no'`" = x; then 46 | ACLOCAL=aclocal-1.10 47 | elif test \! "x`which aclocal110 2> /dev/null | grep -v '^no'`" = x; then 48 | ACLOCAL=aclocal110 49 | elif test \! "x`which aclocal 2> /dev/null | grep -v '^no'`" = x; then 50 | ACLOCAL=aclocal 51 | else 52 | echo "automake 1.10.x (aclocal) wasn't found, exiting"; exit 1 53 | fi 54 | fi 55 | 56 | if test x$AUTOMAKE = x; then 57 | if test \! "x`which automake-1.10 2> /dev/null | grep -v '^no'`" = x; then 58 | AUTOMAKE=automake-1.10 59 | elif test \! "x`which automake110 2> /dev/null | grep -v '^no'`" = x; then 60 | AUTOMAKE=automake110 61 | elif test \! 
"x`which automake 2> /dev/null | grep -v '^no'`" = x; then 62 | AUTOMAKE=automake 63 | else 64 | echo "automake 1.10.x wasn't found, exiting"; exit 1 65 | fi 66 | fi 67 | 68 | 69 | ## macosx has autoconf-2.59 and autoconf-2.60 70 | if test x$AUTOCONF = x; then 71 | if test \! "x`which autoconf-2.59 2> /dev/null | grep -v '^no'`" = x; then 72 | AUTOCONF=autoconf-2.59 73 | elif test \! "x`which autoconf259 2> /dev/null | grep -v '^no'`" = x; then 74 | AUTOCONF=autoconf259 75 | elif test \! "x`which autoconf 2> /dev/null | grep -v '^no'`" = x; then 76 | AUTOCONF=autoconf 77 | else 78 | echo "autoconf 2.59+ wasn't found, exiting"; exit 1 79 | fi 80 | fi 81 | 82 | if test x$AUTOHEADER = x; then 83 | if test \! "x`which autoheader-2.59 2> /dev/null | grep -v '^no'`" = x; then 84 | AUTOHEADER=autoheader-2.59 85 | elif test \! "x`which autoheader259 2> /dev/null | grep -v '^no'`" = x; then 86 | AUTOHEADER=autoheader259 87 | elif test \! "x`which autoheader 2> /dev/null | grep -v '^no'`" = x; then 88 | AUTOHEADER=autoheader 89 | else 90 | echo "autoconf 2.59+ (autoheader) wasn't found, exiting"; exit 1 91 | fi 92 | fi 93 | 94 | 95 | # --force means overwrite ltmain.sh script if it already exists 96 | run $LIBTOOLIZE $LIBTOOLIZE_FLAGS || die "Can't execute libtoolize" 97 | 98 | run $ACLOCAL $ACLOCAL_FLAGS || die "Can't execute aclocal" 99 | run $AUTOHEADER || die "Can't execute autoheader" 100 | run $AUTOMAKE $AUTOMAKE_FLAGS || die "Can't execute automake" 101 | run $AUTOCONF || die "Can't execute autoconf" 102 | echo -n "Automade with: " 103 | $AUTOMAKE --version | head -1 104 | echo -n "Configured with: " 105 | $AUTOCONF --version | head -1 106 | 107 | -------------------------------------------------------------------------------- /daemon2/configure.ac: -------------------------------------------------------------------------------- 1 | AC_PREREQ(2.59) 2 | AC_REVISION($Revision$) 3 | 4 | AC_INIT(vigilant, 0.01, http://vigilantlabs.co.uk) 5 | 
AC_CONFIG_SRCDIR([src/daemon.cc]) 6 | AC_CONFIG_HEADER([src/config.h]) 7 | dnl keep the autoconf generated stuff neatly in a folder :) 8 | AC_CONFIG_AUX_DIR([config]) 9 | AM_SANITY_CHECK 10 | 11 | dnl Last slash shouldn't be stripped if prefix=/ 12 | if test "$prefix" != "/"; then 13 | prefix=`echo "$prefix" | sed -e 's/\/$//g'` 14 | fi 15 | 16 | dnl Checks for programs. 17 | AC_DEFINE_UNQUOTED([CONFIG_CC], "$CC", [C Compiler used]) 18 | AC_DEFINE_UNQUOTED([CONFIG_CXX], "$CXX", [C++ Compiler used]) 19 | 20 | AC_PROG_AWK 21 | AC_PROG_CPP 22 | AC_PROG_INSTALL 23 | AC_PROG_LN_S 24 | AC_PROG_MAKE_SET 25 | AC_PROG_RANLIB 26 | AC_CHECK_PROGS(AR, ar aal, ar) 27 | AC_PROG_CC 28 | AC_PROG_CXX 29 | 30 | AC_EXEEXT 31 | AC_GNU_SOURCE 32 | AC_PROG_INSTALL 33 | 34 | AC_PROG_LIBTOOL 35 | AM_PROG_LIBTOOL 36 | 37 | AM_PROG_CC_C_O 38 | 39 | AC_CANONICAL_HOST 40 | 41 | # When CFLAGS isn't set at this stage and gcc is detected by the macro below, 42 | # autoconf will automatically use CFLAGS="-O2 -g". Prevent that by using an 43 | # empty default. 44 | : ${CFLAGS=""} 45 | 46 | # Just to make sure we dont do debug on default! 47 | debug=no 48 | AC_ARG_WITH(debug, 49 | [AS_HELP_STRING([--with-debug=yes/no], 50 | [With Debug symbols, default no.])], 51 | [debug="$withval"]) 52 | AM_CONDITIONAL(IS_DEBUG, test "x$debug" = xyes) 53 | 54 | # Is this a static build? 55 | AC_ARG_ENABLE( 56 | static, 57 | AC_HELP_STRING(--enable-static, create a static build), 58 | found_static=$enable_static 59 | ) 60 | if test "x$found_static" = xyes; then 61 | LDFLAGS="$LDFLAGS -static" 62 | fi 63 | 64 | # Is this gcc? 65 | AM_CONDITIONAL(IS_GCC, test "x$GCC" = xyes) 66 | AC_MSG_CHECKING(for gcc that whines about -I) 67 | AC_EGREP_CPP( 68 | yes, 69 | [ 70 | #if __GNUC__ > 3 71 | yes 72 | #endif 73 | ], 74 | found_gcc4=yes, 75 | found_gcc4=no 76 | ) 77 | AM_CONDITIONAL(IS_GCC4, test "x$found_gcc4" = xyes) 78 | AC_MSG_RESULT($found_gcc4) 79 | 80 | # Is this Sun CC? 
81 | AC_EGREP_CPP( 82 | yes, 83 | [ 84 | #ifdef __SUNPRO_C 85 | yes 86 | #endif 87 | ], 88 | found_suncc=yes, 89 | found_suncc=no 90 | ) 91 | AM_CONDITIONAL(IS_SUNCC, test "x$found_suncc" = xyes) 92 | 93 | # Is this glibc? 94 | AC_MSG_CHECKING(for glibc) 95 | AC_EGREP_CPP( 96 | yes, 97 | [ 98 | #include 99 | #ifdef __GLIBC__ 100 | yes 101 | #endif 102 | ], 103 | found_glibc=yes, 104 | found_glibc=no 105 | ) 106 | AM_CONDITIONAL(IS_GLIBC, test "x$found_glibc" = xyes) 107 | AC_MSG_RESULT($found_glibc) 108 | 109 | # Check whether --with-system-type or --without-system-type was given. 110 | AC_ARG_WITH([system-type], 111 | [AS_HELP_STRING([--with-system-type], 112 | [Set the system type, like "sun-solaris10"])], 113 | [SYSTEM_TYPE="$withval"], 114 | [SYSTEM_TYPE="$host_vendor-$host_os"]) 115 | AC_ARG_WITH([machine-type], 116 | [AS_HELP_STRING([--with-machine-type], 117 | [Set the machine type, like "sparc"])], 118 | [MACHINE_TYPE="$withval"], 119 | [MACHINE_TYPE="$host_cpu"]) 120 | AC_SUBST(SYSTEM_TYPE) 121 | AC_DEFINE_UNQUOTED([SYSTEM_TYPE], ["$SYSTEM_TYPE"], 122 | [Name of system, eg sun-solaris]) 123 | AC_SUBST(MACHINE_TYPE) 124 | AC_DEFINE_UNQUOTED([MACHINE_TYPE], ["$MACHINE_TYPE"], 125 | [Machine type name, eg sparc]) 126 | 127 | AM_INIT_AUTOMAKE 128 | AM_SILENT_RULES([yes]) 129 | 130 | AC_HEADER_STDBOOL 131 | AC_HEADER_STDC 132 | 133 | # Checks for typedefs, structures, and compiler characteristics. 134 | AC_C_CONST 135 | AC_TYPE_SIZE_T 136 | 137 | # Checks for library functions. 
138 | AC_FUNC_MALLOC 139 | AC_FUNC_VPRINTF 140 | AC_CONFIG_FILES([Makefile 141 | src/Makefile]) 142 | AC_OUTPUT 143 | 144 | echo "---" 145 | echo "Configuration summary for $PACKAGE_NAME version $VERSION" 146 | echo "" 147 | echo " * Installation prefix: $prefix" 148 | echo " * System type: $SYSTEM_TYPE" 149 | echo " * Host CPU: $host_cpu" 150 | echo " * C++ Compiler: $CXX" 151 | echo " * DEBUG Symbols: $debug" 152 | echo "" 153 | echo "---" 154 | 155 | dnl libtoolize scans configure.ac and needs to see some text 156 | m4_define([LIBTOOLIZE_AC_INIT], []) 157 | -------------------------------------------------------------------------------- /daemon2/etc/vigilant/vigilant.json: -------------------------------------------------------------------------------- 1 | { 2 | "transport": { 3 | "type": "udp", 4 | "host": "localhost", 5 | "port": 8080 6 | }, 7 | 8 | "daemon": { 9 | "type": "unix", 10 | "sock": "/tmp/vigilant.sock", 11 | "lock": "/tmp/vigilant.pid" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /daemon2/getdeps.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | rm -rf _deps 5 | mkdir _deps 6 | pushd _deps 7 | 8 | rm -rf _builds 9 | mkdir _builds 10 | 11 | pushd _builds 12 | BUILD_DIR=`pwd` 13 | popd 14 | 15 | wget https://github.com/HardySimpson/zlog/archive/latest-stable.tar.gz 16 | tar zxvf latest-stable.tar.gz 17 | pushd zlog-latest-stable 18 | make PREFIX=$BUILD_DIR 19 | make install PREFIX=$BUILD_DIR 20 | popd 21 | 22 | wget ftp://ftp.mirrorservice.org/pub/i-scream/libstatgrab/libstatgrab-0.91.tar.gz 23 | tar zxvf libstatgrab-0.91.tar.gz 24 | pushd libstatgrab-0.91 25 | ./configure --prefix=$BUILD_DIR 26 | make 27 | make install 28 | popd 29 | 30 | wget https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz 31 | tar zxvf protobuf-2.6.1.tar.gz 32 | pushd protobuf-2.6.1 33 | ./configure 
--prefix=$BUILD_DIR 34 | make 35 | make install 36 | popd 37 | 38 | wget https://sourceforge.net/projects/levent/files/libevent/libevent-2.0/libevent-2.0.22-stable.tar.gz 39 | tar zxvf libevent-2.0.22-stable.tar.gz 40 | pushd libevent-2.0.22-stable 41 | ./configure --prefix=$BUILD_DIR 42 | make 43 | make install 44 | popd 45 | 46 | wget http://www.digip.org/jansson/releases/jansson-2.7.tar.gz 47 | tar zxvf jansson-2.7.tar.gz 48 | pushd jansson-2.7 49 | ./configure --prefix=$BUILD_DIR 50 | make 51 | make install 52 | popd 53 | 54 | popd 55 | -------------------------------------------------------------------------------- /daemon2/jenkins.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | ./getdeps.sh 5 | rm -rf _build 6 | mkdir _build 7 | BUILD_DIR=`pwd`/_build 8 | ./config/autogen.sh 9 | ./configure --prefix=$BUILD_DIR 10 | make 11 | make install 12 | 13 | -------------------------------------------------------------------------------- /daemon2/src/Makefile.am: -------------------------------------------------------------------------------- 1 | bin_PROGRAMS = daemon$(EXEEXT) 2 | lib_LTLIBRARIES = libvigilant.la 3 | 4 | libvigilant_la_SOURCES = \ 5 | vigilant.h \ 6 | transport.cc \ 7 | udp_transport.cc \ 8 | stats_monitor.cc \ 9 | ipc_protocol.cc \ 10 | async_server.cc 11 | 12 | daemon_SOURCES = daemon.cc 13 | daemon_LDADD = libvigilant.la 14 | -------------------------------------------------------------------------------- /daemon2/src/async_server.cc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/daemon2/src/async_server.cc -------------------------------------------------------------------------------- /daemon2/src/daemon.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int main (int argc, char **argv) 4 
| { 5 | printf ("Hello World!\n"); 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /daemon2/src/ipc_protocol.cc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/daemon2/src/ipc_protocol.cc -------------------------------------------------------------------------------- /daemon2/src/stats_monitor.cc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/daemon2/src/stats_monitor.cc -------------------------------------------------------------------------------- /daemon2/src/transport.cc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/daemon2/src/transport.cc -------------------------------------------------------------------------------- /daemon2/src/udp_transport.cc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/daemon2/src/udp_transport.cc -------------------------------------------------------------------------------- /daemon2/src/vigilant.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/daemon2/src/vigilant.h -------------------------------------------------------------------------------- /datastore/.bowerrc: -------------------------------------------------------------------------------- 1 | { 2 | "directory": "src/main/webapp/lib", 3 | "interactive": false 4 | } -------------------------------------------------------------------------------- /datastore/README.md: 
-------------------------------------------------------------------------------- 1 | # DataStore 2 | 3 | This is the realtime datastore, providing a rest-api to access all data and add triggers. 4 | 5 | ##Setup 6 | 7 | Scala DataStore uses sbt (requires jdk >= 7): 8 | 9 | ```bash 10 | $ cd ./etc/vigilant 11 | $ export VIGILANT_HOME=`pwd` 12 | $ cd - 13 | 14 | $ ./sbt # -jvm-debug 5005 for debugging 15 | > container:start # start the container 16 | > ~ ;copy-resources;aux-compile # auto-reload on file changes 17 | ``` 18 | 19 | Standalone from sbt: 20 | 21 | ```bash 22 | # run standalone jetty container - The websocket api fails using this currently. 23 | $ ./jetty.sh 24 | ``` 25 | 26 | ## Configuration 27 | 28 | Currently vigilant supports twillo as a notification service. Stub configuration is already 29 | in [vigilant.json](./etc/vigilant/vigilant.json). 30 | 31 | ## Memory usage 32 | 33 | I spent alot of time making this run on a very low memory machines and it can go lower but its 34 | perfectly reasonable to run ./sbt -mem 128 to be enough for this. 
35 | -------------------------------------------------------------------------------- /datastore/bower.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Vigilant", 3 | "private": true, 4 | "dependencies": { 5 | "swagger-ui": "latest" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /datastore/etc/vigilant/vigilant.json: -------------------------------------------------------------------------------- 1 | { 2 | "transport": { 3 | "type": "udp", 4 | "host": "localhost", 5 | "port": 8080 6 | }, 7 | 8 | "triggers": { 9 | "notification_threshold": 120 10 | }, 11 | 12 | "database": { 13 | "jdbc": "jdbc:h2:./vigilant" 14 | }, 15 | 16 | "twillo": { 17 | "account_sid": "", 18 | "auth_token": "", 19 | "from": "" 20 | }, 21 | 22 | "email": { 23 | "smtp_server": "localhost", 24 | "from": "someone@email.com" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /datastore/jenkins.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | bower install 5 | ./sbt -mem 256 compile package test 6 | -------------------------------------------------------------------------------- /datastore/jetty.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | ./sbt -mem 128 test package 5 | 6 | export VIGILANT_HOME=`pwd`/etc/vigilant/ 7 | 8 | java -Xms128m -Xmx128m \ 9 | -jar ./libexec/jetty-runner-9.2.2.v20140723.jar \ 10 | --port 9090 \ 11 | ./target/scala-2.11/vigilant_2.11-0.2.0-SNAPSHOT.war 12 | -------------------------------------------------------------------------------- /datastore/libexec/jetty-runner-9.2.2.v20140723.jar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/datastore/libexec/jetty-runner-9.2.2.v20140723.jar -------------------------------------------------------------------------------- /datastore/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=0.13.5 2 | -------------------------------------------------------------------------------- /datastore/project/build.scala: -------------------------------------------------------------------------------- 1 | import org.scalatra.sbt._ 2 | import sbt.Keys._ 3 | import sbt._ 4 | 5 | object VigilantBuild extends Build { 6 | val Organization = "io.github.redbrain" 7 | val Name = "vigilant" 8 | val Version = "0.4.0-SNAPSHOT" 9 | val ScalaVersion = "2.11.6" 10 | val ScalatraVersion = "2.3.0" 11 | 12 | lazy val project = Project ( 13 | "vigilant", 14 | file("."), 15 | settings = ScalatraPlugin.scalatraSettings ++ Seq( 16 | organization := Organization, 17 | name := Name, 18 | version := Version, 19 | scalaVersion := ScalaVersion, 20 | 21 | resolvers += "Sonatype OSS Snapshots" at "http://oss.sonatype.org/content/repositories/snapshots/", 22 | resolvers += "Akka Repo" at "http://repo.akka.io/repository", 23 | resolvers += "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/", 24 | 25 | conflictWarning ~= { _.copy(failOnConflict = false) }, 26 | 27 | libraryDependencies ++= Seq( 28 | // Scalatra 29 | "org.scalatra" %% "scalatra" % ScalatraVersion, 30 | "org.scalatra" %% "scalatra-atmosphere" % ScalatraVersion, 31 | "org.scalatra" %% "scalatra-json" % ScalatraVersion, 32 | "org.scalatra" %% "scalatra-auth" % ScalatraVersion, 33 | "org.scalatra" %% "scalatra-swagger" % ScalatraVersion, 34 | "org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test", 35 | "org.scalatra" %% "scalatra-scalatest" % ScalatraVersion % "test", 36 | 37 | // Json 38 | "org.json4s" %% "json4s-jackson" % "3.2.9", 39 | 
"org.json4s" %% "json4s-native" % "3.2.9", 40 | "com.typesafe.play" %% "play-json" % "2.3.0", 41 | 42 | // database 43 | "org.sorm-framework" % "sorm" % "0.3.18", 44 | "com.h2database" % "h2" % "1.4.187", 45 | 46 | // notifications 47 | "com.twilio.sdk" % "twilio-java-sdk" % "4.0.1", 48 | "javax.mail" % "mail" % "1.4.1", 49 | "javax.activation" % "activation" % "1.1.1", 50 | 51 | // Akka 52 | "ch.qos.logback" % "logback-classic" % "1.0.13", 53 | "com.typesafe.akka" %% "akka-actor" % "2.3.4", 54 | "net.databinder.dispatch" %% "dispatch-core" % "0.11.1", 55 | 56 | // Jetty 57 | "org.eclipse.jetty" % "jetty-webapp" % "8.1.8.v20121106" % "container", 58 | "org.eclipse.jetty" % "jetty-websocket" % "8.1.8.v20121106" % "container;provided", 59 | "org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" 60 | ) 61 | ) 62 | ) 63 | } 64 | -------------------------------------------------------------------------------- /datastore/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.mojolly.scalate" % "xsbt-scalate-generator" % "0.5.0") 2 | 3 | addSbtPlugin("org.scalatra.sbt" % "scalatra-sbt" % "0.3.5") 4 | 5 | addSbtPlugin("com.github.mpeltonen" % "sbt-idea" % "1.5.2") -------------------------------------------------------------------------------- /datastore/sbt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # A more capable sbt runner, coincidentally also called sbt. 
4 | # Author: Paul Phillips 5 | 6 | # todo - make this dynamic 7 | declare -r sbt_release_version=0.12.0 8 | declare -r sbt_snapshot_version=0.13.0-SNAPSHOT 9 | 10 | unset sbt_jar sbt_dir sbt_create sbt_snapshot sbt_launch_dir 11 | unset scala_version java_home sbt_explicit_version 12 | unset verbose debug quiet 13 | 14 | for arg in "$@"; do 15 | case $arg in 16 | -q|-quiet) quiet=1 ;; 17 | *) ;; 18 | esac 19 | done 20 | 21 | build_props_sbt () { 22 | if [[ -f project/build.properties ]]; then 23 | versionLine=$(grep ^sbt.version project/build.properties) 24 | versionString=${versionLine##sbt.version=} 25 | echo "$versionString" 26 | fi 27 | } 28 | 29 | update_build_props_sbt () { 30 | local ver="$1" 31 | local old=$(build_props_sbt) 32 | 33 | if [[ $ver == $old ]]; then 34 | return 35 | elif [[ -f project/build.properties ]]; then 36 | perl -pi -e "s/^sbt\.version=.*\$/sbt.version=${ver}/" project/build.properties 37 | grep -q '^sbt.version=' project/build.properties || echo "sbt.version=${ver}" >> project/build.properties 38 | 39 | echo !!! 40 | echo !!! Updated file project/build.properties setting sbt.version to: $ver 41 | echo !!! Previous value was: $old 42 | echo !!! 43 | fi 44 | } 45 | 46 | sbt_version () { 47 | if [[ -n $sbt_explicit_version ]]; then 48 | echo $sbt_explicit_version 49 | else 50 | local v=$(build_props_sbt) 51 | if [[ -n $v ]]; then 52 | echo $v 53 | else 54 | echo $sbt_release_version 55 | fi 56 | fi 57 | } 58 | 59 | echoerr () { 60 | [[ -z $quiet ]] && echo 1>&2 "$@" 61 | } 62 | vlog () { 63 | [[ $verbose || $debug ]] && echoerr "$@" 64 | } 65 | dlog () { 66 | [[ $debug ]] && echoerr "$@" 67 | } 68 | 69 | # this seems to cover the bases on OSX, and someone will 70 | # have to tell me about the others. 
71 | get_script_path () { 72 | local path="$1" 73 | [[ -L "$path" ]] || { echo "$path" ; return; } 74 | 75 | local target=$(readlink "$path") 76 | if [[ "${target:0:1}" == "/" ]]; then 77 | echo "$target" 78 | else 79 | echo "$(dirname $path)/$target" 80 | fi 81 | } 82 | 83 | # a ham-fisted attempt to move some memory settings in concert 84 | # so they need not be dicked around with individually. 85 | get_mem_opts () { 86 | local mem=${1:-1536} 87 | local perm=$(( $mem / 4 )) 88 | (( $perm > 256 )) || perm=256 89 | (( $perm < 1024 )) || perm=1024 90 | local codecache=$(( $perm / 2 )) 91 | 92 | echo "-Xms${mem}m -Xmx${mem}m -XX:MaxPermSize=${perm}m -XX:ReservedCodeCacheSize=${codecache}m" 93 | } 94 | 95 | die() { 96 | echo "Aborting: $@" 97 | exit 1 98 | } 99 | 100 | make_url () { 101 | groupid="$1" 102 | category="$2" 103 | version="$3" 104 | 105 | echo "http://typesafe.artifactoryonline.com/typesafe/ivy-$category/$groupid/sbt-launch/$version/sbt-launch.jar" 106 | } 107 | 108 | declare -r default_jvm_opts="-Dfile.encoding=UTF8" 109 | declare -r default_sbt_opts="-XX:+CMSClassUnloadingEnabled" 110 | declare -r default_sbt_mem=1536 111 | declare -r noshare_opts="-Dsbt.global.base=project/.sbtboot -Dsbt.boot.directory=project/.boot -Dsbt.ivy.home=project/.ivy" 112 | declare -r sbt_opts_file=".sbtopts" 113 | declare -r jvm_opts_file=".jvmopts" 114 | declare -r latest_28="2.8.2" 115 | declare -r latest_29="2.9.2" 116 | declare -r latest_210="2.10.0-SNAPSHOT" 117 | 118 | declare -r script_path=$(get_script_path "$BASH_SOURCE") 119 | declare -r script_dir="$(dirname $script_path)" 120 | declare -r script_name="$(basename $script_path)" 121 | 122 | # some non-read-onlies set with defaults 123 | declare java_cmd=java 124 | declare sbt_launch_dir="$script_dir/.lib" 125 | declare sbt_universal_launcher="$script_dir/lib/sbt-launch.jar" 126 | declare sbt_mem=$default_sbt_mem 127 | declare sbt_jar=$sbt_universal_launcher 128 | 129 | # pull -J and -D options to give to java. 
130 | declare -a residual_args 131 | declare -a java_args 132 | declare -a scalac_args 133 | declare -a sbt_commands 134 | 135 | build_props_scala () { 136 | if [[ -f project/build.properties ]]; then 137 | versionLine=$(grep ^build.scala.versions project/build.properties) 138 | versionString=${versionLine##build.scala.versions=} 139 | echo ${versionString%% .*} 140 | fi 141 | } 142 | 143 | execRunner () { 144 | # print the arguments one to a line, quoting any containing spaces 145 | [[ $verbose || $debug ]] && echo "# Executing command line:" && { 146 | for arg; do 147 | if printf "%s\n" "$arg" | grep -q ' '; then 148 | printf "\"%s\"\n" "$arg" 149 | else 150 | printf "%s\n" "$arg" 151 | fi 152 | done 153 | echo "" 154 | } 155 | 156 | exec "$@" 157 | } 158 | 159 | sbt_groupid () { 160 | case $(sbt_version) in 161 | 0.7.*) echo org.scala-tools.sbt ;; 162 | 0.10.*) echo org.scala-tools.sbt ;; 163 | 0.11.[12]) echo org.scala-tools.sbt ;; 164 | *) echo org.scala-sbt ;; 165 | esac 166 | } 167 | 168 | sbt_artifactory_list () { 169 | local version0=$(sbt_version) 170 | local version=${version0%-SNAPSHOT} 171 | local url="http://typesafe.artifactoryonline.com/typesafe/ivy-snapshots/$(sbt_groupid)/sbt-launch/" 172 | dlog "Looking for snapshot list at: $url " 173 | 174 | curl -s --list-only "$url" | \ 175 | grep -F $version | \ 176 | perl -e 'print reverse <>' | \ 177 | perl -pe 's#^/dev/null 191 | dlog "curl returned: $?" 
192 | echo "$url" 193 | return 194 | done 195 | } 196 | 197 | jar_url () { 198 | case $(sbt_version) in 199 | 0.7.*) echo "http://simple-build-tool.googlecode.com/files/sbt-launch-0.7.7.jar" ;; 200 | *-SNAPSHOT) make_snapshot_url ;; 201 | *) make_release_url ;; 202 | esac 203 | } 204 | 205 | jar_file () { 206 | echo "$sbt_launch_dir/$1/sbt-launch.jar" 207 | } 208 | 209 | download_url () { 210 | local url="$1" 211 | local jar="$2" 212 | 213 | echo "Downloading sbt launcher $(sbt_version):" 214 | echo " From $url" 215 | echo " To $jar" 216 | 217 | mkdir -p $(dirname "$jar") && { 218 | if which curl >/dev/null; then 219 | curl --fail --silent "$url" --output "$jar" 220 | elif which wget >/dev/null; then 221 | wget --quiet -O "$jar" "$url" 222 | fi 223 | } && [[ -f "$jar" ]] 224 | } 225 | 226 | acquire_sbt_jar () { 227 | sbt_url="$(jar_url)" 228 | sbt_jar="$(jar_file $(sbt_version))" 229 | 230 | [[ -f "$sbt_jar" ]] || download_url "$sbt_url" "$sbt_jar" 231 | } 232 | 233 | usage () { 234 | cat < path to global settings/plugins directory (default: ~/.sbt/) 244 | -sbt-boot path to shared boot directory (default: ~/.sbt/boot in 0.11+) 245 | -ivy path to local Ivy repository (default: ~/.ivy2) 246 | -mem set memory options (default: $sbt_mem, which is 247 | $(get_mem_opts $sbt_mem) ) 248 | -no-share use all local caches; no sharing 249 | -offline put sbt in offline mode 250 | -jvm-debug Turn on JVM debugging, open at the given port. 251 | -batch Disable interactive mode 252 | 253 | # sbt version (default: from project/build.properties if present, else latest release) 254 | !!! The only way to accomplish this pre-0.12.0 if there is a build.properties file which 255 | !!! contains an sbt.version property is to update the file on disk. That's what this does. 
256 | -sbt-version use the specified version of sbt 257 | -sbt-jar use the specified jar as the sbt launcher 258 | -sbt-snapshot use a snapshot version of sbt 259 | -sbt-launch-dir directory to hold sbt launchers (default: $sbt_launch_dir) 260 | 261 | # scala version (default: as chosen by sbt) 262 | -28 use $latest_28 263 | -29 use $latest_29 264 | -210 use $latest_210 265 | -scala-home use the scala build at the specified directory 266 | -scala-version use the specified version of scala 267 | 268 | # java version (default: java from PATH, currently $(java -version |& grep version)) 269 | -java-home alternate JAVA_HOME 270 | 271 | # jvm options and output control 272 | JAVA_OPTS environment variable holding jvm args, if unset uses "$default_jvm_opts" 273 | SBT_OPTS environment variable holding jvm args, if unset uses "$default_sbt_opts" 274 | .jvmopts if file is in sbt root, it is prepended to the args given to the jvm 275 | .sbtopts if file is in sbt root, it is prepended to the args given to **sbt** 276 | -Dkey=val pass -Dkey=val directly to the jvm 277 | -J-X pass option -X directly to the jvm (-J is stripped) 278 | -S-X add -X to sbt's scalacOptions (-J is stripped) 279 | 280 | In the case of duplicated or conflicting options, the order above 281 | shows precedence: JAVA_OPTS lowest, command line options highest. 
282 | EOM 283 | } 284 | 285 | addJava () { 286 | dlog "[addJava] arg = '$1'" 287 | java_args=( "${java_args[@]}" "$1" ) 288 | } 289 | addSbt () { 290 | dlog "[addSbt] arg = '$1'" 291 | sbt_commands=( "${sbt_commands[@]}" "$1" ) 292 | } 293 | addScalac () { 294 | dlog "[addScalac] arg = '$1'" 295 | scalac_args=( "${scalac_args[@]}" "$1" ) 296 | } 297 | addResidual () { 298 | dlog "[residual] arg = '$1'" 299 | residual_args=( "${residual_args[@]}" "$1" ) 300 | } 301 | addResolver () { 302 | addSbt "set resolvers in ThisBuild += $1" 303 | } 304 | addDebugger () { 305 | addJava "-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=$1" 306 | } 307 | 308 | jrebelAgent () { 309 | SCALATRA_PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 310 | if [ -z "$SCALATRA_JREBEL" ]; 311 | then echo -n ''; 312 | else echo -n "-javaagent:$SCALATRA_JREBEL -Dscalatra_project_root=${SCALATRA_PROJECT_ROOT}"; 313 | fi 314 | } 315 | 316 | get_jvm_opts () { 317 | echo "${JAVA_OPTS:-$default_jvm_opts}" 318 | echo "`jrebelAgent` ${SBT_OPTS:-$default_sbt_opts}" 319 | 320 | [[ -f "$jvm_opts_file" ]] && cat "$jvm_opts_file" 321 | } 322 | 323 | process_args () 324 | { 325 | require_arg () { 326 | local type="$1" 327 | local opt="$2" 328 | local arg="$3" 329 | 330 | if [[ -z "$arg" ]] || [[ "${arg:0:1}" == "-" ]]; then 331 | die "$opt requires <$type> argument" 332 | fi 333 | } 334 | while [[ $# -gt 0 ]]; do 335 | case "$1" in 336 | -h|-help) usage; exit 1 ;; 337 | -v|-verbose) verbose=1 && shift ;; 338 | -d|-debug) debug=1 && shift ;; 339 | -q|-quiet) quiet=1 && shift ;; 340 | 341 | -ivy) require_arg path "$1" "$2" && addJava "-Dsbt.ivy.home=$2" && shift 2 ;; 342 | -mem) require_arg integer "$1" "$2" && sbt_mem="$2" && shift 2 ;; 343 | -no-colors) addJava "-Dsbt.log.noformat=true" && shift ;; 344 | -no-share) addJava "$noshare_opts" && shift ;; 345 | -sbt-boot) require_arg path "$1" "$2" && addJava "-Dsbt.boot.directory=$2" && shift 2 ;; 346 | -sbt-dir) require_arg path 
"$1" "$2" && sbt_dir="$2" && shift 2 ;; 347 | -debug-inc) addJava "-Dxsbt.inc.debug=true" && shift ;; 348 | -offline) addSbt "set offline := true" && shift ;; 349 | -jvm-debug) require_arg port "$1" "$2" && addDebugger $2 && shift 2 ;; 350 | -batch) exec 0 )) || echo "Starting $script_name: invoke with -help for other options" 412 | 413 | # verify this is an sbt dir or -create was given 414 | [[ -f ./build.sbt || -d ./project || -n "$sbt_create" ]] || { 415 | cat < 2 | 3 | 5 | 6 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /datastore/src/main/scala/ScalatraBootstrap.scala: -------------------------------------------------------------------------------- 1 | import java.io.{File, IOException} 2 | import javax.servlet.ServletContext 3 | 4 | import _root_.akka.actor.ActorSystem 5 | import org.scalatra._ 6 | import org.slf4j.LoggerFactory 7 | import vigilant.datastore.controllers.StatsController 8 | import vigilant.datastore.controllers.swagger.{StatsSwagger, ResourcesApp} 9 | import vigilant.datastore.services.aggregator.AggregatorService 10 | import vigilant.datastore.services.configuration.ConfigurationService 11 | import vigilant.datastore.services.database.DatabaseService 12 | import vigilant.datastore.services.transports.TransportService 13 | import vigilant.datastore.services.triggers.TriggersService 14 | 15 | class ScalatraBootstrap extends LifeCycle { 16 | 17 | val system = ActorSystem() 18 | val logger = LoggerFactory.getLogger(getClass) 19 | implicit val swagger = new StatsSwagger 20 | 21 | override def init(context: ServletContext) { 22 | val configHome = System.getenv("VIGILANT_HOME") 23 | if (configHome == null) { 24 | throw new IOException("Unable to find [VIGILANT_HOME]/vigilant.json") 25 | } 26 | 27 | ConfigurationService.load_configuration_from_file(new File(new File(configHome), "vigilant.json").getPath) 28 | 
logger.info("Configuration Loaded") 29 | 30 | AggregatorService.actor = system.actorOf(AggregatorService.props) 31 | logger.info("Started Stats Aggregator Service") 32 | 33 | DatabaseService.actor = system.actorOf(DatabaseService.props) 34 | DatabaseService.start() 35 | logger.info("Started Database Service") 36 | 37 | TriggersService.actor = system.actorOf(TriggersService.props) 38 | TriggersService.start() 39 | logger.info("Started Triggers Service") 40 | 41 | TransportService.setTransport( 42 | system.actorOf(ConfigurationService.transport_actor), 43 | ConfigurationService.transport) 44 | TransportService.start() 45 | logger.info("Started Transport Service") 46 | 47 | context.mount(new StatsController, "/api", "api") 48 | context.mount(new ResourcesApp, "/api-doc") 49 | logger.info("Restful Controllers ready") 50 | } 51 | 52 | override def destroy(context: ServletContext) { 53 | logger.info("Shutting Down Observant Data Service") 54 | TransportService.stop() 55 | TriggersService.stop() 56 | DatabaseService.stop() 57 | system.shutdown() 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/caches/HostCache.scala: -------------------------------------------------------------------------------- 1 | package vigilant.datastore.caches 2 | 3 | import vigilant.datastore.caches.buffer.Cache 4 | import vigilant.datastore.models.HostsDataModel 5 | import vigilant.datastore.services.configuration.ConfigurationService 6 | 7 | object HostCache extends Cache[HostsDataModel](ConfigurationService.cache_threshold) { } 8 | -------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/caches/LogCache.scala: -------------------------------------------------------------------------------- 1 | package vigilant.datastore.caches 2 | 3 | import vigilant.datastore.caches.buffer.Cache 4 | import vigilant.datastore.models.LogDataModel 5 | 
/**
 * Keyed store of bounded per-key histories. Each key maps to a RingBuffer
 * of at most `_size` entries; pushing past capacity evicts the oldest.
 */
class Cache[T](private val _size: Int)(implicit manifest : Manifest[T]) {
  // key -> bounded history ring (replaced wholesale on add/remove)
  var _cache = Map[String, RingBuffer[T]]()

  /** All keys currently tracked. */
  def keys: Set[String] = _cache.keySet

  /** Drop a key and its whole history. */
  def deleteKey(key: String) = _cache -= key

  /** Newest entry for `key`, or None for an unknown key. */
  def headForKey(key: String): Option[T] = _cache.get(key).map(_.head)

  /** Ordered history (oldest to newest) for `key`; empty for unknown keys. */
  def bufferForKey(key: String): Array[T] = _cache.get(key) match {
    case Some(ring) => ring.array
    case None       => Array()
  }

  /** Newest entry of every tracked key. */
  def headForAllKeys: Array[T] = {
    var collected = List[T]()
    for (k <- _cache.keys) {
      headForKey(k).foreach(v => collected ::= v)
    }
    collected.toArray
  }

  /** Append `data` to `key`'s ring, creating the ring on first sight. */
  def pushDataForKey(key: String, data: T) = _cache.get(key) match {
    case Some(ring) => ring.push(data)
    case None =>
      val fresh = new RingBuffer[T](_size)
      fresh.push(data)
      _cache += key -> fresh
  }
}
/**
 * Fixed-capacity ring buffer. `head` is the newest element, `tail` the
 * oldest; once full, each push overwrites the oldest slot.
 */
class RingBuffer[T](val size: Int)(implicit manifest : Manifest[T]) {
  private var _head = 0          // index of newest element
  private var _tail = 0          // index of oldest element
  private var _length = 0        // element count, saturates at `size` (see push)
  private var _empty = true      // distinguishes "no pushes yet" from head==tail
  private val _buffer = new Array[T](size)

  /** Newest element (undefined before the first push). */
  def head: T = _buffer(_head)

  /** Oldest element (undefined before the first push). */
  def tail: T = _buffer(_tail)

  private def isBufferFilled: Boolean = _length >= size

  /**
   * Order the data in a buffer and return it
   * @return returns ordered array oldest to fresh'est
   */
  def array: Array[T] = {
    val len = if (isBufferFilled) size else _length
    val clone = new Array[T](len)
    var offset = _tail
    for (i <- 0 until len) {
      clone(i) = _buffer(offset)
      offset = offset + 1
      if (offset >= size) {
        offset = 0
      }
    }
    clone
  }

  /**
   * Reads weird but needs to stop side-effects on indexes
   * @param data data to push to ring
   */
  def push(data: T) = {
    // if not empty move head forward
    if (!_empty) {
      _head += 1
      if (_head >= size) {
        _head = 0
      }
    }

    // to overwrite cell move tail forward
    if (isBufferFilled) {
      _tail = _tail + 1
      if (_tail >= size) {
        _tail = 0
      }
    }

    _empty = false
    _buffer(_head) = data

    // FIX: saturate the count at capacity. The original incremented
    // unconditionally and reset only at Int.MaxValue, relying on that
    // check firing at exactly the right moment; clamping here keeps the
    // invariant `_length == min(total pushes, size)` with no overflow risk.
    if (_length < size) {
      _length += 1
    }
  }

}
/**
 * REST + websocket (Atmosphere) API over the in-memory stat caches.
 * Every route returns JSON (content type is forced in VigilantStack.before());
 * swagger metadata is attached per-route via the `operation(...)` wrappers.
 */
class StatsController(implicit val swagger: Swagger) extends VigilantStack with SwaggerSupport {

  protected val applicationDescription = "The Real-Time Stats API."

  // --- --- --- ---

  // Root redirects to the bundled swagger UI.
  get("/") {
    redirect("/lib/swagger-ui/dist/index.html")
  }

  // Node/edge graph of the cluster: datastore root -> hosts -> processes.
  get("/state", operation(
    apiOperation[StateDataPayload]("getState")
      summary "Get node graph of cluster state")) {
    var nodes = List[Node]()
    var edges = List[Edge]()

    val root = Node(0, 3, "DataStore", "root", "box")
    nodes = nodes :+ root

    var nodeIndex = 1
    HostCache.keys.foreach(k => {
      HostCache.headForKey(k) match {
        case None =>
        case Some(data) =>
          // Host node is labelled online/offline from its newest sample.
          val host = Node(nodeIndex, 2, k, online_status_for_timestamp(data.ts), "box")
          nodes = nodes :+ host
          edges = edges :+ Edge(root.id, host.id, 200, 4, "line")
          nodeIndex += 1

          // NOTE(review): process nodes reuse the HOST sample's timestamp
          // (data.ts) for their status rather than the process's own —
          // confirm that is intended.
          ProcCache.process_keys_on_host(k).foreach(p => {
            val procNode = Node(nodeIndex, 1, p, online_status_for_timestamp(data.ts), "box")
            nodes = nodes :+ procNode
            nodeIndex += 1
            edges = edges :+ Edge(host.id, procNode.id, 200, 1, "dash-line")
          })
      }
    })

    StateDataPayload(nodes, edges)
  }

  // --- --- --- ---

  get("/hosts/state", operation(
    apiOperation[HostsState]("getHostsState")
      summary "Get quick state of all hosts in data store")) {
    var state = Set[HostDataSnap]()
    HostCache.headForAllKeys.foreach(data => {
      state += HostDataSnap(data.key, is_timestamp_alive(data.ts), data)
    })
    HostsState(state)
  }

  get("/hosts/state/:key", operation(
    apiOperation[HostState]("getHostState")
      summary "Get process state of specified host in data store"
      parameters
      pathParam[String]("key").description("Host key"))) {
    val key = params("key")
    val procs = ProcCache.process_head_for_host(key)
    var state = Set[HostProcState]()
    procs.foreach(p => {
      state += HostProcState(p, is_timestamp_alive(p.ts))
    })
    HostState(state)
  }

  // --- --- --- ---

  get("/hosts/keys", operation(
    apiOperation[Keys]("getHostKeys")
      summary "Get all host keys in datastore")) {
    Keys(HostCache.keys)
  }

  get("/proc/keys", operation(
    apiOperation[Keys]("getProcKeys")
      summary "Get all process keys in datastore")) {
    Keys(ProcCache.keys)
  }

  get("/logs/keys", operation(
    apiOperation[Keys]("getLogKeys")
      summary "Get all log keys in datastore")) {
    Keys(LogCache.keys)
  }

  // --- --- --- ---

  // Liveness = newest cached sample is within the configured timeout;
  // an unknown key reports alive = false rather than 404.
  get("/hosts/liveness/:key", operation(
    apiOperation[Liveness]("getHostLiveness")
      summary "Get liveness state of specified host"
      parameters
      pathParam[String]("key").description("Host key"))) {
    val key = params("key")
    HostCache.headForKey(key) match {
      case None => Liveness(alive = false)
      case Some(data) => Liveness(is_timestamp_alive(data.ts))
    }
  }

  get("/proc/liveness/:key", operation(
    apiOperation[Liveness]("getProcessLiveness")
      summary "Get liveness state of specified process"
      parameters
      pathParam[String]("key").description("Process key"))) {
    val key = params("key")
    ProcCache.headForKey(key) match {
      case None => Liveness(alive = false)
      case Some(data) => Liveness(is_timestamp_alive(data.ts))
    }
  }

  get("/logs/liveness/:key", operation(
    apiOperation[Liveness]("getLogLiveness")
      summary "Get liveness state of specified log"
      parameters
      pathParam[String]("key").description("Log key"))) {
    val key = params("key")
    LogCache.headForKey(key) match {
      case None => Liveness(alive = false)
      case Some(data) => Liveness(is_timestamp_alive(data.ts))
    }
  }

  // --- --- --- ---

  get("/hosts/procs/:key", operation(
    apiOperation[HostProcList]("getHostProcessKeys")
      summary "Get process keys of processes running on specified host"
      parameters
      pathParam[String]("key").description("Host key"))) {
    val key = params("key")
    HostProcList(key, ProcCache.process_keys_on_host(key).toArray)
  }

  // --- --- --- ---

  get("/hosts/rest/:key", operation(
    apiOperation[HostDataPayload]("getHostBuffer")
      summary "Get current state buffer for specified host"
      parameters
      pathParam[String]("key").description("Host key"))) {
    val key = params("key")
    HostDataPayload(key, HostCache.bufferForKey(key))
  }

  get("/hosts/head/:key", operation(
    apiOperation[HostDataSnap]("getHostHead")
      summary "Get latest host state in buffer"
      parameters
      pathParam[String]("key").description("Host key"))) {
    val key = params("key")
    HostCache.headForKey(key) match {
      case None => notFound()
      case Some(data) =>
        val alive = is_timestamp_alive(data.ts)
        HostDataSnap(key, alive, data)
    }
  }

  delete("/hosts/:key", operation(
    apiOperation[Response]("deleteHost")
      summary "Delete specified host cache"
      parameters
      pathParam[String]("key").description("Host key"))) {
    val key = params("key")
    HostCache.deleteKey(key)
    // Deleting an unknown key is a no-op and still reports ok = true.
    Response(ok = true)
  }

  // --- --- --- ---

  get("/proc/rest/:key", operation(
    apiOperation[ProcessDataPayload]("getProcessBuffer")
      summary "Get current state buffer for specified host"
      parameters
      pathParam[String]("key").description("Process key"))) {
    val key = params("key")
    ProcessDataPayload(key, ProcCache.bufferForKey(key))
  }

  delete("/proc/:key", operation(
    apiOperation[Response]("deleteProcess")
      summary "Delete process cache for specified key"
      parameters
      pathParam[String]("key").description("Process key"))) {
    val key = params("key")
    ProcCache.deleteKey(key)
    Response(ok = true)
  }

  // --- --- --- ---

  get("/logs/rest/:key", operation(
    apiOperation[LogDataPayload]("getLogBuffer")
      summary "Get current state buffer for specified Log"
      parameters
      pathParam[String]("key").description("Log key"))) {
    val key = params("key")
    LogDataPayload(key, LogCache.bufferForKey(key))
  }

  delete("/logs/:key", operation(
    apiOperation[Response]("deleteLog")
      summary "Delete specified log cache"
      parameters
      pathParam[String]("key").description("Log key"))) {
    val key = params("key")
    LogCache.deleteKey(key)
    Response(ok = true)
  }

  // --- --- --- ---

  get("/hosts/triggers", operation(
    apiOperation[TriggerList]("listHostTriggers")
      summary "List host triggers")) {
    val triggers = TriggersService.host_triggers_list
    var payload = Set[TriggerSnapshot]()
    triggers.foreach(trigger => {
      payload += TriggerSnapshot(trigger.identifier, trigger.key, trigger.info, trigger.status)
    })
    TriggerList(payload)
  }

  // Same listing filtered down to triggers bound to one host key.
  get("/hosts/triggers/:key", operation(
    apiOperation[TriggerList]("listHostTriggers")
      summary "List host triggers"
      parameters
      pathParam[String]("key").description("Host key"))) {
    val key = params("key")
    val triggers = TriggersService.host_triggers_list
    var payload = Set[TriggerSnapshot]()
    triggers.foreach(trigger => {
      if (trigger.key == key) {
        payload += TriggerSnapshot(trigger.identifier, trigger.key, trigger.info, trigger.status)
      }
    })
    TriggerList(payload)
  }

  delete("/hosts/triggers/:identifier", operation(
    apiOperation[Response]("deleteHostTrigger")
      summary "Delete specified trigger"
      parameters
      pathParam[String]("identifier").description("Trigger identifier"))) {
    val identifier = params("identifier")
    Response(ok = TriggersService.remove_host_trigger(identifier))
  }

  post("/hosts/usage_trigger", operation(
    apiOperation[Response]("addHostUsageTrigger")
      summary "Add a new host usage trigger"
      parameters
      bodyParam[HostUsageTrigger]("body").description("New host usage trigger"))) {
    val body = parsedBody.extract[HostUsageTrigger]
    val trigger = new HostUsageThresholdTrigger(
      body.sms,
      body.email,
      body.identifier,
      body.key,
      body.threshold)
    Response(ok = TriggersService.add_host_trigger(trigger))
  }

  // --- --- --- ---

  get("/proc/triggers", operation(
    apiOperation[TriggerList]("listProcTriggers")
      summary "List host triggers")) {
    val triggers = TriggersService.proc_triggers_list
    var payload = Set[TriggerSnapshot]()
    triggers.foreach(trigger => {
      payload += TriggerSnapshot(trigger.identifier, trigger.key, trigger.info, trigger.status)
    })
    TriggerList(payload)
  }

  delete("/proc/triggers/:identifier", operation(
    apiOperation[Response]("deleteProcTrigger")
      summary "Delete specified trigger"
      parameters
      pathParam[String]("identifier").description("Trigger identifier"))) {
    val identifier = params("identifier")
    Response(ok = TriggersService.remove_proc_trigger(identifier))
  }

  post("/proc/usage_trigger", operation(
    apiOperation[Response]("addProcUsageTrigger")
      summary "Add a new process usage trigger"
      parameters
      bodyParam[ProcUsageTrigger]("body").description("New process usage trigger"))) {
    val body = parsedBody.extract[ProcUsageTrigger]
    val trigger = new ProcUsageThresholdTrigger(
      body.sms,
      body.email,
      body.identifier,
      body.key,
      body.threshold)
    Response(ok = TriggersService.add_proc_trigger(trigger))
  }

  // --- --- --- ---

  // Websocket feeds: each connection subscribes to one stat type and
  // forwards only models whose key matches the path key.
  atmosphere("/hosts/sock/:key") {
    new AtmosphereStatsMonitor(params("key"), StatType.HOST) {
      override def observedHostStat(data: HostsDataModel) {
        if (data.key == this.key) send(JsonMessage(encodeJson(data)))
      }
    }
  }

  atmosphere("/proc/sock/:key") {
    new AtmosphereStatsMonitor(params("key"), StatType.PROC) {
      override def observedProcStats(data: ProcessDataModel) {
        if (data.key == this.key) send(JsonMessage(encodeJson(data)))
      }
    }
  }

  atmosphere("/logs/sock/:key") {
    new AtmosphereStatsMonitor(params("key"), StatType.LOG) {
      override def observedLogStat(data: LogDataModel) {
        if (data.key == this.key) send(JsonMessage(encodeJson(data)))
      }
    }
  }

  // --- --- --- ---
}
/**
 * Shared servlet base: JSON serialization, a JSON 404, and the
 * timestamp-liveness helpers used by every stats route.
 */
trait VigilantStack extends ScalatraServlet
  with NativeJsonSupport
  with AtmosphereSupport
  with ProtocolFactory {

  protected implicit val jsonFormats: Formats = DefaultFormats

  // Unknown routes answer with a uniform JSON error body.
  notFound {
    status = 404
    ErrorResponse("No such resource")
  }

  // Force JSON on every response from this stack.
  before() {
    contentType = formats("json")
  }

  /** Human-readable liveness label for a wire timestamp. */
  def online_status_for_timestamp(timestamp: String): String =
    if (is_timestamp_alive(timestamp)) "online" else "offline"

  /** True when `ts` is no older than the configured cache timeout (seconds). */
  def is_timestamp_alive(ts: String): Boolean = {
    val parsed = date_object_from_timestamp(ts)
    val nowMillis = Calendar.getInstance.getTime.getTime
    val ageSeconds = (nowMillis - parsed.getTime) / 1000
    ageSeconds <= ConfigurationService.cache_timeout
  }

  /** Decompose any case class into a json4s JValue for the websocket feeds. */
  def encodeJson(src: AnyRef): JValue = {
    import org.json4s.jackson.Serialization
    import org.json4s.{Extraction, NoTypeHints}
    implicit val formats = Serialization.formats(NoTypeHints)
    Extraction.decompose(src)
  }
}
// Stat models as decoded from the transport JSON (see ProtocolFactory).
// `ts` is the message-envelope timestamp string; `timestamp` (host only)
// comes from the payload itself.

// Snapshot of one host's system stats.
// NOTE(review): units of memory_*/disk_* fields are not established in this
// file — presumably bytes; confirm against the daemon sender.
case class HostsDataModel(key: String,
                          hostname: String,
                          timestamp: String,
                          usage: Float,
                          process: Int,
                          cores: Int,
                          memoryTotal: Long,
                          memoryUsed: Long,
                          platform: String,
                          machine: String,
                          version: String,
                          diskTotal: Long,
                          diskFree: Long,
                          cpuStats: Seq[Float],
                          ts: String)

// One log line reported by `host`.
case class LogDataModel(key: String,
                        host: String,
                        message: String,
                        ts: String)

// One usage sample for process `pid` on `host`.
case class ProcessDataModel(key: String,
                            host: String,
                            pid: Int,
                            usage: Float,
                            ts: String)
// Response/request payload shapes for the StatsController routes.

case class Liveness(alive: Boolean)

// Per-key history buffers (oldest to newest) and head snapshots.
case class LogDataPayload(key: String, payload: Array[LogDataModel])
case class ProcessDataPayload(key: String, payload: Array[ProcessDataModel])
case class HostDataSnap(key: String, alive: Boolean, payload: HostsDataModel)
case class HostDataPayload(key: String, payload: Array[HostsDataModel])
case class HostProcList(host: String, procs: Array[String])

// Graph payload for the /state route (node/edge rendering in the UI).
case class Node(id: Int, value: Int, label: String, group: String, shape: String)
case class Edge(from: Int, to: Int, length: Int, width: Int, style: String)
case class StateDataPayload(nodes: List[Node], edges: List[Edge])

case class Keys(keys: Set[String])
case class HostState(state: Set[HostProcState])
case class HostsState(hosts: Set[HostDataSnap])

// Generic success flag returned by mutating routes.
case class Response(ok: Boolean)

case class TriggerSnapshot(name: String, key: String, info: String, status: Boolean)
case class TriggerList(triggers: Set[TriggerSnapshot])

// Request bodies accepted by the usage-trigger POST routes.
case class HostUsageTrigger(identifier: String, key: String, threshold: Float, sms: Sms, email: Email)
case class ProcUsageTrigger(identifier: String, key: String, threshold: Float, sms: Sms, email: Email)

case class HostProcState(data: ProcessDataModel, alive: Boolean)
/**
 * Atmosphere websocket client that subscribes itself to the aggregator for
 * one stat type (`T`) on connect and unsubscribes on disconnect. Subclasses
 * override the observed* hook for their type and filter on `key`.
 */
class AtmosphereStatsMonitor(val key:String, val T:StatType.T) extends AtmosphereClient with StatsObserver {

  /** Register this client with the aggregator for its stat type. */
  private def subscribe() = T match {
    case StatType.LOG => AggregatorService.registerLogStatObserver(this)
    case StatType.HOST => AggregatorService.registerHostStatObsever(this)
    case StatType.PROC => AggregatorService.registerProcStatObserver(this)
  }

  /** Remove this client from the aggregator's observer set. */
  private def unsubscribe() = T match {
    case StatType.LOG => AggregatorService.unregisterLogStatObserver(this)
    case StatType.HOST => AggregatorService.unregisterHostStatObserver(this)
    case StatType.PROC => AggregatorService.unregisterProcStatObserver(this)
  }

  override def receive: AtmoReceive = {
    case Connected => subscribe()
    // BUG FIX: the original matched only Disconnected(_, Some(error)), so a
    // clean disconnect (error == None) never unregistered the observer and
    // leaked it in AggregatorService's observer sets forever.
    case Disconnected(_, _) => unsubscribe()
    // Ignore all other Atmosphere events (the original receive was
    // non-exhaustive and would throw MatchError on them). This feed is
    // outbound-only; inbound messages are intentionally dropped.
    case _ =>
  }

  // Default hooks do nothing; the matching one is overridden per socket.
  def observedHostStat(data: HostsDataModel) {}
  def observedProcStats(data: ProcessDataModel) {}
  def observedLogStat(data: LogDataModel) {}
}
/**
 * Facade over the aggregator actor: raw transport payloads and observer
 * (de)registrations are posted to the actor's mailbox, which serializes all
 * mutation of the observer sets.
 */
object AggregatorService {
  // Set by the bootstrap before any message is routed.
  var actor:ActorRef = null

  def props: Props = {
    Props(classOf[AggregatorService])
  }

  /** Forward one raw transport payload (JSON string) to the actor. */
  def handleMessage(message: String) = {
    actor ! DataMessage(message)
  }

  // NOTE: "Obsever" is a historic typo kept because existing callers
  // (DatabaseService, AtmosphereStatsMonitor) already use it; prefer the
  // correctly spelled alias below for new code.
  def registerHostStatObsever(observer: StatsObserver) = {
    actor ! RegisterObserver(StatType.HOST, observer)
  }

  /** Correctly spelled alias for registerHostStatObsever (backward-compatible addition). */
  def registerHostStatObserver(observer: StatsObserver) = registerHostStatObsever(observer)

  def unregisterHostStatObserver(observer: StatsObserver) = {
    actor ! DeregisterObserver(StatType.HOST, observer)
  }

  def registerLogStatObserver(observer: StatsObserver) = {
    actor ! RegisterObserver(StatType.LOG, observer)
  }

  def unregisterLogStatObserver(observer: StatsObserver) = {
    actor ! DeregisterObserver(StatType.LOG, observer)
  }

  def registerProcStatObserver(observer: StatsObserver) = {
    actor ! RegisterObserver(StatType.PROC, observer)
  }

  def unregisterProcStatObserver(observer: StatsObserver) = {
    actor ! DeregisterObserver(StatType.PROC, observer)
  }
}
/**
 * Actor that decodes incoming stat messages, pushes them into the in-memory
 * caches and fans them out to registered observers. All state below is only
 * touched from receive(), so no synchronization is needed.
 */
class AggregatorService extends Actor with ProtocolFactory {

  private var hostsObservers = Set[StatsObserver]()
  private var procObservers = Set[StatsObserver]()
  private var logObservers = Set[StatsObserver]()

  private val logger = LoggerFactory.getLogger(getClass)

  /** Decode a host snapshot, cache it under its key, notify host observers. */
  private def handleHostMessage(json: JsValue) = {
    val model = host_data_model_from_json(json)
    HostCache.pushDataForKey(model.key, model)
    hostsObservers.foreach(_.observedHostStat(model))
  }

  /** Decode a log line; cached under "host.key" so keys are unique per host. */
  private def handleLogMessage(json: JsValue) = {
    val model = log_data_model_from_json(json)
    val key = model.host + "." + model.key
    LogCache.pushDataForKey(key, model)
    logObservers.foreach(_.observedLogStat(model))
  }

  /** Decode a process sample; cached under "host.key" like logs. */
  private def handlePidMessage(json: JsValue) = {
    val model = process_data_model_from_json(json)
    val key = model.host + "." + model.key
    ProcCache.pushDataForKey(key, model)
    procObservers.foreach(_.observedProcStats(model))
  }

  /** Dispatch on the top-level "type" discriminator of the envelope. */
  private def handleMessage(json: JsValue) = {
    val messageType:String = (json \ "type").as[String]
    messageType match {
      case "host" => handleHostMessage(json)
      case "log" => handleLogMessage(json)
      case "pid" => handlePidMessage(json)
      case _ => logger.error("Unhandled message of type [{}]", messageType)
    }
  }

  private def registerHostStatObsever(observer: StatsObserver) = {
    hostsObservers += observer
  }

  private def unregisterHostStatObserver(observer: StatsObserver) = {
    hostsObservers -= observer
  }

  private def registerLogStatObserver(observer: StatsObserver) = {
    logObservers += observer
  }

  private def unregisterLogStatObserver(observer: StatsObserver) = {
    logObservers -= observer
  }

  private def registerProcStatObserver(observer: StatsObserver) = {
    procObservers += observer
  }

  private def unregisterProcStatObserver(observer: StatsObserver) = {
    procObservers -= observer
  }

  def receive = {
    case DataMessage(buffer) =>
      // Best-effort ingest: a malformed datagram must not kill the actor.
      // FIX: the original used Exception.ignoring(...), silently swallowing
      // every failure and hiding schema drift; log the drop instead.
      try {
        val json = Json.parse(buffer)
        handleMessage(json)
      } catch {
        case e: Exception =>
          logger.warn("Dropping malformed stats message", e)
      }
    case RegisterObserver(stype, observer) =>
      stype match {
        case StatType.LOG => registerLogStatObserver(observer)
        case StatType.HOST => registerHostStatObsever(observer)
        case StatType.PROC => registerProcStatObserver(observer)
      }
    case DeregisterObserver(stype, observer) =>
      stype match {
        case StatType.LOG => unregisterLogStatObserver(observer)
        case StatType.HOST => unregisterHostStatObserver(observer)
        case StatType.PROC => unregisterProcStatObserver(observer)
      }
  }
}
/**
 * Decoders from the aggregator wire JSON into datastore models.
 * Envelope as read below: top-level "key" and "ts" strings, "host" for
 * log/process messages, and a type-specific "payload" object. Every `.as[T]`
 * throws if the field is missing or mistyped, so callers should treat these
 * as strict parsers.
 */
trait ProtocolFactory {

  /**
   * Parse a wire timestamp ("yyyy-MM-dd HH:mm:ss") into a Date.
   * NOTE(review): parsing uses the JVM default timezone/locale — confirm all
   * senders agree on it. A fresh SimpleDateFormat per call sidesteps that
   * class's thread-unsafety at a small allocation cost.
   */
  def date_object_from_timestamp(timestamp: String): java.util.Date = {
    val format = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
    format.parse(timestamp)
  }

  /** Decode a "host" snapshot message into a HostsDataModel. */
  def host_data_model_from_json(json: JsValue): HostsDataModel = {
    val key = (json \ "key").as[String]
    val ts = (json \ "ts").as[String]
    val hostname = (json \ "payload" \ "hostname").as[String]
    val timestamp = (json \ "payload" \ "timestamp").as[String]
    val usage = (json \ "payload" \ "usage").as[Float]
    val processes = (json \ "payload" \ "processes").as[Int]
    val cores = (json \ "payload" \ "cores").as[Int]
    val memoryTotal = (json \ "payload" \ "memory_total").as[Long]
    val memoryUsed = (json \ "payload" \ "memory_used").as[Long]
    val platform = (json \ "payload" \ "platform").as[String]
    val machine = (json \ "payload" \ "machine").as[String]
    val version = (json \ "payload" \ "version").as[String]
    val diskTotal = (json \ "payload" \ "disk_total").as[Long]
    val diskFree = (json \ "payload" \ "disk_free").as[Long]
    val stats = (json \ "payload" \ "cpu_stats").as[Seq[Float]]

    new HostsDataModel(key, hostname, timestamp, usage, processes, cores, memoryTotal,
      memoryUsed, platform, machine, version, diskTotal, diskFree, stats, ts)
  }

  /** Decode a "log" message into a LogDataModel. */
  def log_data_model_from_json(json: JsValue): LogDataModel = {
    val key = (json \ "key").as[String]
    val ts = (json \ "ts").as[String]
    val message = (json \ "payload" \ "message").as[String]
    val host = (json \ "host").as[String]

    new LogDataModel(key, host, message, ts)
  }

  /** Decode a "pid" (process usage) message into a ProcessDataModel. */
  def process_data_model_from_json(json: JsValue): ProcessDataModel = {
    val key = (json \ "key").as[String]
    val ts = (json \ "ts").as[String]
    val host = (json \ "host").as[String]
    val pid = (json \ "payload" \ "pid").as[Int]
    val usage = (json \ "payload" \ "usage").as[Float]

    new ProcessDataModel(key, host, pid, usage, ts)
  }
}
/**
 * File-backed JSON configuration for the datastore. Accessors read the
 * parsed JSON on every call; the cache/trigger accessors fall back to
 * hard defaults when their section is absent.
 */
object ConfigurationService {

  // Parsed configuration; stays null until load_configuration_from_file().
  private var _configuration: JsValue = null

  /** Parse the config file, then best-effort configure the notifiers. */
  def load_configuration_from_file(filepath: String) = {
    _configuration = Json.parse(Files.readAllBytes(Paths.get(filepath)))
    try {
      configure_email
      configure_twillo
    } catch {
      // FIX: was `case _ =>`, which in a catch block matches every
      // Throwable, including fatal JVM errors. The notifier sections stay
      // optional (missing keys just skip configuration), but only ordinary
      // exceptions are swallowed now.
      case _: Exception =>
    }
  }

  /** Push the "twillo" section into the notification factory (SMS sender). */
  def configure_twillo = {
    val account_sid = (_configuration \ "twillo" \ "account_sid").as[String]
    val auth_token = (_configuration \ "twillo" \ "auth_token").as[String]
    val from = (_configuration \ "twillo" \ "from").as[String]
    NotificationFactory.configure_twillo(account_sid, auth_token, from)
  }

  /** Push the "email" section into the notification factory. */
  def configure_email = {
    val smtp_server = (_configuration \ "email" \ "smtp_server").as[String]
    val from = (_configuration \ "email" \ "from").as[String]
    NotificationFactory.configure_email(smtp_server, from)
  }

  /** Seconds before a cached sample counts as dead (default 30). */
  def cache_timeout: Int = {
    try {
      (_configuration \ "cache" \ "timeout").as[Int]
    } catch {
      case e: Exception => 30
    }
  }

  /** Ring-buffer capacity per cache key (default 40). */
  def cache_threshold: Int = {
    try {
      (_configuration \ "cache" \ "threshold").as[Int]
    } catch {
      case e: Exception => 40
    }
  }

  /** Grace period between repeat trigger notifications (default 120). */
  def trigger_notification_grace_period: Long = {
    try {
      (_configuration \ "triggers" \ "notification_threshold").as[Long]
    } catch {
      case e: Exception => 120
    }
  }

  // The accessors below throw if their config key is absent — these
  // sections are mandatory.
  def database_jdbc: String = (_configuration \ "database" \ "jdbc").as[String]

  def transport_type: String = (_configuration \ "transport" \ "type").as[String]

  def transport_bind: String = (_configuration \ "transport" \ "host").as[String]

  def transport_port: Int = (_configuration \ "transport" \ "port").as[Int]

  def transport_address: InetSocketAddress = new InetSocketAddress(transport_bind, transport_port)

  /** Transport singleton for the configured type ("udp" is the only one). */
  def transport: Transport = {
    transport_type match {
      case "udp" => UDPTransport
      case _ => throw new Exception("Invalid transport type")
    }
  }

  /** Actor Props for the configured transport, bound to transport_address. */
  def transport_actor: Props = {
    transport_type match {
      case "udp" => Props(classOf[UDPTransport], transport_address)
      case _ => throw new Exception("Invalid transport type")
    }
  }

}
\ "notification_threshold").as[Long] 58 | } catch { 59 | case e: Exception => 120 60 | } 61 | } 62 | 63 | def database_jdbc: String = (_configuration \ "database" \ "jdbc").as[String] 64 | 65 | def transport_type: String = (_configuration \ "transport" \ "type").as[String] 66 | 67 | def transport_bind: String = (_configuration \ "transport" \ "host").as[String] 68 | 69 | def transport_port: Int = (_configuration \ "transport" \ "port").as[Int] 70 | 71 | def transport_address: InetSocketAddress = new InetSocketAddress(transport_bind, transport_port) 72 | 73 | def transport: Transport = { 74 | transport_type match { 75 | case "udp" => UDPTransport 76 | case _ => throw new Exception("Invalid transport type") 77 | } 78 | } 79 | 80 | def transport_actor: Props = { 81 | transport_type match { 82 | case "udp" => Props(classOf[UDPTransport], transport_address) 83 | case _ => throw new Exception("Invalid transport type") 84 | } 85 | } 86 | 87 | } 88 | -------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/database/DatabaseService.scala: -------------------------------------------------------------------------------- 1 | package vigilant.datastore.services.database 2 | 3 | import akka.actor.{Actor, ActorRef, Props} 4 | import sorm.{InitMode, Entity, Instance} 5 | import org.slf4j.LoggerFactory 6 | import vigilant.datastore.models.{ProcessDataModel, LogDataModel, HostsDataModel} 7 | import vigilant.datastore.services.aggregator.{AggregatorService, StatsObserver} 8 | import vigilant.datastore.services.configuration.ConfigurationService 9 | 10 | sealed trait Message 11 | case class Stop() extends Message 12 | case class Start() extends Message 13 | 14 | object DatabaseService extends StatsObserver { 15 | var actor: ActorRef = null 16 | 17 | def props: Props = { 18 | Props(classOf[DatabaseService]) 19 | } 20 | 21 | def start() = { 22 | AggregatorService.registerLogStatObserver(this) 23 | 
AggregatorService.registerHostStatObsever(this) 24 | AggregatorService.registerProcStatObserver(this) 25 | actor ! Start() 26 | } 27 | 28 | def stop() = { 29 | AggregatorService.unregisterLogStatObserver(this) 30 | AggregatorService.unregisterHostStatObserver(this) 31 | AggregatorService.unregisterProcStatObserver(this) 32 | actor ! Stop() 33 | } 34 | 35 | override def observedHostStat(data: HostsDataModel) = { 36 | actor ! data 37 | } 38 | 39 | override def observedProcStats(data: ProcessDataModel) = { 40 | actor ! data 41 | } 42 | 43 | override def observedLogStat(data: LogDataModel) = { 44 | actor ! data 45 | } 46 | 47 | } 48 | 49 | class DatabaseService extends Actor { 50 | private val logger = LoggerFactory.getLogger(getClass) 51 | 52 | private val _TYPE_FIELD_NAME = "TYPE" 53 | private val _LOG_KEY_TABLE_NAME = "LOG_KEYS" 54 | private val _PROC_KEY_TABLE_NAME = "PROC_KEYS" 55 | private val _HOST_KEY_TABLE_NAME = "HOST_KEYS" 56 | 57 | case class KeySet(TYPE: String, keys: Set[String]) 58 | private object _database extends Instance( 59 | entities = Set( 60 | Entity[KeySet](), 61 | Entity[HostsDataModel](), 62 | Entity[ProcessDataModel](), 63 | Entity[LogDataModel]() 64 | ), 65 | url = ConfigurationService.database_jdbc, 66 | initMode = InitMode.Create 67 | ) 68 | 69 | private var _host_key_set = Set[String]() 70 | private var _log_key_set = Set[String]() 71 | private var _proc_key_set = Set[String]() 72 | 73 | def start() = { 74 | 75 | } 76 | 77 | def receive = { 78 | case _ => 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/notifications/NotificationFactory.scala: -------------------------------------------------------------------------------- 1 | package vigilant.datastore.services.notifications 2 | 3 | import org.slf4j.LoggerFactory 4 | import vigilant.datastore.services.notifications.email.EmailClient 5 | import 
vigilant.datastore.services.notifications.twillo.TwilloClient 6 | 7 | object NotificationFactory { 8 | private val logger = LoggerFactory.getLogger(getClass) 9 | 10 | private var _twillo: TwilloClient = null 11 | private var _email: EmailClient = null 12 | 13 | def configure_twillo(account_sid: String, auth_token: String, from: String) = { 14 | _twillo = new TwilloClient(account_sid, auth_token, from) 15 | } 16 | 17 | def send_sms_notification(to: String, from: String, body: String) = { 18 | _twillo match { 19 | case null => 20 | case _ => _twillo.send_sms(to, from, body) 21 | } 22 | } 23 | 24 | def configure_email(smtp_server: String, from: String) = { 25 | _email = new EmailClient(smtp_server, from) 26 | } 27 | 28 | def send_email_notification(to: String, subject: String, body: String) = { 29 | _email match { 30 | case null => 31 | case _ => _email.send_email(to, subject, body) 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/notifications/email/EmailClient.scala: -------------------------------------------------------------------------------- 1 | package vigilant.datastore.services.notifications.email 2 | 3 | import javax.mail.{Transport, Message, Session} 4 | import javax.mail.internet.{InternetAddress, MimeMessage} 5 | 6 | class EmailClient(smtp_server: String, from: String) { 7 | 8 | private val properties = System.getProperties() 9 | properties.setProperty("mail.smtp.host", smtp_server) 10 | 11 | val session= Session.getDefaultInstance(properties) 12 | 13 | def send_email(to: String, subject: String, body: String) = { 14 | val message = new MimeMessage(session) 15 | 16 | message.setFrom(new InternetAddress(from)) 17 | message.addRecipient(Message.RecipientType.TO, new InternetAddress(to)) 18 | message.setSubject(subject) 19 | message.setText(body) 20 | 21 | Transport.send(message) 22 | } 23 | } 24 | 
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/notifications/twillo/TwilloClient.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.notifications.twillo

import com.twilio.sdk.TwilioRestClient
import org.apache.http.NameValuePair
import org.apache.http.message.BasicNameValuePair
import org.slf4j.LoggerFactory

/**
 * Thin Twilio REST wrapper.  `from` acts as the default sender when the
 * per-message `from` is null or empty.
 */
class TwilloClient(account_sid: String, auth_token: String, from: String) {
  private val logger = LoggerFactory.getLogger(getClass)

  private val _from = from
  private val _client = new TwilioRestClient(account_sid, auth_token)

  /** Send one SMS; falls back to the configured default sender. */
  def send_sms(to: String, from: String, body: String) = {
    val params = new java.util.ArrayList[NameValuePair]()
    params.add(new BasicNameValuePair("Body", body))
    params.add(new BasicNameValuePair("To", to))
    params.add(new BasicNameValuePair("From", if (from == null || from.length == 0) _from else from))

    val message_factory = _client.getAccount.getMessageFactory
    val message = message_factory.create(params)
    logger.info(message.getSid)
  }
}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/transports/Transport.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.transports

/**
 * A transport exposes the actor messages that start and stop it, so
 * TransportService can drive any transport uniformly.
 */
trait Transport {
  def startMessage: AnyRef
  def stopMessage: AnyRef
}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/transports/TransportService.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.transports

import akka.actor.ActorRef

/**
 * Drives the configured transport actor.
 *
 * NOTE(review): setTransport must be called before start()/stop(),
 * otherwise the null `_actor`/`_transport` will NPE — confirm the
 * bootstrap ordering.
 */
object TransportService {

  private var _transport: Transport = null
  private var _actor: ActorRef = null

  def start() = _actor ! _transport.startMessage

  def stop() = _actor ! _transport.stopMessage

  /** Install the transport actor and its message vocabulary. */
  def setTransport(actor: ActorRef, transport: Transport) = {
    _actor = actor
    _transport = transport
  }

}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/transports/udp/UDPTransport.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.transports.udp

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorRef}
import akka.io.{IO, Udp}
import vigilant.datastore.services.aggregator.AggregatorService
import vigilant.datastore.services.transports.Transport

/**
 * Transport descriptor for the UDP listener.
 *
 * NOTE(review): Udp.Bound/Udp.Unbound are *events* emitted by akka-io,
 * not commands; sending them to the actor falls through its handlers.
 * The stop message should presumably be Udp.Unbind — confirm intended
 * lifecycle before changing.
 */
object UDPTransport extends Transport {
  def startMessage = Udp.Bound
  def stopMessage = Udp.Unbound
}

/**
 * Actor that binds a UDP socket on `address` and forwards every
 * received datagram (decoded as UTF-8) to the AggregatorService.
 */
class UDPTransport(address: InetSocketAddress) extends Actor {
  import context.system
  IO(Udp) ! Udp.Bind(self, address)

  def receive = {
    case Udp.Bound(local) =>
      // Socket is live: switch to the ready behaviour bound to it.
      context.become(ready(sender()))
  }

  def ready(socket: ActorRef): Receive = {
    case Udp.Received(data, remote) =>
      AggregatorService.handleMessage(data.decodeString("UTF-8"))
    case Udp.Unbind => socket ! Udp.Unbind
    case Udp.Unbound => context.stop(self)
  }

}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/Trigger.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.triggers

import vigilant.datastore.models.{Email, Sms}
import vigilant.datastore.services.configuration.ConfigurationService
import vigilant.datastore.services.notifications.NotificationFactory

/**
 * Base class for alert triggers: dedupes notifications (only fires when
 * subject/body change) and can send via SMS and/or email, whichever
 * notification configs are non-null.
 */
class Trigger(val identifier: String, val sms_notification: Sms, val email_notification: Email) {

  // Last notified subject/body, used for change detection.
  private var _subject: String = ""
  private var _body: String = ""
  // Timestamp of the last notification (null = never notified).
  private var _ts: java.util.Date = null

  /** Best-effort SMS send; errors are deliberately swallowed. */
  def sms(subject: String) = {
    sms_notification match {
      case null =>
      case _ =>
        // this is really bad but ok for now but we cannot go live with this
        scala.util.control.Exception.ignoring(classOf[Exception]) {
          NotificationFactory.send_sms_notification(sms_notification.to, null, subject)
        }
    }
  }

  /** Best-effort email send; errors are deliberately swallowed. */
  def email(subject: String, body: String) = {
    email_notification match {
      case null =>
      case _ =>
        scala.util.control.Exception.ignoring(classOf[Exception]) {
          NotificationFactory.send_email_notification(email_notification.to, subject, body)
        }
    }
  }

  def now: java.util.Date = new java.util.Date()

  /** Configured minimum seconds between repeat notifications. */
  def grace_period: Long = ConfigurationService.trigger_notification_grace_period

  /** True when no notification was sent yet or grace_period has elapsed. */
  def grace_period_expired: Boolean = {
    _ts match {
      case null => true
      case _ => ((now.getTime - _ts.getTime) / 1000) >= grace_period
    }
  }

  /** Record subject/body, returning true when either differs from last. */
  def data_changed(subject: String, body: String): Boolean = {
    val change = !((_subject == subject) && (_body == body))
    if (change) {
      _subject = subject
      _body = body
    }
    change
  }

  /**
   * Notify all configured channels when the message content changed.
   * NOTE(review): grace_period_expired is computed above but never
   * consulted here — presumably notify should also rate-limit; confirm.
   */
  def notify(subject: String, body: String) = {
    if (data_changed(subject, body)) {
      _ts = now
      sms(subject)
      email(subject, body)
    }
  }

}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/TriggersService.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.triggers

import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import vigilant.datastore.caches.HostCache
import vigilant.datastore.models.{HostsDataModel, ProcessDataModel, LogDataModel}
import vigilant.datastore.services.aggregator.{AggregatorService, StatsObserver}
import vigilant.datastore.services.triggers.host.HostTrigger
import vigilant.datastore.services.triggers.log.LogTrigger
import vigilant.datastore.services.triggers.proc.ProcTrigger

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

import org.slf4j.LoggerFactory

sealed trait Message
case class Start() extends Message
case class Stop() extends Message

case class AddHostTrigger(trigger: HostTrigger) extends Message
case class RemoveHostTrigger(identifier: String) extends Message

case class AddProcTrigger(trigger: ProcTrigger) extends Message
case class RemoveProcTrigger(identifier: String) extends Message

case class AddLogTrigger(trigger: LogTrigger) extends Message
case class RemoveLogTrigger(identifier: String) extends Message

case class ListHostTriggers() extends Message
case class ListProcTriggers() extends Message
case class ListLogTriggers() extends Message

/**
 * Facade over the TriggersService actor: observes aggregator stats and
 * offers synchronous (ask-based, 5s timeout) trigger management.
 */
object TriggersService extends StatsObserver {
  var actor: ActorRef = null

  implicit val timeout = Timeout(5 seconds)

  def props: Props = {
    Props(classOf[TriggersService])
  }

  def start() = {
    AggregatorService.registerLogStatObserver(this)
    AggregatorService.registerHostStatObsever(this)
    AggregatorService.registerProcStatObserver(this)
  }

  def stop() = {
    AggregatorService.unregisterLogStatObserver(this)
    AggregatorService.unregisterHostStatObserver(this)
    AggregatorService.unregisterProcStatObserver(this)
  }

  override def observedHostStat(data: HostsDataModel) = {
    actor ! data
  }

  override def observedProcStats(data: ProcessDataModel) = {
    actor ! data
  }

  override def observedLogStat(data: LogDataModel) = {
    actor ! data
  }

  def add_host_trigger(trigger: HostTrigger): Boolean = {
    val future = actor ? AddHostTrigger(trigger)
    Await.result(future, timeout.duration).asInstanceOf[Boolean]
  }

  def add_proc_trigger(trigger: ProcTrigger): Boolean = {
    val future = actor ? AddProcTrigger(trigger)
    Await.result(future, timeout.duration).asInstanceOf[Boolean]
  }

  /** Correctly-named entry point for adding a log trigger. */
  def add_log_trigger(trigger: LogTrigger): Boolean = {
    val future = actor ? AddLogTrigger(trigger)
    Await.result(future, timeout.duration).asInstanceOf[Boolean]
  }

  // FIX: this overload was a copy-paste of add_proc_trigger that actually
  // adds a *log* trigger.  Kept (delegating) for source compatibility;
  // prefer add_log_trigger.
  @deprecated("use add_log_trigger", "review")
  def add_proc_trigger(trigger: LogTrigger): Boolean = add_log_trigger(trigger)

  def remove_host_trigger(identifier: String): Boolean = {
    val future = actor ? RemoveHostTrigger(identifier)
    Await.result(future, timeout.duration).asInstanceOf[Boolean]
  }

  def remove_proc_trigger(identifier: String): Boolean = {
    val future = actor ? RemoveProcTrigger(identifier)
    Await.result(future, timeout.duration).asInstanceOf[Boolean]
  }

  def remove_log_trigger(identifier: String): Boolean = {
    val future = actor ? RemoveLogTrigger(identifier)
    Await.result(future, timeout.duration).asInstanceOf[Boolean]
  }

  def host_triggers_list: Set[HostTrigger] = {
    val future = actor ? ListHostTriggers
    Await.result(future, timeout.duration).asInstanceOf[Set[HostTrigger]]
  }

  def proc_triggers_list: Set[ProcTrigger] = {
    val future = actor ? ListProcTriggers
    Await.result(future, timeout.duration).asInstanceOf[Set[ProcTrigger]]
  }

  def log_triggers_list: Set[LogTrigger] = {
    val future = actor ? ListLogTriggers
    Await.result(future, timeout.duration).asInstanceOf[Set[LogTrigger]]
  }
}

/**
 * Actor owning the trigger sets; evaluates every incoming data model
 * against the registered triggers and fires their notifications on
 * error / back-to-normal transitions.
 */
class TriggersService extends Actor {

  val logger = LoggerFactory.getLogger(getClass)

  var log_triggers = Set[LogTrigger]()
  var proc_triggers = Set[ProcTrigger]()
  var host_triggers = Set[HostTrigger]()

  def handle_log_data(data: LogDataModel) = {
    log_triggers.foreach(trigger => {
      val previous_status = trigger.status
      val status = trigger.evaluate(data)
      if (status) {
        trigger.notify(trigger.error_subject, trigger.error_message)
      } else if (previous_status && !status) {
        trigger.notify(trigger.back_to_normal_subject, trigger.back_to_normal_message)
      }
    })
  }

  def handle_proc_data(data: ProcessDataModel) = {
    proc_triggers.foreach(trigger => {
      val previous_status = trigger.status
      val status = trigger.evaluate(data)
      if (status) {
        trigger.notify(trigger.error_subject, trigger.error_message)
      } else if (previous_status && !status) {
        trigger.notify(trigger.back_to_normal_subject, trigger.back_to_normal_message)
      }
    })
  }

  def handle_host_data(data: HostsDataModel) = {
    host_triggers.foreach(trigger => {
      // Host triggers are keyed; only evaluate the matching host.
      if (trigger.key == data.key) {
        val previous_status = trigger.status
        if (trigger.evaluate(data)) {
          trigger.notify(trigger.error_subject, trigger.error_message)
        } else if (previous_status) {
          trigger.notify(trigger.back_to_normal_subject, trigger.back_to_normal_message)
        }
      }
    })
  }

  def remove_host_trigger(identifier: String): Boolean = {
    logger.info("Trying to remove host trigger [{}]", identifier)
    host_triggers.find(_.identifier == identifier) match {
      case Some(trigger) =>
        host_triggers -= trigger
        true
      case _ => false
    }
  }

  def add_host_trigger(trigger: HostTrigger): Boolean = {
    logger.info("Trying to add new host trigger [{}]", trigger.identifier)
    // FIX: the Some binding used to shadow the `trigger` parameter.
    host_triggers.find(_.identifier == trigger.identifier) match {
      case Some(_) => false
      case _ => {
        // check to make sure the key exists
        HostCache.keys.find(_ == trigger.key) match {
          case Some(key) =>
            host_triggers += trigger
            true
          case _ =>
            false
        }
      }
    }
  }

  def remove_proc_trigger(identifier: String): Boolean = {
    // FIX: slf4j uses {} placeholders; "%s" was logged literally.
    logger.info("Trying to remove proc trigger [{}]", identifier)
    proc_triggers.find(_.identifier == identifier) match {
      case Some(trigger) =>
        proc_triggers -= trigger
        true
      case _ => false
    }
  }

  def add_proc_trigger(trigger: ProcTrigger): Boolean = {
    logger.info("Trying to add new process trigger [{}]", trigger.identifier)
    proc_triggers.find(_.identifier == trigger.identifier) match {
      case Some(_) => false
      case _ =>
        proc_triggers += trigger
        true
    }
  }

  def remove_log_trigger(identifier: String): Boolean = {
    logger.info("Trying to remove log trigger [{}]", identifier)
    log_triggers.find(_.identifier == identifier) match {
      case Some(trigger) =>
        log_triggers -= trigger
        true
      case _ => false
    }
  }

  def add_log_trigger(trigger: LogTrigger): Boolean = {
    logger.info("Trying to add new log trigger [{}]", trigger.identifier)
    log_triggers.find(_.identifier == trigger.identifier) match {
      case Some(_) => false
      case _ =>
        log_triggers += trigger
        true
    }
  }

  def receive = {
    case ListHostTriggers => sender ! host_triggers
    case ListProcTriggers => sender ! proc_triggers
    case ListLogTriggers => sender ! log_triggers

    case AddHostTrigger(trigger) => sender ! add_host_trigger(trigger)
    case RemoveHostTrigger(identifier) => sender ! remove_host_trigger(identifier)

    case AddProcTrigger(trigger) => sender ! add_proc_trigger(trigger)
    case RemoveProcTrigger(identifier) => sender ! remove_proc_trigger(identifier)

    case AddLogTrigger(trigger) => sender ! add_log_trigger(trigger)
    case RemoveLogTrigger(identifier) => sender ! remove_log_trigger(identifier)

    case data: HostsDataModel => handle_host_data(data)
    case data: ProcessDataModel => handle_proc_data(data)
    case data: LogDataModel => handle_log_data(data)

    case _ =>
  }

}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/host/HostTrigger.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.triggers.host

import vigilant.datastore.models.HostsDataModel

/** Contract for host-level alert triggers evaluated per HostsDataModel. */
trait HostTrigger {
  def status: Boolean
  def identifier: String
  def key: String
  def info: String
  def evaluate(data: HostsDataModel): Boolean
  def notify(subject: String, body: String)
  def error_subject: String
  def error_message: String
  def back_to_normal_subject: String
  def back_to_normal_message: String
}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/host/HostUsageThresholdTrigger.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.triggers.host

import vigilant.datastore.models.{Sms, Email, HostsDataModel}
import vigilant.datastore.services.triggers.Trigger

/** Fires when the keyed host's usage reaches `threshold`. */
class HostUsageThresholdTrigger(sms_notification: Sms, email_notification: Email, identifier: String,
                                val key: String, val threshold: Float)
  extends Trigger(identifier, sms_notification, email_notification) with HostTrigger {

  private var _status = false
  override def status: Boolean = _status
  override def info: String = key + ".host.usage >= " + threshold + "%"

  override def error_subject: String =
    "VIGILANT - Host Usage Exceeded" + "[" + key + "]"

  override def error_message: String =
    "Host usage exceeded [" + key + ":" + threshold + "]"

  override def back_to_normal_subject: String = "VIGILANT - Host Usage back to normal"

  override def back_to_normal_message: String = "Host Usage back to normal"

  /** Latch status to usage >= threshold and return it. */
  override def evaluate(data: HostsDataModel): Boolean = {
    _status = data.usage >= threshold
    _status
  }

}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/log/LogRegexTrigger.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.triggers.log

import vigilant.datastore.models.{LogDataModel, Email, Sms}
import vigilant.datastore.services.triggers.Trigger

/** Placeholder regex trigger for log lines — evaluation is not implemented. */
class LogRegexTrigger(sms_notification: Sms, email_notification: Email, identifier: String,
                      val key: String, val regex: String)
  extends Trigger(identifier, sms_notification, email_notification) with LogTrigger {

  private var _status = false
  override def status: Boolean = _status
  override def info: String = ""

  override def error_subject: String = ""

  override def error_message: String = ""

  override def back_to_normal_subject: String = ""

  override def back_to_normal_message: String = ""

  override def evaluate(data: LogDataModel): Boolean = {
    // TODO

    _status = false
    _status
  }

}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/log/LogTrigger.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.triggers.log

import vigilant.datastore.models.LogDataModel

/** Contract for log alert triggers evaluated per LogDataModel. */
trait LogTrigger {
  def status: Boolean
  def identifier: String
  def key: String
  def info: String
  def evaluate(data: LogDataModel): Boolean
  def notify(subject: String, body: String)
  def error_subject: String
  def error_message: String
  def back_to_normal_subject: String
  def back_to_normal_message: String
}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/proc/ProcTrigger.scala: --------------------------------------------------------------------------------
package vigilant.datastore.services.triggers.proc

import vigilant.datastore.models.ProcessDataModel

/** Contract for process alert triggers evaluated per ProcessDataModel. */
trait ProcTrigger {
  def status: Boolean
  def identifier: String
  def key: String
  def info: String
  def evaluate(data: ProcessDataModel): Boolean
  def notify(subject: String, body: String)
  def error_subject: String
  def error_message: String
  def back_to_normal_subject: String
  def back_to_normal_message: String
}
-------------------------------------------------------------------------------- /datastore/src/main/scala/vigilant/datastore/services/triggers/proc/ProcUsageThresholdTrigger.scala:
-------------------------------------------------------------------------------- 1 | package vigilant.datastore.services.triggers.proc 2 | 3 | import vigilant.datastore.models.{ProcessDataModel, Email, Sms} 4 | import vigilant.datastore.services.triggers.Trigger 5 | 6 | 7 | class ProcUsageThresholdTrigger(sms_notification: Sms, email_notification: Email, identifier: String, 8 | val key: String, val threshold: Float) 9 | extends Trigger(identifier, sms_notification, email_notification) with ProcTrigger { 10 | 11 | private var _status = false 12 | override def status: Boolean = _status 13 | override def info: String = "" 14 | 15 | override def error_subject: String = 16 | "VIGILANT - Process Usage Exceeded" + "[" + key + "]" 17 | 18 | override def error_message: String = 19 | "Process usage exceeded [" + key + ":" + threshold + "]" 20 | 21 | override def back_to_normal_subject: String = "VIGILANT - Process Usage back to normal" 22 | 23 | override def back_to_normal_message: String = "Process Usage back to normal" 24 | 25 | override def evaluate(data: ProcessDataModel): Boolean = { 26 | if (data.key == key) { 27 | _status = data.usage >= threshold 28 | return _status 29 | } 30 | false 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /datastore/src/main/webapp/WEB-INF/web.xml: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 12 | 13 | org.scalatra.servlet.ScalatraListener 14 | 15 | 16 | -------------------------------------------------------------------------------- /datastore/src/test/scala/vigilant/datastore/caches/RingBufferTest.scala: -------------------------------------------------------------------------------- 1 | package vigilant.datastore.caches 2 | 3 | import org.scalatest.FunSuiteLike 4 | import org.scalatra.test.scalatest.ScalatraSuite 5 | import vigilant.datastore.caches.buffer.RingBuffer 6 | 7 | class RingBufferTest extends ScalatraSuite with 
FunSuiteLike { 8 | 9 | test("That pushing data works") { 10 | val mockBuffer = new RingBuffer[Int](5) 11 | 12 | mockBuffer.push(1) 13 | 14 | assert(mockBuffer.head == 1) 15 | assert(mockBuffer.tail == 1) 16 | 17 | val buffer = mockBuffer.array 18 | assert(buffer(0) == 1) 19 | assert(buffer.length == 1) 20 | } 21 | 22 | test("That pushing multiple pieces of data queues correctly") { 23 | val mockBuffer = new RingBuffer[Int](5) 24 | 25 | mockBuffer.push(1) 26 | mockBuffer.push(2) 27 | 28 | val buffer = mockBuffer.array 29 | assert(buffer(0) == 1) 30 | assert(buffer(1) == 2) 31 | assert(buffer.length == 2) 32 | assert(mockBuffer.head == 2) 33 | assert(mockBuffer.tail == 1) 34 | } 35 | 36 | test("That pushing data greater than the size of the cache the buffer is a ring") { 37 | val mockBuffer = new RingBuffer[Int](5) 38 | mockBuffer.push(0) 39 | mockBuffer.push(1) 40 | mockBuffer.push(2) 41 | mockBuffer.push(3) 42 | mockBuffer.push(4) 43 | 44 | assert(mockBuffer.head == 4) 45 | assert(mockBuffer.tail == 0) 46 | var buf = mockBuffer.array 47 | assert(buf(0) == 0) 48 | assert(buf(1) == 1) 49 | assert(buf(2) == 2) 50 | assert(buf(3) == 3) 51 | assert(buf(4) == 4) 52 | assert(buf.length == 5) 53 | 54 | mockBuffer.push(5) 55 | 56 | assert(mockBuffer.head == 5) 57 | assert(mockBuffer.tail == 1) 58 | buf = mockBuffer.array 59 | assert(buf(0) == 1) 60 | assert(buf(1) == 2) 61 | assert(buf(2) == 3) 62 | assert(buf(3) == 4) 63 | assert(buf(4) == 5) 64 | assert(buf.length == 5) 65 | 66 | mockBuffer.push(6) 67 | 68 | assert(mockBuffer.head == 6) 69 | assert(mockBuffer.tail == 2) 70 | buf = mockBuffer.array 71 | assert(buf(0) == 2) 72 | assert(buf(1) == 3) 73 | assert(buf(2) == 4) 74 | assert(buf(3) == 5) 75 | assert(buf(4) == 6) 76 | assert(buf.length == 5) 77 | 78 | mockBuffer.push(7) 79 | mockBuffer.push(8) 80 | mockBuffer.push(9) 81 | 82 | assert(mockBuffer.head == 9) 83 | assert(mockBuffer.tail == 5) 84 | buf = mockBuffer.array 85 | assert(buf(0) == 5) 86 | assert(buf(1) == 6) 
87 | assert(buf(2) == 7) 88 | assert(buf(3) == 8) 89 | assert(buf(4) == 9) 90 | assert(buf.length == 5) 91 | } 92 | 93 | } -------------------------------------------------------------------------------- /front-end/.bowerrc: -------------------------------------------------------------------------------- 1 | { 2 | "directory": "Dashboard/www/js/lib", 3 | "interactive": false 4 | } -------------------------------------------------------------------------------- /front-end/Dashboard/Resourses.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | 4 | 5 | def get_aliveness_for_host(store, key) -> bool: 6 | resp = requests.get(store + '/api/hosts/liveness/' + key) 7 | return resp.json()['alive'] 8 | 9 | 10 | def get_host_stat_from_store(store, key) -> dict: 11 | resp = requests.get(store + '/api/hosts/rest/' + key) 12 | response = resp.json() 13 | response['alive'] = get_aliveness_for_host(store, key) 14 | return response 15 | 16 | 17 | def get_host_head_stat_from_store(store, key) -> dict: 18 | resp = requests.get(store + '/api/hosts/head/' + key) 19 | response = resp.json() 20 | response['alive'] = get_aliveness_for_host(store, key) 21 | return response 22 | 23 | 24 | def get_host_keys_from_store(store) -> dict: 25 | response = requests.get(store + '/api/hosts/keys') 26 | return response.json() 27 | 28 | 29 | def get_host_state_from_store(store, key) -> dict: 30 | resp = requests.get(store + '/api/hosts/state/' + key) 31 | return resp.json() 32 | 33 | 34 | def get_host_triggers_from_store(store, key) -> dict: 35 | resp = requests.get(store + '/api/hosts/triggers/' + key) 36 | return resp.json() 37 | 38 | 39 | def delete_host_trigger_from_store(store, key) -> dict: 40 | resp = requests.delete(store + '/api/hosts/triggers/' + key) 41 | return resp.json() 42 | 43 | 44 | def add_host_usage_trigger(store, payload) -> dict: 45 | headers = {'Content-type': 'application/json'} 46 | resp = 
requests.post(store + '/api/hosts/usage_trigger', 47 | data=json.dumps(payload), 48 | headers=headers) 49 | return resp.json() 50 | 51 | 52 | def get_cluster_state(store) -> dict: 53 | resp = requests.get(store + '/api/state') 54 | return resp.json() 55 | 56 | 57 | def get_hosts_state(store) -> dict: 58 | resp = requests.get(store + '/api/hosts/state') 59 | return resp.json() 60 | -------------------------------------------------------------------------------- /front-end/Dashboard/Routes.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from . import Resourses 4 | 5 | from flask import Flask 6 | from flask import jsonify 7 | from flask import request 8 | 9 | 10 | public = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'www') 11 | app = Flask(__name__, static_folder=public) 12 | 13 | 14 | @app.errorhandler(Exception) 15 | def not_found(error=None): 16 | message = {'message': 'Not Found: ' + request.url, 17 | 'error': str(error)} 18 | resp = jsonify(message) 19 | resp.status_code = 404 20 | return resp 21 | 22 | 23 | @app.route("/") 24 | def index(): 25 | return app.send_static_file('index.html') 26 | 27 | 28 | @app.route("/") 29 | def public(path): 30 | return app.send_static_file(path) 31 | 32 | 33 | @app.route("/api/state") 34 | def state(): 35 | store = request.args.get('store') 36 | return jsonify(Resourses.get_cluster_state(store)) 37 | 38 | 39 | @app.route("/api/hosts/state") 40 | def hosts_state(): 41 | store = request.args.get('store') 42 | return jsonify(Resourses.get_hosts_state(store)) 43 | 44 | 45 | @app.route("/api/hosts/state/") 46 | def hosts_process_state(key): 47 | store = request.args.get('store') 48 | return jsonify(Resourses.get_host_state_from_store(store, key)) 49 | 50 | 51 | @app.route("/api/hosts/triggers/", methods=['GET', 'DELETE']) 52 | def host_triggers(key): 53 | store = request.args.get('store') 54 | if request.method == 'GET': 55 | return 
jsonify(Resourses.get_host_triggers_from_store(store, key)) 56 | else: 57 | return jsonify(Resourses.delete_host_trigger_from_store(store, key)) 58 | 59 | 60 | @app.route("/api/hosts/usage_trigger", methods=['POST']) 61 | def add_host_usage_trigger(): 62 | store = request.args.get('store') 63 | return jsonify(Resourses.add_host_usage_trigger(store, request.get_json())) 64 | 65 | 66 | @app.route("/api/hosts/keys") 67 | def host_keys(): 68 | store = request.args.get('store') 69 | return jsonify(Resourses.get_host_keys_from_store(store)) 70 | 71 | 72 | @app.route("/api/hosts/head/") 73 | def host_head_stat(key): 74 | store = request.args.get('store') 75 | return jsonify(Resourses.get_host_head_stat_from_store(store, key)) 76 | 77 | 78 | @app.route("/api/hosts/rest/") 79 | def host_rest_stat(key): 80 | store = request.args.get('store') 81 | return jsonify(Resourses.get_host_stat_from_store(store, key)) 82 | -------------------------------------------------------------------------------- /front-end/Dashboard/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ['Routes', 'Resources'] 2 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/assets/peak.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/assets/peak.png -------------------------------------------------------------------------------- /front-end/Dashboard/www/assets/realtime.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/assets/realtime.png -------------------------------------------------------------------------------- /front-end/Dashboard/www/css/vigilant.css: 
-------------------------------------------------------------------------------- 1 | body, html, .row-offcanvas { 2 | height: 100%; 3 | } 4 | 5 | body { 6 | padding-top: 50px; 7 | padding-bottom: 20px; 8 | } 9 | 10 | #cluster { 11 | width: 100%; 12 | height: 500px; 13 | border: 1px solid lightgray; 14 | background-color:#222222; 15 | } 16 | 17 | .host_grid { 18 | width: 100%; 19 | height: 80px; 20 | text-align: center; 21 | } 22 | 23 | .centered{ 24 | margin: 0 auto; 25 | } 26 | 27 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/glyphicons/flat-ui-pro-icons-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/glyphicons/flat-ui-pro-icons-regular.eot -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/glyphicons/flat-ui-pro-icons-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/glyphicons/flat-ui-pro-icons-regular.ttf -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/glyphicons/flat-ui-pro-icons-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/glyphicons/flat-ui-pro-icons-regular.woff -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-black.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-black.eot -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-black.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-black.ttf -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-black.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-black.woff -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-bold.eot -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-bold.ttf -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-bold.woff 
-------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-bolditalic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-italic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-italic.eot -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-italic.ttf -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-italic.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-italic.woff -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-light.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-light.eot -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-light.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-light.ttf -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-light.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-light.woff -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-regular.eot -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-regular.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-regular.ttf -------------------------------------------------------------------------------- /front-end/Dashboard/www/fonts/lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/front-end/Dashboard/www/fonts/lato/lato-regular.woff -------------------------------------------------------------------------------- /front-end/Dashboard/www/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Vigilant Dashboard 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 34 | 35 |
36 |
37 |
38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/dashboard/dashboard.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/dashboard/dashboard.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | define(['angular', 4 | 'underscore', 5 | 'vis', 6 | 'angularRoute', 7 | "angularLoadingBar", 8 | 'angularSpinner', 9 | "dashboard/host/host" 10 | ], function(angular, _, vis) 11 | { 12 | var app = angular.module('vigilant.dashboard', [ 13 | 'ngRoute', 14 | 'angular-loading-bar', 15 | 'angularSpinner', 16 | 'vigilant.dashboard.host']); 17 | 18 | app.config(['$routeProvider', function($routeProvider) { 19 | $routeProvider.when('/dashboard', { 20 | templateUrl: 'js/dashboard/dashboard.html', 21 | controller: 'dashboard_controller' 22 | }); 23 | }]); 24 | 25 | app.controller('dashboard_controller', [ 26 | '$scope', 27 | '$http', 28 | '$interval', 29 | '$routeParams', 30 | 'graphs', 31 | 'usSpinnerService', 32 | function($scope, $http, $interval, $routeParams, graphs) 33 | { 34 | var store = encodeURI($routeParams.store); 35 | $scope.store = store; 36 | 37 | var model = function () { 38 | $http.get('/api/state' + '?store=' + store).success(function (resp) { 39 | var nodes = resp['nodes']; 40 | var edges = resp['edges']; 41 | 42 | new vis.Network(document.getElementById('cluster'), { 43 | nodes: nodes, 44 | edges: edges 45 | }, 46 | graphs.node_options); 47 | }); 48 | }; 49 | 50 | var state = function () { 51 | $http.get('/api/hosts/state?store=' + store).success(function (data) { 52 | $scope.hosts = data.hosts; 53 | }); 54 | }; 55 | 56 | state(); 57 | model(); 58 | 59 | var promise = $interval(state, 5000); 60 | $scope.$on("$destroy", function () { 61 | $interval.cancel(promise); 62 | }); 
63 | } 64 | ]); 65 | 66 | return app; 67 | }); 68 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/dashboard/host/host.html: -------------------------------------------------------------------------------- 1 | 32 | 33 | 47 | 48 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/dashboard/host/host.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | define(['jquery', 4 | 'angular', 5 | 'underscore', 6 | 'vis', 7 | "angularRoute", 8 | "angularUiGrid", 9 | "angularLoadingBar", 10 | "dashboard/host/proc/proc" 11 | ], function($, angular, _, vis) 12 | { 13 | var app = angular.module('vigilant.dashboard.host', [ 14 | 'ngRoute', 15 | 'ui.grid', 16 | 'angular-loading-bar', 17 | 'vigilant.dashboard.host.proc' 18 | ]); 19 | 20 | app.config(['$routeProvider', function($routeProvider) { 21 | $routeProvider.when('/host/:key', { 22 | templateUrl: 'js/dashboard/host/host.html', 23 | controller: 'host_controller' 24 | }); 25 | }]); 26 | 27 | var store = null; 28 | app.controller('add_trigger_controller', function($scope, $http, $modalInstance) { 29 | 30 | $scope.trigger = { 31 | identifier: "", 32 | key: "", 33 | string_threshold: "", 34 | sms: { 35 | to: "" 36 | }, 37 | email: { 38 | to: "" 39 | } 40 | }; 41 | 42 | $http.get('/api/hosts/keys' + '?store=' + store).success(function(resp) { 43 | console.log("Retrieved list of hosts: ", resp.keys); 44 | $scope.hosts = resp.keys; 45 | }); 46 | 47 | $scope.cancel = function () { 48 | $modalInstance.dismiss('cancel', null); 49 | }; 50 | 51 | $scope.confirm = function() { 52 | var trigger = { 53 | identifier: $scope.trigger.identifier, 54 | key: $scope.trigger.key, 55 | threshold: parseInt($scope.trigger.string_threshold), 56 | sms: { 57 | to: $scope.trigger.sms.to 58 | }, 59 | email: { 60 | to: $scope.trigger.email.to 61 | } 62 | }; 63 | 64 | 
$http.post('/api/hosts/usage_trigger' + '?store=' + store, trigger).success(function (resp) { 65 | if (resp.ok) { 66 | console.log("New trigger added: ", trigger); 67 | $modalInstance.close(trigger); 68 | } else { 69 | alert("New trigger failed: " + trigger); 70 | } 71 | }).error(function (resp) { 72 | console.log(resp); 73 | }); 74 | } 75 | }); 76 | 77 | app.controller('delete_trigger_controller', function($scope, $http, $modalInstance) { 78 | 79 | $scope.cancel = function () { 80 | $modalInstance.dismiss('cancel', null); 81 | }; 82 | 83 | $scope.confirm = function() { 84 | $modalInstance.close(); 85 | } 86 | }); 87 | 88 | app.controller('host_controller', [ 89 | '$scope', 90 | '$http', 91 | '$modal', 92 | '$interval', 93 | '$route', 94 | '$routeParams', 95 | 'graphs', 96 | function($scope, $http, $modal, $interval, $route, $routeParams, graphs) 97 | { 98 | var sock = null; 99 | store = encodeURI($routeParams.store); 100 | var host = encodeURI($routeParams.key); 101 | $scope.host = host; 102 | $scope.store = store; 103 | $scope.state = []; 104 | 105 | $http.get('/api/hosts/state/' + host + '?store=' + store).success(function (resp) { 106 | $scope.state = resp.state; 107 | }); 108 | 109 | // -- -- -- -- -- -- -- -- -- -- -- 110 | 111 | var triggers = function() { 112 | $http.get('/api/hosts/triggers/' + host + '?store=' + store).success(function (resp) { 113 | $scope.triggers = resp.triggers; 114 | }); 115 | }; 116 | triggers(); 117 | 118 | // -- -- -- -- -- -- -- -- -- -- -- 119 | 120 | $scope.add_trigger = function() { 121 | var modalInstance = $modal.open({ 122 | templateUrl: 'AddTrigger.html', 123 | controller: 'add_trigger_controller', 124 | resolve: { 125 | trigger: function () { 126 | return $scope.trigger; 127 | } 128 | 129 | } 130 | }); 131 | 132 | modalInstance.result.then(function (trigger) { 133 | triggers(); 134 | }); 135 | }; 136 | 137 | // -- -- -- -- -- -- -- -- -- -- -- 138 | 139 | $scope.delete_trigger = function(identifier) { 140 | 
console.log("Trying to delete trigger: ", identifier); 141 | var modalInstance = $modal.open({ 142 | templateUrl: 'DeleteTrigger.html', 143 | controller: 'delete_trigger_controller', 144 | resolve: { 145 | trigger: function () { 146 | return $scope.trigger; 147 | } 148 | 149 | } 150 | }); 151 | 152 | modalInstance.result.then(function () { 153 | $http.delete('/api/hosts/triggers/' + identifier + '?store=' + store).success(function (resp) { 154 | if (resp.ok) { 155 | console.log("Trigger deleted: ", identifier); 156 | } else { 157 | console.log("Trigger failed to delete: ", identifier); 158 | } 159 | triggers(); 160 | }).error(function (resp) { 161 | console.log(resp); 162 | }); 163 | }); 164 | }; 165 | 166 | // -- -- -- -- -- -- -- -- -- -- -- 167 | 168 | var sock_addr = null; 169 | // wee bit of a hack but meh it will be fine for now 170 | if (store.substring(0, 7) == "http://") { 171 | sock_addr = store.substring(7, store.length); 172 | sock_addr.replace(/^\s+|\s+$/g, ''); 173 | } 174 | console.log("Trying ws://", sock_addr); 175 | 176 | // create a graph2d with an (currently empty) dataset 177 | var cpu_usage_graph = document.getElementById('cpu'); 178 | var memory_usage_graph = document.getElementById('memory'); 179 | var overall_usage_graph = document.getElementById('usage'); 180 | 181 | var cpu_data_set = new vis.DataSet(); 182 | var cpu_groups = new vis.DataSet(); 183 | 184 | var usage_data_set = new vis.DataSet(); 185 | var usage_group = new vis.DataSet(); 186 | 187 | var memory_data_set = new vis.DataSet(); 188 | var memory_group = new vis.DataSet(); 189 | 190 | $http.get('/api/hosts/rest/' + host + '?store=' + store).success(function (resp) { 191 | 192 | var data = resp.payload; 193 | 194 | if (data.length > 0) { 195 | var head = data[data.length - 1]; 196 | $scope.grid_data = [ 197 | { 198 | "Hostname": head.hostname, 199 | "Platform": head.platform, 200 | "Processes": head.process, 201 | "Cores": head.cores, 202 | "Machine": head.machine, 203 | "Disk 
Free": head.diskFree /1024/102, 204 | "Disk Total": head.diskTotal /1024/1024/1024 205 | } 206 | ]; 207 | } 208 | 209 | for (var i in data[0].cpuStats) { 210 | cpu_groups.add({ 211 | id: i, 212 | content: 'Core ' + (parseInt(i) + 1), 213 | options: { 214 | drawPoints: { 215 | style: 'circle' // square, circle 216 | } 217 | } 218 | }); 219 | } 220 | memory_group.add({ 221 | id: 0, 222 | content: "Memory Usage (mb)", 223 | options: { 224 | drawPoints: { 225 | style: 'circle' // square, circle 226 | } 227 | } 228 | }); 229 | usage_group.add({ 230 | id: 0, 231 | content: "Usage Usage (%)", 232 | options: { 233 | drawPoints: { 234 | style: 'circle' // square, circle 235 | } 236 | } 237 | }); 238 | 239 | _.each(data, function(stat) { 240 | for (var i in stat.cpuStats) { 241 | cpu_data_set.add({ 242 | x: stat.ts, 243 | y: stat.cpuStats[i], 244 | group: i 245 | }); 246 | } 247 | usage_data_set.add({ 248 | x: stat.ts, 249 | y: stat.usage, 250 | group: 0 251 | }); 252 | memory_data_set.add({ 253 | x: stat.ts, 254 | y: stat.memoryUsed/1024/1024, 255 | group: 0 256 | }); 257 | }); 258 | 259 | var cpu_graph = new vis.Graph2d( 260 | cpu_usage_graph, 261 | cpu_data_set, 262 | cpu_groups, 263 | graphs.cpu_utilization_options( 264 | data[0].ts, 265 | data[data.length - 1].ts, 266 | "Percentage Utilization Per Core" 267 | )); 268 | 269 | var usage_graph = new vis.Graph2d( 270 | overall_usage_graph, 271 | usage_data_set, 272 | usage_group, 273 | graphs.cpu_utilization_options( 274 | data[0].ts, 275 | data[data.length - 1].ts, 276 | "Percentage Overall Usage" 277 | )); 278 | 279 | var memory_graph = new vis.Graph2d( 280 | memory_usage_graph, 281 | memory_data_set, 282 | memory_group, 283 | graphs.memory_utilization_options( 284 | data[0].ts, 285 | data[data.length - 1].ts, 286 | "Memory Usage (mb)", 287 | data[0].memoryTotal/1024/1024 288 | )); 289 | 290 | // if it is alive we can listen to new data on the web-socket interface 291 | if (resp.alive) { 292 | sock = new WebSocket("ws://" 
+ sock_addr + "/api/hosts/sock/" + host); 293 | sock.onmessage = function (event) { 294 | var data = $.parseJSON(event.data); 295 | 296 | for (var i in data.cpuStats) { 297 | cpu_data_set.add({ 298 | x: vis.moment(), 299 | y: data.cpuStats[i], 300 | group: i 301 | }); 302 | usage_data_set.add({ 303 | x: data.ts, 304 | y: data.usage, 305 | group: 0 306 | }); 307 | memory_data_set.add({ 308 | x: data.ts, 309 | y: data.memoryUsed/1024/1024, 310 | group: 0 311 | }); 312 | } 313 | 314 | graphs.remove_old_data(cpu_graph, cpu_data_set); 315 | graphs.remove_old_data(memory_graph, memory_data_set); 316 | graphs.remove_old_data(usage_graph, usage_data_set); 317 | 318 | graphs.render_step(cpu_graph); 319 | graphs.render_step(memory_graph); 320 | graphs.render_step(usage_graph); 321 | } 322 | } 323 | }); 324 | 325 | $scope.$on("$destroy", function() { 326 | if (sock) { 327 | sock.close(); 328 | } 329 | }); 330 | } 331 | ]); 332 | 333 | return app; 334 | }); 335 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/dashboard/host/proc/proc.html: -------------------------------------------------------------------------------- 1 |
2 | 7 |
8 | 9 |
10 |
11 |
12 |
13 | 14 |
15 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/dashboard/host/proc/proc.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | define(['angular', 4 | "vis", 5 | "angularRoute", 6 | "angularUiGrid", 7 | "angularLoadingBar" 8 | ], function(angular, vis) 9 | { 10 | var app = angular.module('vigilant.dashboard.host.proc', 11 | [ 12 | 'ngRoute', 13 | 'angular-loading-bar' 14 | ]); 15 | 16 | app.config(['$routeProvider', function($routeProvider) { 17 | $routeProvider.when('/proc/:process', { 18 | templateUrl: 'js/dashboard/host/proc/proc.html', 19 | controller: 'proc_controller' 20 | }); 21 | }]); 22 | 23 | var usageOptions = { 24 | start: vis.moment().add(-30, 'seconds'), // changed so its faster 25 | end: vis.moment(), 26 | dataAxis: { 27 | title: { 28 | left: { 29 | text: "Usage (%)" 30 | } 31 | }, 32 | customRange: { 33 | left: { 34 | min:0, max: 100 35 | } 36 | } 37 | }, 38 | drawPoints: { 39 | style: 'circle' // square, circle 40 | }, 41 | shaded: { 42 | orientation: 'bottom' // top, bottom 43 | }, 44 | zoomMax: 100000, 45 | zoomMin: 100000 46 | }; 47 | 48 | // remove all data points which are no longer visible 49 | var removeOldData = function(graph, dataset) { 50 | var range = graph.getWindow(); 51 | var interval = range.end - range.start; 52 | var oldIds = dataset.getIds({ 53 | filter: function (item) { 54 | return item.x < range.start - interval; 55 | } 56 | }); 57 | dataset.remove(oldIds); 58 | }; 59 | 60 | 61 | app.controller('proc_controller', [ 62 | '$scope', 63 | '$http', 64 | '$interval', 65 | '$route', 66 | '$routeParams', 67 | function($scope, $http, $interval, $route, $routeParams) 68 | { 69 | var store = encodeURI($routeParams.store); 70 | var host = encodeURI($routeParams.key); 71 | var proc = $routeParams.process; 72 | 73 | $scope.host = host; 74 | $scope.store = store; 75 | $scope.proc = proc; 76 | 77 | var sock = null; 78 | 
var sockAddr = null; 79 | if (store.substring(0, 7) == "http://") { 80 | sockAddr = store.substring(7, store.length); 81 | } 82 | 83 | $scope.procData = []; 84 | 85 | $http.get('/api/proc/' + proc + '?store=' + store).success(function (data) { 86 | 87 | // create a graph2d with an (currently empty) dataset 88 | var usageContainer = document.getElementById('usage'); 89 | var memoryContainer = document.getElementById('memory'); 90 | 91 | var usageDataSet = new vis.DataSet(); 92 | var memoryDataSet = new vis.DataSet(); 93 | 94 | var memoryOptions = { 95 | start: vis.moment().add(-30, 'seconds'), // changed so its faster 96 | end: vis.moment(), 97 | dataAxis: { 98 | title: { 99 | left: { 100 | text: "Memory Usage (%)" 101 | } 102 | }, 103 | customRange: { 104 | left: { 105 | min:0, max: 100 106 | } 107 | } 108 | }, 109 | drawPoints: { 110 | style: 'circle' // square, circle 111 | }, 112 | shaded: { 113 | orientation: 'bottom' // top, bottom 114 | }, 115 | zoomMax: 100000, 116 | zoomMin: 100000 117 | }; 118 | 119 | var usageGraph = new vis.Graph2d(usageContainer, usageDataSet, usageOptions); 120 | var memoryGraph = new vis.Graph2d(memoryContainer, memoryDataSet, memoryOptions); 121 | 122 | function renderStepUsage() { 123 | // move the window (you can think of different strategies). 124 | var now = vis.moment(); 125 | var range = usageGraph.getWindow(); 126 | var interval = range.end - range.start; 127 | 128 | // continuously move the window 129 | usageGraph.setWindow(now - interval, now, {animate: false}); 130 | requestAnimationFrame(renderStepUsage); 131 | } 132 | function renderStepMemory() { 133 | // move the window (you can think of different strategies). 
134 | var now = vis.moment(); 135 | var range = memoryGraph.getWindow(); 136 | var interval = range.end - range.start; 137 | 138 | // continuously move the window 139 | memoryGraph.setWindow(now - interval, now, {animate: false}); 140 | requestAnimationFrame(renderStepMemory); 141 | } 142 | 143 | if (data.alive) { 144 | renderStepUsage(); 145 | renderStepMemory(); 146 | 147 | sock = new WebSocket("ws://" + sockAddr + "/api/proc/sock/" + proc); 148 | sock.onmessage = function (event) { 149 | var data = $.parseJSON(event.data); 150 | 151 | usageDataSet.add({ 152 | x: data.ts, 153 | y: data.usage 154 | }); 155 | memoryDataSet.add({ 156 | x: data.ts, 157 | y: data.memory 158 | }); 159 | 160 | removeOldData(usageGraph, usageDataSet); 161 | removeOldData(memoryGraph, memoryDataSet); 162 | } 163 | } 164 | }); 165 | 166 | $scope.$on("$destroy", function(){ 167 | if (sock) { sock.close(); } 168 | }); 169 | } 170 | ]); 171 | 172 | return app; 173 | }); 174 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/require-config.js: -------------------------------------------------------------------------------- 1 | require.config({ 2 | paths: { 3 | jquery: [ 4 | //'//code.jquery.com/jquery-2.1.1.min', 5 | '/js/lib/jquery/dist/jquery.min'], 6 | 7 | underscore: [ 8 | '/js/lib/underscore/underscore-min' 9 | ], 10 | 11 | spin: [ 12 | '/js/lib/spin.js/spin' 13 | ], 14 | 15 | vis: [ 16 | //'//cdnjs.cloudflare.com/ajax/libs/vis/3.7.1/vis.min', 17 | '/js/lib/vis/dist/vis.min' 18 | ], 19 | 20 | bootstrap: [ 21 | '/js/lib/bootstrap/dist/js/bootstrap.min' 22 | ], 23 | 24 | bootstrapAutoHiding: [ 25 | '/js/lib/bootstrap-autohidingnavbar/dist/jquery.bootstrap-autohidingnavbar' 26 | ], 27 | 28 | angular: [ 29 | //'//cdnjs.cloudflare.com/ajax/libs/angular.js/1.2.20/angular.min', 30 | '/js/lib/angular/angular.min' 31 | ], 32 | 33 | angularRoute: [ 34 | //'//cdnjs.cloudflare.com/ajax/libs/angular.js/1.2.20/angular-route.min', 35 | 
'/js/lib/angular-route/angular-route.min' 36 | ], 37 | 38 | angularLoadingBar: [ 39 | '/js/lib/angular-loading-bar/build/loading-bar.min' 40 | ], 41 | 42 | angularUiGrid: [ 43 | '/js/lib/angular-ui-grid/ui-grid.min' 44 | ], 45 | 46 | angularBootstrap: [ 47 | '/js/lib/angular-bootstrap/ui-bootstrap-tpls.min' 48 | ], 49 | 50 | angularSpinner: [ 51 | '/js/lib/angular-spinner/angular-spinner' 52 | ] 53 | }, 54 | shim: { 55 | 'bootstrapAutoHiding': { 56 | deps: ['bootstrap'], 57 | exports: 'bootstrapAutoHiding' 58 | }, 59 | 'bootstrap': { 60 | deps: ['jquery'], 61 | exports: 'bootstrap' 62 | }, 63 | 'angularSpinner': { 64 | deps: ['angular', 'spin'], 65 | exports: 'angular' 66 | }, 67 | 'angularUiGrid': { 68 | deps: ['angular'], 69 | exports: 'angularUiGrid' 70 | }, 71 | 'angularLoadingBar': { 72 | deps: ['angular'], 73 | exports: 'angularLoadingBar' 74 | }, 75 | 'angularRoute': { 76 | deps: ['angular'], 77 | exports: 'angular' 78 | }, 79 | 'angularBootstrap': { 80 | deps: ['angular'], 81 | exports: 'angular' 82 | }, 83 | 'angular': { 84 | deps: ['jquery'], 85 | exports: 'angular' 86 | } 87 | } 88 | }); 89 | 90 | require(["vigilant", "jquery", "angular", "bootstrap", "bootstrapAutoHiding"], 91 | function(vigilant, $, angular) 92 | { 93 | angular.bootstrap(document, ["vigilant"]); 94 | $("div.navbar-fixed-top").autoHidingNavbar(); 95 | }); 96 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/util/graphs.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | define(['angular', "vis"], function(angular, vis) 4 | { 5 | var app = angular.module('vigilant.util.graphs', []); 6 | 7 | app.service('graphs', function() { 8 | 9 | this.remove_old_data = function(graph, dataset) { 10 | var range = graph.getWindow(); 11 | var interval = range.end - range.start; 12 | var oldIds = dataset.getIds({ 13 | filter: function (item) { 14 | return item.x < range.start - interval; 
15 | } 16 | }); 17 | dataset.remove(oldIds); 18 | }; 19 | 20 | this.cpu_utilization_options = function(start, end, name) { 21 | return { 22 | legend: true, 23 | start: start, 24 | end: end, 25 | dataAxis: { 26 | customRange: { 27 | left: { 28 | min:0, 29 | max: 100 30 | } 31 | }, 32 | title: { 33 | left: { 34 | text: name 35 | } 36 | } 37 | }, 38 | zoomMax: 100000, 39 | zoomMin: 100000 40 | }; 41 | }; 42 | 43 | this.memory_utilization_options = function(start, end, name, max) { 44 | return { 45 | legend: true, 46 | start: start, 47 | end: end, 48 | dataAxis: { 49 | customRange: { 50 | left: { 51 | min: 0, 52 | max: max 53 | } 54 | }, 55 | title: { 56 | left: { 57 | text: name 58 | } 59 | } 60 | }, 61 | zoomMax: 100000, 62 | zoomMin: 100000 63 | }; 64 | }; 65 | 66 | this.render_step = function(graph) { 67 | // move the window (you can think of different strategies). 68 | var now = vis.moment(); 69 | var range = graph.getWindow(); 70 | var interval = range.end - range.start; 71 | 72 | // continuously move the window 73 | graph.setWindow(now - interval, now, {animate: true}); 74 | }; 75 | 76 | this.node_options = { 77 | nodes: { 78 | shape: 'dot', 79 | radius: 30, 80 | borderWidth: 2 81 | }, 82 | groups: { 83 | online: { 84 | border: 'black', 85 | color: 'green' 86 | }, 87 | offline: { 88 | border: 'black', 89 | color: 'red' 90 | }, 91 | root: { 92 | color: { 93 | border: 'black', 94 | background: 'gray', 95 | highlight: { 96 | border: 'black', 97 | background: 'lightgray' 98 | } 99 | }, 100 | fontSize: 18, 101 | fontFace: 'arial', 102 | shape: 'circle' 103 | } 104 | } 105 | }; 106 | 107 | }); 108 | 109 | return app; 110 | }); 111 | -------------------------------------------------------------------------------- /front-end/Dashboard/www/js/vigilant.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | define(['angular', 4 | 'angularRoute', 5 | 'angularBootstrap', 6 | 'angularSpinner', 7 | 'util/graphs', 8 | 
'dashboard/dashboard' 9 | ], function(angular, angularRoute) 10 | { 11 | var app = angular.module('vigilant', [ 12 | 'ngRoute', 13 | 'ui.bootstrap', 14 | 'angularSpinner', 15 | 'vigilant.dashboard', 16 | 'vigilant.util.graphs' 17 | ]); 18 | 19 | app.config(['$routeProvider', function($routeProvider) { 20 | $routeProvider 21 | .otherwise({ 22 | redirectTo: '/dashboard' 23 | }); 24 | }]); 25 | 26 | return app; 27 | }); 28 | -------------------------------------------------------------------------------- /front-end/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Philip Herron 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /front-end/README.md: -------------------------------------------------------------------------------- 1 | # Front-end 2 | Front-end Webapp for Vigilant 3 | 4 | ##Setup 5 | 6 | Flask Angularjs Dashbaord 7 | 8 | ```bash 9 | $ bower install 10 | $ pip3 install requirements.txt 11 | 12 | # development 13 | $ ./dashboard.py 14 | 15 | # optional nginx local development 16 | $ ./nginx.sh 17 | ``` 18 | 19 | ## Nginx real deployment 20 | 21 | Nginx configuration: 22 | 23 | ```bash 24 | $ cat /etc/nginx/sites-available/default 25 | server { 26 | listen 80; 27 | server_name dashboard.vigilantlabs.co.uk; 28 | 29 | location / { try_files $uri @yourapplication; } 30 | location @yourapplication { 31 | include uwsgi_params; 32 | uwsgi_pass unix:/tmp/uwsgi.sock; 33 | } 34 | } 35 | ``` 36 | 37 | Run the app. 38 | 39 | ```bash 40 | $ sudo uwsgi -s /tmp/uwsgi.sock -w dashboard:app --chown-socket www-data:www-data --uid www-data --gid www-data 41 | ``` 42 | -------------------------------------------------------------------------------- /front-end/bower.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Vigilant", 3 | "private": true, 4 | "dependencies": { 5 | "requirejs": "latest", 6 | "underscore": "latest", 7 | "jquery": "2.1.1", 8 | "vis": "3.12.0", 9 | "bootstrap": "~3.2.0", 10 | "bootstrap-autohidingnavbar": "1.0.0", 11 | "semantic": "1.12.1", 12 | "angular": "1.2.23", 13 | "angular-route": "1.2.23", 14 | "angular-loading-bar": "~0.6.0", 15 | "angular-spinner": "0.6.1", 16 | "angular-ui-grid": "~3.0.0-rc.16", 17 | "angular-bootstrap": "0.11.0", 18 | "angular-spinner": "latest" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /front-end/dashboard.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from Dashboard.Routes 
import app 4 | 5 | if __name__ == "__main__": 6 | app.run(debug=True, host="0.0.0.0") 7 | -------------------------------------------------------------------------------- /front-end/etc/nginx/nginx.cfg: -------------------------------------------------------------------------------- 1 | http { 2 | server { 3 | listen 8080 default_server; 4 | 5 | location / { try_files $uri @yourapplication; } 6 | location @yourapplication { 7 | include uwsgi_params; 8 | uwsgi_pass unix:/tmp/uwsgi.sock; 9 | } 10 | 11 | error_log error.log warn; 12 | } 13 | } 14 | 15 | events { 16 | worker_connections 768; 17 | # multi_accept on; 18 | } 19 | -------------------------------------------------------------------------------- /front-end/etc/nginx/uwsgi_params: -------------------------------------------------------------------------------- 1 | uwsgi_param QUERY_STRING $query_string; 2 | uwsgi_param REQUEST_METHOD $request_method; 3 | uwsgi_param CONTENT_TYPE $content_type; 4 | uwsgi_param CONTENT_LENGTH $content_length; 5 | uwsgi_param REQUEST_URI $request_uri; 6 | uwsgi_param PATH_INFO $document_uri; 7 | uwsgi_param DOCUMENT_ROOT $document_root; 8 | uwsgi_param SERVER_PROTOCOL $server_protocol; 9 | uwsgi_param REMOTE_ADDR $remote_addr; 10 | uwsgi_param REMOTE_PORT $remote_port; 11 | uwsgi_param SERVER_ADDR $server_addr; 12 | uwsgi_param SERVER_PORT $server_port; 13 | uwsgi_param SERVER_NAME $server_name; -------------------------------------------------------------------------------- /front-end/jenkins.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | bower install 5 | 6 | virtualenv env 7 | . 
env/bin/activate 8 | pip install -r requirements.txt 9 | python setup.py build 10 | 11 | # add karma 12 | -------------------------------------------------------------------------------- /front-end/nginx.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cd $(dirname ${BASH_SOURCE[0]}) 4 | nginx -s stop 5 | 6 | set -e 7 | nginx -p `pwd` -c ./etc/nginx/nginx.cfg -t 8 | nginx -p `pwd` -c ./etc/nginx/nginx.cfg 9 | uwsgi -s /tmp/uwsgi.sock -w dashboard:app 10 | -------------------------------------------------------------------------------- /front-end/requirements.txt: -------------------------------------------------------------------------------- 1 | flask==0.10.1 2 | requests==2.3.0 3 | uwsgi==2.0.9 4 | -------------------------------------------------------------------------------- /front-end/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from distutils.core import setup 4 | 5 | setup(name='vigilant-www', 6 | version='0.4', 7 | description='Front-end webapp to vigilant', 8 | author='Philip Herron', 9 | author_email='herron.philip@googlemail.com', 10 | url='https://.github.com/vigilantlabs', 11 | packages=['Dashboard'], 12 | package_data={'Dashboard': ['www']}, 13 | scripts=['dashboard.py'] 14 | ) 15 | -------------------------------------------------------------------------------- /front-end/vigilant-front-end.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /screenshots/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/screenshots/overview.png 
-------------------------------------------------------------------------------- /screenshots/real-time.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/screenshots/real-time.png -------------------------------------------------------------------------------- /screenshots/swagger.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/philberty/vigilant/7c891df61d6370471bb68710599c518050da068b/screenshots/swagger.png --------------------------------------------------------------------------------