├── .dockerignore ├── table.png ├── flamegraph.png ├── entrypoint.sh ├── etc └── supervisord.d │ ├── kernelscope.ini │ ├── kernelscope-visualiser.ini │ └── kernelscope-offcputime.ini ├── kernelscope-sqlite.sql ├── docker-compose.yml ├── src ├── test-logger.py ├── web │ ├── kernelscope.css │ ├── index.html │ └── d3.flameGraph.js ├── test-flamegraph.py ├── test-service.py ├── KernelscopeLogger.py ├── Database.py ├── KernelscopeLoggerService.py ├── FlameGraph.py ├── Constraints.py ├── KernelscopeCategories.py ├── KernelscopeService.py └── offcputime.py ├── Dockerfile ├── kernelscope-mysql.sql ├── README.md └── json-api.md /.dockerignore: -------------------------------------------------------------------------------- 1 | Dockerfile 2 | docker-compose.yml 3 | -------------------------------------------------------------------------------- /table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/josefbacik/kernelscope/HEAD/table.png -------------------------------------------------------------------------------- /flamegraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/josefbacik/kernelscope/HEAD/flamegraph.png -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | mount -t debugfs none /sys/kernel/debug/ 4 | exec "$@" 5 | -------------------------------------------------------------------------------- /etc/supervisord.d/kernelscope.ini: -------------------------------------------------------------------------------- 1 | [program:kernelscope] 2 | command=python KernelscopeLoggerService.py --sqlite /var/lib/kernelscope.db 8081 3 | directory=/opt/kernelscope/src/ 4 | redirect_stderr=true 5 | autostart=true 6 | stdout_logfile=/var/log/supervisor/%(program_name)s.log 7 | -------------------------------------------------------------------------------- /etc/supervisord.d/kernelscope-visualiser.ini: -------------------------------------------------------------------------------- 1 | [program:kernelscope-visualiser] 2 | command=python KernelscopeService.py --sqlite /var/lib/kernelscope.db 8080 3 | directory=/opt/kernelscope/src/ 4 | redirect_stderr=true 5 | autostart=true 6 | stdout_logfile=/var/log/supervisor/%(program_name)s.log 7 | -------------------------------------------------------------------------------- /etc/supervisord.d/kernelscope-offcputime.ini: -------------------------------------------------------------------------------- 1 | [program:kernelscope-offcputime] 2 | command=python offcputime.py --logger 'http://localhost:8081' --threshold 5000 3 | directory=/opt/kernelscope/src/ 4 | redirect_stderr=true 5 | autostart=true 6 | stdout_logfile=/var/log/supervisor/%(program_name)s.log 7 | -------------------------------------------------------------------------------- /kernelscope-sqlite.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE `offcputime` ( 2 | `id` int(11) PRIMARY KEY, 3 | `hostname` varchar(256) NOT NULL, 4 | `time` datetime NOT NULL, 5 | `process` varchar(256) NOT NULL, 6 | `pid` int(11) NOT NULL, 7 | `stack` text NOT NULL, 8 | `elapsed` bigint(20) NOT NULL 9 | ); 10 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | kernelscope: 4 | image: qnib/kernelscope 5 | hostname: kernelscope 6 | container_name: kernelscope 7 | volumes: 8 | - /lib/modules:/lib/modules:ro 9 | - /usr/src:/usr/src:ro 10 | - /etc/localtime:/etc/localtime:ro 11 | ports: 12 | - 8080:8080 13 | privileged: true 14 | -------------------------------------------------------------------------------- /src/test-logger.py: -------------------------------------------------------------------------------- 1 | from KernelscopeLogger import KernelscopeLogger 2 | 3 | logger = KernelscopeLogger("http://localhost:8080") 4 | foo = {} 5 | foo["process"] = "fs_mark" 6 | foo["pid"] = 1345 7 | foo["stack"] = "write;btrfs_write;sleep" 8 | foo["elapsed"] = 1234 9 | logger.add_entry('offcputime', foo) 10 | foo = foo.copy() 11 | foo["process"] = "bar" 12 | logger.add_entry('offcputime', foo) 13 | logger.submit() 14 | -------------------------------------------------------------------------------- /src/web/kernelscope.css: -------------------------------------------------------------------------------- 1 | .containerchild { 2 | float: left; 3 | margin-right: 5px; 4 | } 5 | 6 | #filter { 7 | margin-top: 5px; 8 | } 9 | 10 | .querycontainer { 11 | display: block; 12 | } 13 | 14 | .querychild { 15 | display: block; 16 | text-align: right; 17 | } 18 | 19 | .inputContainer input { 20 | width: 100%; 21 | box-sizing: border-box; 22 | } 23 | 24 | .selectContainer { 25 | display: flex; 26 | justify-content: space-between; 27 | } 28 | 29 | table { 30 | width: 100%; 31 | } 32 | 33 | td { 34 | max-width: 150px; 35 | padding: 15px; 36 | overflow: hidden; 37 | } 38 | -------------------------------------------------------------------------------- /src/test-flamegraph.py: -------------------------------------------------------------------------------- 1 | import json 2 | import urllib2 3 | 4 | req = urllib2.Request("http://localhost:8080") 5 | query = {} 6 | query['elements'] = ['stack', 'elapsed'] 7 | #query['limit'] = 10 8 | query['format'] = "flamegraph" 9 | 10 | constraint = {} 11 | constraint['oper'] = 'and' 12 | constraint['conditions'] = [ { 'process': 'fsstress', 'constraint': '='} ] 13 | query['constraints'] = [ constraint ] 14 | 15 | categories = { 'offcputime': query } 16 | 17 | req.add_header('Content-type', 'application/json') 18 | data = json.dumps(categories) 19 | req.add_header('Content-Length', len(data)) 20 | response = urllib2.urlopen(req, data) 21 | data = json.load(response) 22 | print json.dumps(data) 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM qnib/u-supervisor 2 | 3 | RUN echo "deb [trusted=yes] http://repo.iovisor.org/apt/xenial xenial-nightly main" > /etc/apt/sources.list.d/iovisor.list \ 4 | && apt-get update \ 5 | && apt-get install -y sqlite python3 python-pip gcc apt-transport-https libmysqlclient-dev libelf1 bcc-tools libbcc-examples \ 6 | && pip install mysql 7 | 8 | ADD . /opt/kernelscope/ 9 | RUN cat /opt/kernelscope/kernelscope-sqlite.sql | sqlite3 /var/lib/kernelscope.db 10 | ADD etc/supervisord.d/kernelscope-visualiser.ini \ 11 | etc/supervisord.d/kernelscope.ini \ 12 | etc/supervisord.d/kernelscope-offcputime.ini \ 13 | /etc/supervisord.d/ 14 | ADD entrypoint.sh /usr/bin/ 15 | CMD ["/usr/bin/entrypoint.sh", "/opt/qnib/supervisor/bin/start.sh", "-n"] 16 | 17 | -------------------------------------------------------------------------------- /src/test-service.py: -------------------------------------------------------------------------------- 1 | import json 2 | import urllib2 3 | 4 | req = urllib2.Request("http://localhost:8080/api") 5 | query = {} 6 | query['elements'] = ['hostname', 'process', 'elapsed'] 7 | query['limit'] = 1 8 | 9 | constraint = {} 10 | constraint['oper'] = 'and' 11 | constraint['conditions'] = [ { 'process': 'fsstress', 'constraint': '='} ] 12 | query['constraints'] = [ constraint ] 13 | 14 | categories = { 'offcputime': query } 15 | 16 | req.add_header('Content-type', 'application/json') 17 | data = json.dumps(categories) 18 | req.add_header('Content-Length', len(data)) 19 | response = urllib2.urlopen(req, data) 20 | data = json.load(response) 21 | print data 22 | 23 | constraint['conditions'][0]['constraint'] = "contains" 24 | constraint['conditions'][0]['process'] = "btrfs" 25 | req.add_header('Content-type', 'application/json') 26 | data = json.dumps(categories) 27 | req.add_header('Content-Length', len(data)) 28 | response = urllib2.urlopen(req, data) 29 | data = json.load(response) 30 | print data 31 | 32 | -------------------------------------------------------------------------------- /src/KernelscopeLogger.py: -------------------------------------------------------------------------------- 1 | import json 2 | import urllib2 3 | import socket 4 | from datetime import datetime 5 | 6 | class KernelscopeLogger: 7 | def __init__(self, url): 8 | self.url = url 9 | self.hostname = socket.gethostname() 10 | self.payload = {} 11 | self.payload['hostname'] = socket.gethostname() 12 | 13 | def add_entry(self, category, entry): 14 | if category not in self.payload: 15 | self.payload[category] = [] 16 | self.payload[category].append(entry) 17 | 18 | def submit(self): 19 | if len(self.payload) == 0: 20 | return 21 | self.payload['time'] = str(datetime.now()) 22 | self.payload['hostname'] = self.hostname 23 | 24 | req = urllib2.Request(self.url) 25 | req.add_header('Content-type', 'application/json') 26 | data = json.dumps(self.payload) 27 | req.add_header('Content-Length', len(data)) 28 | response = urllib2.urlopen(req, data); 29 | self.payload = {} 30 | 31 | if __name__ == "main": 32 | print("NotLikeThis") 33 | -------------------------------------------------------------------------------- /src/Database.py: -------------------------------------------------------------------------------- 1 | import MySQLdb 2 | import MySQLdb.cursors 3 | import sqlite3 4 | 5 | # Shamefully copied from stackoverflow 6 | def _dict_factory(cursor, row): 7 | d = {} 8 | for idx, col in enumerate(cursor.description): 9 | d[col[0]] = row[idx] 10 | return d 11 | 12 | class Database: 13 | TYPE_MYSQL=1 14 | TYPE_SQLITE=2 15 | 16 | def __init__(self): 17 | self.db = None 18 | self.type = 0 19 | 20 | def connect_mysql(self, dbhost, dbuser, dbpassword, dbname): 21 | self.type = self.TYPE_MYSQL 22 | self.db = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpassword, 23 | db=dbname, cursorclass=MySQLdb.cursors.DictCursor) 24 | 25 | def connect_sqlite(self, filename): 26 | self.type = self.TYPE_SQLITE 27 | self.db = sqlite3.connect(filename) 28 | self.db.row_factory = _dict_factory 29 | 30 | def cursor(self): 31 | return self.db.cursor() 32 | 33 | def arg_str(self): 34 | if self.type == self.TYPE_MYSQL: 35 | return '%s' 36 | elif self.type == self.TYPE_SQLITE: 37 | return '?' 38 | return '' 39 | 40 | def commit(self): 41 | self.db.commit() 42 | -------------------------------------------------------------------------------- /src/KernelscopeLoggerService.py: -------------------------------------------------------------------------------- 1 | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer 2 | import KernelscopeCategories 3 | from sys import argv 4 | import SocketServer 5 | import json 6 | import argparse 7 | from Database import Database 8 | 9 | db = Database() 10 | 11 | class KernelscopeLoggerService(BaseHTTPRequestHandler): 12 | def do_POST(self): 13 | json_data = self.rfile.read(int(self.headers['Content-Length'])) 14 | 15 | self.send_response(200) 16 | self.end_headers() 17 | 18 | data = json.loads(json_data) 19 | print(data) 20 | KernelscopeCategories.dump(db, data) 21 | 22 | def run(port=80): 23 | server_address = ('', port) 24 | kscope = HTTPServer(server_address, KernelscopeLoggerService) 25 | print("Starting Kernelscope Logger Service") 26 | kscope.serve_forever() 27 | 28 | parser = argparse.ArgumentParser(description="Start the logger service") 29 | parser.add_argument("--sqlite", help="sqlite database to use") 30 | parser.add_argument("--mysql", help="mysql database host to use") 31 | parser.add_argument("--dbuser", help="database username") 32 | parser.add_argument("--dbpassword", help="database password") 33 | parser.add_argument("--dbname", help="database name to use", default="kernelscope") 34 | parser.add_argument("PORT", help="port number to listen on", type=int) 35 | 36 | args = parser.parse_args() 37 | if args.mysql: 38 | db.connect_mysql(args.mysql, args.dbuser, args.dbpassword, args.dbname) 39 | elif args.sqlite: 40 | db.connect_sqlite(args.sqlite) 41 | else: 42 | print("Must specify either a sqlite or mysql database") 43 | exit(1) 44 | 45 | run(args.PORT) 46 | -------------------------------------------------------------------------------- /kernelscope-mysql.sql: -------------------------------------------------------------------------------- 1 | -- phpMyAdmin SQL Dump 2 | -- version 4.6.4 3 | -- https://www.phpmyadmin.net/ 4 | -- 5 | -- Host: localhost 6 | -- Generation Time: Nov 01, 2016 at 08:08 PM 7 | -- Server version: 10.0.26-MariaDB 8 | -- PHP Version: 5.6.26 9 | 10 | SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO"; 11 | SET time_zone = "+00:00"; 12 | 13 | 14 | /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; 15 | /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; 16 | /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; 17 | /*!40101 SET NAMES utf8mb4 */; 18 | 19 | -- 20 | -- Database: `kernelscope` 21 | -- 22 | CREATE DATABASE IF NOT EXISTS `kernelscope` DEFAULT CHARACTER SET latin1 COLLATE latin1_swedish_ci; 23 | USE `kernelscope`; 24 | 25 | -- -------------------------------------------------------- 26 | 27 | -- 28 | -- Table structure for table `offcputime` 29 | -- 30 | 31 | CREATE TABLE `offcputime` ( 32 | `id` int(11) NOT NULL, 33 | `hostname` varchar(256) NOT NULL, 34 | `time` datetime NOT NULL, 35 | `process` varchar(256) NOT NULL, 36 | `pid` int(11) NOT NULL, 37 | `stack` text NOT NULL, 38 | `elapsed` bigint(20) NOT NULL 39 | ) ENGINE=InnoDB DEFAULT CHARSET=latin1; 40 | 41 | -- 42 | -- Indexes for dumped tables 43 | -- 44 | 45 | -- 46 | -- Indexes for table `offcputime` 47 | -- 48 | ALTER TABLE `offcputime` 49 | ADD PRIMARY KEY (`id`); 50 | 51 | -- 52 | -- AUTO_INCREMENT for dumped tables 53 | -- 54 | 55 | -- 56 | -- AUTO_INCREMENT for table `offcputime` 57 | -- 58 | ALTER TABLE `offcputime` 59 | MODIFY `id` int(11) NOT NULL AUTO_INCREMENT; 60 | /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; 61 | /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; 62 | /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; 63 | -------------------------------------------------------------------------------- /src/FlameGraph.py: -------------------------------------------------------------------------------- 1 | class StackGraph: 2 | def __init__(self): 3 | self.nodes = { "name": "root", "value": 0, "children":[] } 4 | 5 | def add_stack(self, stack, value): 6 | cur_node = self.nodes 7 | self.nodes['value'] += value; 8 | for sym in stack: 9 | node = {} 10 | if 'children' not in cur_node: 11 | cur_node['children'] = [] 12 | cur_children = cur_node['children'] 13 | for n in cur_children: 14 | if sym == n['name']: 15 | node = n 16 | break 17 | if node == {}: 18 | node['name'] = sym 19 | node['value'] = value 20 | cur_children.append(node) 21 | else: 22 | node['value'] += value 23 | cur_node = node 24 | 25 | def build_flamegraph(values): 26 | stack_graph = StackGraph() 27 | if len(values) == 0: 28 | return stack_graph.nodes 29 | 30 | # Need to figure out which key has the stack in it, and if we selected an 31 | # elapsed value 32 | stack_key = "" 33 | value_key = "" 34 | for k,v in values[0].items(): 35 | try: 36 | if stack_key == "" and ";" in v: 37 | stack_key = k 38 | continue 39 | except: 40 | pass 41 | 42 | if value_key != "": 43 | continue 44 | try: 45 | numval = int(v) 46 | value_key = k 47 | except: 48 | continue 49 | if stack_key == "": 50 | return stack_graph.nodes 51 | 52 | for v in values: 53 | print v 54 | value = 1 55 | if value_key != "" and value_key in v: 56 | value = int(v[value_key]) 57 | stack = v[stack_key].split(';') 58 | stack.reverse() 59 | stack_graph.add_stack(stack, value) 60 | return stack_graph.nodes 61 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | A service for logging and visualizing data from bpf scripts. 2 | 3 | ![Flame Graph](flamegraph.png) 4 | ![Table](table.png) 5 | 6 | # Setup the database 7 | 8 | For sqlite you do the following 9 | 10 | ``` 11 | cat kernelscope-sqlite.sql | sqlite3 yourdatabase.db 12 | ``` 13 | 14 | For mysql you can just run the following 15 | 16 | ``` 17 | mysql -u username -p < kernelscope-mysql.sql 18 | ``` 19 | 20 | # Start the logging daemon 21 | 22 | This is the daemon that accepts connections from your hosts that want to log 23 | their trace data. You need to point it at your database, the sqllite and mysql 24 | commands follow (use whichever one is relevant) 25 | 26 | ``` 27 | python KernelscopeLoggerService.py --mysql localhost --dbuser user --dbpassword password --dbname kernelscope 8081 28 | python KernelscopeLoggerService.py --sqlite kernelscope.db 8081 29 | ``` 30 | 31 | 32 | # Run the tracing tool on your target host 33 | 34 | Currently the only script in this repo is offcputime.py, which is just Brendan 35 | Gregg's offcputime.py from bcc/tools/offcputime.py that has been modified to 36 | dump it's information into a kernelscope service. You run it as follows 37 | 38 | ``` 39 | python offcputime.py --logger 'http://localhost:8081' --threshold 5000 40 | ``` 41 | 42 | This will log to the service running on localhost and will only log events that 43 | take longer than 5000 microseconds, and will send information to the service 44 | once a minute. 45 | 46 | # Run the visualization service 47 | 48 | This is the read-only side of kernelscope that runs the webapp. Simply run the 49 | following command for whichever database backend you are using 50 | 51 | ``` 52 | python KernelscopeService.py --mysql localhost --dbuser user --dbpassword password --dbname kernelscope 8080 53 | python KernelscopeService.py --sqlite kernelscope.db 8080 54 | ``` 55 | 56 | and then navigate your browser to the appropriate host and port. 57 | 58 | # Extending for new data types 59 | 60 | This is meant to be as easily extendable as possible. That said I, a kernel 61 | developer, wrote it so it made sense to me. If there are cleaner approaches I 62 | accept patches. All you should have to do is add entry to two dicts at the top 63 | of src/KernelscopeCategories.py. The format is the following 64 | 65 | ``` 66 | _categories['DATABASE TABLE NAME'] = [ {'name': 'COLUMN NAME 1', 'type':'TYPE', 'prettyname':'SOME PRETTY NAME TO DESCRIBE THE COLUMN'}] 67 | ``` 68 | 69 | TYPE has to be of one of the types described in json-api.txt. _valid_columns 70 | must be just a dict of the tables column names, so in the following format 71 | 72 | ``` 73 | _valid_columns['DATABASE TABLE NAME'] = [ 'COLUMN NAME 1', 'COLUMN NAME 2' ] 74 | ``` 75 | 76 | Then your script can log with the proper dict values set and it'll log into the 77 | datbase properly and then the webapp should show the data properly. 78 | -------------------------------------------------------------------------------- /src/Constraints.py: -------------------------------------------------------------------------------- 1 | import Database 2 | 3 | _valid_constraints = ('=', '<', '<=', '>', '>=', '!=', 'contains') 4 | 5 | # TODO: should probably throw errors for the various incorrect statement 6 | # building, but for now assume we are perfect and just silently ignore invalid 7 | # queries 8 | def build_constraints(db, query, valid_columns): 9 | if 'constraints' not in query: 10 | return ('', ()) 11 | constraints = query['constraints'] 12 | strs = [] 13 | args = [] 14 | for constraint in constraints: 15 | # Each constraint requires a conditions array 16 | if 'conditions' not in constraint: 17 | continue 18 | conditions = constraint['conditions'] 19 | 20 | # If we have more than one condition we must have an operation to join 21 | # the two of them together 22 | if len(conditions) > 1 and 'oper' not in constraint: 23 | continue 24 | 25 | cur_strs = [] 26 | for statement in conditions: 27 | # Each condition statement must have exactly 2 fields, one is the 28 | # column and value pair, the other is the constraint type. 29 | if len(statement) != 2: 30 | continue 31 | if 'expr' not in statement: 32 | continue 33 | column = "" 34 | expr = "" 35 | for k,v in statement.items(): 36 | if k == 'expr': 37 | if v not in _valid_constraints: 38 | continue 39 | expr = v 40 | else: 41 | column = k 42 | if expr == "" or column == "": 43 | continue 44 | 45 | #Sql doesn't take escaped columns for it's query, so we have to add 46 | #it directly to the string, so we need to make sure its only one 47 | #of the defined values to avoid bobby droptables 48 | if column not in valid_columns: 49 | continue 50 | 51 | value = statement[column] 52 | 53 | # Gotta make this mysql-y. If we do other databases eventually 54 | # we'll need to tell this thing about it so we can do the right 55 | # thing here 56 | if len(cur_strs) > 0: 57 | cur_strs.append(constraint['oper']) 58 | if expr == 'contains': 59 | expr = "LIKE" 60 | value = '%' + value + '%' 61 | 62 | cur_strs.append(column) 63 | cur_strs.append(expr) 64 | cur_strs.append(db.arg_str()) 65 | args.append(value) 66 | 67 | # Right now just AND all groups together, not sure if this will ever 68 | # happen in practice and I lack the desire for foresight 69 | if len(strs) > 0: 70 | strs.append("AND") 71 | strs.extend(cur_strs) 72 | return (" ".join(strs), tuple(args)) 73 | -------------------------------------------------------------------------------- /src/KernelscopeCategories.py: -------------------------------------------------------------------------------- 1 | import MySQLdb 2 | import Constraints 3 | import FlameGraph 4 | import Database 5 | 6 | # This is where you add tablename and columns 7 | _categories = {} 8 | _categories['offcputime'] = [ { "name": "hostname", "type": "string", "prettyname": "Hostname"}, 9 | { "name": "time", "type": "timestamp", "prettyname": "Time"}, 10 | { "name": "process", "type": "string", "prettyname": "Process"}, 11 | { "name": "pid", "type": "int", "prettyname": "Pid"}, 12 | { "name": "stack", "type": "stack", "prettyname": "Stack"}, 13 | { "name": "elapsed", "type": "elapsed", "prettyname": "Elapsed"} ] 14 | 15 | _valid_columns = {} 16 | _valid_columns['offcputime'] = [ "hostname", "time", "process", "pid", "stack", "elapsed" ] 17 | 18 | def _insert_entry(db, category, entry): 19 | columns = [] 20 | for k,v in entry.items(): 21 | if k in _valid_columns[category]: 22 | columns.append(k) 23 | cmd = "INSERT INTO " + category + " (" + ",".join(columns) + ") VALUES (" 24 | cmd += (db.arg_str() + ",") * len(columns) 25 | cmd = cmd[:-1] 26 | cmd += ")" 27 | values = [] 28 | for c in columns: 29 | values.append(entry[c]) 30 | print cmd 31 | print values 32 | cur = db.cursor() 33 | cur.execute(cmd, tuple(values)) 34 | 35 | def _load(db, category, query): 36 | # Queries can't be escaped, so we have to sanity check the elements we are 37 | # wanting to select to avoid bobby droptables 38 | columns = [] 39 | for e in query['elements']: 40 | if e not in _valid_columns[category]: 41 | continue 42 | columns.append(e) 43 | if len(columns) == 0: 44 | return {} 45 | 46 | cmd = "SELECT " + ",".join(columns) 47 | cmd += " FROM " + category 48 | (constraintstr, constraintargs) = Constraints.build_constraints(db, query, _valid_columns[category]) 49 | if len(constraintstr) > 0: 50 | cmd += " WHERE " + constraintstr 51 | if 'limit' in query: 52 | cmd += " LIMIT " + db.arg_str() 53 | constraintargs += (query['limit'],) 54 | print cmd 55 | print constraintargs 56 | cur = db.cursor() 57 | cur.execute(cmd, constraintargs) 58 | return cur.fetchall() 59 | 60 | def dump(db, obj): 61 | if 'hostname' not in obj or 'time' not in obj: 62 | return 63 | for k,v in obj.items(): 64 | if k in _valid_columns: 65 | for e in obj[k]: 66 | e['hostname'] = obj['hostname'] 67 | e['time'] = obj['time'] 68 | _insert_entry(db, k, e) 69 | db.commit() 70 | 71 | def load(db, constraints): 72 | retval = {} 73 | for k,v in constraints.items(): 74 | if k in _valid_columns: 75 | vals = _load(db, k, constraints[k]) 76 | if 'format' in constraints[k] and 'flamegraph' == constraints[k]['format']: 77 | return FlameGraph.build_flamegraph(vals) 78 | else: 79 | retval[k] = vals 80 | return retval 81 | 82 | def get_categories(): 83 | return _categories 84 | -------------------------------------------------------------------------------- /src/KernelscopeService.py: -------------------------------------------------------------------------------- 1 | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer 2 | import KernelscopeCategories 3 | from sys import argv 4 | import SocketServer 5 | import json 6 | from os import curdir 7 | import argparse 8 | from Database import Database 9 | from datetime import datetime 10 | 11 | db = Database() 12 | 13 | def date_serial(obj): 14 | if isinstance(obj, datetime): 15 | serial = obj.isoformat() 16 | return serial 17 | raise TypeError ("Type not serializable") 18 | 19 | class KernelscopeService(BaseHTTPRequestHandler): 20 | def _handle_api(self): 21 | if self.path == "/api/getcategories": 22 | response = KernelscopeCategories.get_categories() 23 | data = json.dumps(response) 24 | print data 25 | self.send_response(200) 26 | self.send_header('Content-type', 'application/json') 27 | self.send_header('Content-Length', len(data)) 28 | self.end_headers(); 29 | self.wfile.write(data) 30 | else: 31 | self.send_error(404, 'File not found') 32 | 33 | def do_GET(self): 34 | if self.path.startswith("/api"): 35 | self._handle_api() 36 | return 37 | 38 | if self.path == "/": 39 | self.path = "/index.html" 40 | 41 | reply = False 42 | if self.path.endswith('.html'): 43 | mimetype = "text/html" 44 | reply = True 45 | elif self.path.endswith('.json'): 46 | mimetype = 'application/json' 47 | reply = True 48 | elif self.path.endswith('.js'): 49 | mimetype = 'application/javascript' 50 | reply = True 51 | elif self.path.endswith('.css'): 52 | mimetype = 'text/css' 53 | reply = True 54 | print self.path 55 | if not reply: 56 | self.send_response(404); 57 | return 58 | try: 59 | f = open(curdir + "/web" + self.path) 60 | self.send_response(200) 61 | self.send_header('Content-type', mimetype) 62 | self.end_headers() 63 | self.wfile.write(f.read()) 64 | f.close() 65 | except IOError: 66 | self.send_error(404, 'File not found') 67 | 68 | def do_POST(self): 69 | if self.path != "/api": 70 | self.send_response(405, 'Cannot post to this url') 71 | return 72 | 73 | json_data = self.rfile.read(int(self.headers['Content-Length'])) 74 | 75 | self.send_response(200) 76 | self.send_header('Content-type', 'application/json') 77 | data = json.loads(json_data) 78 | print(data) 79 | response = KernelscopeCategories.load(db, data) 80 | data = json.dumps(response, default=date_serial) 81 | self.send_header('Content-Length', len(data)) 82 | self.end_headers() 83 | self.wfile.write(data) 84 | 85 | def run(port=80): 86 | server_address = ('', port) 87 | kscope = HTTPServer(server_address, KernelscopeService) 88 | print("Starting Kernelscope Service") 89 | kscope.serve_forever() 90 | 91 | parser = argparse.ArgumentParser(description="Start the kernelscope service") 92 | parser.add_argument("--sqlite", help="sqlite database to use") 93 | parser.add_argument("--mysql", help="mysql database host to use") 94 | parser.add_argument("--dbuser", help="database username") 95 | parser.add_argument("--dbpassword", help="database password") 96 | parser.add_argument("--dbname", help="database name to use", default="kernelscope") 97 | parser.add_argument("PORT", help="port number to listen on", type=int) 98 | 99 | args = parser.parse_args() 100 | if args.mysql: 101 | db.connect_mysql(args.mysql, args.dbuser, args.dbpassword, args.dbname) 102 | elif args.sqlite: 103 | db.connect_sqlite(args.sqlite) 104 | else: 105 | print("Must specify either a sqlite or mysql database") 106 | exit(1) 107 | 108 | run(args.PORT) 109 | -------------------------------------------------------------------------------- /json-api.md: -------------------------------------------------------------------------------- 1 | # Generic events submitted to the logger from the profiled machine 2 | 3 | - categoryname: Must be a valid category, so offcputime for now. 4 | - columnX: A column name for that category 5 | 6 | ``` 7 | { 8 | "hostname": "destiny", 9 | "time": "2016-10-24 17:12:43.288693", 10 | "categoryname": 11 | [ 12 | { 13 | "column1": "value", 14 | "column2": "value", 15 | }, 16 | { 17 | "column1": "value", 18 | "column1": value", 19 | } 20 | ], 21 | } 22 | ``` 23 | 24 | # Offcputimecategory 25 | 26 | elapsed is in nanoseconds 27 | 28 | ``` 29 | { 30 | "hostname": "destiny", 31 | "time": "2016-10-24 17:12:43.288693", 32 | "offcputime": 33 | [ 34 | { 35 | "process": "dd", 36 | "pid": 1234, 37 | "stack": "sys_write;btrfs_file_write;some_enospc_function_that_sucks", 38 | "elapsed": 123456, 39 | } 40 | ] 41 | } 42 | ``` 43 | 44 | # Queries format, submitted to the query service by the visualizer 45 | 46 | categoryname - Must be a valid category, so offcputime for now. 47 | - limit: The number of items to fetch, if not present it returns all of them 48 | - elementN: Valid column names for the category 49 | - expr: Must be "=", "<", "<=", ">", ">=", "!=", "contains" 50 | - oper: Must be "and" or "or" 51 | - format: Must be "list" or "flamegraph". If not specified we assume "list". 52 | 53 | ``` 54 | { 55 | "categoryname": 56 | { 57 | "elements": ["element1", "element2", "element3"], 58 | "format": "list" 59 | "limit": 5 60 | "constraints": 61 | [ 62 | { 63 | "oper": "and", 64 | "conditions": [ 65 | { 66 | "element1": "value", 67 | "expr": "=" 68 | }, 69 | { 70 | "element2": "value", 71 | "expr": "contains" 72 | }, 73 | ] 74 | } 75 | ] 76 | } 77 | } 78 | ``` 79 | 80 | The results will be in the following format for "format": "list" 81 | 82 | ``` 83 | { 84 | "categoryname": [ 85 | { 86 | "element1": "value", 87 | "element2": "value", 88 | "element3": "value", 89 | }, 90 | ] 91 | } 92 | ``` 93 | 94 | The results will be in the following format for "format": "flamegraph" 95 | 96 | ``` 97 | { 98 | "name": "sys_write", 99 | "value": 123456, 100 | "children": [ 101 | { 102 | "name": "btrfs_file_write", 103 | "value": 123456, 104 | "children": [ 105 | { 106 | "name": "some_enospce_function_that_sucks", 107 | "value": 123450, 108 | }, 109 | { 110 | "name": "prepare_pages", 111 | "value": 6, 112 | } 113 | ] 114 | } 115 | ] 116 | } 117 | ``` 118 | 119 | This is meant to be used in conjunction with one of the d3 javascript such as 120 | 121 | https://github.com/cimi/d3-flame-graphs 122 | 123 | If "flamegraph" is specificed then "elements" must contain the field that 124 | contains the collapsed stacktrace. If no other field is specified then the 125 | flamegraph is built based on frequency of the stack. If a time field is also 126 | specified it will be used as the time weight. Specifying more than 2 fields 127 | will probably return something weird or error out. Also you must only specify 128 | one category, otherwise you will only get one categories flamegraph back and 129 | it'll be random which one python finds first. 130 | 131 | # Get categories 132 | 133 | This is just a basic URL API, just open http://kernelscope/api/getcategories and 134 | you will get a json response in the following format 135 | 136 | column_type can currently be one of the following 137 | - string: Just a normal string 138 | - timestamp: A timestamp 139 | - int: An integer 140 | - stack: A stacktrace in the standard stacktrace format 141 | - elapsed: A time in usecs, usually for the given stacktrace 142 | 143 | ``` 144 | { 145 | "categoryname": [ 146 | { 147 | "name": "database column name", 148 | "type": "column_type", 149 | "prettyname": "A pretty name for the column", 150 | } 151 | ] 152 | } 153 | ``` 154 | -------------------------------------------------------------------------------- /src/offcputime.py: -------------------------------------------------------------------------------- 1 | # 2 | # Based on offcputime from bcc by Brendan Gregg. 3 | 4 | from bcc import BPF 5 | from KernelscopeLogger import KernelscopeLogger 6 | from time import sleep 7 | import signal 8 | import platform 9 | import re 10 | import socket 11 | import argparse 12 | 13 | parser = argparse.ArgumentParser(description="Log stacktraces and times when we are off the cpu") 14 | parser.add_argument("--threshold", type=int, help="only catch things that sleep for this many usecs") 15 | parser.add_argument("--sleeptype", type=int, help="only log certain sleep types, based on TASK_* states") 16 | parser.add_argument("--logger", help="the url for the logger service") 17 | 18 | args = parser.parse_args() 19 | if not args.logger: 20 | print("Must specify a logger service url") 21 | exit(1) 22 | threshold = 0 23 | if args.threshold: 24 | threshold = args.threshold 25 | 26 | duration = 60 27 | debug = 0 28 | maxdepth = 20 # and MAXDEPTH 29 | 30 | # signal handler 31 | def signal_ignore(signal, frame): 32 | print() 33 | 34 | # define BPF program 35 | bpf_text = """ 36 | #include 37 | #include 38 | #include 39 | #include 40 | 41 | struct key_t { 42 | char target[TASK_COMM_LEN]; 43 | u32 pid; 44 | u32 tret; 45 | }; 46 | BPF_HASH(counts, struct key_t); 47 | BPF_HASH(start, u32); 48 | 49 | BPF_STACK_TRACE(stackmap, 10000) 50 | 51 | int oncpu(struct pt_regs *ctx, struct task_struct *p) { 52 | u32 pid = p->pid; 53 | u64 ts, *tsp; 54 | 55 | // record previous thread sleep time 56 | if (SLEEP_TYPE_FILTER) { 57 | ts = bpf_ktime_get_ns(); 58 | start.update(&pid, &ts); 59 | } 60 | 61 | // calculate current thread's delta time 62 | pid = bpf_get_current_pid_tgid(); 63 | tsp = start.lookup(&pid); 64 | if (tsp == 0) 65 | return 0; // missed start or filtered 66 | u64 cur = bpf_ktime_get_ns(); 67 | if (cur < *tsp) 68 | return 0; // skip entries that go backwards 69 | u64 delta = bpf_ktime_get_ns() - *tsp; 70 | start.delete(&pid); 71 | delta = delta / 1000; 72 | if (delta < MINBLOCK_US) 73 | return 0; 74 | 75 | // create map key 76 | u64 zero = 0, *val; 77 | struct key_t key = {}; 78 | 79 | bpf_get_current_comm(&key.target, sizeof(key.target)); 80 | key.tret = stackmap.get_stackid(ctx, BPF_F_FAST_STACK_CMP); 81 | key.pid = pid; 82 | 83 | val = counts.lookup_or_init(&key, &zero); 84 | (*val) += delta; 85 | return 0; 86 | } 87 | """ 88 | 89 | bpf_text = bpf_text.replace('MINBLOCK_US', str(threshold)) 90 | sleep_type_filter = '1' 91 | if args.sleeptype: 92 | sleep_type_filter = ('p->state == %d' % (args.sleeptype)) 93 | bpf_text = bpf_text.replace('SLEEP_TYPE_FILTER', sleep_type_filter) 94 | if debug: 95 | print(bpf_text) 96 | 97 | # initialize BPF 98 | b = BPF(text=bpf_text) 99 | b.attach_kprobe(event="finish_task_switch", fn_name="oncpu") 100 | matched = b.num_open_kprobes() 101 | if matched != 1: 102 | print("%d functions traced. Exiting." % (matched)) 103 | exit() 104 | 105 | logger = KernelscopeLogger(args.logger) 106 | done = 0 107 | # output 108 | while (done == 0): 109 | try: 110 | sleep(duration) 111 | except KeyboardInterrupt: 112 | # as cleanup can take many seconds, trap Ctrl-C: 113 | signal.signal(signal.SIGINT, signal_ignore) 114 | done = 1 115 | 116 | counts = b.get_table("counts") 117 | stacks = b.get_table("stackmap") 118 | for k, v in sorted(counts.items(), key=lambda counts: counts[1].value): 119 | # TODO convert to StackWalker next time somebody updates bcc-py 120 | try: 121 | sleeper = stacks[stacks.Key(k.tret)] 122 | except: 123 | continue 124 | sleep_trace = [] 125 | wake_trace = [] 126 | # print default multi-line stack output 127 | for i in range(0, maxdepth): 128 | if sleeper.ip[i] == 0: 129 | break 130 | sleep_trace.append(b.ksym(sleeper.ip[i])) 131 | data = { 132 | 'process': str(k.target), 133 | 'pid': int(k.pid), 134 | 'elapsed': v.value, 135 | 'stack': ";".join(sleep_trace), 136 | } 137 | logger.add_entry('offcputime', data) 138 | logger.submit() 139 | counts.clear() 140 | stacks.clear() 141 | exit() 142 | -------------------------------------------------------------------------------- /src/web/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
10 |
11 |
12 |
13 |
14 | 15 | 16 |
17 |
18 | 19 | 23 |
24 |
25 |
26 |
27 | 28 | 29 |
30 |
31 | 32 |
33 |
34 | Submit 35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 | 43 | 44 | 45 | 46 | 47 | 256 | 257 | 258 | 259 | -------------------------------------------------------------------------------- /src/web/d3.flameGraph.js: -------------------------------------------------------------------------------- 1 | (function() { 2 | 'use strict'; 3 | 4 | function flameGraph() { 5 | 6 | var w = 960, // graph width 7 | h = 540, // graph height 8 | c = 18, // cell height 9 | selection = null, // selection 10 | tooltip = true, // enable tooltip 11 | title = "", // graph title 12 | transitionDuration = 750, 13 | transitionEase = "cubic-in-out", // tooltip offset 14 | sort = true, 15 | reversed = false, // reverse the graph direction 16 | clickHandler = null; 17 | 18 | var tip = d3.tip() 19 | .direction("s") 20 | .offset([8, 0]) 21 | .attr('class', 'd3-flame-graph-tip') 22 | .html(function(d) { return label(d); }); 23 | 24 | var labelFormat = function(d) { 25 | return d.name + " (" + d3.round(100 * d.dx, 3) + "%, " + d.value + " samples)"; 26 | }; 27 | 28 | function setDetails(t) { 29 | var details = document.getElementById("details"); 30 | if (details) 31 | details.innerHTML = t; 32 | } 33 | 34 | function label(d) { 35 | if (!d.dummy) { 36 | return labelFormat(d); 37 | } else { 38 | return ""; 39 | } 40 | } 41 | 42 | function name(d) { 43 | return d.name; 44 | } 45 | 46 | var colorMapper = function(d) { 47 | return d.highlight ? "#E600E6" : colorHash(d.name); 48 | }; 49 | 50 | function generateHash(name) { 51 | // Return a vector (0.0->1.0) that is a hash of the input string. 52 | // The hash is computed to favor early characters over later ones, so 53 | // that strings with similar starts have similar vectors. Only the first 54 | // 6 characters are considered. 55 | var hash = 0, weight = 1, max_hash = 0, mod = 10, max_char = 6; 56 | if (name) { 57 | for (var i = 0; i < name.length; i++) { 58 | if (i > max_char) { break; } 59 | hash += weight * (name.charCodeAt(i) % mod); 60 | max_hash += weight * (mod - 1); 61 | weight *= 0.70; 62 | } 63 | if (max_hash > 0) { hash = hash / max_hash; } 64 | } 65 | return hash; 66 | } 67 | 68 | function colorHash(name) { 69 | // Return an rgb() color string that is a hash of the provided name, 70 | // and with a warm palette. 71 | var vector = 0; 72 | if (name) { 73 | name = name.replace(/.*`/, ""); // drop module name if present 74 | name = name.replace(/\(.*/, ""); // drop extra info 75 | vector = generateHash(name); 76 | } 77 | var r = 200 + Math.round(55 * vector); 78 | var g = 0 + Math.round(230 * (1 - vector)); 79 | var b = 0 + Math.round(55 * (1 - vector)); 80 | return "rgb(" + r + "," + g + "," + b + ")"; 81 | } 82 | 83 | function augment(data) { 84 | // Augment partitioning layout with "dummy" nodes so that internal nodes' 85 | // values dictate their width. Annoying, but seems to be least painful 86 | // option. https://github.com/mbostock/d3/pull/574 87 | if (data.children && (data.children.length > 0)) { 88 | data.children.forEach(augment); 89 | var childValues = 0; 90 | data.children.forEach(function(child) { 91 | childValues += child.value; 92 | }); 93 | if (childValues < data.value) { 94 | data.children.push( 95 | { 96 | "name": "", 97 | "value": data.value - childValues, 98 | "dummy": true 99 | } 100 | ); 101 | } 102 | } 103 | } 104 | 105 | function hide(d) { 106 | if(!d.original) { 107 | d.original = d.value; 108 | } 109 | d.value = 0; 110 | if(d.children) { 111 | d.children.forEach(hide); 112 | } 113 | } 114 | 115 | function show(d) { 116 | d.fade = false; 117 | if(d.original) { 118 | d.value = d.original; 119 | } 120 | if(d.children) { 121 | d.children.forEach(show); 122 | } 123 | } 124 | 125 | function getSiblings(d) { 126 | var siblings = []; 127 | if (d.parent) { 128 | var me = d.parent.children.indexOf(d); 129 | siblings = d.parent.children.slice(0); 130 | siblings.splice(me, 1); 131 | } 132 | return siblings; 133 | } 134 | 135 | function hideSiblings(d) { 136 | var siblings = getSiblings(d); 137 | siblings.forEach(function(s) { 138 | hide(s); 139 | }); 140 | if(d.parent) { 141 | hideSiblings(d.parent); 142 | } 143 | } 144 | 145 | function fadeAncestors(d) { 146 | if(d.parent) { 147 | d.parent.fade = true; 148 | fadeAncestors(d.parent); 149 | } 150 | } 151 | 152 | function getRoot(d) { 153 | if(d.parent) { 154 | return getRoot(d.parent); 155 | } 156 | return d; 157 | } 158 | 159 | function zoom(d) { 160 | tip.hide(d); 161 | hideSiblings(d); 162 | show(d); 163 | fadeAncestors(d); 164 | update(); 165 | if (typeof clickHandler === 'function') { 166 | clickHandler(d); 167 | } 168 | } 169 | 170 | function searchTree(d, term) { 171 | var re = new RegExp(term), 172 | searchResults = []; 173 | 174 | function searchInner(d) { 175 | var label = d.name; 176 | 177 | if (d.children) { 178 | d.children.forEach(function (child) { 179 | searchInner(child); 180 | }); 181 | } 182 | 183 | if (label.match(re)) { 184 | d.highlight = true; 185 | searchResults.push(d); 186 | } else { 187 | d.highlight = false; 188 | } 189 | } 190 | 191 | searchInner(d); 192 | return searchResults; 193 | } 194 | 195 | function clear(d) { 196 | d.highlight = false; 197 | if(d.children) { 198 | d.children.forEach(function(child) { 199 | clear(child); 200 | }); 201 | } 202 | } 203 | 204 | function doSort(a, b) { 205 | if (typeof sort === 'function') { 206 | return sort(a, b); 207 | } else if (sort) { 208 | return d3.ascending(a.name, b.name); 209 | } else { 210 | return 0; 211 | } 212 | } 213 | 214 | var partition = d3.layout.partition() 215 | .sort(doSort) 216 | .value(function(d) {return d.v || d.value;}) 217 | .children(function(d) {return d.c || d.children;}); 218 | 219 | function update() { 220 | 221 | selection.each(function(data) { 222 | 223 | var x = d3.scale.linear().range([0, w]), 224 | y = d3.scale.linear().range([0, c]); 225 | 226 | var nodes = partition(data); 227 | 228 | var kx = w / data.dx; 229 | 230 | var g = d3.select(this).select("svg").selectAll("g").data(nodes); 231 | 232 | g.transition() 233 | .duration(transitionDuration) 234 | .ease(transitionEase) 235 | .attr("transform", function(d) { return "translate(" + x(d.x) + "," 236 | + (reversed ? y(d.depth) : (h - y(d.depth) - c)) + ")"; }); 237 | 238 | g.select("rect").transition() 239 | .duration(transitionDuration) 240 | .ease(transitionEase) 241 | .attr("width", function(d) { return d.dx * kx; }); 242 | 243 | var node = g.enter() 244 | .append("svg:g") 245 | .attr("transform", function(d) { return "translate(" + x(d.x) + "," 246 | + (reversed ? y(d.depth) : (h - y(d.depth) - c)) + ")"; }); 247 | 248 | node.append("svg:rect") 249 | .attr("width", function(d) { return d.dx * kx; }); 250 | 251 | if (!tooltip) 252 | node.append("svg:title"); 253 | 254 | node.append("foreignObject") 255 | .append("xhtml:div"); 256 | 257 | g.attr("width", function(d) { return d.dx * kx; }) 258 | .attr("height", function(d) { return c; }) 259 | .attr("name", function(d) { return d.name; }) 260 | .attr("class", function(d) { return d.fade ? "frame fade" : "frame"; }); 261 | 262 | g.select("rect") 263 | .attr("height", function(d) { return c; }) 264 | .attr("fill", function(d) { return colorMapper(d); }) 265 | .style("visibility", function(d) {return d.dummy ? "hidden" : "visible";}); 266 | 267 | if (!tooltip) 268 | g.select("title") 269 | .text(label); 270 | 271 | g.select("foreignObject") 272 | .attr("width", function(d) { return d.dx * kx; }) 273 | .attr("height", function(d) { return c; }) 274 | .select("div") 275 | .attr("class", "label") 276 | .style("display", function(d) { return (d.dx * kx < 35) || d.dummy ? "none" : "block";}) 277 | .text(name); 278 | 279 | g.on('click', zoom); 280 | 281 | g.exit().remove(); 282 | 283 | g.on('mouseover', function(d) { 284 | if(!d.dummy) { 285 | if (tooltip) tip.show(d); 286 | setDetails(label(d)); 287 | } 288 | }).on('mouseout', function(d) { 289 | if(!d.dummy) { 290 | if (tooltip) tip.hide(d); 291 | setDetails(""); 292 | } 293 | }); 294 | }); 295 | } 296 | 297 | function merge(data, samples) { 298 | samples.forEach(function (sample) { 299 | var node = _.find(data, function (element) { 300 | return element.name === sample.name; 301 | }); 302 | 303 | if (node) { 304 | node.value += sample.value; 305 | if (sample.children) { 306 | if (!node.children) { 307 | node.children = []; 308 | } 309 | merge(node.children, sample.children) 310 | } 311 | } else { 312 | data.push(sample); 313 | } 314 | }); 315 | } 316 | 317 | function chart(s) { 318 | 319 | selection = s; 320 | 321 | if (!arguments.length) return chart; 322 | 323 | selection.each(function(data) { 324 | 325 | var svg = d3.select(this) 326 | .append("svg:svg") 327 | .attr("width", w) 328 | .attr("height", h) 329 | .attr("class", "partition d3-flame-graph") 330 | .call(tip); 331 | 332 | svg.append("svg:text") 333 | .attr("class", "title") 334 | .attr("text-anchor", "middle") 335 | .attr("y", "25") 336 | .attr("x", w/2) 337 | .attr("fill", "#808080") 338 | .text(title); 339 | 340 | augment(data); 341 | 342 | // "creative" fix for node ordering when partition is called for the first time 343 | partition(data); 344 | 345 | }); 346 | 347 | // first draw 348 | update(); 349 | } 350 | 351 | chart.height = function (_) { 352 | if (!arguments.length) { return h; } 353 | h = _; 354 | return chart; 355 | }; 356 | 357 | chart.width = function (_) { 358 | if (!arguments.length) { return w; } 359 | w = _; 360 | return chart; 361 | }; 362 | 363 | chart.cellHeight = function (_) { 364 | if (!arguments.length) { return c; } 365 | c = _; 366 | return chart; 367 | }; 368 | 369 | chart.tooltip = function (_) { 370 | if (!arguments.length) { return tooltip; } 371 | if (typeof _ === "function") { 372 | tip = _; 373 | } 374 | tooltip = true; 375 | return chart; 376 | }; 377 | 378 | chart.title = function (_) { 379 | if (!arguments.length) { return title; } 380 | title = _; 381 | return chart; 382 | }; 383 | 384 | chart.transitionDuration = function (_) { 385 | if (!arguments.length) { return transitionDuration; } 386 | transitionDuration = _; 387 | return chart; 388 | }; 389 | 390 | chart.transitionEase = function (_) { 391 | if (!arguments.length) { return transitionEase; } 392 | transitionEase = _; 393 | return chart; 394 | }; 395 | 396 | chart.sort = function (_) { 397 | if (!arguments.length) { return sort; } 398 | sort = _; 399 | return chart; 400 | }; 401 | 402 | chart.reversed = function (_) { 403 | if (!arguments.length) { return reversed; } 404 | reversed = _; 405 | return chart; 406 | }; 407 | 408 | chart.label = function(_) { 409 | if (!arguments.length) { return labelFormat; } 410 | labelFormat = _; 411 | return chart; 412 | }; 413 | 414 | chart.search = function(term) { 415 | var searchResults = []; 416 | selection.each(function(data) { 417 | searchResults = searchTree(data, term); 418 | update(); 419 | }); 420 | return searchResults; 421 | }; 422 | 423 | chart.clear = function() { 424 | selection.each(function(data) { 425 | clear(data); 426 | update(); 427 | }); 428 | }; 429 | 430 | chart.zoomTo = function(d) { 431 | zoom(d); 432 | }; 433 | 434 | chart.resetZoom = function() { 435 | selection.each(function (data) { 436 | zoom(data); // zoom to root 437 | }); 438 | }; 439 | 440 | chart.onClick = function(_) { 441 | if (!arguments.length) { 442 | return clickHandler; 443 | } 444 | clickHandler = _; 445 | return chart; 446 | }; 447 | 448 | chart.merge = function(samples) { 449 | selection.each(function (data) { 450 | merge([data], [samples]); 451 | augment(data); 452 | }); 453 | update(); 454 | } 455 | 456 | chart.color = function(_) { 457 | if (!arguments.length) { return colorMapper; } 458 | colorMapper = _; 459 | return chart; 460 | }; 461 | 462 | return chart; 463 | } 464 | 465 | if (typeof module !== 'undefined' && module.exports){ 466 | module.exports = flameGraph; 467 | } 468 | else { 469 | d3.flameGraph = flameGraph; 470 | } 471 | })(); 472 | --------------------------------------------------------------------------------