├── .gitignore ├── README.md ├── __init__.py ├── client.py ├── config.json ├── orchestrator ├── README ├── api.py ├── dbadapter.py ├── helper.py ├── logger.py ├── permutation_details.json ├── permutator.py ├── results.py └── wsgi.py └── src ├── __init__.py ├── algorithms ├── __init__.py └── peakDetection │ ├── __init__.py │ ├── peakDetector.py │ ├── peakFuncs.py │ ├── postProcessing.py │ ├── preProcessing.py │ ├── smoothingFilter.py │ └── windowedPeakDetection.py ├── constants.py ├── infra ├── __init__.py ├── dataStructure.py ├── fancyPrinter.py ├── inputPipe.py ├── queue.py ├── simpleDataStructure.py └── workerThread.py ├── main.py ├── ui ├── __init__.py └── ui.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | #Ignore txt log files 2 | *.txt 3 | 4 | #Ignore compiled python 5 | *.pyc 6 | 7 | #Ignore PyCharm generated stuff 8 | .idea/* 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Step counter algorithm python testing framework 2 | 3 | This repository contains the Python implementation of the Windowed Peak Detection step counter algorithm with all its variations. 4 | 5 | The algorithm is wrapped in a client/server architecture that allows running multiple instances of the algorithm with different sets of parameters. 6 | The orchestrator is available in the orchestrator folder. 7 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Oxford-step-counter/Step-Counting-Algorithms-Testing/275138eaea7fd2e978400837101b0facb2a10147/__init__.py -------------------------------------------------------------------------------- /client.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import requests 3 | import json 4 | import os 5 | import time 6 | sys.dont_write_bytecode = True 7 | 8 | from src.algorithms.peakDetection.windowedPeakDetection import Wpd 9 | 10 | 11 | def main(): 12 | 13 | # Set up stuff 14 | get_url = 'http://api.jamiebrynes.com/get_next' 15 | return_url = 'http://api.jamiebrynes.com/return' 16 | headers = {'Content-type': 'application/json', 'Accept': 'text/plain'} 17 | databank = getDataBank('./data/') 18 | # Check server for new value 19 | response = requests.get(get_url) 20 | config = response.json() 21 | while 'status' not in config: 22 | 23 | print(config) 24 | 25 | # Unpack params 26 | pre = config['algorithm']['params']['pre'] 27 | filter = config['algorithm']['params']['filter'] 28 | scoring = config['algorithm']['params']['scoring'] 29 | detection = config['algorithm']['params']['detection'] 30 | post = config['algorithm']['params']['post'] 31 | 32 | # Initialize results dictionary 33 | config['results'] = dict() 34 | 35 | # Initialize stats dictionary 36 | stats = dict() 37 | stats['steps'] = 0 38 | stats['ground_truth'] = 0 39 | config['stats'] = stats 40 | 41 | # Start new batch sim 42 | for data in databank: 43 | fp = data + '/' 44 | algo = Wpd(fp, pre, filter, scoring, detection, post) 45 | getAlgoResults(algo, config) 46 | 47 | # Calculate algorithm accuracy 48 | score = 0 49 | n = 0 50 | for key in list(config['results'].keys()): 51 | score += config['results'][key]['accuracy'] 52 | n += 1 53 | config['stats']['accuracy'] = score / n 54 | 55 | res = 
requests.post(return_url, headers=headers, data=json.dumps(config))
56 | 
57 | response = requests.get(get_url)
58 | config = response.json()
59 | 
60 | 
61 | def getDataBank(data_path):
62 | 
63 | # Find all subdirectories in the data path
64 | dirs = [os.path.join(data_path, d) for d in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, d))]
65 | return dirs
66 | 
67 | 
68 | def getAlgoResults(algorithm, config):
69 | 
70 | print('Starting new algorithm')
71 | algorithm.start()
72 | while algorithm.isRunning():
73 | time.sleep(1)
74 | 
75 | # Algorithm is finished. Run comparison
76 | result = algorithm.compare()
77 | # Update stats
78 | config['stats']['steps'] += result[0]
79 | config['stats']['ground_truth'] += result[1]
80 | 
81 | # Add entry to results.
82 | config['results'][algorithm.filelocation] = dict()
83 | # Accuracy: 1 minus the relative step-count error against ground truth
84 | config['results'][algorithm.filelocation]['accuracy'] = 1 - abs(result[0] - result[1]) / result[1]
85 | 
86 | if __name__ == "__main__":
87 | main()
88 | 
-------------------------------------------------------------------------------- /config.json: --------------------------------------------------------------------------------
1 | {
2 | "algorithm": {
3 | "name":"wpd",
4 | "params": {
5 | "pre":{
6 | "inter_ts": 10,
7 | "ts_factor": 1000000
8 | },
9 | "filter": {
10 | "window_size": 51,
11 | "type": "gaussian",
12 | "std": 0.3,
13 | "cutoff_freq": 3,
14 | "sample_freq": 100
15 | },
16 | "scoring": {
17 | "window_size": 11,
18 | "type": "mean_diff"
19 | },
20 | "detection": {
21 | "threshold": 1.2
22 | },
23 | "post": {
24 | "time_threshold": 200
25 | }
26 | }
27 | },
28 | "file_path": "../data/Samsung_Carmelo_Hard_FrontPocket_1/"
29 | }
-------------------------------------------------------------------------------- /orchestrator/README: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Oxford-step-counter/Step-Counting-Algorithms-Testing/275138eaea7fd2e978400837101b0facb2a10147/orchestrator/README
-------------------------------------------------------------------------------- /orchestrator/api.py: --------------------------------------------------------------------------------
1 | import sys
2 | sys.dont_write_bytecode = True
3 | 
4 | from flask import Flask, request
5 | from flask_restful import Resource, Api
6 | 
7 | from permutator import Permutator
8 | from results import Results
9 | from helper import formatResults
10 | from dbadapter import DbAdapter
11 | 
12 | app = Flask(__name__)
13 | api = Api(app)
14 | 
15 | db = DbAdapter()
16 | permutator = Permutator(db)
17 | results = Results(len(permutator.permutations))
18 | 
19 | class Reset(Resource):
20 | def get(self):
21 | db.cleanDatabase(False)
22 | if permutator.reset():
23 | return {'status': 'successful'}
24 | else:
25 | return {'status': 'failed'}
26 | 
27 | class GetNext(Resource):
28 | def get(self):
29 | return permutator.getNext()
30 | 
31 | class BestResults(Resource): # distinct name so this Resource does not shadow the imported results.Results
32 | def get(self):
33 | return formatResults(db.getBest(5))
34 | 
35 | @app.route('/return', methods=['GET', 'POST'])
36 | def ReceiveData():
37 | content = request.json
38 | db.addEntry(content)
39 | return 'Successful'
40 | 
41 | 
42 | 
43 | api.add_resource(Reset, '/reset')
44 | api.add_resource(GetNext, '/get_next')
45 | api.add_resource(BestResults, '/results')
46 | 
47 | if __name__ == '__main__':
48 | app.run(host="0.0.0.0")
49 | 
--------------------------------------------------------------------------------
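The endpoints above define the entire client/server protocol: workers poll /get_next, post their scores to /return, and the leaderboard is read from /results. A minimal sketch of one round-trip follows; the base URL and the placeholder stats are assumptions (the real client above targets api.jamiebrynes.com and fills the stats in from a batch run).

```python
import requests

BASE = 'http://127.0.0.1:5000'  # assumed local dev address for the Flask app above

requests.get(BASE + '/reset')                  # drop result tables, rewind the permutator
job = requests.get(BASE + '/get_next').json()  # one parameter permutation, or {'status': 'end'}

if 'status' not in job:
    # A real worker runs Wpd here; placeholder values shown for illustration only.
    job['results'] = {}
    job['stats'] = {'steps': 0, 'ground_truth': 0, 'accuracy': 0.0}
    requests.post(BASE + '/return', json=job)  # stored via DbAdapter.addEntry

print(requests.get(BASE + '/results').text)    # accuracies of the five best runs
```

/orchestrator/dbadapter.py: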
--------------------------------------------------------------------------------
1 | import psycopg2
2 | 
3 | from logger import log
4 | 
5 | class DbAdapter:
6 | 
7 | 
8 | def __init__(self):
9 | self.LOG_TAG = "DBADAPTER"
10 | self.cur = None
11 | self.conn = None
12 | 
13 | log(1, self.LOG_TAG, 'Initializing results database.')
14 | 
15 | 
16 | self.conn = psycopg2.connect(dbname="4yp_results", user="4yp", password="4yp", host="127.0.0.1", port=7001)
17 | self.cur = self.conn.cursor()
18 | self.cleanDatabase()
19 | 
20 | def cleanDatabase(self, cleanParams = True):
21 | log(1, self.LOG_TAG, 'Cleaning database to pure state.')
22 | self.cur.execute("DROP TABLE IF EXISTS file_results")
23 | self.cur.execute("DROP TABLE IF EXISTS overall_results")
24 | if cleanParams:
25 | self.cur.execute("DROP TABLE IF EXISTS parameters")
26 | 
27 | log(1, self.LOG_TAG, 'Creating parameters table')
28 | self.cur.execute("""CREATE TABLE IF NOT EXISTS parameters (parameter_id serial PRIMARY KEY, filter text NOT NULL, scoring text NOT NULL, detection text NOT NULL, post text NOT NULL);""") # IF NOT EXISTS: when cleanParams is False the table survives the drops above, and a plain CREATE would fail here
29 | 
30 | log(1, self.LOG_TAG, 'Creating file_results table')
31 | self.cur.execute("""CREATE TABLE file_results (id serial PRIMARY KEY, phone text NOT NULL, person text NOT NULL, surface text NOT NULL, position text NOT NULL, accuracy real NOT NULL, parameters int NOT NULL REFERENCES parameters(parameter_id));""")
32 | 
33 | log(1, self.LOG_TAG, 'Creating overall_results table')
34 | self.cur.execute("""CREATE TABLE overall_results (id serial PRIMARY KEY, parameters int NOT NULL REFERENCES parameters(parameter_id), accuracy real NOT NULL);""")
35 | 
36 | self.conn.commit()
37 | 
38 | def addEntry(self, result):
39 | 
40 | acc = result['stats']['accuracy']
41 | param_id = result['algorithm']['params']['key']
42 | 
43 | self.cur.execute("""INSERT INTO overall_results (parameters, accuracy) VALUES (%s, %s);""", (param_id, acc))
44 | 
45 | for filename in result['results']:
46 | (phone, person, surface, position) = self.decodeFileName(filename)
47 | acc = result['results'][filename]['accuracy']
48 | self.cur.execute("""INSERT INTO file_results (phone, person, surface, position, accuracy, parameters) VALUES (%s,%s,%s,%s,%s,%s)""", (phone, person, surface, position, acc, param_id))
49 | 
50 | self.conn.commit()
51 | 
52 | 
53 | def addParameterSet(self, params):
54 | 
55 | sFilter = "/"
56 | sScoring = "/"
57 | sDetection = "/"
58 | sPost = "/"
59 | 
60 | for key in params['filter']:
61 | sFilter += key + ":" + str(params['filter'][key]) + "/"
62 | for key in params['scoring']:
63 | sScoring += key + ":" + str(params['scoring'][key]) + "/"
64 | for key in params['detection']:
65 | sDetection += key + ":" + str(params['detection'][key]) + "/"
66 | for key in params['post']:
67 | sPost += key + ":" + str(params['post'][key]) + "/"
68 | 
69 | self.cur.execute("""INSERT INTO parameters (filter, scoring, detection, post) VALUES(%s, %s, %s, %s) RETURNING parameter_id;""", (sFilter, sScoring, sDetection, sPost))
70 | p_key = self.cur.fetchone()[0]
71 | self.conn.commit()
72 | return p_key
73 | 
74 | def getBest(self, number):
75 | 
76 | self.cur.execute("""SELECT * FROM overall_results ORDER BY accuracy DESC LIMIT %s""", (number,))
77 | return self.cur.fetchall()
78 | 
79 | # Data set folders are named Phone_Person_Surface_Position (e.g. Samsung_Carmelo_Hard_FrontPocket_1)
80 | def decodeFileName(self, filename):
81 | split = filename.split('_')
82 | return (split[0], split[1], split[2], split[3])
--------------------------------------------------------------------------------
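getBest returns raw overall_results rows, so the /results page shows accuracies with no indication of which parameters produced them. A hypothetical helper (not in the repo) that joins against the parameters table under the schema above could look like this:

```python
# Sketch only: assumes a DbAdapter instance as defined above.
def getBestWithParams(db, number):
    db.cur.execute(
        """SELECT o.accuracy, p.filter, p.scoring, p.detection, p.post
           FROM overall_results o
           JOIN parameters p ON o.parameters = p.parameter_id
           ORDER BY o.accuracy DESC
           LIMIT %s;""", (number,))
    return db.cur.fetchall()
```

/orchestrator/helper.py: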
--------------------------------------------------------------------------------
1 | 
2 | 
3 | def formatResults(results):
4 | 
5 | # Take in the list of tuples and return the accuracies.
6 | 
7 | ret = ""
8 | 
9 | for res in results:
10 | 
11 | (rid, pid, acc) = res
12 | ret += str(acc) + "\n" # NOTE: the markup that originally wrapped this value was lost in extraction; plain one-accuracy-per-line output is an assumption
13 | 
14 | return ret
15 | 
-------------------------------------------------------------------------------- /orchestrator/logger.py: --------------------------------------------------------------------------------
1 | from datetime import datetime
2 | 
3 | infoLevel = dict()
4 | infoLevel[0] = "DEBUG"
5 | infoLevel[1] = "INFO"
6 | infoLevel[2] = "WARN"
7 | infoLevel[3] = "ERROR"
8 | 
9 | def log(level, src, msg):
10 | """This is the logging functionality for the backend api.
11 | 
12 | It takes three arguments: the info level, the source of the message, and the message itself. It formats a date and outputs the message to the console.
13 | 
14 | The info level is decoded by a helper function.
15 | 
16 | Arguments:
17 | level {int} -- the level of the message: DEBUG, INFO, WARN, ERROR
18 | src {string} -- the source of the message
19 | msg {string} -- the message content
20 | """
21 | 
22 | d = decodeTime()
23 | line = "[{0}] [{1}] [{2}] {3}".format(decodeLevel(level), d, src, msg)
24 | print(line)
25 | 
26 | 
27 | def decodeTime():
28 | """Return the formatted, zero-padded date string for the logging message."""
29 | 
30 | d = datetime.today()
31 | return d.strftime("%Y-%m-%d %H:%M:%S")
32 | 
33 | def decodeLevel(level):
34 | """Decode the numeric level of the message.
35 | 
36 | A simple mapping is used:
37 | 0 - DEBUG
38 | 1 - INFO
39 | 2 - WARN
40 | 3 - ERROR
41 | Other - UNKNOWN
42 | 
43 | 
44 | Arguments:
45 | level {int} -- numeric level to decode
46 | """
47 | 
48 | if level in infoLevel:
49 | return infoLevel[level]
50 | return "UNKNOWN"
51 | 
-------------------------------------------------------------------------------- /orchestrator/permutation_details.json: --------------------------------------------------------------------------------
1 | {
2 | "filter": {
3 | "kaiser_bessel": {
4 | "cutoff_freq": 3,
5 | "sample_freq": 100,
6 | "window_size": {
7 | "min": 13,
8 | "max": 53,
9 | "step": 8
10 | }
11 | },
12 | "gaussian": {
13 | "std": 0.35,
14 | "window_size": {
15 | "min": 13,
16 | "max": 53,
17 | "step": 8
18 | }
19 | },
20 | "hann": {
21 | "window_size":{
22 | "min": 13,
23 | "max": 53,
24 | "step": 8
25 | }
26 | },
27 | "center_moving_avg": {
28 | "window_size": {
29 | "min": 13,
30 | "max": 53,
31 | "step": 8
32 | }
33 | }
34 | },
35 | "scoring": {
36 | "mean_diff": {
37 | "window_size": {
38 | "min": 3,
39 | "max": 51,
40 | "step": 8
41 | }
42 | },
43 | "pan_tompkins": {
44 | "window_size": {
45 | "min": 11,
46 | "max": 51,
47 | "step": 8
48 | }
49 | },
50 | "max_diff": {
51 | "window_size": {
52 | "min": 3,
53 | "max": 51,
54 | "step": 8
55 | }
56 | },
57 | "pass_through": {
58 | "window_size": 1
59 | }
60 | },
61 | "detection": {
62 | "solo": {
63 | "threshold": {
64 | "min": 1.2,
65 | "max": 1.5,
66 | "step": 0.2
67 | }
68 | }
69 | },
70 | "post": {
71 | "solo": {
72 | "time_threshold": 200
73 | }
74 | }
75 | }
-------------------------------------------------------------------------------- /orchestrator/permutator.py: --------------------------------------------------------------------------------
1 | import json
2 | import time
3 | from logger import log
4 | 
5 | class Permutator():
6 | 
7 | def __init__(self, db):
8 | self.count = 0
9 | self.permutations = []
10 | Permutator.loadJson("permutation_details.json", self.permutations, db) # fills self.permutations in place; loadJson has no return value
11 | 
self.start_time = None
12 | log(1, 'Permutator', 'Permutator initialized')
13 | 
14 | def getNext(self):
15 | if self.start_time is None:
16 | self.start_time = time.time()
17 | if self.count < len(self.permutations):
18 | data = self.permutations[self.count]
19 | else:
20 | data = {'status': 'end'}
21 | self.count += 1
22 | log(1, 'Permutator', 'Sending next permutation. ' + str(self.count) + '/' + str(len(self.permutations)))
23 | log(1, 'Permutator', 'Time elapsed: ' + Permutator.timeConvert(time.time() - self.start_time))
24 | log(1, 'Permutator', 'Estimated Time Left: ' + Permutator.estimateTimeLeft(self.count, len(self.permutations), time.time() - self.start_time))
25 | return data
26 | 
27 | def reset(self):
28 | self.count = 0
29 | self.start_time = None
30 | log(1, 'Permutator', 'Permutator reset.')
31 | return True
32 | 
33 | @staticmethod
34 | def timeConvert(seconds):
35 | 
36 | minutes = int(seconds) // 60
37 | _seconds = int(seconds) % 60
38 | _minutes = int(minutes % 60)
39 | _hours = int(minutes / 60)
40 | 
41 | return "{0}:{1:02d}:{2:02d}".format(_hours, _minutes, _seconds)
42 | 
43 | @staticmethod
44 | def estimateTimeLeft(current, total, elapsed):
45 | 
46 | estimated_left = (elapsed * total / current) - elapsed
47 | r = Permutator.timeConvert(estimated_left)
48 | return r
49 | 
50 | @staticmethod
51 | def loadJson(filepath, permutations, db):
52 | 
53 | permutation_data = json.load(open(filepath, 'r'))
54 | 
55 | permutations_temp = []
56 | permutations_temp2 = []
57 | 
58 | # Permute over each section and merge
59 | filter_data = permutation_data['filter']
60 | Permutator.permuteSection(permutations_temp, filter_data, 'filter')
61 | scoring_data = permutation_data['scoring']
62 | Permutator.permuteSection(permutations_temp2, scoring_data, 'scoring')
63 | Permutator.mergePermutations(permutations_temp, permutations_temp2)
64 | detection_data = permutation_data['detection']
65 | Permutator.permuteSection(permutations_temp2, detection_data, 'detection')
66 | Permutator.mergePermutations(permutations_temp, permutations_temp2)
67 | post_data = permutation_data['post']
68 | Permutator.permuteSection(permutations_temp2, post_data, 'post')
69 | Permutator.mergePermutations(permutations_temp, permutations_temp2)
70 | 
71 | # Add the standard prefix onto the beginning.
72 | for perm in permutations_temp:
73 | 
74 | key = db.addParameterSet(perm)
75 | 
76 | d = dict()
77 | d['algorithm'] = dict()
78 | d['algorithm']['name'] = 'wpd'
79 | d['algorithm']['params'] = dict()
80 | d['algorithm']['params']['key'] = key
81 | d['algorithm']['params']['pre'] = {'inter_ts': 10, 'ts_factor': 1000000}
82 | for section in perm: # 'section' iterates filter/scoring/detection/post without clobbering the parameter-set key above
83 | d['algorithm']['params'][section] = perm[section]
84 | permutations.append(d)
85 | 
86 | @staticmethod
87 | def mergePermutations(temp1, temp2):
88 | 
89 | temp = temp1[:]
90 | temp1[:] = []
91 | for t in temp:
92 | for t2 in temp2:
93 | d = dict()
94 | for key in list(t.keys()):
95 | d[key] = t[key]
96 | for key in list(t2.keys()):
97 | d[key] = t2[key]
98 | temp1.append(d)
99 | 
100 | temp2[:] = []
101 | 
102 | 
103 | @staticmethod
104 | def permuteSection(permutations_temp, data, key):
105 | 
106 | # Unpack all params.
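# A scalar parameter value is treated as fixed ('statics' below), while a
# {'min', 'max', 'step'} dictionary is swept ('permutables'). For example, the
# 'gaussian' entry of permutation_details.json,
#     {"std": 0.35, "window_size": {"min": 13, "max": 53, "step": 8}},
# keeps std fixed and sweeps window_size over 13, 21, 29, 37, 45, 53, yielding
# six dictionaries like {'filter': {'type': 'gaussian', 'std': 0.35, 'window_size': 13}}.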
107 | for typ in data.keys():
108 | permutables = []
109 | statics = []
110 | type_data = data[typ]
111 | for param in type_data.keys():
112 | if type(type_data[param]) is int or type(type_data[param]) is float:
113 | statics.append({param: type_data[param]})
114 | else:
115 | permutables.append({param: type_data[param]})
116 | 
117 | Permutator.recursiveConstruct(permutations_temp, key, typ, statics, permutables)
118 | 
119 | 
120 | 
121 | @staticmethod
122 | def recursiveConstruct(permutations_temp, key, typ, statics, permutables):
123 | 
124 | # Recursion base case. Build the dictionary
125 | if len(permutables) == 0:
126 | d = dict()
127 | d[key] = dict()
128 | p = d[key]
129 | if typ != 'solo':
130 | p['type'] = typ
131 | for static in statics:
132 | p[list(static.keys())[0]] = static[list(static.keys())[0]]
133 | permutations_temp.append(d)
134 | return
135 | 
136 | p = permutables[:]
137 | s = statics[:]
138 | permutable = p.pop()
139 | key1 = list(permutable.keys())[0]
140 | data = permutable[key1].copy() # copy so the sweep below does not mutate the caller's range spec
141 | while data['min'] <= data['max']:
142 | s.append({key1: data['min']})
143 | Permutator.recursiveConstruct(permutations_temp, key, typ, s, p)
144 | data['min'] += data['step']
145 | s = statics[:]
146 | 
-------------------------------------------------------------------------------- /orchestrator/results.py: --------------------------------------------------------------------------------
1 | import json
2 | from logger import log
3 | # Class to contain the results of each permutation of parameters. Will store the top 5 results. (Note: parse() expects a stats['score'] field; the running api.py stores incoming results through DbAdapter instead.)
4 | class Results:
5 | 
6 | def __init__(self, size):
7 | self.content = []
8 | self.errors = []
9 | self.maximum = []
10 | self.min_max = 0
11 | self.size = size
12 | 
13 | def reset(self):
14 | self.content = []
15 | self.errors = []
16 | self.maximum = []
17 | self.min_max = 0
18 | 
19 | def parse(self, content):
20 | if 'error' in content:
21 | self.errors.append(content)
22 | log(2, 'Results', 'Received score with error.')
23 | else:
24 | self.content.append(content)
25 | log(1, 'Results', 'New score received: ' + str(content['stats']['score']))
26 | # Determine if we are in the top 5.
27 | if len(self.maximum) < 5:
28 | self.maximum.append(content)
29 | if self.min_max > content['stats']['score']:
30 | self.min_max = content['stats']['score']
31 | else:
32 | if content['stats']['score'] > self.min_max:
33 | self.insertNewMax(content)
34 | 
35 | if len(self.content) + len(self.errors) == self.size:
36 | # We have received all data points. Dump to file.
37 | with open('results.json', 'w') as outfile:
38 | json.dump(self.maximum, outfile)
39 | 
40 | def show(self):
41 | return self.maximum
42 | 
43 | def get(self):
44 | return self.content
45 | 
46 | def insertNewMax(self, content):
47 | 
48 | log(1, 'Results', 'New maximum found!')
49 | 
50 | # Find minimum in max.
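# Design note: with only five entries the two linear scans below are cheap and
# simple; for a larger "top N" a heapq-based min-heap keyed on the score would
# avoid rescanning the whole list on every insertion.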
51 | _min = 1000000000 52 | n = 0 53 | for i, maxValue in enumerate(self.maximum): 54 | if maxValue['stats']['score'] < _min: 55 | _min = maxValue['stats']['score'] 56 | n = i 57 | 58 | # Remove minimum and insert new value 59 | del self.maximum[n] 60 | self.maximum.append(content) 61 | 62 | #Find minimum again 63 | _min = 1000000000 64 | n = 0 65 | for i, maxValue in enumerate(self.maximum): 66 | if maxValue['stats']['score'] < _min: 67 | _min = maxValue['stats']['score'] 68 | n = i 69 | 70 | self.min_max = self.maximum[n]['stats']['score'] 71 | -------------------------------------------------------------------------------- /orchestrator/wsgi.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.dont_write_bytecode = True 3 | 4 | from api import app 5 | 6 | if __name__ == "__main__": 7 | app.run() 8 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Oxford-step-counter/Step-Counting-Algorithms-Testing/275138eaea7fd2e978400837101b0facb2a10147/src/__init__.py -------------------------------------------------------------------------------- /src/algorithms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Oxford-step-counter/Step-Counting-Algorithms-Testing/275138eaea7fd2e978400837101b0facb2a10147/src/algorithms/__init__.py -------------------------------------------------------------------------------- /src/algorithms/peakDetection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Oxford-step-counter/Step-Counting-Algorithms-Testing/275138eaea7fd2e978400837101b0facb2a10147/src/algorithms/peakDetection/__init__.py -------------------------------------------------------------------------------- /src/algorithms/peakDetection/peakDetector.py: -------------------------------------------------------------------------------- 1 | # ======================================================================== # 2 | # 3 | # peakDetector.py 4 | # Jamieson Brynes 5 | # 10/22/2016 6 | # 7 | # This class contains the implementation of the peak detection 8 | # algorithm. It uses a simple mean and standard deviation method 9 | # to find significant values of peak scores. 10 | # 11 | # ======================================================================== # 12 | 13 | import math 14 | 15 | from src.infra.simpleDataStructure import Sds 16 | from src.infra.workerThread import WorkerThread 17 | 18 | 19 | class PeakDetector(WorkerThread): 20 | 21 | # Worker function for peak detection. 
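# A note on the running statistics used below: the mean update is the exact
# running mean, mean_n = (x_n + (n - 1) * mean_{n-1}) / n. The std update is an
# incremental approximation of the sample standard deviation; its per-step
# error shrinks as n grows, and the detector only starts declaring peaks once
# n > 15, by which point the estimate tracks the true value closely.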
22 | def peakDetect(self): 23 | while self.active: 24 | if not self.inputQueue.isEmpty(): 25 | 26 | # Get next data point 27 | dp = self.inputQueue.dequeue() 28 | 29 | # Special handling for end case 30 | if dp == 'end': 31 | self.active = False 32 | self.completed = True 33 | self.outputQueue.enqueue('end') 34 | return 35 | 36 | # Add to data list 37 | self.data.append(dp) 38 | 39 | # Update statistics 40 | self.n += 1 41 | if self.n == 1: 42 | # First data point 43 | self.mean = dp.mag 44 | self.std = 0 45 | elif self.n == 2: 46 | # Second data point 47 | o_mean = self.mean 48 | self.mean = (dp.mag + self.mean) /2 49 | self.std = math.sqrt((math.pow(dp.mag - self.mean, 2) + math.pow(o_mean - self.mean, 2)) / 2) 50 | else: 51 | # Iteratively update mean and standard deviation 52 | o_mean = self.mean 53 | self.mean = (dp.mag + (self.n - 1) * self.mean) / self.n 54 | self.std = math.sqrt(((self.n - 2) * math.pow(self.std, 2) / (self.n - 1)) + math.pow(o_mean - self.mean,2) + math.pow(dp.mag - self.mean, 2) / self.n) 55 | if self.n > 15: 56 | # Check if we are above the threshold 57 | if (dp.mag - self.mean) > self.std * self.threshold: 58 | # Declare this a peak 59 | self.outputQueue.enqueue(Sds(dp.time, dp.oldMag)) 60 | self.dataout.append(Sds(dp.time, dp.oldMag)) 61 | 62 | # Constructor for the object 63 | # @args: 64 | # 1. params - dictionary with parameters 65 | # a. 'threshold' - standard deviation threshold to call a peak a peak 66 | # 2. peakScores - input data queue containing the peak scores 67 | # 3. peakScoreData - list to put the peak score data 68 | # 4. peaks - output data queue containing identified peaks 69 | # 5. peakData - output data list to put identified peaks 70 | def __init__(self, params, peakScores, peakScoreData, peaks, peakData): 71 | 72 | super(PeakDetector, self).__init__() 73 | self.target = self.peakDetect 74 | 75 | # Internal data representations 76 | self.inputQueue = peakScores 77 | self.data = peakScoreData 78 | self.outputQueue = peaks 79 | self.dataout = peakData 80 | 81 | # Internal statistics 82 | self.n = 0 83 | self.mean = 0 84 | self.std = 0 85 | 86 | # Param unpacking 87 | self.threshold = params['threshold'] 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | -------------------------------------------------------------------------------- /src/algorithms/peakDetection/peakFuncs.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from src.infra.queue import Queue 4 | from src.infra.simpleDataStructure import Sds 5 | from src.infra.workerThread import WorkerThread 6 | 7 | 8 | class PeakScorer(WorkerThread): 9 | 10 | def passThrough(self): 11 | while self.active: 12 | if not self.inputQueue.isEmpty(): 13 | 14 | # Get next data point 15 | dp = self.inputQueue.dequeue() 16 | 17 | if dp == 'end': 18 | self.completed = True 19 | self.active = False 20 | self.outputQueue.enqueue('end') 21 | return 22 | 23 | self.data.append(dp) 24 | self.outputQueue.enqueue(dp) 25 | 26 | def maxDiff(self): 27 | while self.active: 28 | if not self.inputQueue.isEmpty(): 29 | 30 | # Get next data point 31 | dp = self.inputQueue.dequeue() 32 | 33 | # Special case handling for end data point. 34 | if dp == 'end': 35 | self.completed = True 36 | self.active = False 37 | self.outputQueue.enqueue('end') 38 | return 39 | 40 | # Add data point to list and queue 41 | self.data.append(dp) 42 | self.window.enqueue(dp) 43 | 44 | # Once we reach the window size, do some processing! 
45 | if len(self.window) == self.windowSize: 46 | 47 | # Calculate peak score 48 | midPoint = int(self.windowSize / 2) 49 | maxDiffLeft = -100 50 | maxDiffRight = -100 51 | 52 | # Find max difference on left 53 | for i in range(0, midPoint): 54 | value = self.window[midPoint].mag - self.window[i].mag 55 | if value > maxDiffLeft: 56 | maxDiffLeft = value 57 | 58 | # Find max difference on right 59 | for i in range(midPoint + 1, len(self.window)): 60 | value = self.window[midPoint].mag - self.window[i].mag 61 | if value > maxDiffRight: 62 | maxDiffRight = value 63 | 64 | # Calculate peak score and create a new point 65 | avg = (maxDiffRight + maxDiffLeft) / 2 66 | new_dp = Sds(self.window[midPoint].time, avg, self.window[midPoint].mag) 67 | self.outputQueue.enqueue(new_dp) 68 | self.window.dequeue() 69 | 70 | def meanDiff(self): 71 | while self.active: 72 | if not self.inputQueue.isEmpty(): 73 | 74 | # Get next data point 75 | dp = self.inputQueue.dequeue() 76 | 77 | # Special case handling for end data point. 78 | if dp == 'end': 79 | self.completed = True 80 | self.active = False 81 | self.outputQueue.enqueue('end') 82 | return 83 | 84 | # Add data point to list and queue 85 | self.data.append(dp) 86 | self.window.enqueue(dp) 87 | 88 | # Once we reach the window size, do some processing! 89 | if len(self.window) == self.windowSize: 90 | 91 | # Calculate peak score 92 | midPoint = int(self.windowSize / 2) 93 | diffLeft = 0 94 | diffRight = 0 95 | 96 | # Find total difference on left 97 | for i in range(0, midPoint): 98 | value = self.window[midPoint].mag - self.window[i].mag 99 | diffLeft += value 100 | 101 | # Find total difference on right 102 | for i in range(midPoint + 1, len(self.window)): 103 | value = self.window[midPoint].mag - self.window[i].mag 104 | diffRight += value 105 | 106 | # Calculate peak score and create a new point 107 | avg = (diffLeft + diffRight) / (self.windowSize - 1) 108 | new_dp = Sds(self.window[midPoint].time, avg, self.window[midPoint].mag) 109 | self.outputQueue.enqueue(new_dp) 110 | self.window.dequeue() 111 | 112 | 113 | def panTompkins(self): 114 | while self.active: 115 | if not self.inputQueue.isEmpty(): 116 | 117 | # Get next data point 118 | dp = self.inputQueue.dequeue() 119 | 120 | # Special case handling for end data point. 121 | if dp == 'end': 122 | self.completed = True 123 | self.active = False 124 | self.outputQueue.enqueue('end') 125 | return 126 | 127 | # Add data point to list and queue 128 | self.data.append(dp) 129 | self.window.enqueue(dp) 130 | 131 | # Once we reach the window size, do some processing! 132 | if len(self.window) == self.windowSize: 133 | 134 | midPoint = int(self.windowSize / 2) 135 | 136 | # Calculate mean of window 137 | ssum = 0 138 | for i in range(0,self.windowSize): 139 | ssum += self.window[i].mag 140 | mean = ssum / self.windowSize 141 | 142 | new_mag = 0 if self.window[midPoint].mag - mean < 0 else self.window[midPoint].mag - mean 143 | # Square it. 144 | new_mag *= new_mag 145 | 146 | # Calculate peak score and create a new point 147 | new_dp = Sds(self.window[midPoint].time, new_mag, self.window[midPoint].mag) 148 | self.outputQueue.enqueue(new_dp) 149 | self.window.dequeue() 150 | 151 | # Constructor for the object 152 | # @args: 153 | # 1. params - dictionary containing relevant parameters 154 | # a. 'window_size' - size of the window for maxDiff 155 | # b. 'type' - the type of scorer to use 156 | # 2. smoothData - queue containing smoothed data 157 | # 3. 
smoothDataList - list containing smoothed data
158 | # 4. peakScores - queue containing peak scores data
159 | def __init__(self, params, smoothData, smoothDataList, peakScores):
160 | 
161 | super(PeakScorer, self).__init__()
162 | 
163 | # Internal references for smooth data
164 | self.inputQueue = smoothData
165 | self.data = smoothDataList
166 | self.outputQueue = peakScores
167 | 
168 | # Internal window
169 | self.window = Queue()
170 | 
171 | # Parameter unpacking
172 | self.windowSize = params['window_size']
173 | self.typ = params['type']
174 | 
175 | # Assign target
176 | if self.typ == 'max_diff':
177 | self.target = self.maxDiff
178 | elif self.typ == 'mean_diff':
179 | self.target = self.meanDiff
180 | elif self.typ == 'pan_tompkins':
181 | self.target = self.panTompkins
182 | elif self.typ == 'pass_through':
183 | self.target = self.passThrough
184 | else:
185 | raise Exception('Unknown peak scorer type: ' + self.typ)
186 | 
187 | 
188 | 
-------------------------------------------------------------------------------- /src/algorithms/peakDetection/postProcessing.py: --------------------------------------------------------------------------------
1 | # ======================================================================== #
2 | #
3 | # postProcessing.py
4 | # Jamieson Brynes
5 | # 10/22/2016
6 | #
7 | # This class contains the implementation of the post-processing stage.
8 | #
9 | # ========================================================================= #
10 | 
11 | from src.infra.queue import Queue
12 | from src.infra.workerThread import WorkerThread
13 | 
14 | 
15 | class WpdPostProcessor(WorkerThread):
16 | 
17 | # Worker function for the post processing
18 | def postProcess(self):
19 | while self.active:
20 | if not self.inputQueue.isEmpty():
21 | 
22 | # Get next data point
23 | dp = self.inputQueue.dequeue()
24 | 
25 | # Special case handling for last data point: flush the held candidate, if any
26 | if dp == 'end':
27 | self.completed = True
28 | self.active = False
29 | if len(self.queue) > 0: # guard: with no candidate held (e.g. no peaks at all), dequeue() would raise
30 | self.outputList.append(self.queue.dequeue())
31 | return
32 | 
33 | # If the queue is empty, hold this point as the current candidate peak
34 | if len(self.queue) < 1:
35 | self.queue.enqueue(dp)
36 | else:
37 | # If the time difference exceeds the threshold, pop the old point
38 | if (dp.time - self.queue[0].time) > self.timeThreshold:
39 | pop = self.queue.dequeue()
40 | self.outputList.append(pop)
41 | self.queue.enqueue(dp)
42 | # Else only keep the maximum value point
43 | else:
44 | if dp.mag >= self.queue[0].mag:
45 | pop = self.queue.dequeue()
46 | self.queue.enqueue(dp)
47 | 
48 | # Constructor
49 | # @args:
50 | # 1. params - dictionary to contain parameters
51 | # a. 'time_threshold' - time threshold for eliminating peaks
52 | # 2. inputPeaks - input data queue with potential peaks
53 | # 3. 
confirmedPeaks - output data list for confirmed peaks
54 | def __init__(self, params, inputPeaks, confirmedPeaks):
55 | 
56 | super(WpdPostProcessor, self).__init__()
57 | self.target = self.postProcess
58 | 
59 | # Internal references
60 | self.inputQueue = inputPeaks
61 | self.outputList = confirmedPeaks
62 | 
63 | # Param unpacking
64 | self.timeThreshold = params['time_threshold']
65 | 
66 | # Internal queue
67 | self.queue = Queue()
68 | 
-------------------------------------------------------------------------------- /src/algorithms/peakDetection/preProcessing.py: --------------------------------------------------------------------------------
1 | # ======================================================================== #
2 | #
3 | # preProcessing.py
4 | # Jamieson Brynes
5 | # 10/22/2016
6 | #
7 | # This class contains the pre-processing element for the data points.
8 | # It will compute their magnitude, scale the time, and interpolate
9 | # between points.
10 | #
11 | #
12 | # ======================================================================== #
13 | 
14 | import time
15 | import math
16 | 
17 | import src.utils as utils
18 | from src.infra.queue import Queue
19 | from src.constants import Constants
20 | from src.infra.workerThread import WorkerThread
21 | 
22 | 
23 | class WpdPreProcessor(WorkerThread):
24 | 
25 | def preProcess(self):
26 | while self.active:
27 | if not self.inputQueue.isEmpty():
28 | # Pop oldest point on the queue
29 | ds = self.inputQueue.dequeue()
30 | 
31 | # Special handling for the 'end' of the data stream
32 | if ds == 'end':
33 | self.dataQueue.enqueue('end')
34 | self.completed = True
35 | self.active = False
36 | return
37 | 
38 | # Handling for the first data point received
39 | if self.startTime is None:
40 | self.startTime = ds.getTime()
41 | 
42 | # Scale time and compute magnitude
43 | ds.scaleTime(self.startTime, self.ts_factor)
44 | ds.computeMagnitude()
45 | 
46 | # Add the datapoint to the working window and the list of data
47 | self.window.enqueue(ds)
48 | self.dataList.append(ds)
49 | 
50 | # If we have more than 1 point in the queue
51 | if self.window.size() >= 2:
52 | 
53 | # Timestamps
54 | time1 = self.window[0].getTime()
55 | time2 = self.window[1].getTime()
56 | 
57 | # Check how many interpolation points COULD lie in between the timestamps
58 | for i in range(math.ceil((time2 - time1) / self.interp_ts)):
59 | interp_time = self.interpolation_count * self.interp_ts
60 | # If the interpolated time lies in this range, create the new data point and add it
61 | if time1 <= interp_time < time2:
62 | sds = utils.linearInterp(self.window[0], self.window[1], interp_time)
63 | self.dataQueue.enqueue(sds)
64 | self.interpolation_count += 1
65 | 
66 | # Pop the oldest point off the window
67 | self.window.dequeue()
68 | else:
69 | time.sleep(Constants.THREAD_SLEEP_PERIOD)
70 | 
71 | # Constructor for object.
72 | # @args:
73 | # 1. params - a dictionary containing parameters for the preprocessor
74 | # a. 'inter_ts' : interpolation time scale in ms
75 | # b. 'ts_factor' : time scale factor (i.e. if you want to go from ns to ms, this should be 1,000,000)
76 | # 2. inputQueue - the queue from the inputPipe, raw data
77 | # 3. dataList - a list that permanently stores the unaltered data
78 | # 4. 
dataQueue - a queue for the preprocessed data to be put into
79 | 
80 | def __init__(self, params, inputQueue, dataList, dataQueue):
81 | 
82 | super(WpdPreProcessor, self).__init__()
83 | self.target = self.preProcess
84 | 
85 | # Internal references to data structures
86 | self.inputQueue = inputQueue
87 | self.dataList = dataList
88 | self.dataQueue = dataQueue
89 | 
90 | # Params unpacking
91 | self.interp_ts = params['inter_ts']
92 | self.ts_factor = params['ts_factor']
93 | 
94 | # Internal buffer
95 | self.window = Queue()
96 | 
97 | # Data processing parameters
98 | self.startTime = None
99 | self.interpolation_count = 0
100 | 
101 | 
-------------------------------------------------------------------------------- /src/algorithms/peakDetection/smoothingFilter.py: --------------------------------------------------------------------------------
1 | # ======================================================================== #
2 | #
3 | # smoothingFilter.py
4 | # Jamieson Brynes
5 | # 10/21/2016
6 | #
7 | # This class contains the various smoothing functions for the smoothing
8 | # stage.
9 | #
10 | # ========================================================================= #
11 | import math
12 | 
13 | from src.infra.workerThread import WorkerThread
14 | from src.infra.queue import Queue
15 | from src.infra.simpleDataStructure import Sds
16 | 
17 | from scipy import special
18 | 
19 | class SmoothingFilter(WorkerThread):
20 | 
21 | # Worker function for the centered moving average filter.
22 | def centeredMovingAvg(self):
23 | self.windowSize = self.params['window_size']
24 | while self.active:
25 | if not self.inputQueue.isEmpty():
26 | 
27 | # Get next data point
28 | dp = self.inputQueue.dequeue()
29 | 
30 | # Special handling for end data stream
31 | if dp == 'end':
32 | self.outputQueue.enqueue(dp)
33 | self.completed = True
34 | self.active = False
35 | return
36 | 
37 | self.data.append(dp)
38 | self.window.enqueue(dp)
39 | 
40 | if len(self.window) == self.windowSize:
41 | # Do smoothing action and pop.
42 | ssum = 0
43 | for i in range(len(self.window)):
44 | ssum += self.window[i].mag
45 | # Average of all points in the window
46 | new_dp = Sds(self.window[int(self.windowSize / 2)].time, ssum / self.windowSize, self.window[int(self.windowSize / 2)].mag)
47 | self.outputQueue.enqueue(new_dp)
48 | self.window.dequeue()
49 | 
50 | # Worker function for the Hann windowed filter
51 | def hann(self):
52 | 
53 | self.windowSize = self.params['window_size']
54 | self.hann_window = SmoothingFilter.hannCoeffs(self.windowSize)
55 | self.hann_sum = sum(self.hann_window)
56 | 
57 | while self.active:
58 | if not self.inputQueue.isEmpty():
59 | 
60 | # Get next dp
61 | dp = self.inputQueue.dequeue()
62 | 
63 | # Special handling for end data stream
64 | if dp == 'end':
65 | self.outputQueue.enqueue(dp)
66 | self.completed = True
67 | self.active = False
68 | return
69 | 
70 | self.data.append(dp)
71 | self.window.enqueue(dp)
72 | 
73 | if len(self.window) == self.windowSize:
74 | # Do smoothing action and pop.
75 | ssum = 0 76 | for i in range(len(self.window)): 77 | ssum += self.window[i].mag * self.hann_window[i] 78 | # Average of all points in the window 79 | new_dp = Sds(self.window[int(self.windowSize / 2)].time, ssum / self.hann_sum, self.window[int(self.windowSize / 2)].mag) 80 | self.outputQueue.enqueue(new_dp) 81 | self.window.dequeue() 82 | 83 | # Worker function for the Gaussian filter 84 | def gaussian(self): 85 | 86 | self.windowSize = self.params['window_size'] 87 | self.std = self.params['std'] 88 | self.gauss_window = SmoothingFilter.gaussianCoeffs(self.windowSize, self.std) 89 | self.gauss_sum = sum(self.gauss_window) 90 | 91 | while self.active: 92 | if not self.inputQueue.isEmpty(): 93 | 94 | # Get next dp 95 | dp = self.inputQueue.dequeue() 96 | 97 | # Special handling for end data stream 98 | if dp == 'end': 99 | self.outputQueue.enqueue(dp) 100 | self.completed = True 101 | self.active = False 102 | return 103 | 104 | self.data.append(dp) 105 | self.window.enqueue(dp) 106 | 107 | if len(self.window) == self.windowSize: 108 | # Do smoothing action and pop. 109 | ssum = 0 110 | for i in range(len(self.window)): 111 | ssum += self.window[i].mag * self.gauss_window[i] 112 | # Average of all points in the window 113 | new_dp = Sds(self.window[int(self.windowSize / 2)].time, ssum / self.gauss_sum, self.window[int(self.windowSize / 2)].mag) 114 | self.outputQueue.enqueue(new_dp) 115 | self.window.dequeue() 116 | 117 | # Worker function for the Kaiser-Bessel filter. 118 | def kaiserBessel(self): 119 | self.windowSize = self.params['window_size'] 120 | self.cutoff_freq = self.params['cutoff_freq'] 121 | self.sample_freq = self.params['sample_freq'] 122 | self.filter_coeff = SmoothingFilter.kaiserBesselCoeffs(self.windowSize, self.cutoff_freq, self.sample_freq) 123 | self.filter_sum = sum(self.filter_coeff) 124 | 125 | while self.active: 126 | if not self.inputQueue.isEmpty(): 127 | 128 | # Get next dp 129 | dp = self.inputQueue.dequeue() 130 | 131 | # Special handling for end data stream 132 | if dp == 'end': 133 | self.outputQueue.enqueue(dp) 134 | self.completed = True 135 | self.active = False 136 | return 137 | 138 | self.data.append(dp) 139 | self.window.enqueue(dp) 140 | 141 | if len(self.window) == self.windowSize: 142 | # Do smoothing action and pop. 143 | ssum = 0 144 | for i in range(len(self.window)): 145 | ssum += self.window[i].mag * self.filter_coeff[i] 146 | # Average of all points in the window 147 | new_dp = Sds(self.window[int(self.windowSize / 2)].time, ssum / self.filter_sum, self.window[int(self.windowSize / 2)].mag) 148 | self.outputQueue.enqueue(new_dp) 149 | self.window.dequeue() 150 | 151 | # Constructor for the window 152 | # @args: 153 | # 1. params - dictionary containing relevant parameter 154 | # a. 'window_size' - size of the window. NOTE: Should be odd. 155 | # 2. inputData - queue object containing the pre-processed data 156 | # 3. 
dataList - list that accumulates the incoming (pre-smoothing) data points; 4. smoothData - queue object containing the smoothed data
157 | def __init__(self, params, inputData, dataList, smoothData):
158 | 
159 | super(SmoothingFilter, self).__init__()
160 | 
161 | # Internal references to queues
162 | self.inputQueue = inputData
163 | self.outputQueue = smoothData
164 | self.data = dataList
165 | 
166 | # Params unpacking
167 | self.params = params
168 | self.typ = params['type']
169 | 
170 | # Internal buffer
171 | self.window = Queue()
172 | 
173 | # Set correct target
174 | if self.typ == 'hann':
175 | self.target = self.hann
176 | elif self.typ == 'gaussian':
177 | self.target = self.gaussian
178 | elif self.typ == 'kaiser_bessel':
179 | self.target = self.kaiserBessel
180 | else:
181 | self.target = self.centeredMovingAvg
182 | 
183 | # Function to generate the hann window coefficients
184 | # @args:
185 | # 1. windowSize - size of the Hann window
186 | # @return:
187 | # 1. window - list of window coefficients.
188 | @staticmethod
189 | def hannCoeffs(windowSize):
190 | 
191 | window = []
192 | for n in range(windowSize):
193 | value = 0.5 * (1 - math.cos(2*math.pi * n / (windowSize - 1)))
194 | window.append(value)
195 | return window
196 | 
197 | # Function to generate the Gaussian window coefficients
198 | # @args:
199 | # 1. windowSize - size of the Gaussian window
200 | # 2. std - adjusted standard deviation (scaling factor w/ windowSize)
201 | # @return:
202 | # 1. window - list of window coefficients.
203 | @staticmethod
204 | def gaussianCoeffs(windowSize, std):
205 | window = []
206 | 
207 | for n in range(windowSize):
208 | value = math.exp(-0.5 * math.pow((n - (windowSize - 1) / 2) / (std * (windowSize - 1) / 2), 2))
209 | window.append(value)
210 | 
211 | return window
212 | 
213 | # Function to generate the Kaiser-Bessel filter coefficients
214 | # @args:
215 | # 1. windowSize - size of the filter window
216 | # 2. cutoff_f - cutoff frequency of the filter
217 | # 3. sampling_f - sampling frequency of the data
218 | # @return:
219 | # 1. coeffs - coefficients of the Kaiser-Bessel filter
220 | @staticmethod
221 | def kaiserBesselCoeffs(windowSize, cutoff_f, sampling_f):
222 | coeffs = []
223 | Np = (windowSize - 1) / 2
224 | 
225 | # Assume we always want an attenuation of 60dB at the cutoff frequency
226 | alpha = 5.65326
227 | Io_alpha = special.iv(0, alpha)
228 | 
229 | # Calculate Kaiser-Bessel window coefficients
230 | window = []
231 | for i in range(0, windowSize):
232 | val = alpha * math.sqrt(1 - math.pow((i - Np) / Np, 2))
233 | window.append(special.iv(0, val) / Io_alpha)
234 | 
235 | # Sinc function coefficients
236 | sinc = []
237 | for i in range(0, windowSize):
238 | val = 2 * (i - Np) * cutoff_f / sampling_f
239 | sinc.append(SmoothingFilter.sinc(val))
240 | 
241 | # Multiply the coefficients together
242 | for i in range(0, windowSize):
243 | coeffs.append(window[i] * sinc[i])
244 | 
245 | return coeffs
246 | 
247 | # Implementation of the sinc(x) function in Python
248 | # @return:
249 | # 1. value - result of the sinc operator.
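# Aside (not used by this repo): numpy.sinc implements exactly this normalized
# sinc, sin(pi*x)/(pi*x), and scipy.signal.get_window can generate 'hann',
# ('gaussian', std) and ('kaiser', beta) windows equivalent to the hand-rolled
# coefficient generators above.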
250 | @staticmethod
251 | def sinc(x):
252 | if x == 0:
253 | return 1
254 | else:
255 | return math.sin(math.pi * x) / (math.pi * x)
256 | 
257 | 
-------------------------------------------------------------------------------- /src/algorithms/peakDetection/windowedPeakDetection.py: --------------------------------------------------------------------------------
1 | # ======================================================================== #
2 | #
3 | # windowedPeakDetection.py
4 | # Jamieson Brynes
5 | # 10/22/2016
6 | #
7 | # This class contains the implementation of the windowed peak
8 | # detection algorithm. Agnostic to smoothing window type and
9 | # peak 'score' function calculator.
10 | #
11 | # ======================================================================== #
12 | from src import utils
13 | 
14 | from src.infra.inputPipe import InputPipe
15 | from src.infra.queue import Queue
16 | 
17 | from src.algorithms.peakDetection.preProcessing import WpdPreProcessor
18 | from src.algorithms.peakDetection.smoothingFilter import SmoothingFilter
19 | from src.algorithms.peakDetection.peakFuncs import PeakScorer
20 | from src.algorithms.peakDetection.peakDetector import PeakDetector
21 | from src.algorithms.peakDetection.postProcessing import WpdPostProcessor
22 | 
23 | 
24 | 
25 | class Wpd:
26 | 
27 | # Constructor for the object.
28 | # @args :
29 | # 1. filelocation - folder containing accelerometer.csv and stepcounter.csv
30 | # 2. preProcessingParams - parameters for the preprocessor, see preProcessing.py for docs
31 | # 3. windowParams - parameters for the smoothing filter, see smoothingFilter.py for docs;
32 | # the window 'type' is a key inside this dictionary rather than a separate argument.
33 | # 4. peakFuncParams - parameters for the peak scoring function, see peakFuncs.py for docs;
34 | # the scorer 'type' is likewise carried inside the dictionary.
35 | # 5. peakDetectorParams - parameters for the peak detector, see peakDetector.py for docs.
36 | # 6. postProcessingParams - parameters for post processing, see postProcessing.py for docs.
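# Stage wiring, for orientation (each arrow is one of the Queue objects
# created below):
#   InputPipe -> inputQueue -> WpdPreProcessor -> dataQueue -> SmoothingFilter
#   -> smoothedDataQueue -> PeakScorer -> peakScores -> PeakDetector -> peaks
#   -> WpdPostProcessor -> confirmedPeaks (a plain list of accepted steps)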
37 | def __init__(self, filelocation, preProcessingParams, windowParams, peakFuncParams, peakDetectorParams, postProcessingParams):
38 | 
39 | self.name = 'wpd'
40 | self.filelocation = filelocation
41 | 
42 | # Internal queues for data flow
43 | self.inputQueue = Queue()
44 | self.dataQueue = Queue()
45 | self.smoothedDataQueue = Queue()
46 | self.peakScores = Queue()
47 | self.peaks = Queue()
48 | 
49 | # Internal lists of plottable data
50 | self.data = []
51 | self.preprocessData = []
52 | self.smoothedData = []
53 | self.peakScoreData = []
54 | self.peakData = []
55 | self.confirmedPeaks = []
56 | self.steps = []
57 | 
58 | # Internal 'worker threads' in the form of objects
59 | self.pipe = InputPipe(self.filelocation + 'accelerometer.csv', self.inputQueue)
60 | self.preProcessing = WpdPreProcessor(preProcessingParams, self.inputQueue, self.data, self.dataQueue)
61 | self.smoothingFilter = SmoothingFilter(windowParams, self.dataQueue, self.preprocessData, self.smoothedDataQueue)
62 | self.peakScorer = PeakScorer(peakFuncParams, self.smoothedDataQueue, self.smoothedData, self.peakScores)
63 | self.peakDetection = PeakDetector(peakDetectorParams, self.peakScores, self.peakScoreData, self.peaks, self.peakData)
64 | self.postProcessing = WpdPostProcessor(postProcessingParams, self.peaks, self.confirmedPeaks)
65 | 
66 | # Start algorithm signal, kicks off all the worker threads for the various stages
67 | def start(self):
68 | 
69 | self.pipe.start()
70 | self.preProcessing.start()
71 | self.smoothingFilter.start()
72 | self.peakScorer.start()
73 | self.peakDetection.start()
74 | self.postProcessing.start()
75 | 
76 | # Stop algorithm signal, halts all the worker threads after the current operation
77 | def stop(self):
78 | 
79 | self.pipe.stop()
80 | self.preProcessing.stop()
81 | self.smoothingFilter.stop()
82 | self.peakScorer.stop()
83 | self.peakDetection.stop()
84 | self.postProcessing.stop()
85 | 
86 | def getStatus(self):
87 | 
88 | return 'Input Data: ' + str(len(self.inputQueue)) \
89 | + ' Pre-Processed Data: ' + str(len(self.dataQueue)) \
90 | + ' Smoothed Data: ' + str(len(self.smoothedDataQueue)) \
91 | + ' Peak Scores: ' + str(len(self.peakScores)) \
92 | + ' Peaks: ' + str(len(self.peaks)) \
93 | + ' Confirmed Peaks: ' + str(len(self.confirmedPeaks))
94 | 
95 | def getCsvStatus(self):
96 | 
97 | return str(len(self.inputQueue)) + ',' \
98 | + str(len(self.dataQueue)) + ',' \
99 | + str(len(self.smoothedDataQueue)) \
100 | + ',' + str(len(self.peakScores)) \
101 | + ',' + str(len(self.peaks)) \
102 | + ',' + str(len(self.confirmedPeaks))
103 | 
104 | def compare(self):
105 | 
106 | timeData = {'scale': self.preProcessing.ts_factor, 'offset': self.preProcessing.startTime}
107 | self.steps = utils.loadStepCsv(self.filelocation + 'stepcounter.csv', timeData)
108 | return [len(self.confirmedPeaks), len(self.steps)]
109 | 
110 | # Check if the algorithm is done
111 | def isDone(self):
112 | return self.preProcessing.isDone() and self.smoothingFilter.isDone() and self.peakScorer.isDone() and self.peakDetection.isDone() and self.postProcessing.isDone()
113 | 
114 | # Check if the algorithm is still running
115 | def isRunning(self):
116 | return self.preProcessing.isRunning() or self.smoothingFilter.isRunning() or self.peakScorer.isRunning() or self.peakDetection.isRunning() or self.postProcessing.isRunning()
117 | 
118 | # Getters
119 | 
120 | def getData(self):
121 | return [self.data, self.preprocessData, self.smoothedData, self.peakScoreData, self.peakData, self.confirmedPeaks]
122 | 
123 | def getName(self):
124 | return self.name
125 | 
126 | def getSteps(self):
127 | return len(self.confirmedPeaks)
128 | 
-------------------------------------------------------------------------------- /src/constants.py: --------------------------------------------------------------------------------
1 | #========================================================================#
2 | #
3 | # constants.py
4 | # Jamieson Brynes
5 | # 10/21/2016
6 | #
7 | # This class contains all of the constants for the step counting
8 | # programs.
9 | #
10 | #========================================================================#
11 | 
12 | 
13 | # Class to contain constants
14 | class Constants:
15 | 
16 | THREAD_SLEEP_PERIOD = 0.00 # In seconds; 0 disables the inter-poll sleep (0.01 would give the 10 ms the original comment described).
17 | SAMPLE_PERIOD = 0.00 # In seconds; 0 disables the simulated sampling delay (0.01 would be 10 ms).
18 | 
19 | # Dictionary for graph iterables for UI
20 | UI_GRAPHS = dict()
21 | UI_GRAPHS['wpd'] = ['raw_data', 'pre_process_data', 'smooth_data', 'peak_score_data', 'peak_data', 'confirmed_peak_data']
22 | 
23 | # Dictionary for labels on axes.
24 | UI_GRAPHS_AXES = dict()
25 | 
26 | UI_GRAPHS_AXES['wpd'] = dict()
27 | UI_GRAPHS_AXES['wpd']['raw_data'] = {'x' : 'time (ms)', 'y' : 'magnitude (m/s^2)'}
28 | UI_GRAPHS_AXES['wpd']['pre_process_data'] = UI_GRAPHS_AXES['wpd']['raw_data']
29 | UI_GRAPHS_AXES['wpd']['smooth_data'] = UI_GRAPHS_AXES['wpd']['raw_data']
30 | UI_GRAPHS_AXES['wpd']['peak_score_data'] = {'x' : 'time (ms)', 'y' : 'peak score'}
31 | UI_GRAPHS_AXES['wpd']['peak_data'] = UI_GRAPHS_AXES['wpd']['raw_data']
32 | UI_GRAPHS_AXES['wpd']['confirmed_peak_data'] = UI_GRAPHS_AXES['wpd']['raw_data']
33 | 
34 | # Dictionary for type of lines on each plot.
35 | UI_GRAPHS_LINE = dict()
36 | 
37 | UI_GRAPHS_LINE['wpd'] = dict()
38 | 
39 | UI_GRAPHS_LINE['wpd']['raw_data'] = {'marker': None, 'line': '-'}
40 | UI_GRAPHS_LINE['wpd']['pre_process_data'] = UI_GRAPHS_LINE['wpd']['raw_data']
41 | UI_GRAPHS_LINE['wpd']['smooth_data'] = UI_GRAPHS_LINE['wpd']['raw_data']
42 | UI_GRAPHS_LINE['wpd']['peak_score_data'] = UI_GRAPHS_LINE['wpd']['raw_data']
43 | UI_GRAPHS_LINE['wpd']['peak_data'] = {'marker': 'x', 'line': None}
44 | UI_GRAPHS_LINE['wpd']['confirmed_peak_data'] = UI_GRAPHS_LINE['wpd']['peak_data']
45 | 
46 | # Dictionary for subplot positions of each graph.
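# The three-digit codes below are matplotlib subplot specifications: 231 means
# a 2-row x 3-column grid, first cell. smooth_data is drawn on panels 233, 235
# and 236 so that detected peaks, confirmed peaks and ground-truth steps can be
# overlaid on the smoothed trace.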
47 | UI_GRAPHS_POS = dict()
48 | 
49 | UI_GRAPHS_POS['wpd'] = dict()
50 | UI_GRAPHS_POS['wpd']['raw_data'] = [231]
51 | UI_GRAPHS_POS['wpd']['pre_process_data'] = [232]
52 | UI_GRAPHS_POS['wpd']['smooth_data'] = [233, 235, 236]
53 | UI_GRAPHS_POS['wpd']['peak_score_data'] = [234]
54 | UI_GRAPHS_POS['wpd']['peak_data'] = [235]
55 | UI_GRAPHS_POS['wpd']['confirmed_peak_data'] = [236]
56 | UI_GRAPHS_POS['wpd']['steps'] = [236]
57 | 
58 | # Log file locations
59 | THREAD_LOG = 'thread_log.txt'
60 | ERROR_LOG = 'error_log.txt'
61 | 
62 | 
-------------------------------------------------------------------------------- /src/infra/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Oxford-step-counter/Step-Counting-Algorithms-Testing/275138eaea7fd2e978400837101b0facb2a10147/src/infra/__init__.py
-------------------------------------------------------------------------------- /src/infra/dataStructure.py: --------------------------------------------------------------------------------
1 | #========================================================================#
2 | #
3 | # dataStructure.py
4 | # Jamieson Brynes
5 | # 10/21/2016
6 | #
7 | # This class contains the data structure format for a data point.
8 | #
9 | #=========================================================================#
10 | import math
11 | 
12 | 
13 | # Data structure for accelerometer data point.
14 | class DataStructure:
15 | 
16 | # Constructor
17 | # @args:
18 | # 1. time - timestamp
19 | # 2. x - x-coordinate of acceleration
20 | # 3. y - y-coordinate of acceleration
21 | # 4. z - z-coordinate of acceleration
22 | def __init__(self, time, x, y, z):
23 | 
24 | self.time = time
25 | self.x = x
26 | self.y = y
27 | self.z = z
28 | self.mag = None
29 | # Boolean for tracking if this data point was smoothed
30 | self.modified = False
31 | 
32 | # Function to scale and shift time axis.
33 | # @args:
34 | # 1. startTime - time that the data trace started (time = 0 point)
35 | # 2. factor - the scaling factor for converting units. i.e. --> to go from ns to ms this should be 10^6
36 | def scaleTime(self, startTime, factor):
37 | self.time = (self.time - startTime) / factor
38 | 
39 | # Function to compute magnitude
40 | def computeMagnitude(self):
41 | self.mag = math.sqrt(math.pow(self.x, 2) + math.pow(self.y, 2) + math.pow(self.z, 2))
42 | 
43 | # Function to dump this data to a csv line entry
44 | def toCsv(self):
45 | return str(self.time) + ',' + str(self.x) + ',' + str(self.y) + ',' + str(self.z) + '\n'
46 | 
47 | # Getters and setters
48 | def setX(self, x):
49 | self.x = x
50 | 
51 | def setY(self, y):
52 | self.y = y
53 | 
54 | def setZ(self, z):
55 | self.z = z
56 | 
57 | def setTime(self, time):
58 | self.time = time
59 | 
60 | def setMagnitude(self, mag):
61 | self.mag = mag
62 | 
63 | def getX(self):
64 | return self.x
65 | 
66 | def getY(self):
67 | return self.y
68 | 
69 | def getZ(self):
70 | return self.z
71 | 
72 | def getTime(self):
73 | return self.time
74 | 
75 | def getMagnitude(self):
76 | return self.mag
77 | 
-------------------------------------------------------------------------------- /src/infra/fancyPrinter.py: --------------------------------------------------------------------------------
1 | #========================================================================#
2 | #
3 | # fancyPrinter.py
4 | # Jamieson Brynes
5 | # 10/22/2016
6 | #
7 | # This class contains a class for printing data in place in the
8 | # console, e.g.
for an updating ticker.
9 | #
10 | #=========================================================================#
11 | 
12 | 
13 | class FancyPrinter:
14 | 
15 | def __init__(self):
16 | self.prev_len = 0
17 | 
18 | # Print function. Takes any object that can be turned into a string.
19 | # Overwrites the previous message with spaces to erase it and then
20 | # returns to the start of the line.
21 | def fprint(self, obj):
22 | 
23 | print(' ' * self.prev_len, end='\r')
24 | print(str(obj), end='\r')
25 | self.prev_len = len(str(obj))
26 | 
-------------------------------------------------------------------------------- /src/infra/inputPipe.py: --------------------------------------------------------------------------------
1 | #========================================================================#
2 | #
3 | # inputPipe.py
4 | # Jamieson Brynes
5 | # 10/21/2016
6 | #
7 | # This class is designed to simulate the real time collection of data
8 | # points. It will deposit the data into a queue at a fixed sampling
9 | # rate.
10 | #
11 | #=========================================================================#
12 | 
13 | from threading import Thread
14 | import time
15 | from src.constants import Constants
16 | import src.utils as utils
17 | 
18 | 
19 | class InputPipe:
20 | 
21 | # Constructor
22 | # @args :
23 | # 1. filepath - path to the input accelerometer data to parse
24 | # 2. queue - the Queue object to add the data to.
25 | def __init__(self, filepath, queue):
26 | self.filepath = filepath
27 | self.queue = queue
28 | 
29 | self.thread = None
30 | 
31 | # Start the piping operation
32 | def start(self):
33 | 
34 | try:
35 | self.thread = Thread(target = self.pipeInput, args = ())
36 | self.thread.daemon = True
37 | self.thread.start()
38 | except Exception as e:
39 | print('Error: Cannot start piping thread: ' + str(e))
40 | 
41 | # Check if the piping operation is still running
42 | def isRunning(self):
43 | 
44 | if self.thread:
45 | return self.thread.is_alive()
46 | else:
47 | return False
48 | 
49 | # Stop the piping operation. Python threads cannot be killed from outside;
50 | # the daemon worker exits with the process, so this is a no-op.
51 | def stop(self):
52 | 
53 | pass
54 | 
55 | # Worker function for the thread
56 | def pipeInput(self):
57 | 
58 | data = utils.loadAccelCsv(self.filepath)
59 | 
60 | for datapoint in data:
61 | self.queue.enqueue(datapoint)
62 | # time.sleep(Constants.SAMPLE_PERIOD)
63 | 
64 | # Add an 'end' signal to the pipe to indicate the end of the data stream
65 | self.queue.enqueue('end')
66 | 
-------------------------------------------------------------------------------- /src/infra/queue.py: --------------------------------------------------------------------------------
1 | #========================================================================#
2 | #
3 | # queue.py
4 | # Jamieson Brynes
5 | # 10/21/2016
6 | #
7 | # This class is designed to be a simple implementation of a queue
8 | # in Python.
9 | #
10 | #=========================================================================#
11 | 
12 | from collections import deque
13 | 
14 | 
15 | # Basic implementation of a queue wrapping around the deque class.
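# Thread-safety note: collections.deque's append() and popleft() are atomic in
# CPython, which is what lets the worker threads share these queues without
# explicit locking.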
/src/infra/queue.py:
--------------------------------------------------------------------------------
#========================================================================#
#
#   queue.py
#   Jamieson Brynes
#   10/21/2016
#
#   This class is designed to be a simple implementation of a queue
#   in Python.
#
#========================================================================#

from collections import deque


# Basic FIFO queue wrapping the collections.deque class.
class Queue:

    def __init__(self):
        self.queue = deque()

    def isEmpty(self):
        return len(self.queue) == 0

    def enqueue(self, item):
        self.queue.append(item)

    def dequeue(self):
        return self.queue.popleft()

    def size(self):
        return len(self.queue)

    def __getitem__(self, i):
        return self.queue[i]

    def __len__(self):
        return len(self.queue)
--------------------------------------------------------------------------------
/src/infra/simpleDataStructure.py:
--------------------------------------------------------------------------------
#========================================================================#
#
#   simpleDataStructure.py
#   Jamieson Brynes
#   10/21/2016
#
#   This class contains the data structure format for a data point
#   after preprocessing.
#
#========================================================================#


class Sds:

    # Constructor
    # @args:
    #   1. time - timestamp
    #   2. mag - magnitude of the signal
    #   3. old_mag (optional) - old magnitude, kept when the magnitude is overwritten (e.g. by peak scores)
    def __init__(self, time, mag, old_mag=None):

        self.time = time
        self.mag = mag
        self.oldMag = old_mag

    # Output CSV line
    def toCsv(self):
        return str(self.time) + ',' + str(self.mag) + '\n'

    # Getters and setters
    def getTime(self):
        return self.time

    def getMagnitude(self):
        return self.mag

    def getOldMagnitude(self):
        return self.oldMag

    def setTime(self, time):
        self.time = time

    def setMagnitude(self, mag):
        self.mag = mag

    def setOldMagnitude(self, old_mag):
        self.oldMag = old_mag
--------------------------------------------------------------------------------
/src/infra/workerThread.py:
--------------------------------------------------------------------------------

import src.utils as utils
from threading import Thread


# Generic base class for the pipeline's worker threads. Subclasses assign
# self.target (and optionally self.args) before start() is called.
class WorkerThread:

    def __init__(self):

        # Thread related variables
        self.thread = None
        self.active = False
        self.completed = False
        self.target = None
        self.args = ()

    # Start signal, create and kick off the thread
    def start(self):
        self.active = True
        self.thread = Thread(target=self.target, args=self.args)
        self.thread.daemon = True
        self.thread.start()
        # utils.threadLog('Worker thread started')

    # Stop signal, end the thread after the current operation is done
    def stop(self):
        # utils.threadLog('Worker thread stopped.')
        self.active = False

    # Check if the thread is running: the worker must be in 'active' mode
    # AND the thread must actually still be alive.
    def isRunning(self):
        return self.active and self.thread is not None and self.thread.is_alive()

    # Check if the worker's stage is finished
    def isDone(self):
        return self.completed
--------------------------------------------------------------------------------
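The pipeline stages are expected to build on this base by assigning self.target before start() is called. A hypothetical minimal subclass (not part of the repository) might look like:

    import time

    from src.infra.workerThread import WorkerThread


    class Doubler(WorkerThread):
        # Hypothetical stage: reads numbers from one queue, writes doubles to another.

        def __init__(self, inQueue, outQueue):
            super().__init__()
            self.inQueue = inQueue
            self.outQueue = outQueue
            self.target = self.work   # WorkerThread.start() spawns a Thread on self.target

        def work(self):
            while self.active:
                if self.inQueue.isEmpty():
                    time.sleep(0.001)
                    continue
                item = self.inQueue.dequeue()
                if item == 'end':
                    break
                self.outQueue.enqueue(item * 2)
            self.completed = True
            self.active = False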
/src/main.py:
--------------------------------------------------------------------------------
import sys
import time
import json
sys.dont_write_bytecode = True

from src.algorithms.peakDetection.windowedPeakDetection import Wpd
from src.ui.ui import UI


def main():

    # Load the json configuration
    with open("../config.json", 'r') as f:
        config = json.load(f)

    algo = None

    if config['algorithm']['name'] == 'wpd':
        fp = config['file_path']
        pre = config['algorithm']['params']['pre']
        filter = config['algorithm']['params']['filter']
        scoring = config['algorithm']['params']['scoring']
        detection = config['algorithm']['params']['detection']
        post = config['algorithm']['params']['post']

        algo = Wpd(fp, pre, filter, scoring, detection, post)

    else:
        print("Configuration file specifies an unknown algorithm: " + config['algorithm']['name'])
        return

    print("Starting algorithm")
    algo.start()

    while algo.isRunning():
        time.sleep(1)

    print("Algorithm complete. Running comparison.")
    result = algo.compare()
    print(result)
    print(1 - abs((result[1] - result[0]) / result[1]))
    #ui = UI(algo)


# Entry point
if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/src/ui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Oxford-step-counter/Step-Counting-Algorithms-Testing/275138eaea7fd2e978400837101b0facb2a10147/src/ui/__init__.py
--------------------------------------------------------------------------------
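For reference, the accuracy main() prints is 1 - |ground_truth - steps| / ground_truth, the same metric client.py aggregates. With made-up numbers:

    steps, ground_truth = 96, 100   # result[0], result[1] (hypothetical values)
    accuracy = 1 - abs((ground_truth - steps) / ground_truth)
    print(accuracy)                 # 0.96 -- undercounting by 4 steps costs 4%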
/src/ui/ui.py:
--------------------------------------------------------------------------------
#========================================================================#
#
#   ui.py
#   Jamieson Brynes
#   10/24/2016
#
#   This file contains the UI master object. It initializes the UI,
#   handles UI updates, and routes each data series to the correct
#   subplot.
#
#========================================================================#

from src.constants import Constants

import matplotlib.pyplot as plt


class UI:

    def __init__(self, algo):

        self.fig = plt.figure()
        self.subplots = {}
        name = algo.getName()
        lists = algo.getData()
        steps = algo.steps

        for i, l in enumerate(lists):
            x = []
            y = []
            for dp in l:
                x.append(dp.time)
                y.append(dp.mag)

            l_name = Constants.UI_GRAPHS[name][i]
            axesData = Constants.UI_GRAPHS_AXES[name][l_name]
            positions = Constants.UI_GRAPHS_POS[name][l_name]
            lineData = Constants.UI_GRAPHS_LINE[name][l_name]
            for position in positions:

                if position not in self.subplots:
                    self.subplots[position] = self.fig.add_subplot(position)

                self.subplots[position].set_title(l_name)
                self.subplots[position].set_xlabel(axesData['x'])
                self.subplots[position].set_ylabel(axesData['y'])
                self.subplots[position].plot(x, y, marker=lineData['marker'], linestyle=lineData['line'])

        # Overlay the detected steps as vertical lines on their subplot
        position = Constants.UI_GRAPHS_POS[name]['steps'][0]
        for step in steps:
            self.subplots[position].axvline(x=step, ymin=0, ymax=1)
        plt.show()
--------------------------------------------------------------------------------
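The three-digit positions consumed by fig.add_subplot (231, 232, ...) are standard matplotlib subplot codes: rows, columns, cell index. The values in Constants.UI_GRAPHS_POS therefore place the wpd series on a 2 x 3 grid; for example:

    import matplotlib.pyplot as plt

    # 231 means a 2-row x 3-column grid, first cell (top-left) -- the slot
    # UI_GRAPHS_POS assigns to 'raw_data'
    fig = plt.figure()
    ax = fig.add_subplot(231)
    ax.set_title('raw_data')
    plt.show()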
/src/utils.py:
--------------------------------------------------------------------------------
#========================================================================#
#
#   utils.py
#   Jamieson Brynes
#   10/21/2016
#
#   This module contains various utility functions that do not belong
#   in a class anywhere.
#
#========================================================================#
from src.constants import Constants
from src.infra.dataStructure import DataStructure
from src.infra.simpleDataStructure import Sds


# Function to load in the accelerometer data from the CSV file.
# @args:
#   1. filepath - path to the .csv file location
# TODO : Add exception handling
def loadAccelCsv(filepath):

    data = []
    with open(filepath, 'r') as f:
        for line in f:
            s_line = line.split(',')
            ds = DataStructure(int(s_line[0]), float(s_line[2]), float(s_line[3]), float(s_line[4]))
            data.append(ds)

    return data


# Function to load in the step counter data and adjust the timescale
# @args:
#   1. filepath - location of the .csv with step data
#   2. timeData - dictionary containing the parameters necessary
#       a. 'offset' - time offset from the accelerometer csv
#       b. 'scale' - the time scaling factor
def loadStepCsv(filepath, timeData):

    offset = timeData['offset']
    scale = timeData['scale']

    left_previous = 1
    right_previous = 1

    data = []
    with open(filepath, 'r') as f:
        for line in f:
            s_line = line.replace('\n', '').split(',')
            # Check for feet transitioning from up (0) to down (1).
            if int(s_line[1]) == 1 and left_previous == 0:
                time = (int(s_line[0]) - offset) / scale
                data.append(time)
            if int(s_line[2]) == 1 and right_previous == 0:
                time = (int(s_line[0]) - offset) / scale
                data.append(time)

            left_previous = int(s_line[1])
            right_previous = int(s_line[2])

    return data


# Function to create empty files as clean logs on each run.
# Uses the file names defined in Constants so all modules agree.
def initLogs():

    # Create clean files
    with open(Constants.THREAD_LOG, 'w'):
        pass
    with open(Constants.ERROR_LOG, 'w'):
        pass


# Function for logging events with respect to a thread.
# @args:
#   1. s - string to write to log.
def threadLog(s):
    with open(Constants.THREAD_LOG, 'a') as f:
        f.write(s)
        f.write('\r\n')


# Function for logging errors
# @args :
#   1. s - string to write to log
def errorLog(s):
    with open(Constants.ERROR_LOG, 'a') as f:
        f.write(s)
        f.write('\r\n')


# Linear interpolation function. Simple implementation.
# Assumes the two data points have distinct timestamps.
# @args:
#   1. dp1 - datapoint one, assume dataStructure type
#   2. dp2 - datapoint two, assume dataStructure type
#   3. time - interpolation time
def linearInterp(dp1, dp2, time):

    time1 = dp1.getTime()
    time2 = dp2.getTime()
    dt = time2 - time1

    value1 = dp1.getMagnitude()
    value2 = dp2.getMagnitude()
    dv = value2 - value1

    slope = dv / dt

    new_mag = slope * (time - time1) + value1

    return Sds(time, new_mag)


# Function to print a queue of data to a .csv file
# @args:
#   1. queue - Queue or list object to iterate over.
#   2. filename - name of the csv file to be created.
def printToCsv(queue, filename):

    with open(filename, 'w') as f:
        for i in range(len(queue)):
            f.write(queue[i].toCsv())
--------------------------------------------------------------------------------
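A worked example of linearInterp, interpolating halfway between two hypothetical Sds points 10 ms apart:

    from src.infra.simpleDataStructure import Sds
    from src.utils import linearInterp

    dp1 = Sds(100, 9.6)   # (time, magnitude) -- made-up values
    dp2 = Sds(110, 10.0)

    # slope = (10.0 - 9.6) / 10 = 0.04, so the point at t = 105 sits at 9.8
    mid = linearInterp(dp1, dp2, 105)
    print(mid.getTime(), mid.getMagnitude())   # 105 9.8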