├── .gitignore ├── README ├── __init__.py ├── config.py ├── data_util.py ├── diary.py ├── habits.py ├── memex_daemon.py ├── requirements.txt ├── timer.py ├── timer_db.py ├── util.py ├── walros.py └── walros_base.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | pip install google-api-python-client 2 | https://developers.google.com/sheets/quickstart/python 3 | https://developers.google.com/apis-explorer 4 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ali01/walrOS/f5e950690ea4a3da5a677a6b76664faa778757ce/__init__.py -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | _CONFIG_FILEPATH = os.path.expanduser("~/.walros/config.json") 5 | 6 | class Config(object): 7 | def __init__(self, filepath=_CONFIG_FILEPATH): 8 | with open(filepath) as f: 9 | self._config_obj = json.load(f) 10 | 11 | @property 12 | def base_dir(self): 13 | return os.path.expanduser(self._config_obj['base_dir']) 14 | 15 | @property 16 | def timer_dir(self): 17 | return os.path.join(self.base_dir, 18 | self._config_obj['timer_subdir']) 19 | 20 | @property 21 | def timer_signals_dir(self): 22 | return os.path.join(self.base_dir, 23 | self._config_obj['timer_subdir'], 24 | self._config_obj['timer_signals_subdir']) 25 | 26 | @property 27 | def diary_dir(self): 28 | return os.path.join(self.base_dir, 29 | self._config_obj['diary_subdir']) 30 | -------------------------------------------------------------------------------- /data_util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import functools 3 | from enum import Enum 4 | import string 5 | 6 | from apiclient import discovery 7 | import httplib2 8 | import oauth2client 9 | import oauth2client.file 10 | from oauth2client import client 11 | from oauth2client import tools 12 | 13 | # TODO: move to walros_base 14 | APPLICATION_NAME = "walrOS" 15 | PERMISSION_SCOPES = "https://www.googleapis.com/auth/spreadsheets" 16 | CLIENT_SECRET_FILEPATH = "~/.walros/client_secret.json" 17 | 18 | TEST_SPREADSHEET_ID = '1P_e-Tu-ZeY4fHluoMEmtg9p5pq7OLddoEEdhNqEvyVQ' 19 | TEST_WORKSHEET_ID = 0 20 | 21 | class Spreadsheet(object): 22 | 23 | def __init__(self, spreadsheet_id): 24 | self.spreadsheet_id_ = spreadsheet_id 25 | self.sheets_ = GetSpreadsheets() 26 | 27 | def GetWorksheet(self, worksheet_id): 28 | return Worksheet(self.spreadsheet_id_, worksheet_id) 29 | 30 | def GetRanges(self, ranges, fields): 31 | return self.sheets_.get(spreadsheetId=self.spreadsheet_id_, 32 | includeGridData=False, ranges=ranges, 33 | fields=fields).execute() 34 | 35 | def GetCellValue(self, worksheet_name, row, col): 36 | request = self.sheets_.values().get( 37 | spreadsheetId=self.spreadsheet_id_, 38 | range="%s!%s%d" % (worksheet_name, num2col(col), row)) 39 | response = request.execute() 40 | return response["values"][0][0] 41 | 42 | def BatchUpdate(self, batch_requests): 43 | return self.sheets_.batchUpdate(spreadsheetId=self.spreadsheet_id_, 44 | body={'requests': batch_requests}).execute() 45 | 46 | class Worksheet(object): 47 | 48 | def __init__(self, spreadsheet_id, worksheet_id): 49 | self.spreadsheet_id_ = spreadsheet_id 50 | self.worksheet_id_ = worksheet_id 51 | self.sheets_ = GetSpreadsheets() 52 | 53 | def NewInsertRowsBatchRequest(self, start_index, num_rows): 54 | return { 55 | 'insertDimension': { 56 | 'range': { 57 | 'sheetId': self.worksheet_id_, 58 | 'dimension': 'ROWS', 59 | 'startIndex': start_index - 1, 60 | 'endIndex': start_index + num_rows - 1, 61 | }, 62 | }, 63 | } 64 | 65 | def NewMergeRange(self, start_row, end_row, start_col, end_col): 66 | return { 67 | "startRowIndex": start_row - 1, 68 | "endRowIndex": end_row, 69 | "startColumnIndex": start_col - 1, 70 | "endColumnIndex": end_col, 71 | "sheetId": self.worksheet_id_, 72 | } 73 | 74 | def NewMergeCellsBatchRequest(self, merge_range): 75 | return { 76 | 'mergeCells': { 77 | 'mergeType': 'MERGE_ALL', 78 | 'range': merge_range 79 | } 80 | } 81 | 82 | class UpdateCellsMode(Enum): 83 | string = 'stringValue' 84 | number = 'numberValue' 85 | formula = 'formulaValue' 86 | 87 | def NewUpdateCellBatchRequest(self, row, col, value, 88 | update_cells_mode=UpdateCellsMode.string.value): 89 | return { 90 | 'updateCells': { 91 | 'fields': 'userEnteredValue', 92 | 'start': { # Zero-based indexing here. 93 | 'rowIndex': row - 1, 94 | 'columnIndex': col - 1, 95 | 'sheetId': self.worksheet_id_, 96 | }, 97 | 'rows': [ 98 | { 99 | 'values': { 100 | 'userEnteredValue': { 101 | update_cells_mode: value, 102 | }, 103 | }, 104 | }, 105 | ], 106 | }, 107 | } 108 | 109 | # Expose at the top-level. 110 | UpdateCellsMode = Worksheet.UpdateCellsMode 111 | 112 | 113 | class MergeRange(object): 114 | def __init__(self, merge_range): 115 | self.row_range = (merge_range['startRowIndex'] + 1, 116 | merge_range['endRowIndex']) 117 | self.col_range = (merge_range['startColumnIndex'] + 1, 118 | merge_range['endColumnIndex']) 119 | 120 | # -- Authentication -- 121 | 122 | def memoize(init_fn): 123 | """Decorator to memoize initialization""" 124 | obj = [] 125 | @functools.wraps(init_fn) 126 | def wrapper_fn(): 127 | if len(obj) == 0: 128 | obj.append(init_fn()) 129 | return obj[0] 130 | 131 | return wrapper_fn 132 | 133 | 134 | @memoize 135 | def GetSpreadsheets(): 136 | credentials = GetCredentials() 137 | http = credentials.authorize(httplib2.Http()) 138 | discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?version=v4') 139 | service = discovery.build('sheets', 'v4', http=http, 140 | discoveryServiceUrl=discoveryUrl, 141 | num_retries=3) 142 | return service.spreadsheets() 143 | 144 | def GetCredentials(): 145 | """Gets valid user credentials from storage. 146 | 147 | If nothing has been stored, or if the stored credentials are invalid, 148 | the OAuth2 flow is run to obtain the new credentials. 149 | 150 | Returns: 151 | The obtained credentials. 152 | """ 153 | credential_dir = os.path.join(os.path.expanduser('~'), '.credentials') 154 | if not os.path.exists(credential_dir): 155 | os.makedirs(credential_dir) 156 | credential_path = os.path.join(credential_dir, 157 | 'sheets.googleapis.com-walros.json') 158 | 159 | store = oauth2client.file.Storage(credential_path) 160 | credentials = store.get() 161 | if not credentials or credentials.invalid: 162 | flow = client.flow_from_clientsecrets(os.path.expanduser( 163 | CLIENT_SECRET_FILEPATH), 164 | PERMISSION_SCOPES) 165 | flow.user_agent = APPLICATION_NAME 166 | 167 | import argparse 168 | flags_namespace = argparse.Namespace() 169 | setattr(flags_namespace, 'auth_host_name', 'localhost') 170 | setattr(flags_namespace, 'logging_level', 'ERROR') 171 | setattr(flags_namespace, 'noauth_local_webserver', False) 172 | setattr(flags_namespace, 'auth_host_port', [8080, 8090]) 173 | credentials = tools.run_flow(flow, store, flags_namespace) 174 | print('Storing credentials to ' + credential_path) 175 | return credentials 176 | 177 | # -- Helper Functions -- 178 | 179 | def col2num(col): 180 | num = 0 181 | for c in col: 182 | if c in string.ascii_letters: 183 | num = num * 26 + (ord(c.upper()) - ord('A')) + 1 184 | return num 185 | 186 | 187 | def num2col(n): 188 | s = "" 189 | while n > 0: 190 | n, remainder = divmod(n - 1, 26) 191 | s = chr(65 + remainder) + s 192 | return s 193 | 194 | 195 | if __name__ == '__main__': 196 | sheet = Spreadsheet(TEST_SPREADSHEET_ID) 197 | worksheet = sheet.GetWorksheet(TEST_WORKSHEET_ID) 198 | 199 | requests = [] 200 | requests.append(worksheet.NewUpdateCellBatchRequest( 201 | 1, 1, 42, update_cells_mode=UpdateCellsMode.number.value)) 202 | sheet.BatchUpdate(requests) 203 | 204 | print(sheet.GetCellValue("Sheet1", 1, 1)) 205 | 206 | 207 | -------------------------------------------------------------------------------- /diary.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import os 4 | import os.path 5 | import time 6 | 7 | 8 | import click 9 | 10 | import config 11 | import timer_db 12 | import util 13 | 14 | 15 | _config = config.Config() 16 | _TIME_EPSILON = 1.0 # In seconds. 17 | 18 | 19 | def setup(): 20 | # Initialize task manager. 21 | if not os.path.isdir(_config.diary_dir): 22 | os.makedirs(_config.diary_dir) 23 | 24 | 25 | def new_command(label): 26 | if os.path.isfile(_resource_path(label)): 27 | util.tlog("A diary entry with label `%s` already exists" % label) 28 | return 29 | now = time.time() 30 | entry = { 31 | 'label': label, 32 | 'epoch': now, 33 | 'interval_start_time': now, 34 | 'effective': 0.0, 35 | } 36 | with util.OpenAndLock(_resource_path(label), 'w') as f: 37 | f.write(util.json_dumps(entry)) 38 | util.tlog("diary entry with label `%s` created" % label) 39 | 40 | 41 | def done_command(label): 42 | # TODO(alive): Rewrite with the paradigm used in timer_db.py. 43 | # Move this logic into Entry. 44 | if not os.path.isfile(_resource_path(label)): 45 | util.tlog("No diary entry with label `%s` exists" % label) 46 | return 47 | 48 | with util.OpenAndLock(_resource_path(label), 'r') as f: 49 | entry = json.load(f) 50 | 51 | now = time.time() 52 | span = now - entry['epoch'] 53 | effective = entry['effective'] 54 | 55 | # Handle orderings: 56 | # 1. __enter__, new, done, __exit__. 57 | # 2. new, done, __enter__, __exit__. 58 | # 3. __enter__, __exit__, new, done. 59 | # 60 | # If we are in any of the above orderings AND effective is 0.0, then we 61 | # simply set `effective` to `span`. In these cases, there is no interaction 62 | # between diary and timer. 63 | # 64 | # If, however, the first condition is True, but the second is false, then 65 | # we must be in case #1 above. The only way for `effective` to be non-zero 66 | # here is for the user to have called timer.inc(). This is only possible 67 | # if a timer is running, and therefore, cases #2 and #3 are ruled out. The 68 | # else block handles this case. 69 | if (util.isclose(entry['epoch'], entry['interval_start_time']) and 70 | util.isclose(effective, 0.0)): 71 | effective = span 72 | else: 73 | # Handle orderings: 74 | # 1. __enter__, new, done, __exit__ (with call to timer.inc()). 75 | # 5. new, __enter__, done, __exit__. 76 | # Capture the amount of time elapsed after __enter__. 77 | timer = timer_db.running_timer() 78 | if timer: 79 | with timer: 80 | if timer.label == label: 81 | effective += time.time() - entry['interval_start_time'] 82 | 83 | if util.isclose(span - effective, 0.0, abs_tol=_TIME_EPSILON): 84 | overhead = 0.0 85 | else: 86 | overhead = (span - effective) / span 87 | 88 | click.echo(" Start time: %s" % _format_timestamp(entry['epoch'])) 89 | click.echo(" End time: %s" % _format_timestamp(now)) 90 | click.echo(" Span (m): %.2f" % (span / 60.0)) 91 | click.echo(" Effective (m): %.2f" % (effective / 60.0)) 92 | click.echo(" Overhead (%%): %.1f%%" % (overhead * 100.0)) 93 | 94 | os.remove(_resource_path(label)) 95 | 96 | 97 | def remove_command(label): 98 | if not os.path.isfile(_resource_path(label)): 99 | util.tlog("No diary entry with label `%s` exists" % label) 100 | return 101 | os.remove(_resource_path(label)) 102 | 103 | 104 | def status_command(): 105 | # TODO(alive): implement 106 | util.tlog("Wawaaw... Not implemented yet :(") 107 | 108 | 109 | # TODO(alive): move into Entry 110 | def increment_effective(label, delta): 111 | if not os.path.isfile(_resource_path(label)): 112 | return False 113 | 114 | with util.OpenAndLock(_resource_path(label), 'r+') as f: 115 | entry = json.load(f) 116 | entry['effective'] += delta # Can validly result in negative numbers. 117 | f.truncate(0) 118 | f.seek(0) 119 | f.write(util.json_dumps(entry)) 120 | return True 121 | 122 | 123 | class Entry(object): 124 | def __init__(self, label): 125 | self._label = label 126 | 127 | def __enter__(self): 128 | """Signals this module that the timer is running on the given label. 129 | 130 | If a diary entry for the given label exists, this function sets its 131 | interval_start_time to the current time. 132 | 133 | Possible interactions with timer: 134 | Trivial orderings (no interaction): 135 | In these cases, new and done track all elapsed time. 136 | 1. new, done, __enter__, __exit__ 137 | 2. __enter__, __exit__, new, done 138 | 3. __enter__, new, done, __exit__ 139 | 140 | In this case, __enter__ and __exit__ track all elapsed time. 141 | 4. new, __enter__, __exit__, done 142 | 143 | 144 | Tricky orderings: 145 | 5. new, __enter__, done, __exit__ 146 | In this case, done captures the amount of time elapsed after 147 | __enter__. 148 | 149 | 6. __enter__, new, __exit__, done 150 | In this case, __exit__ captures the amount of time elapsed after new. 151 | """ 152 | # TODO(alive): rewrite with the paradigm used in timer_db.py. 153 | if os.path.isfile(_resource_path(self._label)): 154 | # TODO(alive): there's a harmless and unlikely race condition here. 155 | with util.OpenAndLock(_resource_path(self._label), 'r+') as f: 156 | entry = json.load(f) 157 | entry['interval_start_time'] = time.time() 158 | f.seek(0) 159 | f.truncate(0) 160 | f.write(util.json_dumps(entry)) 161 | 162 | return self 163 | 164 | def __exit__(self, *args): 165 | """Signals this module that the timer is running on the given label. 166 | 167 | If a diary entry for the given label exists, this function increments its 168 | 'effective' field by (time.time() - interval_start_time). 169 | """ 170 | if os.path.isfile(_resource_path(self._label)): 171 | # TODO(alive): there's a harmless and unlikely race condition here. 172 | with util.OpenAndLock(_resource_path(self._label), 'r+') as f: 173 | entry = json.load(f) 174 | entry['effective'] += time.time() - entry['interval_start_time'] 175 | f.seek(0) 176 | f.truncate(0) 177 | f.write(util.json_dumps(entry)) 178 | 179 | 180 | def _resource_path(name): 181 | return os.path.join(_config.diary_dir, name) 182 | 183 | 184 | def _format_timestamp(timestamp): 185 | datetime_obj = datetime.datetime.fromtimestamp(timestamp) 186 | return datetime.datetime.strftime(datetime_obj, "%H:%M:%S") 187 | -------------------------------------------------------------------------------- /habits.py: -------------------------------------------------------------------------------- 1 | import walros_base 2 | import data_util 3 | from data_util import UpdateCellsMode 4 | 5 | import click 6 | import util 7 | 8 | WORKSHEET_NAME = "Habits" 9 | WORKSHEET_ID = 751441428 # Found in URL. 10 | HEADER_ROWS = [ 11 | "TITLES", 12 | "COLUMN_LABELS", 13 | "MEDIANS", 14 | "PERCENTILE_75", 15 | "PERCENTILE_90", 16 | "MAX", 17 | "TOTAL_COUNT", 18 | ] 19 | 20 | # Margins 21 | COLUMN_MARGIN = 5 22 | 23 | # We currently assume that each day column is immediately followed 24 | # by week, month, and quarter columns. 25 | DAY_COLUMN_INDICES = list(range(2, 19, 4)) 26 | 27 | # Aggregate columns that are independently/manually set: 28 | WEEK_COLUMN_INDICES = [] 29 | MONTH_COLUMN_INDICES = [] 30 | QUARTER_COLUMN_INDICES = [] 31 | 32 | 33 | def init_command(): 34 | tracker_data = walros_base.TrackerData() 35 | tracker_data.worksheet_id = WORKSHEET_ID 36 | tracker_data.worksheet_name = WORKSHEET_NAME 37 | tracker_data.column_margin = COLUMN_MARGIN 38 | tracker_data.header_rows = HEADER_ROWS 39 | tracker_data.day_column_indices = DAY_COLUMN_INDICES 40 | tracker_data.week_column_indices = WEEK_COLUMN_INDICES 41 | tracker_data.month_column_indices = MONTH_COLUMN_INDICES 42 | tracker_data.quarter_column_indices = QUARTER_COLUMN_INDICES 43 | tracker_data.init_writes_zeros = False 44 | 45 | spreadsheet = data_util.Spreadsheet(walros_base.SPREADSHEET_ID) 46 | worksheet = spreadsheet.GetWorksheet(tracker_data.worksheet_id) 47 | init_requests = walros_base.build_init_requests(tracker_data, spreadsheet, 48 | worksheet) 49 | if len(init_requests) == 0: 50 | util.tlog("%s sheet is already initialized for today" % 51 | tracker_data.worksheet_name) 52 | return 53 | 54 | # Update sheet wide statistics. 55 | init_requests += build_update_statistics_requests(worksheet, tracker_data) 56 | 57 | # Send requests. 58 | response = spreadsheet.BatchUpdate(init_requests) 59 | 60 | 61 | def build_update_statistics_requests(worksheet, tracker_data): 62 | requests = [] 63 | # Build score formula. 64 | score_formula = 'SUM(' 65 | for i in tracker_data.day_column_indices[1:]: 66 | col = walros_base.col_num_to_letter(i) 67 | score_formula += "%s%d," % (col, tracker_data.last_day_row_index) 68 | score_formula += ")" 69 | 70 | # Normalize. 71 | score_formula += " / " + str(len(tracker_data.day_column_indices[1:])) 72 | 73 | # Take floor. 74 | score_formula = "=FLOOR(" + score_formula + ")" 75 | 76 | 77 | requests.append(worksheet.NewUpdateCellBatchRequest( 78 | tracker_data.last_day_row_index, 2, score_formula, 79 | UpdateCellsMode.formula.value)) 80 | return requests 81 | -------------------------------------------------------------------------------- /memex_daemon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import copy 4 | import json 5 | import math 6 | import os.path 7 | import random 8 | import re 9 | 10 | from datetime import datetime 11 | from datetime import timedelta 12 | 13 | from rtmapi import Rtm as rtm 14 | 15 | RTM_KEYS_FILEPATH = '~/.walros/memex/keys.json' 16 | 17 | 18 | class Task(object): 19 | def __init__(self, task_id=None, task_name=None): 20 | self.id = task_id 21 | self.name = task_name 22 | self.due = None 23 | self.added = None 24 | self.priority = 4 25 | self.estimate = None 26 | self.postponed = None 27 | self.completed = None 28 | self.url = None 29 | self.tags = [] 30 | self.notes = [] 31 | 32 | # RTM specific fields 33 | self.list_id = None 34 | self.taskseries_id = None 35 | self.task_id = None 36 | 37 | @classmethod 38 | def generate_task_id(class_obj, prefix, num_digits): 39 | task_num = random.randint(0, math.pow(10, num_digits) - 1) 40 | task_num_str = str(task_num).zfill(num_digits) 41 | return prefix + task_num_str 42 | 43 | @classmethod 44 | def generate_task_regex(class_obj, prefix): 45 | '''All Task IDs match the pattern {prefix}[0-9]+''' 46 | return re.compile(r'^%s([0-9]+)$' % prefix) 47 | 48 | 49 | class Milk(object): 50 | def __init__(self, api_key, secret, token, perms): 51 | self.__rtmapi = rtm(api_key, secret, perms, token) 52 | 53 | def tasks(self, selector): 54 | tasks = [] 55 | result = self.__rtmapi.rtm.tasks.getList(filter=selector) 56 | for tasklist in result.tasks: 57 | for taskseries in tasklist: 58 | # TODO: there can be multiple tasks per task series? wat 59 | task = Task() 60 | Milk.__set_fields_from_rtm(task, tasklist.id, taskseries) 61 | tasks.append(task) 62 | return tasks 63 | 64 | def create_task(self, task): 65 | entry = task.name 66 | if task.due: 67 | entry += ' ^%s' % task.due.isoformat() 68 | if task.priority: 69 | entry += ' !%d' % task.priority 70 | if task.estimate: 71 | entry += ' =%s' % task.estimate 72 | if task.tags: 73 | if task.id and task.id not in task.tags: 74 | task.tags.append(task.id) 75 | for tag in task.tags: 76 | entry += ' #%s' % tag 77 | 78 | timeline = self.__create_timeline() 79 | ret = self.__rtmapi.rtm.tasks.add(timeline=timeline, parse='1', 80 | name=entry) 81 | task.list_id = ret.list.id 82 | task.taskseries_id = ret.list.taskseries.id 83 | task.task_id = ret.list.taskseries.task.id 84 | 85 | if task.completed: 86 | self.__rtmapi.rtm.tasks.complete( 87 | timeline=timeline, list_id=task.list_id, 88 | taskseries_id=task.taskseries_id, task_id=task.task_id) 89 | 90 | if task.url: 91 | self.__rtmapi.rtm.tasks.setURL( 92 | timeline=timeline, list_id=task.list_id, 93 | taskseries_id=task.taskseries_id, task_id=task.task_id, 94 | url=task.url) 95 | 96 | for note in task.notes: 97 | self.__rtmapi.rtm.tasks.notes.add( 98 | timeline=timeline, list_id=task.list_id, 99 | taskseries_id=task.taskseries_id, task_id=task.task_id, 100 | note_title=note[0], note_text=note[1]) 101 | 102 | def set_tags(self, task, tags): 103 | if not task.list_id or not task.taskseries_id or not task.task_id: 104 | raise Exception('Milk: task rtm fields uninitialized') 105 | 106 | if task.id and task.id not in tags: 107 | tags.append(task.id) 108 | 109 | timeline = self.__create_timeline() 110 | self.__rtmapi.rtm.tasks.setTags( 111 | timeline=timeline, list_id=task.list_id, 112 | taskseries_id=task.taskseries_id, task_id=task.task_id, 113 | tags=','.join(tags)) 114 | 115 | def __create_timeline(self): 116 | return self.__rtmapi.rtm.timelines.create().timeline.value 117 | 118 | @classmethod 119 | def __parse_rtm_date(class_obj, datestr): 120 | if not datestr: 121 | return None 122 | 123 | # TODO: factor out format constant 124 | return datetime.strptime(datestr, '%Y-%m-%dT%H:%M:%SZ') 125 | 126 | @classmethod 127 | def __set_fields_from_rtm(class_obj, task, list_id, rtm_taskseries): 128 | # TODO: these should be set to None if not set in rtm 129 | 130 | task.name = rtm_taskseries.name 131 | task.due = Milk.__parse_rtm_date(rtm_taskseries.task.due) 132 | task.added = Milk.__parse_rtm_date(rtm_taskseries.task.added) 133 | task.priority = rtm_taskseries.task.priority 134 | if task.priority == 'N': 135 | task.priority = 4 136 | else: 137 | task.priority = int(task.priority) 138 | 139 | task.estimate = rtm_taskseries.task.estimate 140 | task.postponed = rtm_taskseries.task.postponed 141 | task.completed = Milk.__parse_rtm_date(rtm_taskseries.task.completed) 142 | task.url = rtm_taskseries.url 143 | task.tags = [] 144 | for tag in rtm_taskseries.tags: 145 | task.tags.append(tag.value) 146 | 147 | task.notes = [] 148 | for note in rtm_taskseries.notes: 149 | task.notes.append((note.title, note.value)) 150 | 151 | # RTM specific fields 152 | task.list_id = list_id 153 | task.taskseries_id = rtm_taskseries.id 154 | task.task_id = rtm_taskseries.task.id 155 | 156 | def init_milk(): 157 | # read keys; TODO: key path should be in config 158 | keys = None 159 | with open(os.path.expanduser(RTM_KEYS_FILEPATH)) as f: 160 | keys = json.loads(f.read()) 161 | 162 | # initialize api 163 | api_key = keys['rtm_api_key'] 164 | secret = keys['rtm_secret'] 165 | token = keys['rtm_token'] 166 | milk = Milk(api_key, secret, token, 'delete') 167 | return milk 168 | 169 | def id_from_tags(tags, id_prefix): 170 | task_id = None 171 | id_regex = Task.generate_task_regex(id_prefix) 172 | for tag in tags: 173 | if id_regex.match(tag): 174 | task_id = tag 175 | 176 | return task_id 177 | 178 | def memex(milk): 179 | # TODO: factor out 'memex' tag constant 180 | tasks = milk.tasks('tag:memex and status:completed') 181 | interval_regex = Task.generate_task_regex('s') 182 | 183 | for task in tasks: 184 | 185 | print task.name 186 | 187 | # TODO: factor out prefix constant 188 | task.id = id_from_tags(task.tags, 'z') 189 | if not task.id: 190 | task.id = Task.generate_task_id('z', 6) 191 | 192 | # move current task to memex-archive 193 | extraneous_tags = [] 194 | for t in task.tags: 195 | if t == task.id or t == 'memex' or interval_regex.match(t): 196 | continue 197 | 198 | extraneous_tags.append(t) 199 | 200 | archive_tags = extraneous_tags + ['memex-archive'] 201 | milk.set_tags(copy.deepcopy(task), archive_tags) 202 | 203 | # extract interval size from tags 204 | interval = None 205 | for tag in task.tags: 206 | match = interval_regex.match(tag) 207 | if match: 208 | interval = int(match.groups()[0]) 209 | 210 | if interval == 0: 211 | # task is no longer of interest 212 | continue 213 | 214 | if not interval: 215 | interval = 4 216 | 217 | # create next task in review series 218 | task.due = task.completed + timedelta(interval) 219 | task.completed = None 220 | task.tags = ['memex', 's%d' % (interval * 2)] 221 | task.tags += extraneous_tags 222 | task.priority = 3 223 | 224 | milk.create_task(task) 225 | 226 | 227 | if __name__ == '__main__': 228 | milk = init_milk() 229 | memex(milk) 230 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | altgraph==0.10.2 2 | appnope==0.1.0 3 | bdist-mpkg==0.5.0 4 | bonjour-py==0.3 5 | cffi==1.5.2 6 | click==6.3 7 | cryptography==1.2.3 8 | decorator==4.0.9 9 | enum34==1.1.2 10 | gnureadline==6.3.3 11 | httplib2==0.9.2 12 | idna==2.0 13 | ipaddress==1.0.16 14 | ipython==4.1.2 15 | ipython-genutils==0.1.0 16 | macholib==1.5.1 17 | matplotlib==1.3.1 18 | modulegraph==0.10.4 19 | numpy==1.8.0rc1 20 | oauth2client==2.0.1 21 | pandas==0.18.0 22 | path.py==8.1.2 23 | pexpect==4.0.1 24 | pickleshare==0.6 25 | ptyprocess==0.5.1 26 | py2app==0.7.3 27 | pyasn1==0.1.9 28 | pyasn1-modules==0.0.8 29 | pycparser==2.14 30 | pyobjc-core==2.5.1 31 | pyobjc-framework-Accounts==2.5.1 32 | pyobjc-framework-AddressBook==2.5.1 33 | pyobjc-framework-AppleScriptKit==2.5.1 34 | pyobjc-framework-AppleScriptObjC==2.5.1 35 | pyobjc-framework-Automator==2.5.1 36 | pyobjc-framework-CFNetwork==2.5.1 37 | pyobjc-framework-Cocoa==2.5.1 38 | pyobjc-framework-Collaboration==2.5.1 39 | pyobjc-framework-CoreData==2.5.1 40 | pyobjc-framework-CoreLocation==2.5.1 41 | pyobjc-framework-CoreText==2.5.1 42 | pyobjc-framework-DictionaryServices==2.5.1 43 | pyobjc-framework-EventKit==2.5.1 44 | pyobjc-framework-ExceptionHandling==2.5.1 45 | pyobjc-framework-FSEvents==2.5.1 46 | pyobjc-framework-InputMethodKit==2.5.1 47 | pyobjc-framework-InstallerPlugins==2.5.1 48 | pyobjc-framework-InstantMessage==2.5.1 49 | pyobjc-framework-LatentSemanticMapping==2.5.1 50 | pyobjc-framework-LaunchServices==2.5.1 51 | pyobjc-framework-Message==2.5.1 52 | pyobjc-framework-OpenDirectory==2.5.1 53 | pyobjc-framework-PreferencePanes==2.5.1 54 | pyobjc-framework-PubSub==2.5.1 55 | pyobjc-framework-QTKit==2.5.1 56 | pyobjc-framework-Quartz==2.5.1 57 | pyobjc-framework-ScreenSaver==2.5.1 58 | pyobjc-framework-ScriptingBridge==2.5.1 59 | pyobjc-framework-SearchKit==2.5.1 60 | pyobjc-framework-ServiceManagement==2.5.1 61 | pyobjc-framework-Social==2.5.1 62 | pyobjc-framework-SyncServices==2.5.1 63 | pyobjc-framework-SystemConfiguration==2.5.1 64 | pyobjc-framework-WebKit==2.5.1 65 | pyOpenSSL==0.15.1 66 | pyparsing==2.0.1 67 | python-dateutil==1.5 68 | pytz==2013.7 69 | requests==2.9.1 70 | rsa==3.3 71 | scipy==0.13.0b1 72 | seaborn==0.7.0 73 | simplegeneric==0.8.1 74 | six==1.10.0 75 | traitlets==4.2.1 76 | xattr==0.6.4 77 | zope.interface==4.1.1 78 | -------------------------------------------------------------------------------- /timer.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import fcntl 3 | import itertools 4 | import os 5 | import os.path 6 | import platform 7 | import signal 8 | import subprocess 9 | import sys 10 | import threading 11 | import time 12 | 13 | import click 14 | 15 | from oauth2client.service_account import ServiceAccountCredentials 16 | 17 | import config 18 | import data_util 19 | import diary 20 | import timer_db 21 | import util 22 | import walros_base 23 | 24 | from data_util import UpdateCellsMode 25 | from util import OpenAndLock 26 | 27 | _config = config.Config() 28 | 29 | WORKSHEET_NAME = "Time" 30 | WORKSHEET_ID = 925912296 # Found in URL. 31 | HEADER_ROWS = [ 32 | "TITLES", 33 | "COLUMN_LABELS", 34 | "MEDIANS", 35 | "PERCENTILE_75", 36 | "PERCENTILE_90", 37 | "MAX", 38 | "WEIGHTS", 39 | "RELATIVE_VALUE", 40 | "GOAL_NUMBER", 41 | "PROGRESS", 42 | ] 43 | 44 | # Margins 45 | COLUMN_MARGIN = 5 46 | 47 | # We currently assume that each day column is immediately followed 48 | # by week, month, and quarter columns. 49 | DAY_COLUMN_INDICES = [2, 6, 10, 14] 50 | 51 | FOCUS_UNIT_DURATION = 1800 # Seconds (30 minutes). 52 | BASE_INTERRUPTION_PENALTY = 0.04 # Time units 53 | SPREADSHEET_KEY_FILEPATH = os.path.expanduser("~/.walros/keys.json") 54 | 55 | # Signals. 56 | SIGNALS_SUBDIR = ".signals" 57 | TIMER_RUNNING_SIGNAL = "timer_running" 58 | DISPLAY_UPDATE_SIGNAL = "display_update" 59 | 60 | 61 | def setup(): 62 | # Initialize timer. 63 | if not os.path.isdir(_config.timer_signals_dir): 64 | os.makedirs(_config.timer_signals_dir) 65 | 66 | def sigint_handler(signum, frame): # TODO: put inside with statement instead. 67 | clear_signals() 68 | sys.exit(0) 69 | 70 | signal.signal(signal.SIGINT, sigint_handler) 71 | 72 | 73 | def init_tracker_data(): 74 | tracker_data = walros_base.TrackerData() 75 | tracker_data.worksheet_id = WORKSHEET_ID 76 | tracker_data.worksheet_name = WORKSHEET_NAME 77 | tracker_data.column_margin = COLUMN_MARGIN 78 | tracker_data.header_rows = HEADER_ROWS 79 | tracker_data.day_column_indices = DAY_COLUMN_INDICES 80 | tracker_data.reduce_formula_final =\ 81 | lambda r: "=IF(SUM(%s), AVERAGE(%s), 0)" % (r, r) 82 | return tracker_data 83 | 84 | 85 | def init_command(): 86 | tracker_data = init_tracker_data() 87 | spreadsheet = data_util.Spreadsheet(walros_base.SPREADSHEET_ID) 88 | worksheet = spreadsheet.GetWorksheet(tracker_data.worksheet_id) 89 | init_requests = walros_base.build_init_requests(tracker_data, spreadsheet, 90 | worksheet) 91 | if len(init_requests) == 0: 92 | util.tlog("%s sheet is already initialized for today" % 93 | tracker_data.worksheet_name) 94 | return 95 | 96 | # Update sheet wide statistics. 97 | init_requests += build_update_statistics_requests(worksheet, tracker_data) 98 | 99 | # Send requests. 100 | response = spreadsheet.BatchUpdate(init_requests) 101 | 102 | # TODO(alive): move sheets logic into separate module. 103 | def build_update_statistics_requests(worksheet, tracker_data): 104 | # Build final score formula. 105 | range_expr = "" 106 | for i in tracker_data.day_column_indices[1:]: 107 | range_expr += "%s$%d*MIN(TimeMaxScore,%s%d/%s$%d), " % ( 108 | walros_base.col_num_to_letter(i), 109 | tracker_data.row_index("WEIGHTS"), 110 | walros_base.col_num_to_letter(i), 111 | tracker_data.last_day_row_index, 112 | walros_base.col_num_to_letter(i), 113 | tracker_data.row_index("GOAL_NUMBER")) 114 | range_expr = range_expr[:-2] # Strip trailing space &comma. 115 | final_score_formula = "=SUM(%s)" % range_expr 116 | 117 | requests = [] 118 | requests.append(worksheet.NewUpdateCellBatchRequest( 119 | tracker_data.last_day_row_index, 2, final_score_formula, 120 | UpdateCellsMode.formula.value)) 121 | 122 | return requests 123 | 124 | 125 | def start_command(label, seconds, minutes, hours, whitenoise, count, track, 126 | force): 127 | tracker_data = init_tracker_data() 128 | if not set_signal(TIMER_RUNNING_SIGNAL): 129 | util.tlog("A timer is already running") 130 | return 131 | 132 | clear_signals(exclude=[TIMER_RUNNING_SIGNAL]) 133 | if not seconds and not minutes and not hours: 134 | seconds = FOCUS_UNIT_DURATION * count 135 | 136 | if force and timer_db.timer_exists(label): 137 | with timer_db.TimerFileProxy(label) as timer: 138 | timer.clear() 139 | 140 | if timer_db.timer_exists(label): 141 | with timer_db.TimerFileProxy(label) as timer: 142 | timer.resume() 143 | util.tlog("Resuming at %d seconds" % timer.remaining) 144 | 145 | else: 146 | with timer_db.TimerFileProxy(label) as timer: 147 | timer.start(seconds, minutes, hours) 148 | util.tlog("Starting at %d seconds" % timer.remaining) 149 | 150 | try: 151 | with diary.Entry(label): # Tracks effective time spent and overhead. 152 | while True: # Timer loop. 153 | # end time could have been changed; read again from file 154 | with timer_db.TimerFileProxy(label) as timer: 155 | if timer.is_complete: 156 | util.tlog("Timer `%s` completed" % timer.label) 157 | break 158 | if unset_signal(DISPLAY_UPDATE_SIGNAL): 159 | util.tlog("Currently at %d seconds" % timer.remaining) 160 | time.sleep(1) 161 | finally: 162 | with timer_db.TimerFileProxy(label) as timer: 163 | if not timer.is_complete: 164 | remaining = timer.pause() 165 | util.tlog("Pausing timer at %d seconds" % remaining, prefix='\n') 166 | unset_signal(TIMER_RUNNING_SIGNAL) 167 | 168 | try: # Timer complete, notify and record. 169 | timer_notify() 170 | 171 | if track: 172 | with timer_db.TimerFileProxy(label) as timer: 173 | spreadsheet = data_util.Spreadsheet(walros_base.SPREADSHEET_ID) 174 | worksheet = spreadsheet.GetWorksheet(tracker_data.worksheet_id) 175 | latest_date = spreadsheet.GetCellValue( 176 | worksheet_name=tracker_data.worksheet_name, 177 | row=tracker_data.row_margin + 1, col=1) 178 | latest_date = latest_date.split()[0] 179 | date_today = datetime.datetime.now().strftime("%Y-%m-%d") 180 | if latest_date != date_today: 181 | util.tlog("Warning: the latest row in spreadsheet does not correspond " 182 | "to today's date") 183 | 184 | credit = count 185 | timer_interruptions = timer.interruptions 186 | while timer_interruptions > 0: 187 | # Impose exponential cost to interruptions. 188 | timer_interruptions -= 1 189 | credit -= BASE_INTERRUPTION_PENALTY * 2 ** timer_interruptions 190 | credit = max(credit, 0) 191 | 192 | label_count = timer_increment_label_count( 193 | spreadsheet, worksheet, tracker_data, label, credit) 194 | util.tlog("interruptions: %d, credit: %.2f" % 195 | (timer.interruptions, credit)) 196 | util.tlog("%s count: %.2f" % (label, label_count)) 197 | timer.clear() 198 | 199 | except Exception as ex: 200 | util.tlog("Error updating spreadsheet count") 201 | raise ex 202 | 203 | 204 | def status_command(data): 205 | def timer_status_str(timer): 206 | return ' %s: %d' % (timer.label, timer.remaining) 207 | running_timer = timer_db.running_timer() 208 | if running_timer: 209 | with running_timer: 210 | click.secho(timer_status_str(running_timer), fg='green') 211 | for timer in timer_db.existing_timers(): 212 | with timer: 213 | if timer.is_running: 214 | continue 215 | click.echo(timer_status_str(timer)) 216 | 217 | 218 | def clear_command(label): 219 | if timer_db.timer_exists(label): 220 | with timer_db.TimerFileProxy(label) as timer: 221 | if timer.is_running: 222 | util.tlog("The timer with label `%s` is currently running" % 223 | timer.label) 224 | return 225 | timer.clear() 226 | else: 227 | util.tlog("No paused timer with label '%s' exists" % label) 228 | 229 | 230 | def inc_command(delta): 231 | timer = timer_db.running_timer() 232 | if not timer: 233 | util.tlog("No timer is currently running") 234 | return 235 | with timer: 236 | remaining = timer.remaining 237 | timer.inc(delta) 238 | click.echo(" previous: %f" % remaining) 239 | click.echo(" current: %f" % timer.remaining) 240 | if diary.increment_effective(timer.label, -1 * delta): 241 | click.echo(" (diary updated)") 242 | 243 | set_signal(DISPLAY_UPDATE_SIGNAL) 244 | 245 | 246 | def timer_notify(): 247 | # Only notify in Mac OS. 248 | if platform.system().lower() != 'darwin': 249 | return 250 | 251 | def _notify(): 252 | util.tlog("Notified") 253 | time_str = datetime.datetime.strftime(datetime.datetime.now(), "%H:%M") 254 | subprocess.call(["osascript -e \'display notification " + 255 | "\"%s: notify\" with title \"walrOS timer\"\'" % time_str], 256 | shell=True) 257 | for ix in range(0, 3): 258 | subprocess.call(["afplay", "/System/Library/Sounds/Blow.aiff"]) 259 | time.sleep(1) 260 | 261 | # Run in a separate thread to allow API calls to make simultaneous progress. 262 | t = threading.Thread(target=_notify) 263 | t.start() 264 | 265 | 266 | def timer_signal_path(signal_name): 267 | return os.path.join(_config.timer_dir, SIGNALS_SUBDIR, signal_name) 268 | 269 | 270 | def timer_col_index_for_label(spreadsheet, worksheet, tracker_data, label): 271 | row_index = tracker_data.row_index("COLUMN_LABELS") 272 | ranges = ["%s!%d:%d" % (tracker_data.worksheet_name, row_index, row_index)] 273 | response = spreadsheet.GetRanges(ranges, "sheets/data/rowData") 274 | row_data = response["sheets"][0]["data"][0]["rowData"][0]["values"] 275 | row_data = row_data[tracker_data.column_margin:] 276 | row_labels = [ col["effectiveValue"]["stringValue"] for col in row_data ] 277 | try: 278 | col_index = row_labels.index(label) 279 | col_index += tracker_data.column_margin + 1 280 | except ValueError: 281 | raise click.ClickException("Label %s not found in spreadsheet." % label) 282 | 283 | return col_index 284 | 285 | 286 | def timer_increment_label_count(spreadsheet, worksheet, tracker_data, label, 287 | credit): 288 | row = tracker_data.row_margin + 1 289 | col = timer_col_index_for_label(spreadsheet, worksheet, tracker_data, label) 290 | cell_value = spreadsheet.GetCellValue(tracker_data.worksheet_name, row, col) 291 | cell_value = credit if not cell_value else float(cell_value) + credit 292 | 293 | requests = [] 294 | requests.append(worksheet.NewUpdateCellBatchRequest( 295 | row, col, cell_value, update_cells_mode=data_util.UpdateCellsMode.number.value)) 296 | spreadsheet.BatchUpdate(requests) 297 | 298 | return cell_value 299 | 300 | 301 | # TODO(alive): move signals into separate module. 302 | def set_signal(signal_name): 303 | signal_filepath = timer_signal_path(signal_name) 304 | if os.path.isfile(signal_filepath): 305 | return False 306 | 307 | with open(signal_filepath, 'w') as f: 308 | f.flush() 309 | 310 | return True 311 | 312 | 313 | def unset_signal(signal_name): 314 | signal_filepath = timer_signal_path(signal_name) 315 | if os.path.isfile(signal_filepath): 316 | os.remove(signal_filepath) 317 | return True 318 | return False 319 | 320 | 321 | def signal_is_set(signal_name): 322 | signal_filepath = timer_signal_path(signal_name) 323 | if os.path.isfile(signal_filepath): 324 | return True 325 | return False 326 | 327 | 328 | def clear_signals(exclude=[]): 329 | signals_dirpath = os.path.join(_config.timer_dir, SIGNALS_SUBDIR) 330 | for signal_name in os.listdir(signals_dirpath): 331 | if signal_name not in exclude: 332 | os.remove(timer_signal_path(signal_name)) 333 | 334 | -------------------------------------------------------------------------------- /timer_db.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import json 3 | import os 4 | import os.path 5 | import sys 6 | import time 7 | 8 | import config 9 | import util 10 | 11 | _config = config.Config() 12 | _TIMER_FILE_SUFFIX = '-timer' 13 | _DIRECTORY_PATH = _config.timer_dir 14 | 15 | 16 | def timer_exists(label): 17 | return os.path.isfile(_timer_filepath(label)) 18 | 19 | 20 | def existing_timers(): 21 | filepaths = (f for f in os.listdir(_DIRECTORY_PATH) 22 | if os.path.isfile(os.path.join(_DIRECTORY_PATH, f))) 23 | timer_filenames = (f for f in filepaths if f.endswith(_TIMER_FILE_SUFFIX)) 24 | timer_names = (f[:f.rfind(_TIMER_FILE_SUFFIX)] for f in timer_filenames) 25 | return (TimerFileProxy(t) for t in timer_names) 26 | 27 | 28 | def running_timer(): 29 | '''Returns the currently running timer or None if no timer is running.''' 30 | # TODO(alive): re-implement in terms of signals? 31 | running_timer = None 32 | for timer in existing_timers(): 33 | with timer: 34 | if timer.is_running: 35 | # There should never be more than one running timer. 36 | assert running_timer is None 37 | running_timer = timer 38 | return running_timer 39 | 40 | 41 | class _check_preconditions(object): 42 | def __init__(self, assert_running_is=None): 43 | self._assert_running_is = assert_running_is 44 | 45 | def __call__(self, method): 46 | @functools.wraps(method) 47 | def wrapper(method_self, *args, **kwargs): 48 | assert method_self._enter_called, ( 49 | 'TimerFileProxy must be used within a `with` statement.') 50 | if self._assert_running_is is not None: 51 | assert method_self.is_running == self._assert_running_is 52 | return method(method_self, *args, **kwargs) 53 | return wrapper 54 | 55 | 56 | class TimerFileProxy(object): 57 | def __init__(self, label): 58 | self._label = label 59 | self._enter_called = False 60 | self._clear_called = False 61 | 62 | @property 63 | @_check_preconditions() 64 | def label(self): 65 | return self._label 66 | 67 | @property 68 | @_check_preconditions() 69 | def endtime(self): 70 | return self._timer_obj['endtime'] 71 | 72 | @property 73 | @_check_preconditions() 74 | def remaining(self): 75 | if self.is_running: 76 | return int(round(self.endtime - time.time())) 77 | else: 78 | return self._timer_obj['remaining'] 79 | 80 | @property 81 | @_check_preconditions() 82 | def interruptions(self): 83 | return self._timer_obj['interruptions'] 84 | 85 | @property 86 | @_check_preconditions() 87 | def filepath(self): 88 | return _timer_filepath(self._label) 89 | 90 | @property 91 | @_check_preconditions() 92 | def is_running(self): 93 | return not util.isclose(self.endtime, 0, abs_tol=1e-3) 94 | 95 | @property 96 | @_check_preconditions() 97 | def is_complete(self): 98 | return self.remaining <= 0 99 | 100 | @_check_preconditions(assert_running_is=False) 101 | def start(self, seconds, minutes, hours): 102 | duration = seconds + minutes * 60 + hours * 3600 103 | self._timer_obj['endtime'] = time.time() + duration 104 | 105 | @_check_preconditions(assert_running_is=False) 106 | def resume(self): 107 | assert self.remaining >= 0 108 | self._timer_obj['endtime'] = int(round(time.time() + self.remaining)) 109 | 110 | @_check_preconditions() 111 | def pause(self): 112 | if self.is_running: 113 | self._timer_obj['remaining'] = int(round(self.endtime - time.time())) 114 | self._timer_obj['endtime'] = 0 115 | self._timer_obj['interruptions'] += 1 116 | return self.remaining 117 | 118 | @_check_preconditions() 119 | def clear(self): 120 | if self.is_running: 121 | self.pause() 122 | self._clear_called = True 123 | 124 | @_check_preconditions(assert_running_is=True) 125 | def inc(self, delta): 126 | self._timer_obj['endtime'] += delta 127 | 128 | def __enter__(self): 129 | # TODO(alive): Should likely hold the file lock throughout the entire `with` 130 | # statement. 131 | # TODO(alive): Explicitly create timer files. This can cause subtle bugs. 132 | self._enter_called = True 133 | if not os.path.isfile(self.filepath): 134 | self._timer_obj = { 135 | 'label': self._label, 136 | 'endtime': 0, # This field is 0 when the timer is not running. 137 | 'remaining': sys.maxsize, 138 | 'interruptions': 0 139 | } 140 | with util.OpenAndLock(self.filepath, 'w') as f: 141 | f.write(util.json_dumps(self._timer_obj)) 142 | else: 143 | with util.OpenAndLock(self.filepath, 'r') as f: 144 | self._timer_obj = json.load(f) 145 | return self 146 | 147 | def __exit__(self, *args): 148 | if self._clear_called: 149 | os.remove(self.filepath) # Delete timer. 150 | else: 151 | with util.OpenAndLock(self.filepath, 'w') as f: 152 | f.write(util.json_dumps(self._timer_obj)) 153 | self._enter_called = False 154 | 155 | 156 | def _timer_filepath(label): 157 | return os.path.join(_DIRECTORY_PATH, label + _TIMER_FILE_SUFFIX) 158 | 159 | -------------------------------------------------------------------------------- /util.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import fcntl 3 | import json 4 | 5 | import click 6 | 7 | class OpenAndLock(object): 8 | def __init__(self, filepath, open_mode): 9 | self.filepath_ = filepath 10 | self.file_ = None 11 | self.open_mode_ = open_mode 12 | self.lock_mode_ = fcntl.LOCK_EX 13 | if self.open_mode_ == 'r': 14 | self.lock_mode_ = fcntl.LOCK_SH 15 | 16 | def __enter__(self): 17 | self.file_ = open(self.filepath_, self.open_mode_) 18 | fcntl.lockf(self.file_.fileno(), self.lock_mode_) 19 | return self.file_ 20 | 21 | def __exit__(self, *args): 22 | self.file_.flush() 23 | fcntl.lockf(self.file_.fileno(), fcntl.LOCK_UN) 24 | self.file_.close() 25 | 26 | 27 | # Echo log message with timestamp. 28 | def tlog(message, prefix=''): 29 | click.echo("%s%s: %s." % 30 | (prefix, 31 | datetime.datetime.strftime(datetime.datetime.now(), "%H:%M"), 32 | message)) 33 | 34 | def json_dumps(obj): 35 | return json.dumps(obj, sort_keys=True, indent=2) 36 | 37 | def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): 38 | return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) 39 | -------------------------------------------------------------------------------- /walros.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import traceback 3 | 4 | import click 5 | 6 | import habits as habits_module 7 | import diary as diary_module 8 | import timer as timer_module 9 | 10 | 11 | @click.group() 12 | def walros(): 13 | pass 14 | 15 | 16 | @walros.command() 17 | @click.pass_context 18 | def init(ctx): 19 | timer_module.init_command() 20 | habits_module.init_command() 21 | 22 | 23 | # -- Timer -- 24 | 25 | @walros.group() 26 | def timer(): 27 | timer_module.setup() 28 | 29 | @timer.command() 30 | def init(): 31 | timer_module.init_command() 32 | 33 | 34 | @timer.command() 35 | @click.argument("label") 36 | @click.option("-s", "--seconds", default=0.0) 37 | @click.option("-m", "--minutes", default=0.0) 38 | @click.option("-h", "--hours", default=0.0) 39 | @click.option("-w", "--whitenoise", is_flag=True) 40 | @click.option("-d", "--diary", is_flag=True, default=False) 41 | @click.option("-c", "--count", default=1) 42 | @click.option("--track/--no-track", default=True) 43 | @click.option("--force", is_flag=True) 44 | def start(label, seconds, minutes, hours, whitenoise, diary, count, track, 45 | force): 46 | if diary: 47 | diary_module.new_command(label) 48 | timer_module.start_command( 49 | label, seconds, minutes, hours, whitenoise, count, track, force) 50 | 51 | 52 | @timer.command() 53 | @click.option("-d", "--data", is_flag=True) 54 | def status(data): 55 | timer_module.status_command(data) 56 | 57 | 58 | @timer.command() 59 | @click.argument("label") 60 | def clear(label): 61 | timer_module.clear_command(label) 62 | 63 | 64 | @timer.command() 65 | @click.argument("delta", type=float) 66 | def inc(delta): 67 | timer_module.inc_command(delta) 68 | 69 | 70 | @timer.command() 71 | @click.argument("delta", type=float) 72 | def dec(delta): 73 | timer_module.inc_command(-1 * delta) 74 | 75 | 76 | # -- Diary -- 77 | 78 | @walros.group() 79 | def diary(): 80 | diary_module.setup() 81 | 82 | @diary.command() 83 | @click.argument("label") 84 | def new(label): 85 | diary_module.new_command(label) 86 | 87 | @diary.command() 88 | @click.argument("label") 89 | def done(label): 90 | diary_module.done_command(label) 91 | 92 | @diary.command() 93 | @click.argument("label") 94 | def rm(label): 95 | diary_module.remove_command(label) 96 | 97 | @diary.command() 98 | def status(): 99 | diary_module.status_command() 100 | 101 | 102 | # -- Habits -- 103 | 104 | @walros.group() 105 | def habits(): 106 | pass 107 | 108 | @habits.command() 109 | def init(): 110 | habits_module.init_command() 111 | 112 | 113 | if __name__ == "__main__": 114 | try: 115 | walros() 116 | 117 | except Exception as ex: 118 | click.echo(traceback.format_exc()) 119 | -------------------------------------------------------------------------------- /walros_base.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import datetime 3 | 4 | import click 5 | 6 | import data_util 7 | from data_util import UpdateCellsMode 8 | 9 | SPREADSHEET_ID = "1JvO-sjs2kCFFD2FcX1a7XQ8uYyj-9o-anS9RElrtXYI" 10 | DATE_FORMAT = "%Y-%m-%d %A" 11 | 12 | 13 | class TrackerData(object): 14 | def __init__(self): 15 | self.worksheet_id = None # Found in spreadsheet URL. 16 | self.worksheet_name = None 17 | self.column_margin = None 18 | self.header_rows = [] 19 | self.day_column_indices = [] 20 | self.week_column_indices = [] 21 | self.month_column_indices = [] 22 | self.quarter_column_indices = [] 23 | self.init_writes_zeros = True 24 | 25 | # Reduce formula for each category. It's set to SUM by default. 26 | self.reduce_formula = lambda r: "=SUM(%s)" % r 27 | 28 | # Reduce formula for the final score on the spreadsheet margin. 29 | # It's set to `reduce_formula` above, by default. 30 | self.reduce_formula_final = self.reduce_formula 31 | 32 | 33 | @property 34 | def row_margin(self): 35 | return len(self.header_rows) 36 | 37 | @property 38 | def last_day_row_index(self): 39 | return self.row_margin + 1 40 | 41 | @property 42 | def week_merge_column_indices(self): 43 | return [ x + 1 for x in self.day_column_indices ] 44 | 45 | @property 46 | def month_merge_column_indices(self): 47 | return [ x + 2 for x in self.day_column_indices ] 48 | 49 | @property 50 | def quarter_merge_column_indices(self): 51 | return [ x + 3 for x in self.day_column_indices ] 52 | 53 | @property 54 | def all_column_indices(self): 55 | return (self.day_column_indices + 56 | self.week_merge_column_indices + 57 | self.month_merge_column_indices + 58 | self.quarter_merge_column_indices) 59 | 60 | @property 61 | def all_anchor_column_indices(self): 62 | return (self.day_column_indices + 63 | self.week_column_indices + 64 | self.month_column_indices + 65 | self.quarter_column_indices) 66 | 67 | @property 68 | def all_merge_column_indices(self): 69 | return (self.week_merge_column_indices + 70 | self.month_merge_column_indices + 71 | self.quarter_merge_column_indices) 72 | 73 | def row_index(self, row_name): 74 | return self.header_rows.index(row_name) + 1 75 | 76 | def reduce_column_offset(self, col_index): 77 | if col_index in self.all_anchor_column_indices: 78 | return 0 79 | 80 | if (col_index in [ x + 1 for x in self.day_column_indices ] or 81 | col_index in [ x + 1 for x in self.week_column_indices ] or 82 | col_index in [ x + 1 for x in self.month_column_indices ]): 83 | return -1 84 | 85 | if (col_index in [ x + 2 for x in self.day_column_indices ] or 86 | col_index in [ x + 2 for x in self.week_column_indices ]): 87 | return -2 88 | 89 | if col_index in [ x + 3 for x in self.day_column_indices ]: 90 | return -3 91 | 92 | 93 | def build_init_requests(tracker_data, spreadsheet, worksheet): 94 | # Relevant ranges to fetch from time sheet. 95 | ranges = [] 96 | ranges.append("A%d" % tracker_data.last_day_row_index) # Last date tracked. 97 | 98 | for x in tracker_data.all_merge_column_indices: 99 | ranges.append("R%dC%d" % (tracker_data.last_day_row_index, x)) 100 | 101 | # Prepend sheet name to all ranges. 102 | ranges = ["%s!%s" % (tracker_data.worksheet_name, x) for x in ranges] 103 | response = spreadsheet.GetRanges(ranges, fields="sheets(data,merges)") 104 | 105 | # Extract date information. 106 | data = response['sheets'][0]["data"] 107 | last_date_tracked_data = data[0] 108 | last_date_tracked_string = ( 109 | last_date_tracked_data['rowData'][0]['values'][0]['formattedValue']) 110 | last_date_tracked = datetime.datetime.strptime( 111 | last_date_tracked_string, DATE_FORMAT).date() 112 | today = datetime.date.today() 113 | if today == last_date_tracked: 114 | return [] 115 | 116 | # Exctract cell merge information. 117 | week_merge_ranges = ( 118 | extract_merge_ranges(worksheet, response, 119 | tracker_data.week_merge_column_indices, 120 | tracker_data.last_day_row_index)) 121 | month_merge_ranges = ( 122 | extract_merge_ranges(worksheet, response, 123 | tracker_data.month_merge_column_indices, 124 | tracker_data.last_day_row_index)) 125 | quarter_merge_ranges = ( 126 | extract_merge_ranges(worksheet, response, 127 | tracker_data.quarter_merge_column_indices, 128 | tracker_data.last_day_row_index)) 129 | 130 | # Insert new days. 131 | init_requests = build_new_day_requests( 132 | tracker_data, worksheet, today, last_date_tracked, 133 | week_merge_ranges, month_merge_ranges, quarter_merge_ranges) 134 | 135 | return init_requests 136 | 137 | 138 | def extract_merge_ranges(worksheet, response_data, column_indices, 139 | last_day_row_index): 140 | merges = response_data['sheets'][0].get("merges", []) 141 | merge_ranges = [x for i, x in enumerate(merges) 142 | if x["endColumnIndex"] in column_indices] 143 | assert(not merge_ranges or len(merge_ranges) == len(column_indices)) 144 | if not merge_ranges: 145 | merge_ranges += build_new_merge_ranges(worksheet, last_day_row_index, 146 | column_indices) 147 | return merge_ranges 148 | 149 | 150 | def build_new_merge_ranges(worksheet, row, column_indices): 151 | merge_ranges = [] 152 | for i in column_indices: 153 | merge_ranges.append(worksheet.NewMergeRange(row, row, i, i)) 154 | return merge_ranges 155 | 156 | 157 | def build_new_day_requests(tracker_data, worksheet, today, last_date_tracked, 158 | week_merge_ranges, month_merge_ranges, 159 | quarter_merge_ranges): 160 | requests = [] 161 | delta_days = (today - last_date_tracked).days 162 | 163 | # Insert new rows. 164 | requests.append(worksheet.NewInsertRowsBatchRequest( 165 | tracker_data.row_margin + 1, delta_days)) 166 | 167 | # Adjust merge ranges to account for newly inserted rows. 168 | for merge_range in (week_merge_ranges + month_merge_ranges + 169 | quarter_merge_ranges): 170 | merge_range['startRowIndex'] += delta_days 171 | merge_range['endRowIndex'] += delta_days 172 | 173 | # Write dates into new rows. 174 | tmp_date = copy.deepcopy(last_date_tracked) 175 | while tmp_date != today: 176 | tmp_date += datetime.timedelta(1) 177 | row_index = tracker_data.last_day_row_index + (today - tmp_date).days 178 | requests.append(worksheet.NewUpdateCellBatchRequest( 179 | row_index, 1, tmp_date.strftime(DATE_FORMAT))) 180 | 181 | # Deal with merges. 182 | requests += build_new_day_merge_requests( 183 | tracker_data, worksheet, today, last_date_tracked, 184 | week_merge_ranges, month_merge_ranges, quarter_merge_ranges) 185 | 186 | # For today's row, write per-column zero counts on anchor columns. 187 | if tracker_data.init_writes_zeros: 188 | for i in tracker_data.all_anchor_column_indices: 189 | requests.append(worksheet.NewUpdateCellBatchRequest( 190 | tracker_data.last_day_row_index, i, 0, UpdateCellsMode.number.value)) 191 | return requests 192 | 193 | 194 | def build_new_day_merge_requests(tracker_data, worksheet, today, 195 | last_date_tracked, week_merge_ranges, 196 | month_merge_ranges, quarter_merge_ranges): 197 | requests = [] 198 | tmp_date = copy.deepcopy(last_date_tracked) 199 | 200 | # Helper functions inside closure to avoid duplication of tedious code. 201 | def extend_merge_ranges(merge_ranges): 202 | """Helper function inside closure to avoid duplication of tedious code. 203 | """ 204 | for merge_range in merge_ranges: 205 | merge_range["startRowIndex"] -= 1 206 | 207 | def close_merge_range_requests(merge_ranges, column_indices): 208 | range_obj = data_util.MergeRange(merge_ranges[0]) 209 | 210 | # Write category reduce formulas 211 | for i, col in enumerate(column_indices): 212 | reduce_column_offset = tracker_data.reduce_column_offset(col) 213 | if reduce_column_offset != 0: # Reduce only if non-anchor. 214 | 215 | reduce_formula = tracker_data.reduce_formula 216 | if i == 0: 217 | # Reduce formula for final score on the left spreadsheet margin. 218 | reduce_formula = tracker_data.reduce_formula_final 219 | 220 | requests.append(build_reduce_formula_update( 221 | reduce_formula, worksheet, range_obj.row_range[0], col, 222 | range_obj.row_range, col + reduce_column_offset)) 223 | 224 | while merge_ranges: 225 | # TODO: don't append if row span is equal to 1 226 | # TODO: return list instead of modifying external variable 227 | requests.append(worksheet.NewMergeCellsBatchRequest( 228 | merge_ranges.pop())) 229 | 230 | while tmp_date != today: 231 | row_index = tracker_data.last_day_row_index + (today - tmp_date).days - 1 232 | tmp_next_date = tmp_date + datetime.timedelta(1) 233 | 234 | # Week column merges. 235 | if tmp_date.isocalendar()[1] == tmp_next_date.isocalendar()[1]: 236 | # Same week. Extend merge ranges on weekly columns. 237 | extend_merge_ranges(week_merge_ranges) 238 | else: 239 | # New week. Close out existing merge ranges. 240 | close_merge_range_requests(week_merge_ranges, 241 | tracker_data.week_merge_column_indices) 242 | week_merge_ranges += ( 243 | build_new_merge_ranges(worksheet, row_index, 244 | tracker_data.week_merge_column_indices)) 245 | 246 | # Month column merges. 247 | if tmp_date.month == tmp_next_date.month: 248 | # Same month. Extend merge ranges on monthly columns. 249 | extend_merge_ranges(month_merge_ranges) 250 | else: 251 | # New month. Close out existing merge ranges. 252 | close_merge_range_requests(month_merge_ranges, 253 | tracker_data.month_merge_column_indices) 254 | month_merge_ranges += ( 255 | build_new_merge_ranges(worksheet, row_index, 256 | tracker_data.month_merge_column_indices)) 257 | 258 | # Quarter column merges. 259 | if (tmp_date.month - 1) / 3 == (tmp_next_date.month - 1) / 3: 260 | # Same quarter. Extend merge ranges on quarterly columns. 261 | extend_merge_ranges(quarter_merge_ranges) 262 | else: 263 | # New quarter. Close out existing merge ranges. 264 | close_merge_range_requests(quarter_merge_ranges, 265 | tracker_data.quarter_merge_column_indices) 266 | quarter_merge_ranges += ( 267 | build_new_merge_ranges(worksheet, row_index, 268 | tracker_data.quarter_merge_column_indices)) 269 | 270 | tmp_date = tmp_next_date 271 | 272 | close_merge_range_requests(week_merge_ranges, 273 | tracker_data.week_merge_column_indices) 274 | close_merge_range_requests(month_merge_ranges, 275 | tracker_data.month_merge_column_indices) 276 | close_merge_range_requests(quarter_merge_ranges, 277 | tracker_data.quarter_merge_column_indices) 278 | return requests 279 | 280 | 281 | # Helper to build and append update formula requests to a list. 282 | def build_reduce_formula_update(reduce_formula, worksheet, 283 | target_row, target_column, 284 | formula_row_range, formula_column): 285 | formula_range = "%s%d:%s%d" % ( 286 | col_num_to_letter(formula_column), formula_row_range[0], 287 | col_num_to_letter(formula_column), formula_row_range[1]) 288 | return worksheet.NewUpdateCellBatchRequest( 289 | target_row, target_column, reduce_formula(formula_range), 290 | UpdateCellsMode.formula.value) 291 | 292 | 293 | def col_num_to_letter(column_int): 294 | start_index = 1 # it can start either at 0 or at 1 295 | letter = '' 296 | while column_int > 25 + start_index: 297 | letter += chr(65 + int((column_int - start_index) / 26) - 1) 298 | column_int = column_int - (int((column_int - start_index) / 26)) * 26 299 | letter += chr(65 - start_index + (int(column_int))) 300 | return letter 301 | --------------------------------------------------------------------------------