├── .gitignore ├── README ├── coloredlogcat.py ├── dbconvert.py ├── export-google-reader-subscriptions.py ├── gedit ├── trailsave.gedit-plugin └── trailsave.py ├── getflow-export.py ├── inisort.py ├── ispmail-admin ├── .gitignore ├── README.rst ├── config.py.sample └── mailserver-admin.py ├── mail.py ├── make-pip-url.sh ├── mount-encrypted.sh ├── mysqld-ram.sh ├── parse-android-earningsreport.py ├── php-runserver.sh ├── reconstruct-vdi-tree.py ├── sendtokindle ├── sendtokindle.sh └── sendtokindlerc ├── todoist-import.py ├── trac-ticket-merge.py └── trigger-mount.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # JetBrains IDEs 2 | /.idea/ 3 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Stuff I use on Linux. 2 | -------------------------------------------------------------------------------- /coloredlogcat.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | ''' 4 | Copyright 2009, The Android Open Source Project 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 
17 | ''' 18 | 19 | # script to highlight adb logcat output for console 20 | # written by jeff sharkey, http://jsharkey.org/ 21 | # piping detection and popen() added by other android team members 22 | 23 | 24 | import os, sys, re, StringIO 25 | import fcntl, termios, struct 26 | 27 | # unpack the current terminal width/height 28 | data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234') 29 | HEIGHT, WIDTH = struct.unpack('hh',data) 30 | 31 | BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) 32 | 33 | def format(fg=None, bg=None, bright=False, bold=False, dim=False, reset=False): 34 | # manually derived from http://en.wikipedia.org/wiki/ANSI_escape_code#Codes 35 | codes = [] 36 | if reset: codes.append("0") 37 | else: 38 | if not fg is None: codes.append("3%d" % (fg)) 39 | if not bg is None: 40 | if not bright: codes.append("4%d" % (bg)) 41 | else: codes.append("10%d" % (bg)) 42 | if bold: codes.append("1") 43 | elif dim: codes.append("2") 44 | else: codes.append("22") 45 | return "\033[%sm" % (";".join(codes)) 46 | 47 | 48 | def indent_wrap(message, indent=0, width=80): 49 | wrap_area = width - indent 50 | messagebuf = StringIO.StringIO() 51 | current = 0 52 | while current < len(message): 53 | next = min(current + wrap_area, len(message)) 54 | messagebuf.write(message[current:next]) 55 | if next < len(message): 56 | messagebuf.write("\n%s" % (" " * indent)) 57 | current = next 58 | return messagebuf.getvalue() 59 | 60 | 61 | LAST_USED = [RED,GREEN,YELLOW,BLUE,MAGENTA,CYAN,WHITE] 62 | KNOWN_TAGS = { 63 | "dalvikvm": BLUE, 64 | "Process": BLUE, 65 | "ActivityManager": CYAN, 66 | "ActivityThread": CYAN, 67 | } 68 | 69 | def allocate_color(tag): 70 | # this will allocate a unique format for the given tag 71 | # since we dont have very many colors, we always keep track of the LRU 72 | if not tag in KNOWN_TAGS: 73 | KNOWN_TAGS[tag] = LAST_USED[0] 74 | color = KNOWN_TAGS[tag] 75 | LAST_USED.remove(color) 76 | LAST_USED.append(color) 77 | return 
color 78 | 79 | 80 | RULES = { 81 | #re.compile(r"([\w\.@]+)=([\w\.@]+)"): r"%s\1%s=%s\2%s" % (format(fg=BLUE), format(fg=GREEN), format(fg=BLUE), format(reset=True)), 82 | } 83 | 84 | TAGTYPE_WIDTH = 3 85 | TAG_WIDTH = 20 86 | PROCESS_WIDTH = 8 # 8 or -1 87 | HEADER_SIZE = TAGTYPE_WIDTH + 1 + TAG_WIDTH + 1 + PROCESS_WIDTH + 1 88 | 89 | TAGTYPES = { 90 | "V": "%s%s%s " % (format(fg=WHITE, bg=BLACK), "V".center(TAGTYPE_WIDTH), format(reset=True)), 91 | "D": "%s%s%s " % (format(fg=BLACK, bg=BLUE), "D".center(TAGTYPE_WIDTH), format(reset=True)), 92 | "I": "%s%s%s " % (format(fg=BLACK, bg=GREEN), "I".center(TAGTYPE_WIDTH), format(reset=True)), 93 | "W": "%s%s%s " % (format(fg=BLACK, bg=YELLOW), "W".center(TAGTYPE_WIDTH), format(reset=True)), 94 | "E": "%s%s%s " % (format(fg=BLACK, bg=RED), "E".center(TAGTYPE_WIDTH), format(reset=True)), 95 | } 96 | 97 | retag = re.compile("^([A-Z])/([^\(]+)\(([^\)]+)\): (.*)$") 98 | 99 | # to pick up -d or -e 100 | adb_args = ' '.join(sys.argv[1:]) 101 | 102 | # if someone is piping in to us, use stdin as input. 
if not, invoke adb logcat 103 | if os.isatty(sys.stdin.fileno()): 104 | input = os.popen("adb %s logcat" % adb_args) 105 | else: 106 | input = sys.stdin 107 | 108 | while True: 109 | try: 110 | line = input.readline() 111 | except KeyboardInterrupt: 112 | break 113 | 114 | match = retag.match(line) 115 | if not match is None: 116 | tagtype, tag, owner, message = match.groups() 117 | linebuf = StringIO.StringIO() 118 | 119 | # center process info 120 | if PROCESS_WIDTH > 0: 121 | owner = owner.strip().center(PROCESS_WIDTH) 122 | linebuf.write("%s%s%s " % (format(fg=BLACK, bg=BLACK, bright=True), owner, format(reset=True))) 123 | 124 | # right-align tag title and allocate color if needed 125 | tag = tag.strip() 126 | color = allocate_color(tag) 127 | tag = tag[-TAG_WIDTH:].rjust(TAG_WIDTH) 128 | linebuf.write("%s%s %s" % (format(fg=color, dim=False), tag, format(reset=True))) 129 | 130 | # write out tagtype colored edge 131 | if not tagtype in TAGTYPES: break 132 | linebuf.write(TAGTYPES[tagtype]) 133 | 134 | # insert line wrapping as needed 135 | message = indent_wrap(message, HEADER_SIZE, WIDTH) 136 | 137 | # format tag message using rules 138 | for matcher in RULES: 139 | replace = RULES[matcher] 140 | message = matcher.sub(replace, message) 141 | 142 | linebuf.write(message) 143 | line = linebuf.getvalue() 144 | 145 | print line 146 | if len(line) == 0: break 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | -------------------------------------------------------------------------------- /dbconvert.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to convert between different databases. 4 | 5 | Written against SQLAlchemy 6.1beta. 
6 | 7 | Based on code from: 8 | http://www.tylerlesmann.com/2009/apr/27/copying-databases-across-platforms-sqlalchemy/ 9 | 10 | TODO: Not using the ORM is likely faster, but more extensive to write; 11 | We'd need to construct queries manually; also, even with the ORM, there 12 | are probably some SQLAlchemy-related optimizations that could be employed 13 | to speed up the the processing of large tables (expunge_all?). 14 | 15 | TODO: Quite frequently, schema conversion doesn't work because SQLAlchemy is 16 | quite strict about schemas. For example, SQLite has no LONGTEXT column, and 17 | MySQL requires a fixed length VARCHAR. Does requirements are not automatically 18 | bridged. Possible a way could be provided for the user to define the mapper 19 | him/herself. Note that this basically is only a schema creation issue. You 20 | can already workaround such a error by defining the target table manually. 21 | """ 22 | 23 | import optparse 24 | import sys 25 | import time 26 | from sqlalchemy import create_engine, MetaData, Table 27 | from sqlalchemy.orm import sessionmaker 28 | from sqlalchemy.ext.declarative import declarative_base 29 | 30 | 31 | def make_session(connection_string, convert_unicode): 32 | engine = create_engine(connection_string, echo=False, 33 | convert_unicode=convert_unicode) 34 | Session = sessionmaker(bind=engine) 35 | return Session(), engine 36 | 37 | 38 | def pull_data(from_db, to_db, options): 39 | # Note about encodings: We use "convert_unicode" for the source 40 | # but not the destination connection. To hope here is that the data 41 | # read is available in unicode, and the destination backend then has the 42 | # responsibility to properly handle the unicode data. 
"convert_unicode" 43 | # seems somewhat dangerous on write, because the backend gets only a 44 | # bytestring, and it seems like SQLAlchemy (0.6beta2) does not sync it's 45 | # own "encoding" setting (used by convert_unicode) with the MySQLdb 46 | # "charset" option (defaults to latin1). As a result, you have a utf8 47 | # string that is processed by the server as latin1. 48 | source, sengine = make_session(from_db, convert_unicode=True) 49 | smeta = MetaData(bind=sengine) 50 | destination, dengine = make_session(to_db, convert_unicode=False) 51 | 52 | print 'Pulling schemas from source server' 53 | smeta.reflect(only=options.tables) 54 | 55 | for name, table in smeta.tables.iteritems(): 56 | print 'Processing table "%s"' % name 57 | if options.create_tables: 58 | print '...Creating table on destination server' 59 | table.metadata.create_all(dengine) 60 | NewRecord = quick_mapper(table) 61 | columns = table.columns.keys() 62 | 63 | num_records = source.query(table).count() 64 | i = 0 65 | start = time.time() 66 | # Note that yield only affects the number of ORM objects generated 67 | # by SA; The stupid MySQLdb backend still fetches all rows at once. 68 | # Try OurSQL. References for this: 69 | # * http://www.mail-archive.com/sqlalchemy@googlegroups.com/msg17389.html) 70 | # * http://stackoverflow.com/questions/2145177/is-this-a-memory-leak-a-program-in-python-with-sqlalchemy-sqlite 71 | for record in source.query(table).yield_per(getattr(options, 'yield')): 72 | data = dict( 73 | [(str(column), getattr(record, column)) for column in columns] 74 | ) 75 | if options.merge: 76 | # TODO: Can be use load=False here? And should we? 
77 | destination.merge(NewRecord(**data)) 78 | else: 79 | destination.add(NewRecord(**data)) 80 | 81 | i += 1 82 | 83 | if (options.flush and i % options.flush == 0): 84 | destination.flush() 85 | if (options.commit and i % options.commit == 0): 86 | destination.commit() 87 | 88 | now = time.time() 89 | done = i/float(num_records) 90 | sys.stderr.write('...Transferring record %d/%d (%d%%), %ds elapsed, %ds estimated\r' % ( 91 | i, num_records, done*100, now-start, (now-start)/done)) 92 | sys.stderr.flush() 93 | sys.stderr.write("\n"); 94 | print '...Transferred %d records in %f seconds' % (i, time.time() - start) 95 | print '...Committing changes' 96 | destination.commit() 97 | 98 | 99 | def get_usage(): 100 | return """usage: %prog [options] FROM TO 101 | 102 | FROM/TO syntax: driver://user[:password]@host[:port]/database) 103 | Example: mysql://root@db2:3307/reporting""" 104 | 105 | 106 | def quick_mapper(table): 107 | Base = declarative_base() 108 | class GenericMapper(Base): 109 | __table__ = table 110 | return GenericMapper 111 | 112 | 113 | def main(): 114 | parser = optparse.OptionParser(usage=get_usage()) 115 | parser.add_option('-t', '--table', dest="tables", action="append", 116 | help="comma only this table (can be given multiple times)", 117 | metavar="NAME") 118 | parser.add_option('--skip-schema', dest="create_tables", default=True, 119 | action='store_false', 120 | help="do not create tables in the destination database") 121 | parser.add_option('--merge', dest="merge", action='store_true', 122 | help="merge with existing data based on primary key; "+ 123 | "use if the target table already has rows; up to "+ 124 | "15 times slower.") 125 | parser.add_option('-y', '--yield', dest="yield", default=4000, 126 | type="int", metavar="NUM", 127 | help="number of source rows to pull into memory in one "+ 128 | "batch; some backends like MySQLdb still fetch "+ 129 | "everything anyway (default: %default)") 130 | parser.add_option('-f', '--flush', dest="flush", 
default=10000, 131 | type="int", metavar="NUM", 132 | help="number of rows to cache in memory before sending "+ 133 | "queries to the destination database "+ 134 | "(default: %default)") 135 | parser.add_option('-c', '--commit', dest="commit", default=None, 136 | type="int", metavar="NUM", 137 | help="number of rows after which to commit and start a "+ 138 | "new transaction; implies a flush (default: "+ 139 | "only commit when done)") 140 | options, args = parser.parse_args(sys.argv[1:]) 141 | 142 | if len(args) < 2: 143 | parser.print_usage() 144 | print >>sys.stderr, "error: you need to specify FROM and TO urls" 145 | return 1 146 | elif len(args) > 2: 147 | parser.print_usage() 148 | print >>sys.stderr, "error: unexpected arguments: %s" % ", ".join(args[2:]) 149 | return 1 150 | else: 151 | from_url, to_url = args 152 | 153 | pull_data( 154 | from_url, 155 | to_url, 156 | options, 157 | ) 158 | 159 | 160 | if __name__ == '__main__': 161 | sys.exit(main() or 0) -------------------------------------------------------------------------------- /export-google-reader-subscriptions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf8 3 | """Export your Google Reader subscriptions to OPML format. 4 | 5 | 2011 by Michael Elsdörfer. Consider this public domain. 6 | 7 | 8 | This script depends on the libgreader library: 9 | 10 | https://github.com/askedrelic/libgreader/ 11 | 12 | Which be installed via: 13 | 14 | $ easy_install libgreader 15 | 16 | 17 | Usage: 18 | 19 | $ export-google-reader-subscriptions username password 20 | 21 | If the password is not given, it is queried via stdin. The final 22 | OPML is written to stdout. 23 | """ 24 | 25 | import sys 26 | import xml.etree.ElementTree as ET 27 | try: 28 | import libgreader 29 | except ImportError: 30 | print "libgreader not installed. 
Use easy_install libgreader" 31 | else: 32 | from libgreader import GoogleReader, ClientAuthMethod 33 | 34 | 35 | def main(): 36 | if len(sys.argv) <= 1 or len(sys.argv) > 3: 37 | print("Usage: %s username [password]" % (sys.argv[0])) 38 | return 1 39 | 40 | username = sys.argv[1] 41 | if len(sys.argv) == 2: 42 | sys.stderr.write('Password for %s: ' % username) 43 | password = raw_input() 44 | else: 45 | password = sys.argv[2] 46 | 47 | auth = ClientAuthMethod(username, password) 48 | reader = GoogleReader(auth) 49 | 50 | root = ET.Element('opml') 51 | head = ET.SubElement(root, 'head') 52 | ET.SubElement(head, 'title').text = \ 53 | '%s subscriptions in Google Reader' % username 54 | body = ET.SubElement(root, 'body') 55 | 56 | category_els = {} 57 | 58 | reader.buildSubscriptionList() 59 | for feed in reader.getSubscriptionList(): 60 | if feed.getCategories(): 61 | for category in feed.getCategories(): 62 | # Create category element 63 | if not category.id in category_els: 64 | category_el = ET.SubElement(body, 'outline') 65 | category_el.set('text', category.label) 66 | category_el.set('title', category.label) 67 | category_els[category.id] = category_el 68 | make_feed_el(feed, category_els[category.id]) 69 | else: 70 | make_feed_el(feed, body) 71 | 72 | tree = ET.ElementTree(root) 73 | tree.write(sys.stdout, xml_declaration=True) 74 | 75 | 76 | def make_feed_el(feed, parent): 77 | feed_el = ET.SubElement(parent, 'outline') 78 | feed_el.set('text', feed.title) 79 | feed_el.set('title', feed.title) 80 | feed_el.set('type', 'rss') 81 | feed_el.set('xmlUrl', feed.feedUrl) 82 | # seems to be always empty; possible a bug in libgreader? 
83 | feed_el.set('htmlUrl', feed.siteUrl or '') 84 | 85 | 86 | if __name__ == '__main__': 87 | sys.exit(main() or 0) 88 | -------------------------------------------------------------------------------- /gedit/trailsave.gedit-plugin: -------------------------------------------------------------------------------- 1 | [Gedit Plugin] 2 | Loader=python 3 | Module=trailsave 4 | IAge=2 5 | Name=Save without trailing space 6 | Description=Automatically strip all trailing whitespace from lines before saving 7 | Authors=Osmo Salomaa , Jamie Bennett 8 | Copyright=Copyright (C) 2006-2008 Osmo Salomaa, 2009 Jamie Bennett 9 | Website=http://www.linuxuk.org/projects/trailsave 10 | -------------------------------------------------------------------------------- /gedit/trailsave.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2006-2008 Osmo Salomaa 2 | # Modified 2009 Jamie Bennett 3 | # 4 | # This program is free software; you can redistribute it and/or modify it under 5 | # the terms of the GNU General Public License as published by the Free Software 6 | # Foundation; either version 2 of the License, or (at your option) any later 7 | # version. 8 | # 9 | # This program is distributed in the hope that it will be useful, but WITHOUT 10 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 11 | # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 12 | # details. 13 | # 14 | # You should have received a copy of the GNU General Public License along with 15 | # this program; if not, write to the Free Software Foundation, Inc., 51 16 | # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
17 | 18 | """Automatically strip all trailing whitespace before saving.""" 19 | 20 | import gedit 21 | 22 | 23 | class SaveWithoutTrailingSpacePlugin(gedit.Plugin): 24 | 25 | """Automatically strip all trailing whitespace before saving.""" 26 | 27 | def activate(self, window): 28 | """Activate plugin.""" 29 | 30 | handler_id = window.connect("tab-added", self.on_window_tab_added) 31 | window.set_data(self.__class__.__name__, handler_id) 32 | for doc in window.get_documents(): 33 | self.connect_document(doc) 34 | 35 | def connect_document(self, doc): 36 | """Connect to document's 'saving' signal.""" 37 | 38 | handler_id = doc.connect("saving", self.on_document_saving) 39 | doc.set_data(self.__class__.__name__, handler_id) 40 | 41 | def deactivate(self, window): 42 | """Deactivate plugin.""" 43 | 44 | name = self.__class__.__name__ 45 | handler_id = window.get_data(name) 46 | window.disconnect(handler_id) 47 | window.set_data(name, None) 48 | for doc in window.get_documents(): 49 | handler_id = doc.get_data(name) 50 | doc.disconnect(handler_id) 51 | doc.set_data(name, None) 52 | 53 | def on_document_saving(self, doc, *args): 54 | """Strip trailing spaces in document.""" 55 | 56 | doc.begin_user_action() 57 | self.strip_trailing_spaces_on_lines(doc) 58 | doc.end_user_action() 59 | 60 | def on_window_tab_added(self, window, tab): 61 | """Connect the document in tab.""" 62 | 63 | name = self.__class__.__name__ 64 | doc = tab.get_document() 65 | handler_id = doc.get_data(name) 66 | if handler_id is None: 67 | self.connect_document(doc) 68 | 69 | def strip_trailing_spaces_on_lines(self, doc): 70 | """Delete trailing space at the end of each line.""" 71 | 72 | buffer_end = doc.get_end_iter() 73 | for line in range(buffer_end.get_line() + 1): 74 | line_end = doc.get_iter_at_line(line) 75 | line_end.forward_to_line_end() 76 | itr = line_end.copy() 77 | while itr.backward_char(): 78 | if not itr.get_char() in (" ", "\t"): 79 | itr.forward_char() 80 | break 81 | 
doc.delete(itr, line_end) 82 | 83 | -------------------------------------------------------------------------------- /getflow-export.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | """Process a data export from Flow (getflow.com). 3 | 4 | Flow data is a zip that extract looks like this: 5 | 6 | index.html 7 | lists/ 8 | {List}.html 9 | {Group}/ 10 | List.html 11 | 12 | Expects to be given the path to the root directory. 13 | 14 | Notes: 15 | - Does not currently maintain folder structure, folders become flat. 16 | - Not sure about attachment/images, I don't use them or handle them here. 17 | 18 | Limitations: 19 | - Flow's export does not contain information about recurring tasks. 20 | If the task was created by the user "Flow", it presumably is 21 | recurring. 22 | - It does not contain information about which tasks are flagged. 23 | - The GetFlow export does not contain sections. 24 | 25 | """ 26 | 27 | import re 28 | import sys 29 | import os 30 | import datetime 31 | from time import mktime 32 | from os.path import join, exists 33 | import json 34 | from bs4 import BeautifulSoup 35 | from dateutil import parser as dateparser 36 | import html2text 37 | 38 | 39 | def detail(task_el, name, transform=None, optional=False): 40 | """Get the detail "name" from the task element.""" 41 | for item in task_el.find(class_='task-info').findAll('li'): 42 | #print filter(bool, ([ 43 | # (s if isinstance(s, basestring) else s.text).strip(' :\n\r\t') 44 | # for s in item.children])) 45 | title, value = filter(bool, ([ 46 | (s if isinstance(s, basestring) else s.text).strip(' :\n\r\t') 47 | for s in item.children])) 48 | 49 | if title.strip().lower() == name.lower(): 50 | if transform and value: 51 | value = transform(value) 52 | return value 53 | if optional: 54 | return None 55 | raise ValueError('detail %s not found for %s' % (name, task_el)) 56 | 57 | 58 | def activity_log(log, match): 59 | """Find an entry in the 
activity log.""" 60 | for entry in log: 61 | if match in entry['summary']: 62 | return entry['date'] 63 | return None 64 | 65 | 66 | def text(el): 67 | """Text of an element, or None.""" 68 | if not el: 69 | return None 70 | return el.text.strip() 71 | 72 | 73 | def stderr(msg, *args): 74 | msg = msg % tuple(args) 75 | print >> sys.stderr, msg.encode(sys.stderr.encoding) 76 | 77 | 78 | def process_one_list_file(filename): 79 | stderr(u'Processing %s', filename) 80 | with open(filename, 'r') as f: 81 | soup = BeautifulSoup(f.read()) 82 | 83 | list_name = soup.title(text=True)[0] 84 | task_elems = soup.find_all("li", class_="task") 85 | stderr(u'Found list {0} with {1} tasks'.format(list_name, len(task_elems))) 86 | 87 | tasks = [] 88 | for el in task_elems: 89 | task = { 90 | 'title': list(el.find('a', class_='body').children)[0].strip(), 91 | 'completed': 'completed' in el.find('a', class_='body')['class'], 92 | 'assigned-to': detail(el, 'Assigned to'), 93 | # 'created-on': detail(el, 'Created on', dateparser.parse), 94 | # 'completed-on': detail(el, 'Completed on', dateparser.parse, True), 95 | 'subscribers': detail(el, 'Subscribers'), 96 | 97 | 98 | 'activities': [], 99 | #'due-on': None, 100 | #'subtasks': [] 101 | } 102 | tasks.append(task) 103 | 104 | if el.find('span', class_="due-on"): 105 | task['due-on'] = dateparser.parse(el.find('span', class_="due-on").text.strip(u'— ')) 106 | # 107 | for subtask in el.findAll('li', class_='subtask'): 108 | task.setdefault('subtasks', []).append(subtask.text.strip()) 109 | 110 | for activity_el in el.findAll('li', class_='activity'): 111 | activity = { 112 | 'summary': activity_el.find(class_='summary').text.strip(), 113 | 'date': dateparser.parse(activity_el.find(class_='date').text.strip()) 114 | } 115 | 116 | detail_el = activity_el.find(class_='activity-detail') 117 | if detail_el: 118 | detail_html = "".join([str(x) for x in detail_el.contents]) 119 | detail_html = detail_html.decode('utf-8') 120 | 
activity['detail'] = detail_html 121 | activity['detail_plain'] = html2text.html2text(detail_html) 122 | 123 | if detail_el and 'comment' in detail_el['class']: 124 | activity['is_comment'] = True 125 | task['activities'].append(activity) 126 | 127 | task['created-at'] = \ 128 | activity_log(task['activities'], 'created this task') 129 | 130 | return (list_name, tasks) 131 | 132 | 133 | def main(prog, argv): 134 | if len(argv) != 1: 135 | print >> sys.stderr, 'Usage: {0} EXTRACTED_EXPORT_ZIP_DIR'.format(prog) 136 | return 137 | p = argv[0] 138 | 139 | if not exists(join(p, 'lists')): 140 | print >> sys.stderr, "No lists/ folder, I need the path where index.html is located." 141 | return 142 | 143 | lists = {} 144 | for dirpath, dirnames, filenames in os.walk(join(p, 'lists/')): 145 | html_files = set(filter(lambda f: f.endswith('.html'), filenames)) 146 | 147 | # filenames should be 343434-List-page-X.html 148 | # We want to process all pages in a group 149 | while html_files: 150 | first_file = list(sorted(list(html_files)))[0] 151 | basename = re.match(r'^(.*?)-page-\d+.html', first_file).groups() 152 | # Now get all page files for this group 153 | all_page_files = filter(lambda f: f.startswith('%s-page-' % basename), html_files) 154 | all_page_files.sort() 155 | 156 | list_name = None 157 | tasks = [] 158 | for filename in all_page_files: 159 | page_name, page_tasks = process_one_list_file(join(dirpath, filename)) 160 | 161 | if list_name: 162 | assert page_name == list_name 163 | else: 164 | list_name = page_name 165 | 166 | tasks.extend(page_tasks) 167 | 168 | # Add tasks from all pages to global result 169 | # Make sure list name is unique 170 | final_name = list_name 171 | i = 0 172 | while list_name in lists: 173 | final_name = '%s (%s)' % (list_name, i) 174 | i += 1 175 | lists[final_name] = tasks 176 | 177 | # Remove the processed files from global list of files 178 | html_files -= set(all_page_files) 179 | 180 | 181 | class DateEncoder(json.JSONEncoder): 
182 | def default(self, obj): 183 | if isinstance(obj, datetime.datetime): 184 | #return int(mktime(obj.timetuple())) 185 | return obj.isoformat() 186 | return json.JSONEncoder.default(self, obj) 187 | print json.dumps(lists, cls=DateEncoder, indent=4) 188 | 189 | 190 | if __name__ == '__main__': 191 | main(sys.argv[0], sys.argv[1:]) -------------------------------------------------------------------------------- /inisort.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | From: http://code.activestate.com/recipes/576587-sort-sections-and-keys-in-ini-file/ 4 | Original Author: Michal Niklas 5 | """ 6 | 7 | import sys 8 | 9 | USAGE = 'USAGE:\n\tinisort.py file.ini' 10 | 11 | def sort_ini(stream): 12 | """sort .ini file: sorts sections and in each section sorts keys""" 13 | lines = stream.readlines() 14 | section = '' 15 | sections = {} 16 | for line in lines: 17 | line = line.strip() 18 | if line: 19 | if line.startswith('['): 20 | section = line 21 | continue 22 | if section: 23 | try: 24 | sections[section].append(line) 25 | except KeyError: 26 | sections[section] = [line, ] 27 | if sections: 28 | sk = sections.keys() 29 | sk.sort() 30 | for k in sk: 31 | vals = sections[k] 32 | vals.sort() 33 | print k 34 | print '\n'.join(vals) 35 | print 36 | 37 | 38 | def main(): 39 | if '-h' in sys.argv or '--help' in sys.argv or len(sys.argv) > 2: 40 | print USAGE 41 | else: 42 | if len(sys.argv) == 2: 43 | with open(sys.argv[1]) as f: 44 | sort_ini(f) 45 | else: 46 | sort_ini(sys.stdin) 47 | 48 | 49 | if __name__ == '__main__': 50 | sys.exit(main() or 0) 51 | -------------------------------------------------------------------------------- /ispmail-admin/.gitignore: -------------------------------------------------------------------------------- 1 | /config.py 2 | *.pyc 3 | -------------------------------------------------------------------------------- /ispmail-admin/README.rst: 
-------------------------------------------------------------------------------- 1 | Command line script to manage the database of the workaround.org 2 | "ISP-style Email Server with Debian" tutorial: 3 | 4 | http://workaround.org/ispmail 5 | 6 | Newer versions of the tutorial probably changed the database 7 | layout, so this may not work for you. 8 | 9 | This script was originally written by Christoph Haas for 10 | SQLAlchemy 0.3. I ported it to SQLAlchemy 0.6, but haven't tested 11 | all the functionality of the port in full, so use at your own risk. 12 | -------------------------------------------------------------------------------- /ispmail-admin/config.py.sample: -------------------------------------------------------------------------------- 1 | db_uri = "mysql://mailserveradmin:password@127.0.0.1:3306/mailserver" 2 | -------------------------------------------------------------------------------- /ispmail-admin/mailserver-admin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # ispmailadmin.py 4 | # 5 | # Helps maintain virtual user accounts for server that are configured 6 | # as suggested in the workaround.org isp-mail tutorial. 7 | # 8 | # (C) 2007 Christoph Haas 9 | # License: MIT 10 | # 11 | # Port to SQLAlchemy 0.6 by Michael Elsdoerfer 12 | # 13 | 14 | import os 15 | import cmd 16 | import readline 17 | import sys 18 | 19 | from sqlalchemy import * 20 | from sqlalchemy.exceptions import * 21 | from sqlalchemy.orm.exc import NoResultFound 22 | from sqlalchemy.orm import mapper, sessionmaker, relation 23 | 24 | # Database URI 25 | from config import db_uri 26 | sql_debugging_enabled = False 27 | 28 | class Console(cmd.Cmd): 29 | 30 | def __init__(self): 31 | cmd.Cmd.__init__(self) 32 | self.prompt = "=>> " 33 | self.intro = "Welcome to the ispmail console!" 
## defaults to None 34 | 35 | ## Command definitions ## 36 | def do_hist(self, args): 37 | """Print a list of commands that have been entered""" 38 | print self._hist 39 | 40 | def do_exit(self, args): 41 | """Exits from the console""" 42 | return -1 43 | 44 | ## Command definitions to support Cmd object functionality ## 45 | def do_EOF(self, args): 46 | """Exit on system end of file character""" 47 | return -1 48 | 49 | def do_help(self, args): 50 | """Get help on commands 51 | 'help' or '?' with no arguments prints a list of commands for which help is available 52 | 'help ' or '? ' gives help on 53 | """ 54 | ## The only reason to define this method is for the help text in the doc string 55 | cmd.Cmd.do_help(self, args) 56 | 57 | ## Override methods in Cmd object ## 58 | def preloop(self): 59 | """Initialization before prompting user for commands. 60 | Despite the claims in the Cmd documentaion, Cmd.preloop() is not a stub. 61 | """ 62 | cmd.Cmd.preloop(self) ## sets up command completion 63 | self._hist = [] ## No history yet 64 | self._domainid = None ## Current domain 65 | 66 | # Set up SQLAlchemy for database access 67 | engine = create_engine(db_uri) 68 | Session = sessionmaker(bind=engine, autocommit=True) 69 | self.ctx = Session() 70 | meta = MetaData() 71 | meta.bind = engine 72 | engine.echo = sql_debugging_enabled 73 | 74 | domains_table = Table('domains', meta, 75 | Column('id', Integer, primary_key=True), 76 | Column('name', Unicode) 77 | ) 78 | 79 | aliases_table = Table('aliases', meta, 80 | Column('id', Integer, primary_key=True), 81 | Column('domain_id', Integer, ForeignKey('domains.id')), 82 | Column('source', Unicode), 83 | Column('destination', Unicode) 84 | ) 85 | 86 | users_table = Table('users', meta, 87 | Column('id', Integer, primary_key=True), 88 | Column('domain_id', Integer, ForeignKey('domains.id')), 89 | Column('user', Unicode), 90 | Column('password', Unicode) 91 | ) 92 | 93 | # Map tables to classes 94 | mapper(Domain, 
domains_table, order_by=domains_table.c.name, 95 | always_refresh=True, 96 | properties = { 97 | 'aliases' : relation(Alias, cascade="all, delete-orphan"), 98 | 'users' : relation(User, cascade="all, delete-orphan"), 99 | } 100 | ) 101 | mapper(Alias, aliases_table, order_by=aliases_table.c.source, 102 | always_refresh=True, 103 | properties = { 104 | 'domain' : relation(Domain) 105 | }) 106 | mapper(User, users_table, order_by=users_table.c.user, 107 | always_refresh=True, 108 | properties = { 109 | 'domain' : relation(Domain) 110 | }) 111 | 112 | # Print database statistics and by the way check if the database 113 | # connection works. 114 | try: 115 | number_of_domains = self.ctx.query(Domain).count() 116 | number_of_users = self.ctx.query(User).count() 117 | number_of_aliases = self.ctx.query(Alias).count() 118 | 119 | print "Connected to database (%s domains, %s users, %s aliases)" % ( 120 | number_of_domains, number_of_users, number_of_aliases) 121 | except DBAPIError, e: 122 | print "Database connection failed:", e 123 | print "Please check the 'db_uri' string at the beginning of this program." 124 | sys.exit(10) 125 | 126 | def do_domains(self, args): 127 | """Print list of virtual email domains. If an argument is given then it 128 | only lists domains starting with this string. If a unique ID or 129 | domain name is given then this domain is selected as current. 130 | """ 131 | if args: 132 | try: # try if a numerical ID was given as argument 133 | domain = self.ctx.query(Domain).get(int(args)) # get the domain by it's ID 134 | except ValueError: 135 | # try to get the domain by name 136 | domain = self.ctx.query(Domain).filter_by(name=args).first() 137 | 138 | # Found a matching domain? 139 | if domain: 140 | self._domainid = domain.id 141 | self.prompt = "[%s] =>> " % (domain.name) 142 | return 143 | 144 | # No domain found. Show a list of domains 145 | 146 | # no direct hit. 
list domains that begin with the search string 147 | domains = self.ctx.query(Domain).filter(Domain.name.startswith(args[0])) 148 | 149 | if domains: 150 | for domain in domains: 151 | print " [%3d] %s" % (domain.id, domain.name) 152 | return 153 | 154 | else: 155 | # Show list of all domains 156 | domains = self.ctx.query(Domain).all() 157 | 158 | for domain in domains: 159 | print " [%3d] %s" % (domain.id, domain.name) 160 | 161 | # Shortcut 'd' for domains 162 | do_d = do_domains 163 | 164 | def do_newdomain(self, args): 165 | """Create a new virtual domain 166 | """ 167 | if not args: 168 | print "Please provide the name of the new domain as argument!" 169 | return 170 | 171 | # Check if domain exists already 172 | if self.ctx.query(Domain).filter_by(name=args).first(): 173 | print "exists already..." 174 | return 175 | 176 | # Create a new domain 177 | # TODO: syntax checks 178 | new_domain = Domain() 179 | new_domain.name = args 180 | self.ctx.add(new_domain) 181 | self.ctx.flush() 182 | 183 | # Shortcut 'nd' for new domain 184 | do_nd = do_newdomain 185 | 186 | def do_deldomain(self, args): 187 | """Delete a domain 188 | """ 189 | if not args: 190 | print "Please provide the name or the number of the domain as argument!" 191 | return 192 | 193 | # Try if a numerical ID was given as argument 194 | try: 195 | domain = self.ctx.query(Domain).get(int(args)) 196 | except ValueError: 197 | domain = self.ctx.query(Domain).filter_by(name=args).first() 198 | 199 | if not domain: 200 | print "Domain not found" 201 | return 202 | 203 | print "Deleting domain '%s'" % domain.name 204 | domain.delete() 205 | self.ctx.flush() 206 | 207 | # If the domain was just selected then unselect it 208 | self._domainid = None 209 | self.prompt = "=>> " 210 | 211 | # Shortcut 'dd' for delete domain 212 | do_dd = do_deldomain 213 | 214 | def do_aliases(self, args): 215 | """Print list of virtual aliases. 
If an argument is given then it 216 | only lists aliases for email addresses starting with this string. 217 | """ 218 | # Make sure a domain is selected 219 | if not self._domainid: 220 | print "Please select a domain first (enter its ID or domain name as a command)" 221 | return 222 | 223 | domain = self.ctx.query(Domain).get(self._domainid) 224 | print "Aliases in %s:" % domain.name 225 | for alias in domain.aliases: 226 | print " [%-2d] %-20s -> %s" % (alias.id, alias.source or '*', alias.destination) 227 | 228 | # Shortcut 'a' for aliases 229 | do_a = do_aliases 230 | 231 | def do_newalias(self, args): 232 | """Create a new alias in the current domain 233 | """ 234 | # Make sure a domain is selected 235 | if not self._domainid: 236 | print "Please select a domain first (enter its ID or domain name as a command)" 237 | return 238 | 239 | if not args: 240 | print "newalias: source destination (source can be '*' for catchall)" 241 | return 242 | 243 | try: 244 | source, destination = args.split() 245 | except ValueError: 246 | print "newalias: source destination" 247 | return 248 | 249 | if '@' not in destination: 250 | print "The destination should contain an '@'." 251 | return 252 | 253 | print "New alias: %s -> %s" % (source, destination) 254 | if source=='*': 255 | source='' 256 | 257 | # Check if alias exists already 258 | if self.ctx.query(Alias).filter_by(domain_id=self._domainid, source=source, destination=destination).first(): 259 | print "Alias exists already" 260 | return 261 | 262 | domain = self.ctx.query(Domain).get(self._domainid) 263 | 264 | new_alias = Alias() 265 | new_alias.source = source 266 | new_alias.destination = destination 267 | domain.aliases.append(new_alias) 268 | self.ctx.flush() 269 | 270 | # Shorcut 'na' for new alias 271 | do_na = do_newalias 272 | 273 | def do_delalias(self, args): 274 | """Delete an alias 275 | """ 276 | if not args: 277 | print "Please provide either the ID or 'source destination' or 'source' as argument!" 
278 | return 279 | 280 | # Attempt to get the alias by the numerical ID 281 | try: 282 | aliases = self.ctx.query(Alias).filter_by(domain_id=self._domainid, id=int(args)).all() 283 | except ValueError: 284 | # Syntax: source destination 285 | if ' ' in args: 286 | source, destination = args.split(' ') 287 | aliases = self.ctx.query(Alias).filter_by(domain_id=self._domainid, 288 | source=source, destination=destination).all() 289 | # Syntax: source 290 | else: 291 | aliases = self.ctx.query(Alias).filter_by(domain_id=self._domainid, source=args).all() 292 | 293 | if not aliases: 294 | print "No such alias found" 295 | return 296 | 297 | domain = self.ctx.query(Domain).get(self._domainid) 298 | for alias in aliases: 299 | print "Deleting alias %s -> %s" % (alias.source or '*', alias.destination) 300 | # Do not use "alias.delete()" because it will not update the 301 | # collections domain.aliases automatically in SQLAlchemy! 302 | domain.aliases.remove(alias) 303 | self.ctx.flush() 304 | 305 | # Shortcut 'da' for delete alias 306 | do_da = do_delalias 307 | 308 | def do_users(self, args): 309 | """Print list of virtual users. If an argument is given then it 310 | only lists users for email addresses starting with this string. 
311 | """ 312 | # Make sure a domain is selected 313 | if not self._domainid: 314 | print "Please select a domain first (enter its ID or domain name as a command)" 315 | return 316 | 317 | domain = self.ctx.query(Domain).get(self._domainid) 318 | print "Users in %s:" % domain.name 319 | for user in domain.users: 320 | print " [%-2d] %s" % (user.id, user.user) 321 | 322 | # Shortcut 'a' for users 323 | do_u = do_users 324 | 325 | def do_newuser(self, args): 326 | """Create a new user in the current domain 327 | """ 328 | # Make sure a domain is selected 329 | if not self._domainid: 330 | print "Please select a domain first (enter its ID or domain name as a command)" 331 | return 332 | 333 | if not args: 334 | print "newuser: userpart password" 335 | return 336 | 337 | try: 338 | username, password = args.split() 339 | except ValueError: 340 | print "newuser: userpart password" 341 | return 342 | 343 | print "New user: %s (password: %s)" % (username, password) 344 | 345 | # Check if user exists already 346 | try: 347 | user = self.ctx.query(User).filter_by(domain_id=self._domainid, user=username).one() 348 | except NoResultFound: 349 | pass 350 | else: 351 | print "User exists already, changing the password" 352 | user.password = func.md5(password) 353 | self.ctx.add(user) 354 | self.ctx.flush() 355 | return 356 | 357 | domain = self.ctx.query(Domain).get(self._domainid) 358 | 359 | new_user = User() 360 | new_user.user = username 361 | new_user.password = func.md5(password) 362 | domain.users.append(new_user) 363 | self.ctx.flush() 364 | 365 | # Shorcut 'nu' for new user 366 | do_nu = do_newuser 367 | 368 | def do_deluser(self, args): 369 | """Delete a user 370 | """ 371 | if not args: 372 | print "Please provide either the ID or the userpart as argument!" 
373 | return 374 | 375 | # Attempt to get the user by the numerical ID 376 | try: 377 | user = self.ctx.query(User).get(int(args)) 378 | except ValueError: 379 | user = self.ctx.query(User).filter_by(domain_id=self._domainid, user=args).first() 380 | 381 | if not user: 382 | print "No such user found" 383 | return 384 | 385 | domain = self.ctx.query(Domain).get(self._domainid) 386 | print "Deleting user %s" % user.user 387 | # Do not use "user.delete()" because it will not update the 388 | # collections domain.users automatically in SQLAlchemy! 389 | domain.users.remove(user) 390 | self.ctx.flush() 391 | 392 | # Shortcut 'du' for delete user 393 | do_du = do_deluser 394 | 395 | 396 | def postloop(self): 397 | """Take care of any unfinished business. 398 | Despite the claims in the Cmd documentaion, Cmd.postloop() is not a stub. 399 | """ 400 | cmd.Cmd.postloop(self) ## Clean up command completion 401 | print "Exiting..." 402 | 403 | def precmd(self, line): 404 | """ This method is called after the line has been input but before 405 | it has been interpreted. If you want to modifdy the input line 406 | before execution (for example, variable substitution) do it here. 407 | """ 408 | self._hist += [ line.strip() ] 409 | return line 410 | 411 | def postcmd(self, stop, line): 412 | """If you want to stop the console, return something that evaluates to true. 413 | If you want to do some post command processing, do it here. 414 | """ 415 | 416 | return stop 417 | 418 | def emptyline(self): 419 | """Do nothing on empty input line""" 420 | pass 421 | 422 | def default(self, line): 423 | """If the input is not a known command then it is assumed a certain 424 | domain is supposed to be selected. Domains can be given as their 425 | numerical ID or as the domain name itself. 
426 | """ 427 | pass 428 | 429 | # Define SQLAlchemy mapper classes 430 | class Domain(object): 431 | def __str__(self): 432 | return "Domain: %s (id=%s)" % (self.name, self.id) 433 | class Alias(object): 434 | def __str__(self): 435 | return "Alias: %s -> %s" % (self.source, self.destination) 436 | class User(object): 437 | def __str__(self): 438 | return "User: %s (%s)" % (self.user, self.password) 439 | 440 | if __name__ == '__main__': 441 | console = Console() 442 | console.cmdloop() 443 | 444 | -------------------------------------------------------------------------------- /mail.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """A very simple mail tool that I use on a Synology NAS to send email, 4 | where an usuable "mail" command line tool doesn't seem to be available 5 | via ipkg, in particular if I don't want to install a local SMTP server. 6 | """ 7 | 8 | import sys 9 | from optparse import OptionParser 10 | import smtplib 11 | from email.mime.text import MIMEText 12 | 13 | 14 | def main(): 15 | parser = OptionParser() 16 | parser.add_option("-s", dest="subject", help="Subject") 17 | parser.add_option("-f", dest="from_", help="From address") 18 | parser.add_option("--host", dest="host", help="SMTP Host") 19 | parser.add_option("--port", dest="port", help="SMTP Port", default=25) 20 | parser.add_option("-q", dest="quiet", help="Be quiet.", default=False) 21 | 22 | (options, recipients) = parser.parse_args() 23 | 24 | if not recipients: 25 | print "No recipients specified." 26 | return 1 27 | 28 | stdin = sys.stdin.read() 29 | if not stdin.strip(): 30 | if not options.quiet: 31 | print "No stdin text, not sending message." 
32 | return 1 33 | 34 | smtp = smtplib.SMTP() 35 | smtp.connect(options.host, options.port) 36 | for recipient in recipients: 37 | msg = MIMEText(stdin) 38 | msg['Subject'] = options.subject 39 | msg['From'] = options.from_ 40 | msg['To'] = recipient 41 | smtp.sendmail(options.from_, [recipient], msg.as_string()) 42 | smtp.quit() 43 | 44 | 45 | 46 | if __name__ == '__main__': 47 | sys.exit(main() or 0) -------------------------------------------------------------------------------- /make-pip-url.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # Print a pip url to stdout. 5 | # 6 | # $ ./script.sh miracle2k/webassets 7 | # pip -e git://github.com/webassets.git@be5add0032932e0af0e066d80a1ece30cc21dba9#egg=webassets 8 | # 9 | # Update the "requirements.pip" file with the new commit hash. 10 | # 11 | # $ ./script.sh miracle2k/webassets requirements.pip 12 | # 13 | # If a file named $DEFAULT_FILENAME exists, the second syntax will 14 | # automatically be chosen, unless "-" is passed for the filename. 15 | # 16 | 17 | DEFAULT_FILENAME='requirements.pip' 18 | 19 | usage() 20 | { 21 | cat << EOF 22 | usage: $0 username/repo [filename] 23 | 24 | Fetch the latest revision of the given repository, and output 25 | a pip url. If a filename is given, the url already in that file 26 | will be updated with the new revision, or be added. 27 | EOF 28 | } 29 | 30 | # Handle the arguments 31 | if [ ! $1 ] 32 | then 33 | usage 34 | exit 1 35 | fi 36 | 37 | QUERY=$1 38 | FILENAME=$2 39 | REPO=${QUERY#*/} 40 | USER=${QUERY%/*} 41 | 42 | # Fetch the current revision 43 | #### FIX http://stackoverflow.com/questions/9179828/github-api-retrieve-all-commits-for-all-branches-for-a-repo 44 | HASH=$(curl -s http://github.com/api/v2/yaml/repos/show/$QUERY/branches | grep " master" | awk '{ print $2 }') 45 | if [ ! 
$HASH ]; then 46 | echo "Unable to find commit hash $QUERY at branch master"; 47 | exit 1; 48 | fi 49 | 50 | # Build the pip url 51 | URL="-e git://github.com/$QUERY.git@$HASH#egg=$REPO" 52 | 53 | # http://stackoverflow.com/questions/407523/bash-escape-a-string-for-sed-search-pattern 54 | escape() { echo "$1" | sed -e 's/\(\.\|\/\|\*\|\[\|\]\|\\\)/\\&/g'; } 55 | 56 | # If a filename was given, or the default file 57 | # exists in the wd, update. 58 | if [ "$FILENAME" -o -f "$DEFAULT_FILENAME" ] && [ ! "$FILENAME" == "-" ] 59 | then 60 | # If we're here because of DEFAULT_FILENAME, use it subsequently 61 | [ ! $FILENAME ] && FILENAME=$DEFAULT_FILENAME 62 | 63 | # Need to quote the variable before passing to escape, and I 64 | # haven't found a way to do this inline. 65 | e_url=$(escape "$URL") 66 | e_query=$(escape "$QUERY") 67 | 68 | # The string we search for to determine which line to replace 69 | search="^.*github.com\/$e_query.*\$" 70 | 71 | if grep -q "$search" "$FILENAME"; then 72 | sed -i "s/$search/$e_url/" $FILENAME 73 | echo "$FILENAME (updated)" 74 | else 75 | echo "$URL" >> $FILENAME 76 | echo "$FILENAME (appended)" 77 | fi 78 | else 79 | # Otherwise, simply output the url 80 | echo "pip install $URL" 81 | fi 82 | 83 | -------------------------------------------------------------------------------- /mount-encrypted.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Mount and unmounts an encrypted volume (in a toggle-fashion). 3 | # This is necessary because at least in Karmic, the dialog provided 4 | # by gnome-mount/gnome-volume-manager does not allow the user to 5 | # specify a keyfile (see https://bugs.launchpad.net/gnome-mount/+bug/133520) 6 | # 7 | # Currently makes a number of assumptions: 8 | # * LUKS volume with keyfile 9 | # * Uses cryptmount; partition needs to be configured in cmtab. 
10 | # We chose cryptmount due to it's general awesomeness, and in 11 | # particular since we'd need to require the calle to be su 12 | # then; which means we (afaik) wouldn't be able to call this 13 | # script from a GNOME launcher, requiring another wrapper. 14 | # * The nofsck option is set for the volume in cmtab. This is 15 | # because fsck needs a terminal to run, see: 16 | # http://sourceforge.net/tracker/index.php?func=detail&aid=2937347&group_id=154099&atid=790423 17 | # 18 | 19 | usage() 20 | { 21 | cat << EOF 22 | usage: $0 CRYPT_MOUNT_NAME KEYFILE 23 | 24 | Will mount or unmount the volume CRYPT_MOUNT_NAME configured in 25 | cryptmount's cmtab using the contents of KEYFILE as a password. 26 | EOF 27 | } 28 | 29 | 30 | # Name of the volume as defined in cmtab 31 | cm_name=$1 32 | # Default location of the keyfile 33 | keyfile=$2 34 | 35 | 36 | if [ ! $1 ] || [ ! $2 ] 37 | then 38 | usage 39 | exit 1 40 | fi 41 | 42 | mapper=/dev/mapper/${cm_name} 43 | 44 | if mount | grep "^${mapper} on" > /dev/null 45 | then 46 | echo "Umounting..." 47 | # Empty echo to make zentiy progress bar pulsate; artificial delay, or it won't be much to quick. 48 | { echo ""; cryptmount -u $cm_name; sleep 2 ;} | zenity --progress --pulsate --auto-close --title "Please wait" --text "Umounting..." 49 | else 50 | echo "Mounting..." 51 | if [ ! -f $keyfile ] 52 | then 53 | keyfile=`zenity --file-selection --title="$keyfile not found; select one:"` 54 | if [ ! $? -eq 0 ]; then 55 | print "No keyfile, halting." 56 | exit 1; 57 | fi 58 | fi 59 | 60 | # The empty "echo" makes zenity "pulsate" work, since cryptmount doesn't write to stdout. 61 | # Also, the challenge here is to both get the error code, as well as capture stderr. This 62 | # is hard because we need to get the code of a subcommand of the pipe (PIPESTATUS), but 63 | # variable assignment is apparently a command of it's own and clears out PIPESTATUS. 64 | # For now, we use a temporary file. 
65 | # TODO: Maybe there is a better solution. Some ideas may be here: 66 | # http://ask.metafilter.com/76984/Pipe-command-output-but-keep-the-error-code 67 | errcapture="/tmp/cryptmount.stderr.${cm_name}" 68 | { echo ""; cryptmount -w 5 $cm_name 2>${errcapture} 5< $keyfile ;} | \ 69 | zenity --progress --pulsate --auto-close --title "Please wait" --text "Mouting ${cm_name}..." 70 | if [ ${PIPESTATUS[1]} -eq 0 ]; then 71 | nautilus `cat /proc/mounts | grep "^${mapper}" | awk '{print $2}'` 72 | else 73 | zenity --error --text="An error occured: `cat ${errcapture}`" 74 | fi 75 | fi 76 | -------------------------------------------------------------------------------- /mysqld-ram.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Sets up and runs a MySQL instance using a ramdisk. 3 | # 4 | # This can dramatically speed up things like running tests (The 5 | # Django test suite takes 455 secs with this, 7843 secs without 6 | # it on my machine - that's almost factor 20). 7 | # 8 | # Written and tested on Ubuntu Karmic. 9 | # 10 | # TODO: Possible things could be even faster; options to consider: 11 | # * DELAY_KEY_WRITE 12 | # * Disable logging 13 | # * Disable binary logging 14 | # * Possibly small memory caches in MySQL help performance here. 15 | 16 | set -o nounset 17 | set -o errexit 18 | 19 | # You probably want to bind to somewhere non-default so you 20 | # don't have to shutdown your standard MySQL instance. 21 | # TODO: Make those options. 
22 | BIND_SOCKET=/var/run/mysqld/mysqld-ram.sock 23 | BIND_HOST=127.0.0.1 24 | BIND_PORT=3307 25 | 26 | 27 | DATA_DIR=/var/lib/mysql-ram 28 | PID_FILE=/var/run/mysqld/mysqld-ram.pid 29 | USER=mysql 30 | GROUP=mysql 31 | MYSQL_APPARMOR_PROFILE=/etc/apparmor.d/usr.sbin.mysqld 32 | 33 | 34 | get_bind_args() { 35 | # pass "server" for use with mysqld 36 | host_option='--host' 37 | if [ ${1?"get_bind_args() needs one argument"} = "server" ]; then 38 | host_option="--bind"; 39 | fi 40 | 41 | args="" 42 | if [ -n "${BIND_SOCKET:+x}" ]; then args="$args --socket=${BIND_SOCKET}"; fi 43 | if [ -n "${BIND_HOST:+x}" ]; then args="$args $host_option=${BIND_HOST}"; fi 44 | if [ -n "${BIND_PORT:+x}" ]; then args="$args --port=${BIND_PORT}"; fi 45 | echo $args 46 | } 47 | 48 | 49 | # If not yet done, setup a ram disk in the data directory. 50 | if [ ! -d $DATA_DIR ]; then 51 | echo "Creating directory at $DATA_DIR" 52 | mkdir $DATA_DIR 53 | chown -R $USER:$GROUP $DATA_DIR 54 | fi 55 | 56 | # We're now going to do stuff we don't want to be persistent, 57 | # so make sure we are going to properly cleanup. 58 | cleanup() { 59 | # Run without errexit, we want to do as much cleanup 60 | # as possible. 61 | set +e 62 | 63 | # Unmount ramdisk 64 | if mountpoint -q $DATA_DIR; then 65 | echo "Unmounting ramdisk..." 66 | umount $DATA_DIR 67 | fi 68 | 69 | set -e 70 | exit 71 | } 72 | trap cleanup INT TERM EXIT 73 | 74 | # If the ram disk is not yet mounted, do so now. 75 | if ! mountpoint -q $DATA_DIR; then 76 | echo "Mounting ramdisk at $DATA_DIR" 77 | mount -t tmpfs none $DATA_DIR 78 | fi 79 | 80 | # If AppArmor protects MySQL, it'll have to stop doing that 81 | # for the time being. 82 | if [ -f $MYSQL_APPARMOR_PROFILE ]; then 83 | echo "Disabling AppArmor..." 
84 | apparmor_parser -R $MYSQL_APPARMOR_PROFILE 85 | fi 86 | 87 | # Setup the new mysql data directory 88 | mysql_install_db --user $USER --datadir=$DATA_DIR > /dev/null 89 | 90 | # Run mysqld; we need to workaround it not reacting to CTRL+C. 91 | # Let's setup traps to shut it down ourselves. 92 | trap '/usr/bin/mysqladmin $(get_bind_args client) refresh & wait' 1 # HUP 93 | trap '/usr/bin/mysqladmin $(get_bind_args client) shutdown & wait' 2 3 15 # INT QUIT and TERM 94 | # Run MySQL in the background. 95 | mysqld $(get_bind_args server) --datadir="$DATA_DIR" --pid-file="$PID_FILE" --console & 96 | 97 | # Enable apparmor again right away; it's enough that we 98 | # started up the mysqld without the profile. 99 | if [ -f $MYSQL_APPARMOR_PROFILE ]; then 100 | echo "Re-enabling AppArmor..." 101 | apparmor_parser -a $MYSQL_APPARMOR_PROFILE 102 | fi 103 | 104 | # Wait for the MySQL background process to end. 105 | wait 106 | 107 | # Call cleanup manually 108 | trap - INT TERM EXIT 109 | cleanup -------------------------------------------------------------------------------- /parse-android-earningsreport.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf8 3 | 4 | import csv, itertools, sys, os, locale, operator, collections 5 | from decimal import Decimal 6 | import texttable # pip install texttable 7 | 8 | 9 | class CommentedFile(object): 10 | """The new montyly payout reports available since 2012 now have comments 11 | in them. The older "sales reports" did not. 
12 | 13 | http://bugs.python.org/msg48505""" 14 | def __init__(self, f, commentstring="#"): 15 | self.f = f 16 | self.commentstring = commentstring 17 | def next(self): 18 | line = self.f.next() 19 | while line.startswith(self.commentstring): 20 | line = self.f.next() 21 | return line 22 | def __iter__(self): 23 | return self 24 | 25 | 26 | def read_csv(file): 27 | with open(file, 'r') as f: 28 | return list(csv.DictReader(CommentedFile(f), delimiter=',')) 29 | 30 | 31 | def group_by(records, key): 32 | """For a payout report, returns a structure like: 33 | 34 | {'DE': {'charged': 15, 35 | 'received': 10, 36 | 'num_sales': 3}, 37 | 'US': {...}, 38 | ... 39 | 40 | All values are in seller's currency (yours). 41 | 42 | In case this is an "estimtaed sales" report, it will not contain the 43 | "received" key, and the value in "charged" will be the buyer's currency. 44 | """ 45 | # itertools.groupby requires sorted input 46 | records.sort(key=key) 47 | 48 | result = collections.OrderedDict() 49 | for country, sales in itertools.groupby(records, key=key): 50 | sales = list(sales) 51 | 52 | """ 53 | TODO: There actually seems to be a bug in both payout and sales 54 | reports, where I have buyers from the US paying in KRW, and no FX Rate 55 | is given. In those cases, assuming 1 as FX rate is wrong. Example: 56 | 57 | {'Merchant Currency': 'KRW', 'Country of Buyer': 'US', ..., 'Merchant Receives': '0.00', 'Item Price': '1,165.00', 'Charged Amount': '1,165.00', 'Order Charged Date': '2012-04-18', 'Currency of Sale': 'KRW', 'City of Buyer': 'Honolulu', 'Estimated FX Rate': '', 'State of Buyer': 'HI', ... 
'Financial Status': 'Charged'} 58 | """ 59 | 60 | received = lambda s: Decimal(str(locale.atof(s['Amount (Merchant Currency)']))) 61 | 62 | result[country] = { 63 | 'num_sales': len(list(sales)), 64 | 'received': sum([received(s) for s in sales]) 65 | } 66 | 67 | # Without a single currency, sum makes no sense 68 | result['SUM'] = reduce(operator.add, map(collections.Counter, result.values())) 69 | 70 | return result 71 | 72 | 73 | if __name__ == '__main__': 74 | locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') 75 | 76 | # define different keys 77 | country = lambda x: x['Buyer Country'] 78 | eu_codes = ['AT', 'BE', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'EL', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE'] 79 | euvat = lambda x: 'EU' if x['Buyer Country'] in eu_codes else 'Non-EU' 80 | 81 | for filename in sorted(sys.argv[1:]): 82 | print os.path.basename(filename) 83 | records = read_csv(filename) 84 | 85 | table = texttable.Texttable() 86 | #table.set_deco(texttable.Texttable.HEADER) 87 | table.set_cols_dtype(['t', 'i', 't', 't']) 88 | table.set_cols_align(["l", 'r', "r", "r"]) 89 | table.header(['', 'Num', 'Received', '19%']) 90 | key_to_use = euvat 91 | 92 | 93 | for country, data in sorted(group_by(records, key_to_use).items(), key=lambda t: t[1]): 94 | table.add_row([ 95 | country, 96 | data['num_sales'], 97 | '%.2f €' % data['received'] if 'received' in data else '-', 98 | '%.2f €' % (data['received'] / Decimal('1.19') * Decimal('0.19')) if 'received' in data else '-', 99 | ]) 100 | 101 | # Indent table by 4 spaces 102 | print 4 * ' ' + table.draw().replace('\n', '\n' + (4 * ' ')) 103 | -------------------------------------------------------------------------------- /php-runserver.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | CONF="" 4 | if [ -x "mongoose.conf" ]; then 5 | CONF="mongoose.conf" 6 | fi 7 | 8 | mongoose -a /dev/stdout -e 
/dev/stderr -I `which php5-cgi` -d yes -r $(pwd)$* $CONF -------------------------------------------------------------------------------- /reconstruct-vdi-tree.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """This helps reconstructing a disk/snapshot hierarchy of a virtual machine, 3 | in case you lose it's config file, or your VirtualBox.xml file. 4 | 5 | Author: Michael Elsdoerfer . Licensed under BSD. 6 | 7 | How to use this: 8 | - Use --detail to inspect VDI headers. 9 | - If your VirtualBox.xml file is lost, use --xml to generate xml code for the 10 | media registry part. 11 | - If your VM specific xml is lost, it's thougher. The easiest way probably is: 12 | - Create a new machine with your base VDI. 13 | - Create snapshots matching the tree given by this script. 14 | - Edit the vm-specific XML file, and VirtualBox.xml, changing the UUIDs of 15 | the dummy snapshots with the UUIDs given to you by this script. 16 | """ 17 | 18 | 19 | import sys, os 20 | import logging 21 | from os import path 22 | import optparse 23 | from struct import unpack 24 | 25 | 26 | logging.basicConfig(format='%(levelname)s: %(message)s') 27 | log = logging.getLogger('') 28 | 29 | 30 | VDI_TYPES = { 31 | 1: 'Dynamic', 32 | 2: 'Fixed', 33 | 4: 'Differential', 34 | } 35 | 36 | 37 | class VDIHeader(object): 38 | version_minor = None 39 | version_major = None 40 | type = None 41 | flags = None 42 | size = None 43 | block_size = None 44 | num_blocks = None 45 | num_blocks_allocated = None 46 | uuid = None 47 | snapshot_uuid = None 48 | parent_uuid = None 49 | 50 | def __repr__(self): 51 | return "" % self.uuid 52 | 53 | @property 54 | def version_str(self): 55 | return "%s.%s" % (self.version_major, self.version_minor) 56 | 57 | 58 | class VDIError(Exception): 59 | pass 60 | 61 | 62 | 63 | def read_int(f): 64 | return unpack('" 269 | def p(node): 270 | open_tag = '' % (node.uuid, node.filename or '(not found)') 271 | 
return open_tag, "" 272 | walk_tree(tree, p, start=1) 273 | print "" 274 | 275 | 276 | def parse_args(argv): 277 | parser = optparse.OptionParser(usage='%prog [options] filenames...') 278 | parser.add_option('--detail', help='print detailed VDI header info', action='store_true') 279 | parser.add_option('--xml', help='print XML to use in VirtualBox.xml file', action='store_true') 280 | options, filenames = parser.parse_args(argv) 281 | if not filenames: 282 | parser.print_help() 283 | sys.exit(1) 284 | return options, filenames 285 | 286 | 287 | def main(argv): 288 | options, filenames = parse_args(argv) 289 | vdi_tree = construct_tree(read_vdis(filenames)) 290 | 291 | if options.xml: 292 | print_diskmgmt_xml(vdi_tree) 293 | else: 294 | print_info(vdi_tree, detailed=options.detail) 295 | 296 | 297 | if __name__ == '__main__': 298 | sys.exit(main(sys.argv[1:]) or 0) 299 | -------------------------------------------------------------------------------- /sendtokindle/sendtokindle.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Nautilus script to send files to your Kindle. Requires the 4 | # ``sendemail`` command. 5 | # 6 | # 2011 by Michael Elsdörfer. Licensed under MIT. 7 | # 8 | # For now, copy this to your ~/gnome2/nautilus-scripts folder. 9 | # 10 | # In the future, integration into nautilus-sendto would be cool. For a code 11 | # example, see: nautilus-sendto-plugin @ github.com/blackskad/empathy 12 | 13 | # Expects a ~/.sendtokindlerc file in ini format with the following values: 14 | # 15 | # SENDER = 16 | # KINDLE_USER = 17 | # SEND_FREE = 18 | # SMTP_HOST = 19 | # SMTP_USER = 20 | # SMTP_PASS = 21 | # SMTP_USE_TLS = 22 | 23 | # The sender email address must be in your Amazon whitelist. 
Configure it at: 24 | # https://www.amazon.com/gp/digital/fiona/manage 25 | 26 | # 27 | # TODO: Rewrite this in Python or C, with a UI, allowing to select options 28 | # like whether to convert, or whether to send via free or pay. Also, we can 29 | # send multiple documents in one email. 30 | # 31 | 32 | 33 | if [ -r ~/.sendtokindlerc ]; then 34 | . ~/.sendtokindlerc 35 | else 36 | notify-send -u normal -t 5 -i error "Configuration file does not exist!" 37 | exit 1 38 | fi 39 | 40 | if [ "$SEND_FREE" == "yes" ]; then 41 | kindle_domain='free.kindle.com' 42 | fi 43 | kindle_to="$KINDLE_USER@${kindle_domain:="kindle.com"}" 44 | 45 | 46 | for arg; do 47 | file="$arg" 48 | if [ -d "$file" ]; then 49 | notify-send -u normal -t 5 -i error "Not sending \"$file\", is a directory." 50 | continue 51 | fi 52 | 53 | stdout=$(sendemail -f "$SENDER" -t "$kindle_to" -m "convert" -u "convert" -s "$SMTP_HOST" -xu "$SMTP_USER" -xp "$SMTP_PASS" -o tls=${SMTP_USE_TLS:="no"} -a "$file" 2>&1) 54 | echo "bla" 55 | if [ $? -eq 0 ]; then 56 | shortname=$(basename "$file") 57 | notify-send -u normal -t 1 -i info "Send to your Kindle: \"$shortname\"" 58 | else 59 | error=$(echo "$stdout" | sed "s/^.*=> //") 60 | notify-send -u normal -t 5 -i error "Could not send: $error" 61 | exit 1 62 | fi 63 | done 64 | -------------------------------------------------------------------------------- /sendtokindle/sendtokindlerc: -------------------------------------------------------------------------------- 1 | SENDER="" 2 | KINDLE_USER="" 3 | SEND_FREE="yes" 4 | SMTP_HOST="" 5 | SMTP_USER="" 6 | SMTP_PASS="" 7 | #SMTP_USE_TLS="yes" -------------------------------------------------------------------------------- /todoist-import.py: -------------------------------------------------------------------------------- 1 | """Convert fromt the GetFlow export json to Todoist. 
2 | """ 3 | 4 | import sys 5 | import json 6 | import todoist 7 | import html2text 8 | import time 9 | 10 | 11 | def main(prog, argv): 12 | if len(argv) != 2: 13 | print >> sys.stderr, 'Usage: {0} GETFLOW_JSON API_TOKEN'.format(prog) 14 | return 15 | datafile = argv[0] 16 | token = argv[1] 17 | 18 | with open(datafile, 'r') as f: 19 | data = json.loads(f.read()) 20 | 21 | 22 | api = todoist.TodoistAPI(token) 23 | 24 | for listname, tasks in data.iteritems(): 25 | print 'Adding project %s with %s items' % (listname, len(tasks)) 26 | 27 | project = api.projects.add(listname) 28 | 29 | count_added = 0 30 | for task in tasks: 31 | if task['completed']: 32 | # Skip all completed tasks 33 | continue 34 | 35 | # # 36 | # if not task.get('due-on'): 37 | # continue 38 | 39 | # The item itself 40 | if task.get('due-on'): 41 | task['due-on'] = task['due-on'].replace('T00:00:00', '') 42 | item = api.items.add(task['title'], project['id'], date_string=task.get('due-on')) 43 | 44 | # Subtasks 45 | for subtask in task.get('subtasks', []): 46 | subtitem = api.items.add(subtask, project['id'], indent=2) 47 | 48 | # Add one comment indicating the time - only to those that have notes 49 | has_notes = any([a for a in task.get('activities', []) if a.get('is_comment', False)]) 50 | if has_notes and task['created-at']: 51 | note = api.notes.add(item['id'], 'Added in Flow at %s' % task['created-at']) 52 | 53 | # Comments 54 | for activity in task.get('activities', []): 55 | if not activity.get('is_comment', False): 56 | continue 57 | note = api.notes.add(item['id'], html2text.html2text(activity['detail'])) 58 | notes_added = True 59 | 60 | count_added += 1 61 | 62 | # Todoist only allows 100 queued commands, so commit each 63 | # todo individually to be sure. 
64 | result = api.commit() 65 | print result 66 | if 'error_code' in result: 67 | raise ValueError('error') 68 | 69 | # Too many requests reached, it says 70 | import time 71 | time.sleep(0.1) 72 | 73 | 74 | print " Commiting project add with %d tasks" % count_added 75 | result = api.commit() 76 | print result 77 | if result and 'error_code' in result: 78 | raise ValueError('error') 79 | 80 | # Too many requests reached, it says 81 | import time 82 | time.sleep(1) 83 | 84 | 85 | 86 | if __name__ == '__main__': 87 | main(sys.argv[0], sys.argv[1:]) -------------------------------------------------------------------------------- /trac-ticket-merge.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: %s source.db target.db new_component 3 | 4 | Copies all tickets, ticket changes and ticket attachments from the source database 5 | to the target database, and updates the 'component' field on each ticket. 6 | 7 | * 'component' is replaced and 'milestone' removed from tickets. Customize the 8 | script if you don't / need this behaviour. 9 | * Make backups yourself! 10 | * No attempt is made to preserve ticket IDs from the source. 11 | * Ignores the ticket_custom table - not sure what it's for, it always 12 | seems to be empty. 13 | * Tries to be as generic as possible, but makes some assumptions about the 14 | database schema when accessing columns via indices. 15 | * Expects both databases to be based on the same trac-version, i.e. 16 | have the same schema. 
17 | """ 18 | 19 | import sys, os 20 | from pysqlite2 import dbapi2 as sqlite 21 | 22 | def main(argv): 23 | try: 24 | source_file, dest_file, new_component = argv 25 | except ValueError: 26 | print "Usage: %s source.db target.db new_component" % os.path.basename(sys.argv[0]) 27 | return 1 28 | 29 | # connect to databases 30 | source_conn = sqlite.connect(source_file) 31 | source_cur = source_conn.cursor() 32 | dest_conn = sqlite.connect(dest_file) 33 | dest_cur = dest_conn.cursor() 34 | 35 | qmarks = lambda seq: ','.join(['?' for r in seq]) 36 | try: 37 | # go through tickets in source 38 | tickets = source_cur.execute('SELECT * FROM ticket;') 39 | for ticket in tickets: 40 | # delete the id column - will get a new id 41 | old_id = ticket[0] 42 | ticket = list(ticket[1:]) 43 | # reset values of component and milestone rows 44 | ticket[4-1] = new_component # component 45 | ticket[11-1] = None # milestone 46 | # insert ticket into target db 47 | print "copying ticket #%s" % old_id 48 | dest_cur.execute('INSERT INTO ticket '+ 49 | '('+(','.join([f[0] for f in source_cur.description[1:]]))+') '+ 50 | 'VALUES('+qmarks(ticket)+')', 51 | ticket) 52 | new_id = dest_cur.lastrowid 53 | 54 | # parameters: table name, where clause, query params, id column index, table repr, row repr index 55 | def copy_table(table, whereq, params, id_idx, trepr, rrepr_idx): 56 | cur = source_conn.cursor() 57 | try: 58 | cur.execute('SELECT * FROM %s WHERE %s'%(table, whereq), params) 59 | for row in cur: 60 | row = list(row) 61 | row [id_idx] = new_id 62 | print "\tcopying %s #%s"%(trepr, row [rrepr_idx]) 63 | dest_cur.execute('INSERT INTO %s VALUES(%s)'%(table,qmarks(row)), row) 64 | finally: 65 | cur.close() 66 | 67 | # copy ticket changes 68 | copy_table('ticket_change', 69 | 'ticket=?', (old_id,), 70 | 0, 'ticket change', 1) 71 | # copy attachments 72 | copy_table('attachment', 73 | 'type="ticket" AND id=?', (old_id,), 74 | 1, 'attachment', 2) 75 | 76 | # commit changes 77 | 
dest_conn.commit() 78 | finally: 79 | dest_conn.close() 80 | 81 | if __name__ == '__main__': 82 | sys.exit(main(sys.argv[1:])) -------------------------------------------------------------------------------- /trigger-mount.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Use udev to run this script when you insert your usb key. The usb 4 | # key is expected to contain the key needed to unlock an encrypted 5 | # partition on your system, which this script will automatically do. 6 | # 7 | # In addition to the environment provided by udev, this script 8 | # expects the cryptmount name of the encrypted partition that should 9 | # be unlocked, as configured in cmtab, as an argument. 10 | # 11 | # We are opting to use cryptmount rather than cryptsetup to mount 12 | # the disk. While this introduces a dependency on a configured 13 | # cmtab, we do on occasion need to do manual mounts as well, for 14 | # which we prefer cryptmount (unlocking + mounting just takes to 15 | # much time for data accessed reasonably often), and when combining 16 | # cryptmount + automatic cryptsetup mounts we are risking to run 17 | # into conflicts, multiple mappers or mounts etc. 18 | # 19 | # Thus, you need to define the partition in cmtab for this to work, 20 | # and you need to set the "nofsck" flag (see 21 | # http://sourceforge.net/tracker/index.php?func=detail&aid=2937347&group_id=154099&atid=790423) 22 | # 23 | 24 | # Run all in background, so to not hold up udev. 25 | { 26 | 27 | to_unlock=$1 28 | if [ ! $to_unlock ]; then 29 | echo "Needs name of encrypted partition" >&2 30 | exit 1 31 | fi 32 | 33 | # In addition, we use some variables from the udev environment 34 | if [ ! $ACTION ] || [ ! 
$DEVNAME ]; then
35 |     echo "udev environment is incomplete" >&2
36 |     exit 1
37 | fi
38 | 
39 | lockfile=/var/lock/automount-${to_unlock}.lock
40 | 
41 | # Ensure we don't start multiple mount/unmount attempts at
42 | # the same time - a user might remove his stick pretty quickly.
43 | lockfile-create -r 0 $lockfile
44 | if [ ! $? -eq 0 ]; then
45 |     echo "Got action $ACTION, but still busy (lockfile exists)" >&2
46 |     exit 2
47 | else
48 |     trap "lockfile-remove $lockfile; exit" INT TERM EXIT
49 | fi
50 | 
51 | 
52 | # Needed to make zenity work in udev context
53 | export DISPLAY=:0.0
54 | 
55 | 
56 | if [ "$ACTION" = "add" ]; then
57 |     # Get the key
58 |     key=$(dd ibs=1c obs=1c skip=42 count=256 if=$DEVNAME)
59 |     if [ ! $? -eq 0 ]; then exit; fi
60 |     # Decrypt and mount.
61 |     # NOTE(review): $key is deliberately left unquoted to preserve the historic
62 |     # behaviour (word splitting of the key material); quoting it would change
63 |     # the key actually piped to cryptmount on existing setups - confirm first.
64 |     echo $key | cryptmount -w 0 $to_unlock
65 |     rc=$?  # capture before [ ... ] resets $?; "exit $?" in the else branch always exited 1
66 |     if [ $rc -eq 0 ]; then
67 |         zenity --notification --window-icon="info" --text="Mounted!" --timeout 5
68 |     else
69 |         exit $rc
70 |     fi
71 | 
72 | elif [ "$ACTION" = "remove" ]; then
73 |     # Unmount the volume
74 |     cryptmount -u $to_unlock
75 |     rc=$?  # capture before [ ... ] resets $?
76 |     if [ $rc -eq 0 ]; then
77 |         zenity --notification --window-icon="info" --text="Unmounted!" --timeout 5
78 |     else
79 |         exit $rc
80 |     fi
81 | fi
82 | 
83 | lockfile-remove $lockfile
84 | trap - INT TERM EXIT
85 | 
86 | 
87 | } &
--------------------------------------------------------------------------------