├── requirements-bgw210.txt ├── whendoiwork.png ├── .gitignore ├── avery5160_code128_numeric-example.pdf ├── gitlab_repo_import.py ├── gitlab_ssh_key_sync.py ├── README.VCS ├── print-cmd.sh ├── ismerged ├── Dockerfile.bgw210 ├── dumpMysqlGrants.sh ├── nethogs2statsd.service ├── rsyslogIsHung.php ├── lastpass_update_password.rb ├── print-cmd-wrapper.c ├── show_cf_template_params.py ├── sync_git_clones.conf ├── puppetconf_to_youtube.py ├── ruby_simplecov_diff.rb ├── ec2-list-all-tags.py ├── check_url_list.py ├── list_all_aws_resources_skew.py ├── cookies_from_pdml.py ├── linode_list_records.py ├── rss_to_mail_config.py ├── rsync-wrapper.c ├── skeleton.py ├── har_urls.py ├── firefox_recovery_to_html.py ├── add_team_to_github_org_repos.py ├── dump_firefox_session.py ├── make_puppet_param_markdown.py ├── rebuild_srpm.sh ├── timeout ├── reconcile_git_repos.html.tmpl ├── .github └── workflows │ └── bgw210-docker.yml ├── cmd-wrapper.c ├── kickRsyslog.php ├── linode_ddns_update.sh ├── watch_all_my_github_repos.py ├── pushover ├── github_find_member_with_key.py ├── ubiquiti-mac-acl ├── wireless.sql ├── README.txt └── updateAPconfigs.php.inc ├── list_github_org_repos.py ├── show_dhcp_fixed_ACKs.pl ├── libvirt_csv.py ├── wiki-to-deckjs.py ├── increment_zone_serial ├── bigipcookie.pl ├── asg_instances.py ├── git_repo_diff.py ├── sync_git_clones.sh ├── linodeDnsToCsv.php ├── jenkins_plugins_to_puppet.py ├── simpleLCDproc.py ├── aws_region_stats.py ├── find_dupes.py ├── jenkins_list_plugins.py ├── route53_ddns_update.sh ├── syslogDatesToArray.php ├── find_outdated_puppets.py ├── pacman_compare.py ├── trello_board_to_text.py ├── avery5160_code128_numeric.py ├── wordpress_daily_post.php ├── dot_find_cycles.py ├── twitter_find_followed_not_in_list.py ├── trello_copy_checklist.py ├── dump_sphinx_objects_inventory.py ├── watch_cloudformation.py ├── disqus_backup.py ├── gist.py ├── jenkins_node_labels.py ├── toxit.py ├── github_irc_hooks.py ├── github_clone_setup.py └── dynamodb_to_csv.py /requirements-bgw210.txt: -------------------------------------------------------------------------------- 1 | requests>=2.25.0 2 | lxml>=4.6.0 3 | prometheus-client>=0.8.0 -------------------------------------------------------------------------------- /whendoiwork.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jantman/misc-scripts/HEAD/whendoiwork.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *~ 3 | .idea/* 4 | .glpi_token.json 5 | glpi_docker_update_report.html 6 | .glpi_image_cache.pkl 7 | -------------------------------------------------------------------------------- /avery5160_code128_numeric-example.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jantman/misc-scripts/HEAD/avery5160_code128_numeric-example.pdf -------------------------------------------------------------------------------- /gitlab_repo_import.py: -------------------------------------------------------------------------------- 1 | # This script has been moved to: 2 | # https://github.com/jantman/gitlab-scripts/blob/master/gitlab_repo_import.py 3 | -------------------------------------------------------------------------------- /gitlab_ssh_key_sync.py: -------------------------------------------------------------------------------- 1 | # This script has been moved to: 
2 | # https://github.com/jantman/gitlab-scripts/blob/master/gitlab_ssh_key_sync.py 3 | -------------------------------------------------------------------------------- /README.VCS: -------------------------------------------------------------------------------- 1 | As of February 16, 2013 this repository has been migrated from Subversion 2 | hosted at svn.jasonantman.com to Git hosted at github.com. I've made my best 3 | effort to setup rewrites for web links, but you may need to spend some time 4 | finding the exact page you're looking for. 5 | 6 | The SVN repository should no longer be used, has been set read-only, and will 7 | be taken offline in a few months. 8 | -------------------------------------------------------------------------------- /print-cmd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Simple script to log environment variables and original command for forced ssh commands 4 | # 5 | # Copyright 2014 Jason Antman 6 | # Free for any use provided that patches are submitted back to me. 7 | # 8 | # The latest version of this script can be found at: 9 | # 10 | # 11 | 12 | echo "============`date`================\n" >> print-cmd.log 13 | env >> print-cmd.log 14 | -------------------------------------------------------------------------------- /ismerged: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BRANCH=$1 4 | 5 | current_branch=$(git symbolic-ref -q HEAD) 6 | current_branch=${current_branch##refs/heads/} 7 | current_branch=${current_branch:-HEAD} 8 | 9 | git fetch &>/dev/null 10 | git checkout $BRANCH &>/dev/null 11 | git pull &>/dev/null 12 | BRANCH_COMMIT=$(git rev-parse $BRANCH) 13 | git checkout master &>/dev/null 14 | if `git log | grep $BRANCH_COMMIT &>/dev/null` 15 | then 16 | echo "$BRANCH is merged into master ($BRANCH_COMMIT)" 17 | echo "Branches containing HEAD of AUTO-274:" 18 | git branch -a --contains $BRANCH_COMMIT 19 | else 20 | echo "$BRANCH NOT in master, merge-base is $(git merge-base master $BRANCH)" 21 | fi 22 | -------------------------------------------------------------------------------- /Dockerfile.bgw210: -------------------------------------------------------------------------------- 1 | FROM python:3.13-slim 2 | 3 | # Set working directory 4 | WORKDIR /app 5 | 6 | # Install system dependencies for lxml 7 | RUN apt-get update && apt-get install -y \ 8 | libxml2-dev \ 9 | libxslt-dev \ 10 | gcc \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | # Copy requirements and install Python dependencies 14 | COPY requirements-bgw210.txt . 15 | RUN pip install --no-cache-dir -r requirements-bgw210.txt 16 | 17 | # Copy the collector script 18 | COPY bgw210-700_prom_collector.py . 19 | 20 | # Create non-root user for security 21 | RUN adduser --disabled-password --gecos '' --uid 1000 collector 22 | USER collector 23 | 24 | # Expose the default port 25 | EXPOSE 8000 26 | 27 | # Default command 28 | ENTRYPOINT ["python3", "bgw210-700_prom_collector.py"] 29 | CMD ["--host", "0.0.0.0", "--port", "8000"] -------------------------------------------------------------------------------- /dumpMysqlGrants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # I got this from Richard Bronosky's () 4 | # answer to this thread: 5 | # 6 | # Many thanks to him. 
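#
# For reference, a sketch of the output format (user and grants below are
# hypothetical): each user's grants are printed as a commented header
# followed by re-runnable SQL statements, e.g.:
#
#   ## Grants for 'appuser'@'localhost' ##
#   GRANT USAGE ON *.* TO 'appuser'@'localhost';
#   GRANT ALL PRIVILEGES ON `appdb`.* TO 'appuser'@'localhost';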
7 | # 8 | # The most up-to-date version of this script can be found at: 9 | # 10 | # 11 | 12 | mygrants() 13 | { 14 | read -p "PASSWORD: " PASSWD 15 | mysql -p$PASSWD -B -N $@ -e "SELECT DISTINCT CONCAT( 16 | 'SHOW GRANTS FOR ''', user, '''@''', host, ''';' 17 | ) AS query FROM mysql.user WHERE user NOT IN ('root','phpmyadmin','debian-sys-maint')" | \ 18 | mysql -p$PASSWD $@ | \ 19 | sed 's/\(GRANT .*\)/\1;/;s/^\(Grants for .*\)/## \1 ##/;/##/{x;p;x;}' 20 | } 21 | 22 | mygrants 23 | -------------------------------------------------------------------------------- /nethogs2statsd.service: -------------------------------------------------------------------------------- 1 | # Example systemd unit file for nethogs2statsd.py 2 | # From: 3 | # Last updated: 2017-08-23 4 | # 5 | # IMPORTANT NOTE: libnethogs detects interfaces to monitor when it starts; 6 | # if you have interfaces that come up after that (i.e. wireless, VPN, etc.) 7 | # you should restart this service after the interface comes up. 8 | 9 | [Unit] 10 | Description=nethogs2statsd 11 | 12 | [Service] 13 | Restart=always 14 | StartLimitInterval=20 15 | StartLimitBurst=5 16 | TimeoutStartSec=0 17 | ExecStart=/home/jantman/GIT/misc-scripts/nethogs2statsd.py \ 18 | -L /home/jantman/GIT/nethogs/src/libnethogs.so.0.8.5-37-g7093964 \ 19 | -d enp4s0 -f '(not src net 192.168.0 and not src net 172.17.0) or (not dst net 192.168.0 and not dst net 172.17.0)' 20 | 21 | [Install] 22 | WantedBy=multi-user.target 23 | -------------------------------------------------------------------------------- /rsyslogIsHung.php: -------------------------------------------------------------------------------- 1 | #!/usr/bin/php 2 | , 7 | * on behalf of the taxpayers of the State of New Jersey and/or the students of Rutgers University, 8 | * The State University of New Jersey. 9 | * 10 | * 11 | * 12 | */ 13 | 14 | $mailTo = array('jantman@oit.rutgers.edu'); 15 | 16 | require_once('collectRsyslogInfo.php'); 17 | 18 | $out = collectRsyslogInfo(true, true); 19 | 20 | $headers = "Content-type: text/html\r\n"; 21 | 22 | $host = trim(shell_exec("hostname")); 23 | 24 | foreach($mailTo as $addr) 25 | { 26 | mail($addr, "collectRsyslogInfo.php output on $host at ".date("Y-m-d H:i:s"), $out, $headers); 27 | } 28 | 29 | echo $out; 30 | 31 | 32 | ?> 33 | -------------------------------------------------------------------------------- /lastpass_update_password.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | STDOUT.sync = true 4 | 5 | require 'lastpass-api' 6 | require 'io/console' 7 | @lastpass = Lastpass::Client.new 8 | Lastpass.verbose = true 9 | 10 | raise("LastPass is NOT logged in! Please run 'lpass login' first!") unless @lastpass.logged_in? 11 | 12 | print "Enter your OLD password: " 13 | old_password = gets.chomp 14 | print "Enter your NEW password: " 15 | new_password = gets.chomp 16 | 17 | puts "OLD Password: '#{old_password}' NEW password: '#{new_password}'" 18 | puts 'Ctrl+C to exit, or Enter to continue...' 
19 | gets 20 | 21 | @lastpass.accounts.find_all(with_passwords: true).each do |acct| 22 | next unless acct.password == old_password 23 | puts acct.to_h 24 | cmd = "printf 'Password: #{new_password}' | lpass edit --non-interactive --sync=no #{acct.id}" 25 | puts cmd 26 | Lastpass::Utils.cmd cmd 27 | end 28 | sleep 1 # Allow file IO before attempting sync 29 | Lastpass::Utils.cmd 'lpass sync' 30 | sleep 1 # Allow sync to finish 31 | -------------------------------------------------------------------------------- /print-cmd-wrapper.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | /******************************************** 9 | * Wrapper - Secure Yourself * 10 | * * 11 | * 2007 - Mike Golvach - eggi@comcast.net * 12 | * * 13 | * Usage: cmd-wrapper [pre|post] * 14 | * * 15 | ********************************************/ 16 | 17 | /* Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States License */ 18 | 19 | /* Define global variables */ 20 | 21 | int gid; 22 | 23 | /* main(int argc, char **argv) - main process loop */ 24 | 25 | int main(int argc, char **argv, char **envp) 26 | { 27 | char *origcmd; 28 | 29 | origcmd = getenv("SSH_ORIGINAL_COMMAND"); 30 | 31 | printf ("Original Command:%s\n", origcmd); 32 | 33 | exit(0); 34 | } 35 | -------------------------------------------------------------------------------- /show_cf_template_params.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to show CloudFormation template parameters and their values. 4 | 5 | Copyright 2014 Jason Antman 6 | Free for any use provided that patches are submitted back to me. 
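As a sketch of the behavior (template contents hypothetical): for a template
whose Parameters block is
{"InstanceType": {"Type": "String", "Default": "t2.micro"},
 "KeyName": {"Type": "String"}}
this prints:

    InstanceType: t2.micro
    KeyName (no default)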
7 | 8 | The latest version of this script can be found at: 9 | https://github.com/jantman/misc-scripts/blob/master/show_cf_template_params.py 10 | 11 | CHANGELOG: 12 | 13 | 2014-12-05 jantman: 14 | - initial script 15 | """ 16 | 17 | import json 18 | import sys 19 | import os 20 | 21 | try: 22 | fname = sys.argv[1] 23 | except IndexError: 24 | raise SystemExit("USAGE: show_cf_template_params.py /path/to/cloudformation.template") 25 | 26 | if not os.path.exists(fname): 27 | raise SystemExit("ERROR: path does not exist: %s" % fname) 28 | 29 | with open(fname, 'r') as fh: 30 | content = fh.read() 31 | 32 | tmpl = json.loads(content) 33 | 34 | params = tmpl['Parameters'] 35 | 36 | for p in sorted(params): 37 | if 'Default' in params[p]: 38 | print("{k}: {v}".format(k=p, v=params[p]['Default'])) 39 | else: 40 | print("{k} (no default)".format(k=p)) 41 | 42 | -------------------------------------------------------------------------------- /sync_git_clones.conf: -------------------------------------------------------------------------------- 1 | # Configuration file for sync_git_clones.sh 2 | # 3 | # sync_git_clones.sh can be found at: 4 | # https://github.com/jantman/misc-scripts/blob/master/sync_git_clones.sh 5 | # 6 | # 7 | # The current example of this config file can be found at: 8 | # https://github.com/jantman/misc-scripts/blob/master/sync_git_clones.conf 9 | # 10 | 11 | # python bin to use for github_clone_setup.py - virtualenv is recommended 12 | PYTHON_BIN=/home/jantman/venvs/foo/bin/python 13 | 14 | # path to github_clone_setup.py 15 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 16 | GITHUB_CLONE_SETUP="${DIR}/github_clone_setup.py" 17 | 18 | # set to 1 to enable setup of github repo upstream and pull branches 19 | DO_GITHUB_SETUP=1 20 | 21 | # set to 1 to require ssh-agent to be running ($SSH_AGENT_PID to be set) 22 | REQUIRE_SSH_AGENT=1 23 | 24 | # set to a command that must exit 0 or else we fail (checking VPN access, etc.) 
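# e.g., requiring that an internal git host answers a ping first (host name
# is hypothetical): REQUIRE_COMMAND="ping -c 1 -W 2 git.internal.example.com"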
25 | REQUIRE_COMMAND="echo" 26 | 27 | # list of directories to (non-recursively) search for git clones 28 | GIT_DIRS="/home/${USER}/GIT /home/${USER}/CMG/git /home/${USER}/CMG/git/ops" 29 | 30 | # set to 1 to pull master 31 | PULL_MASTER=1 32 | 33 | # for any repos with an upstream, fetch upstream, pull master, 34 | # merge upstream/master to origin/master, push to origin 35 | SYNC_PUSH_MASTER=1 -------------------------------------------------------------------------------- /puppetconf_to_youtube.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to generate a YouTube playlist from a puppet videos page 4 | """ 5 | 6 | import sys 7 | import requests 8 | 9 | try: 10 | from lxml import etree, html 11 | except ImportError: 12 | try: 13 | # normal cElementTree install 14 | import cElementTree as etree 15 | except ImportError: 16 | try: 17 | # normal ElementTree install 18 | import elementtree.ElementTree as etree 19 | except ImportError: 20 | raise SystemExit("Failed to import ElementTree from any known place") 21 | 22 | 23 | VIDEO_PAGE = 'https://puppetlabs.com/puppetconf-2015-videos-and-presentations' 24 | 25 | def main(): 26 | r = requests.get(VIDEO_PAGE) 27 | tree = html.fromstring(r.text) 28 | links = [] 29 | for item in tree.iterlinks(): 30 | element, attrib, link, pos = item 31 | if not link.startswith('https://puppetlabs.com/presentations/'): 32 | continue 33 | links.append(link) 34 | print("# Found %d links" % len(links)) 35 | for link in links: 36 | do_link(link) 37 | 38 | def do_link(link): 39 | r = requests.get(link) 40 | tree = html.fromstring(r.text) 41 | for item in tree.xpath('//iframe'): 42 | if 'src' in item.attrib and 'youtube.com' in item.attrib['src']: 43 | print(item.attrib['src']) 44 | 45 | if __name__ == "__main__": 46 | main() 47 | -------------------------------------------------------------------------------- /ruby_simplecov_diff.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # 3 | # Given two Ruby simplecov output directories, show the differences 4 | # 5 | 6 | require 'json' 7 | 8 | if ARGV.length != 2 9 | STDERR.puts "USAGE: ruby ruby_simplecov_diff.rb /path/to/old/dir /path/to/new/dir" 10 | exit 1 11 | end 12 | 13 | old_path = ARGV[0] 14 | new_path = ARGV[1] 15 | 16 | old_json = JSON.parse(File.read(File.join(old_path, '.resultset.json'))) 17 | new_json = JSON.parse(File.read(File.join(new_path, '.resultset.json'))) 18 | old_cov = old_json['RSpec']['coverage'] 19 | new_cov = new_json['RSpec']['coverage'] 20 | 21 | def get_line_percent(linearr) 22 | not_covered = 0 23 | linearr.each do |val| 24 | next if val.nil? 25 | not_covered += 1 if val < 1 26 | end 27 | return (((linearr.length - not_covered) * 1.0) / (linearr.length * 1.0)) * 100.0 28 | end 29 | 30 | def show_line_differences(old_linearr, new_linearr) 31 | old_pct = get_line_percent(old_linearr) 32 | new_pct = get_line_percent(new_linearr) 33 | if old_pct == new_pct 34 | return "coverage stayed the same" 35 | elsif old_pct < new_pct 36 | c = new_pct - old_pct 37 | return "coverage increased by #{c}%" 38 | else 39 | c = old_pct - new_pct 40 | return "coverage DECREASED by #{c}%" 41 | end 42 | end 43 | 44 | old_cov.each do |fpath, linearr| 45 | if ! 
new_cov.include?(fpath) 46 | puts "#{fpath} - missing in new" 47 | next 48 | end 49 | r = show_line_differences(linearr, new_cov[fpath]) 50 | puts "#{fpath} - #{r}" 51 | end 52 | -------------------------------------------------------------------------------- /ec2-list-all-tags.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Using boto3, list all distinct tag names on all EC2 instances in all regions. 4 | 5 | If you have ideas for improvements, or want the latest version, it's at: 6 | 7 | 8 | Copyright 2016 Jason Antman 9 | Free for any use provided that patches are submitted back to me. 10 | 11 | CHANGELOG: 12 | 2016-07-21 Jason Antman : 13 | - initial version of script 14 | """ 15 | 16 | from boto3 import resource, client 17 | 18 | 19 | def get_region_names(): 20 | conn = client('ec2') 21 | res = conn.describe_regions() 22 | regions = [] 23 | for r in res['Regions']: 24 | regions.append(r['RegionName']) 25 | return regions 26 | 27 | 28 | def tags_for_region(region_name): 29 | tags = set() 30 | res = resource('ec2', region_name=region_name) 31 | count = 0 32 | for i in res.instances.all(): 33 | count += 1 34 | if i.tags is None: 35 | continue 36 | for t in i.tags: 37 | tags.add(t['Key']) 38 | print('Examined %d instances in %s'% (count, region_name)) 39 | return tags 40 | 41 | 42 | def main(): 43 | tags = set() 44 | regions = get_region_names() 45 | for r in regions: 46 | tags.update(tags_for_region(r)) 47 | print('Found %d distinct tag names:' % len(tags)) 48 | for t in sorted(tags): 49 | print(t) 50 | 51 | if __name__ == "__main__": 52 | main() 53 | -------------------------------------------------------------------------------- /check_url_list.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to check a list of URLs (passed on stdin) for response code, and for response code of the final path in a series of redirects. 4 | Outputs (to stdout) a list of count of a given URL, response code, and if redirected, the final URL and its response code 5 | 6 | Optionally, with verbose flag, report on all URL checks on STDERR 7 | 8 | Copyright 2013 Jason Antman all rights reserved 9 | This script is distributed under the terms of the GPLv3, as per the 10 | LICENSE file in this repository. 
11 | 12 | The canonical version of this script can be found at: 13 | 14 | """ 15 | 16 | import sys 17 | import urllib2 18 | 19 | def get_url_nofollow(url): 20 | try: 21 | response = urllib2.urlopen(url) 22 | code = response.getcode() 23 | return code 24 | except urllib2.HTTPError as e: 25 | return e.code 26 | except: 27 | return 0 28 | 29 | def main(): 30 | urls = {} 31 | 32 | for line in sys.stdin.readlines(): 33 | line = line.strip() 34 | if line not in urls: 35 | sys.stderr.write("+ checking URL: %s\n" % line) 36 | urls[line] = {'code': get_url_nofollow(line), 'count': 1} 37 | sys.stderr.write("++ %s\n" % str(urls[line])) 38 | else: 39 | urls[line]['count'] = urls[line]['count'] + 1 40 | 41 | for url in urls: 42 | if urls[url]['code'] != 200: 43 | print "%d\t%d\t%s" % (urls[url]['count'], urls[url]['code'], url) 44 | 45 | if __name__ == "__main__": 46 | main() 47 | -------------------------------------------------------------------------------- /list_all_aws_resources_skew.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script using skew (https://github.com/scopely-devops/skew) to list ALL 4 | resources in an AWS account. 5 | 6 | If you have ideas for improvements, or want the latest version, it's at: 7 | 8 | 9 | Copyright 2016 Jason Antman 10 | Free for any use provided that patches are submitted back to me. 11 | 12 | CHANGELOG: 13 | 2016-06-22 Jason Antman : 14 | - initial version of script 15 | """ 16 | 17 | import sys 18 | 19 | try: 20 | from skew.arn import ARN 21 | from skew.exception import ConfigNotFoundError 22 | from skew import scan 23 | except ImportError: 24 | raise Exception('You must "pip install skew" before using this script.') 25 | 26 | 27 | try: 28 | arn = ARN() 29 | except ConfigNotFoundError as ex: 30 | sys.stderr.write("Please create your skew config file per " 31 | "\n") 32 | raise ex 33 | 34 | services=arn.service.choices() 35 | services.sort() 36 | print('Enumerating all resources in the following services: ' + 37 | ' '.join(services) + '\n') 38 | for service in services: 39 | print('******' + service + '******') 40 | if service in ['iam', 'route53']: 41 | uri = 'arn:aws:%s::*:*' % service 42 | else: 43 | uri = 'arn:aws:%s:*:*:*/*' % service 44 | try: 45 | arn = scan(uri) 46 | for i in arn: 47 | id_str = None 48 | if hasattr(i, 'tags'): 49 | id_str = 'tags: %s' % i.tags 50 | print('%s %s' % (i.arn, id_str)) 51 | except: 52 | print("=> Error scanning service: %s" % service) 53 | -------------------------------------------------------------------------------- /cookies_from_pdml.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to parse http Cookie header field from WireShark PDML XML. 4 | 5 | This is a quick hack. Lots of problems. 6 | 7 | Copyright 2014 Jason Antman 8 | Free for any use provided that patches are submitted back to me. 
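For reference, a PDML file can be generated from a packet capture with
tshark (file names hypothetical):

    tshark -r capture.pcap -Y http -T pdml > capture.pdml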
9 | 10 | The latest version of this script can be found at: 11 | 12 | """ 13 | 14 | from lxml import etree 15 | import binascii 16 | import sys 17 | import optparse 18 | 19 | 20 | def pdml_header_fields(fname, field_name): 21 | """ return list of all values for HTTP header field_name """ 22 | tree = etree.parse(fname) 23 | results = [] 24 | for e in tree.xpath('/pdml/packet/proto[@name="http"]/field[@name="http.cookie"]'): 25 | data = binascii.unhexlify(e.get("value")) 26 | results.append(data) 27 | return results 28 | 29 | def parse_options(argv): 30 | """ parse command line options """ 31 | parser = optparse.OptionParser() 32 | 33 | parser.add_option('-f', '--pdml-file', dest='fname', action='store', type='string', 34 | help='PDML file name/path') 35 | 36 | parser.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False, 37 | help='verbose output') 38 | 39 | options, args = parser.parse_args(argv) 40 | 41 | if not options.fname: 42 | sys.stderr.write("ERROR: you must specify PDML file with -f|--pdml-file\n") 43 | sys.exit(1) 44 | 45 | return options 46 | 47 | if __name__ == "__main__": 48 | opts = parse_options(sys.argv) 49 | cookies = pdml_header_fields(opts.fname, "Cookie") 50 | for cookie in cookies: 51 | print("Length: %d" % len(cookie)) 52 | print(cookie) 53 | print("####################") 54 | -------------------------------------------------------------------------------- /linode_list_records.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Simple script to list all records in Linode DNS via API, 4 | # along with their Domain ID and Record ID 5 | # 6 | # This requires the requests and json packages. 7 | # 8 | ################## 9 | # Copyright 2013 Jason Antman 10 | # Free for any use provided that patches are submitted back to me. 
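# Output is CSV on stdout; a sketch with hypothetical values:
#
#   domain,resource,type,DomainID,ResourceID
#   example.com,www,A,12345,678901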
11 | # 12 | # The latest version of this script can be found at: 13 | # 14 | ########################################################################################## 15 | 16 | import sys 17 | import requests 18 | import json 19 | 20 | if len(sys.argv) < 2 or sys.argv[1] == "-h" or sys.argv[1] == "--help": 21 | print("USAGE: linode_list_records.py ") 22 | sys.exit(1) 23 | 24 | API_Key = sys.argv[1] 25 | sys.stderr.write("Using API Key: %s\n" % API_Key) 26 | 27 | URL_BASE = "https://api.linode.com/?api_key=%s" % API_Key 28 | 29 | r = requests.get("%s&api_action=domain.list" % URL_BASE) 30 | if r.status_code != 200: 31 | sys.stderr.write("ERROR: API Request for domain.list failed with HTTP code %s\n" % r.status_code) 32 | sys.exit(2) 33 | domains = r.json() 34 | 35 | print("domain,resource,type,DomainID,ResourceID") 36 | 37 | for domain in domains['DATA']: 38 | d_name = domain['DOMAIN'] 39 | d_id = domain['DOMAINID'] 40 | 41 | r = requests.get("%s&api_action=domain.resource.list&DomainID=%d" % (URL_BASE, d_id)) 42 | if r.status_code != 200: 43 | sys.stderr.write("ERROR: API Request for domain.resource.list with DomainID %d failed with HTTP code %s\n" % (d_id, r.status_code)) 44 | sys.exit(2) 45 | resources = r.json() 46 | for res in resources['DATA']: 47 | print ("%s,%s,%s,%d,%d" % (d_name, res['NAME'], res['TYPE'], d_id, res['RESOURCEID'])) 48 | -------------------------------------------------------------------------------- /rss_to_mail_config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Sample configuration file for rss_to_mail.py 4 | 5 | For full information, see rss_to_mail.py: 6 | 7 | 8 | CHANGELOG: 9 | * Fri Jun 7 2013 : 10 | - initial version of script 11 | """ 12 | 13 | EMAIL_TO = ['user@example.com', 'user2@example.com'] 14 | EMAIL_FROM = 'sender@example.com' # sender address 15 | # set EMAIL_TEXT_ONLY to True to send plain text email only, not MIME Multipart 16 | EMAIL_TEXT_ONLY = False 17 | 18 | # feeds is a dict of name (something you set) to some values 19 | FEEDS = {} 20 | 21 | """ 22 | each feeds element has a key of the feed name, and a dict of some values, including: 23 | url - the URL to get the feed from 24 | title_regex - optional, a regex to match for post titles. emails will be sent for matching titles 25 | title_regex_i - optional, make title regex case-insensitive, boolean 26 | body_regex - optional, a regex to match for post body. emails will be sent for matching post bodies 27 | body_regex_i - optional, make body regex case-insensitive, boolean 28 | 29 | The key of each feed MUST be a string that includes only filename-safe characters ([A-Za-z0-9_-\.]) 30 | 31 | Be aware that the regexes are full-string match, so to match anything with "foo" in it, you need ".*foo.*". 32 | 33 | if a title_regex is specified, only entries with a title matching it will be included in the email. 34 | if a body_regex is specified, only entries with a body matching it will be included in the email. 35 | if both are specified, only entries matching both will be included in the email. 36 | if neither is specified, all new entries will be included in the email. 
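Because matching is full-string, a feed entry titled "Security update
released" would NOT be matched by the regex 'security' alone; you need
something like '.*security.*'. For example (feed name and URL hypothetical):

    FEEDS['security_news'] = {
        'url': 'http://example.com/feed.rss',
        'title_regex': '.*security.*',
        'title_regex_i': True,
    }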
37 | """ 38 | FEEDS['python_releases'] = { 39 | 'url': 'http://python.org/channews.rdf', 40 | 'title_regex': '.*released.*', 41 | 'title_regex_i': True, 42 | } 43 | -------------------------------------------------------------------------------- /rsync-wrapper.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | /******************************************** 8 | * Wrapper - Secure Yourself 9 | * 10 | * 2007 - Mike Golvach - eggi@comcast.net 11 | * Modified 2012 by Jason Antman 12 | * - configured for use as rsync wrapper 13 | * 14 | * The latest version of this script can be found at: 15 | * 16 | * 17 | ********************************************/ 18 | 19 | /* Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States License */ 20 | 21 | /* Define global variables */ 22 | 23 | int gid; 24 | 25 | /* main(int argc, char **argv) - main process loop */ 26 | 27 | int main(int argc, char **argv) 28 | { 29 | 30 | /* Set euid and egid to actual user */ 31 | 32 | gid = getgid(); 33 | setegid(getgid()); 34 | seteuid(getuid()); 35 | 36 | /* Confirm user is in GROUP(502) group */ 37 | 38 | if ( gid != 502 ) { 39 | printf("User Not Authorized! Exiting...\n"); 40 | exit(1); 41 | } 42 | 43 | /* Check argc count only at this point */ 44 | 45 | if ( argc != 1 ) { 46 | printf("Usage: rsync-wrapper\n"); 47 | exit(1); 48 | } 49 | 50 | /* Set uid, gid, euid and egid to root */ 51 | 52 | setegid(0); 53 | seteuid(0); 54 | setgid(0); 55 | setuid(0); 56 | 57 | /* Check argv for proper arguments and run 58 | * the corresponding script, if invoked. 59 | */ 60 | if (execl("/usr/bin/rsync", "rsync", "--server", "--sender", "-vlogDtprRe.iLsf", "--numeric-ids", ".", "/", NULL) < 0) { 61 | perror("Execl:"); 62 | } 63 | exit(0); 64 | } 65 | -------------------------------------------------------------------------------- /skeleton.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Skeleton of a simple Python CLI script 4 | 5 | Source 6 | ------ 7 | 8 | https://github.com/jantman/misc-scripts/blob/master/skeleton.py 9 | 10 | Dependencies 11 | ------------ 12 | 13 | Python 3+ 14 | 15 | """ 16 | 17 | import sys 18 | import argparse 19 | import logging 20 | 21 | logging.basicConfig( 22 | level=logging.WARNING, 23 | format="[%(asctime)s %(levelname)s] %(message)s" 24 | ) 25 | logger: logging.Logger = logging.getLogger() 26 | 27 | 28 | class SimpleScript: 29 | 30 | def __init__(self): 31 | pass 32 | 33 | def run(self): 34 | print("run.") 35 | 36 | 37 | def parse_args(argv): 38 | p = argparse.ArgumentParser(description='Python script skeleton') 39 | p.add_argument('-v', '--verbose', dest='verbose', action='store_true', 40 | default=False, help='verbose output') 41 | args = p.parse_args(argv) 42 | return args 43 | 44 | 45 | def set_log_info(l: logging.Logger): 46 | """set logger level to INFO""" 47 | set_log_level_format( 48 | l, 49 | logging.INFO, 50 | '%(asctime)s %(levelname)s:%(name)s:%(message)s' 51 | ) 52 | 53 | 54 | def set_log_debug(l: logging.Logger): 55 | """set logger level to DEBUG, and debug-level output format""" 56 | set_log_level_format( 57 | l, 58 | logging.DEBUG, 59 | "%(asctime)s [%(levelname)s %(filename)s:%(lineno)s - " 60 | "%(name)s.%(funcName)s() ] %(message)s" 61 | ) 62 | 63 | 64 | def set_log_level_format(lgr: logging.Logger, level: int, fmt: str): 65 | """Set logger level and format.""" 66 | formatter = 
logging.Formatter(fmt=fmt) 67 | lgr.handlers[0].setFormatter(formatter) 68 | lgr.setLevel(level) 69 | 70 | 71 | if __name__ == "__main__": 72 | args = parse_args(sys.argv[1:]) 73 | 74 | # set logging level 75 | if args.verbose: 76 | set_log_debug(logger) 77 | else: 78 | set_log_info(logger) 79 | 80 | SimpleScript().run() 81 | -------------------------------------------------------------------------------- /har_urls.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to dump all URLs and their status codes from a JSON 4 | HTTP Archive (HAR - http://www.softwareishard.com/blog/firebug/http-archive-specification/) 5 | file, such as those generated by the Firebug NetExport extension (http://getfirebug.com/wiki/index.php/Firebug_Extensions#NetExport) 6 | 7 | Copyright 2014 Jason Antman 8 | Free for any use provided that patches are submitted back to me. 9 | 10 | The latest version of this script can be found at: 11 | https://github.com/jantman/misc-scripts/blob/master/har_urls.py 12 | 13 | CHANGELOG: 14 | 15 | 2014-09-23 jantman: 16 | - initial script 17 | """ 18 | 19 | import sys 20 | import optparse 21 | import logging 22 | import os 23 | import json 24 | 25 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 26 | logging.basicConfig(level=logging.ERROR, format=FORMAT) 27 | logger = logging.getLogger(__name__) 28 | 29 | 30 | def main(harfile): 31 | """ read the file, list URLs """ 32 | if not os.path.exists(harfile): 33 | raise SystemExit("ERROR: file {h} does not exist.".format(h=harfile)) 34 | with open(harfile, 'r') as fh: 35 | raw = fh.read() 36 | j = json.loads(raw) 37 | s = get_url_statuses(j) 38 | for i in sorted(s): 39 | print("{status}\t{req}".format(req=i, status=s[i])) 40 | 41 | def get_url_statuses(j): 42 | """ return dict of URLs to their statuses """ 43 | entries = {} 44 | for req in j['log']['entries']: 45 | k = '{m} {u}'.format(m=req['request']['method'], u=req['request']['url']) 46 | entries[k] = req['response']['status'] 47 | return entries 48 | 49 | def parse_args(argv): 50 | """ parse arguments/options """ 51 | p = optparse.OptionParser(usage="har_urls.py ") 52 | 53 | options, args = p.parse_args(argv) 54 | 55 | return (options, args) 56 | 57 | 58 | if __name__ == "__main__": 59 | opts, args = parse_args(sys.argv[1:]) 60 | 61 | if len(args) < 1: 62 | raise SystemExit("USAGE: har_urls.py ") 63 | 64 | main(args[0]) 65 | -------------------------------------------------------------------------------- /firefox_recovery_to_html.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script to convert Firefox profile sessionstore-backups/recovery.js to HTML links 4 | 5 | Sometime in the late-20-something to early-30-something releases, Firefox stopped 6 | writing its venerable sessionstore.js file inside profile directories, in favor 7 | of a recovery.js file inside the sessionstore-backups/ directory. This script 8 | parses that file and outputs HTML with a list of links for your open tabs. 9 | 10 | Useful when sync is mishebaving. 11 | 12 | Copyright 2014 Jason Antman 13 | Free for any use provided that patches are submitted back to me. 
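Typical use is to redirect the output to a file and open it in a browser
(profile directory name hypothetical):

    firefox_recovery_to_html.py \
        ~/.mozilla/firefox/abcd1234.default/sessionstore-backups/recovery.js \
        > tabs.html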
14 | 15 | The latest version of this script can be found at: 16 | https://github.com/jantman/misc-scripts/blob/master/firefox_recovery_to_html.py 17 | 18 | CHANGELOG: 19 | 20 | 2014-12-08 jantman: 21 | - initial script 22 | """ 23 | 24 | import os 25 | import sys 26 | import json 27 | try: 28 | from html import escape # py3 29 | except ImportError: 30 | from cgi import escape # py2 31 | 32 | def usage(): 33 | print("USAGE: firefox_recovery_to_html.py /path/to/profile_dir/sessionstore-backups/recovery.js") 34 | 35 | if len(sys.argv) < 1: 36 | usage() 37 | raise SystemExit(1) 38 | 39 | fpath = sys.argv[1] 40 | 41 | if fpath == '--help' or fpath == '-h': 42 | usage() 43 | raise SystemExit() 44 | 45 | if not os.path.exists(fpath): 46 | raise SystemExit("ERROR: file does not exist: %s" % fpath) 47 | 48 | with open(fpath, 'r') as fh: 49 | raw = fh.read() 50 | 51 | js = json.loads(raw) 52 | 53 | """ 54 | _closedWindows 55 | windows 56 | session 57 | selectedWindow 58 | global 59 | """ 60 | 61 | print('') 62 | print('recovery.js tabs') 63 | print('
<ol>')
64 | for i in js['windows']:
65 |     for x in i['tabs']:
66 |         tab = x['entries'][-1]
67 |         print('<li><a href="{url}">{title}</a></li>'.format(title=tab['title'], url=escape(tab['url'])))
68 | print('</ol></body></html>
') 69 | -------------------------------------------------------------------------------- /add_team_to_github_org_repos.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Script using PyGithub to add a specified GitHub Team to all of an Organization's repositories. 4 | # 5 | # Copyright 2015 Jason Antman 6 | # Free for any use provided that patches are submitted back to me. 7 | # 8 | # The latest version of this script can be found at: 9 | # 10 | # 11 | # Requires PyGithub - `pip install PyGithub` (tested against 1.23.0) 12 | # tested with py27 and py32 13 | # 14 | # Assumes you have a GitHub API Token, either in ~/.ssh/apikeys.py or 15 | # in a GITHUB_TOKEN environment variable. 16 | # 17 | # CHANGELOG: 18 | # - initial script 19 | # 20 | 21 | from github import Github 22 | import os 23 | import sys 24 | 25 | if len(sys.argv) < 3: 26 | sys.stderr.write("USAGE: github_org_repos.py \n") 27 | raise SystemExit(1) 28 | 29 | orgname = sys.argv[1] 30 | teamname = sys.argv[2] 31 | 32 | TOKEN = None 33 | try: 34 | # look for GITHUB_TOKEN defined in ~/.ssh/apikeys.py 35 | sys.path.append(os.path.abspath(os.path.join(os.path.expanduser('~'), '.ssh'))) 36 | from apikeys import GITHUB_TOKEN 37 | TOKEN = GITHUB_TOKEN 38 | except ImportError: 39 | pass 40 | 41 | if TOKEN is None: 42 | try: 43 | TOKEN = os.environ['GITHUB_TOKEN'] 44 | except KeyError: 45 | sys.stderr.write("ERROR: you must either set GITHUB_TOKEN in ~/.ssh/apikeys.py or export it as an env variable.\n") 46 | raise SystemExit(1) 47 | 48 | g = Github(login_or_token=TOKEN) 49 | org = g.get_organization(orgname) 50 | 51 | team = None 52 | for t in org.get_teams(): 53 | if t.name == teamname: 54 | team = t 55 | 56 | if team is None: 57 | sys.stderr.write("ERROR: could not find team '%s'\n" % teamname) 58 | raise SystemExit(1) 59 | 60 | team_repos = [r.id for r in team.get_repos()] 61 | print("Team %s has %d repositories" % (teamname, len(team_repos))) 62 | 63 | for repo in org.get_repos(): 64 | if repo.id in team_repos: 65 | print("%s is already in team's repositories" % repo.name) 66 | continue 67 | print("Adding repo %s to team's repositories" % repo.name) 68 | team.add_to_repos(repo) 69 | -------------------------------------------------------------------------------- /dump_firefox_session.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # LZ4 logic taken from: https://gist.github.com/Tblue/62ff47bef7f894e92ed5 3 | # Copyright (c) 2015, Tilman Blumenbach 4 | 5 | import json 6 | import sys 7 | 8 | try: 9 | import lz4.block 10 | except ImportError: 11 | sys.stderr.write('Please "pip install lz4"\n') 12 | raise SystemExit(1) 13 | 14 | 15 | class MozLz4aError(Exception): 16 | pass 17 | 18 | 19 | class InvalidHeader(MozLz4aError): 20 | def __init__(self, msg): 21 | self.msg = msg 22 | 23 | def __str__(self): 24 | return self.msg 25 | 26 | 27 | def decompress(file_obj): 28 | if file_obj.read(8) != b"mozLz40\0": 29 | raise InvalidHeader("Invalid magic number") 30 | return lz4.block.decompress(file_obj.read()) 31 | 32 | 33 | def dump_session_js(fpath): 34 | with open(fpath, 'r') as fh: 35 | sess = json.loads(fh.read()) 36 | for window in sess['windows']: 37 | print('=== WINDOW: %s (%s)===' % (window.get('title', 'NO TITLE'), window['selected'])) 38 | if 'closedAt' in window: 39 | continue 40 | for tab in window['tabs']: 41 | if 'closedAt' in tab: 42 | continue 43 | print(str(tab['index']) + ' ' + 
tab['entries'][-1]['url']) 44 | 45 | 46 | def dump_session_jsonlz4(fpath): 47 | with open(fpath, "rb") as in_file: 48 | data = decompress(in_file) 49 | sess = json.loads(data) 50 | for window in sess['windows']: 51 | print('=== WINDOW: %s (%s)===' % (window.get('title', 'NO TITLE'), window['selected'])) 52 | if 'closedAt' in window: 53 | continue 54 | for tab in window['tabs']: 55 | if 'closedAt' in tab: 56 | continue 57 | print(str(tab['index']) + ' ' + tab['entries'][-1]['url']) 58 | 59 | 60 | if __name__ == "__main__": 61 | if len(sys.argv) < 1: 62 | sys.stderr.write( 63 | "USAGE: dump_firefox_session.py /path/to/sessionstore.js\n" 64 | ) 65 | raise SystemExit(1) 66 | fpath = sys.argv[1] 67 | if fpath.endswith('.js'): 68 | dump_session_js(sys.argv[1]) 69 | elif fpath.endswith('.jsonlz4'): 70 | dump_session_jsonlz4(fpath) 71 | else: 72 | raise SystemExit('Unknown file extension.') 73 | -------------------------------------------------------------------------------- /make_puppet_param_markdown.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Python script to generate MarkDown docblock fragment 4 | # for all parameters of a Puppet parameterized class 5 | # or define. 6 | # 7 | # Simple, naive regex matching. Assumes you style your manifests properly. 8 | # 9 | ################## 10 | # Copyright 2014 Jason Antman 11 | # Free for any use provided that patches are submitted back to me. 12 | # 13 | # The latest version of this script can be found at: 14 | # 15 | # 16 | # CHANGELOG: 17 | # 2014-02-06 Jason Antman : 18 | # - initial script 19 | ########################################################################################## 20 | 21 | 22 | import os.path 23 | import re 24 | import sys 25 | 26 | if len(sys.argv) < 2 or len(sys.argv) > 3: 27 | sys.stderr.write("USAGE: make_puppet_param_markdown.py /path/to/manifest.pp\n") 28 | sys.exit(1) 29 | 30 | fname = sys.argv[1] 31 | 32 | if not os.path.exists(fname): 33 | sys.stderr.write("ERROR: %s does not appear to exist\n" % fname) 34 | sys.exit(1) 35 | 36 | start_re = re.compile(r'^\s*(define|class).*\($') 37 | end_re = re.compile(r'.*{$') 38 | comment_re = re.compile(r'^\s*#') 39 | 40 | lines = [] 41 | in_params = False 42 | with open(fname, 'r') as fh: 43 | for line in fh: 44 | line = line.strip() 45 | if comment_re.match(line): 46 | continue 47 | if not in_params and start_re.match(line): 48 | in_params = True 49 | elif in_params and end_re.match(line): 50 | break 51 | elif in_params: 52 | lines.append(line) 53 | 54 | if len(lines) < 1: 55 | sys.stderr.write("ERROR: did not find any params in %s\n" % fname) 56 | sys.exit(1) 57 | 58 | line_re = re.compile(r'\s*\$(?P\S+)(\s+=\s*(?P\S+.*))?,?$') 59 | for line in lines: 60 | foo = line_re.match(line) 61 | d = foo.groupdict() 62 | print("# [*%s*]" % d['varname'].strip(', ')) 63 | print("# ()") 64 | if 'val' in d and d['val'] is not None: 65 | print("# (optional; default: %s)" % d['val'].strip(', ')) 66 | else: 67 | print("# (required)") 68 | print("#") 69 | -------------------------------------------------------------------------------- /rebuild_srpm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to rebuild a SRPM 1:1, useful when you want to build a RHEL/CentOS 6 4 | # SRPM on a RHEL/CentOS 5 system that doesn't support newer compression (cpio: MD5 sum mismatch) 5 | # 6 | # Copyright 2014 Jason Antman . All Rights Reserved. 
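#
# Example invocation (SRPM file name hypothetical); output goes to the
# second argument, or the current directory if omitted:
#   ./rebuild_srpm.sh mypackage-1.0-1.el6.src.rpm /tmp/rebuilt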
7 | # Free for any use provided that patches are submitted back to me. 8 | # 9 | # The latest version of this script will always live at: 10 | # 11 | # 12 | 13 | if [[ -z "$1" || "$1" == "-h" || "$1" == "--help" ]] 14 | then 15 | echo "USAGE: rebuild_srpm.sh " 16 | exit 1 17 | fi 18 | 19 | if [[ -z "$2" ]] 20 | then 21 | OUTDIR=`pwd` 22 | else 23 | OUTDIR="$2" 24 | fi 25 | 26 | if [[ ! -e "$1" ]] 27 | then 28 | echo "ERROR: SRPM file not found: $1" 29 | exit 1 30 | fi 31 | 32 | if ! which rpmbuild &> /dev/null 33 | then 34 | echo "rpmbuild could not be found. please install. (sudo yum install rpm-build)" 35 | exit 1 36 | fi 37 | 38 | if ! which rpm2cpio &> /dev/null 39 | then 40 | echo "rpm2cpio could not be found. please install. (sudo yum install rpm)" 41 | exit 1 42 | fi 43 | 44 | SRPM=`dirname "$1"`"/"`basename "$1"` 45 | TEMPDIR=`mktemp -d` 46 | STARTPWD=`pwd` 47 | 48 | echo "Rebuilding $SRPM..." 49 | 50 | # copy srpm into tempdir 51 | cp $SRPM $TEMPDIR 52 | 53 | pushd $TEMPDIR &>/dev/null 54 | 55 | # setup local build dir structure 56 | mkdir -p rpm rpm/BUILD rpm/RPMS rpm/SOURCES rpm/SPECS rpm/SRPMS rpm/RPMS/athlon rpm/RPMS/i\[3456\]86 rpm/RPMS/i386 rpm/RPMS/noarch rpm/RPMS/x86_64 57 | 58 | # setup rpmmacros file 59 | cat /dev/null > $TEMPDIR/.rpmmacros 60 | echo "%_topdir $TEMPDIR/rpm" >> ~/.rpmmacros 61 | 62 | echo "Extracting SRPM..." 63 | pushd $TEMPDIR/rpm/SOURCES/ &>/dev/null 64 | rpm2cpio $SRPM | cpio -idmv &>/dev/null 65 | popd &>/dev/null 66 | 67 | # build the SRPM from the spec and sources 68 | # we're just building a SRPM so we can ignore dependencies 69 | echo "Rebuilding SRPM..." 70 | NEW_SRPM=`rpmbuild -bs --nodeps --macros=$TEMPDIR/.rpmmacros $TEMPDIR/rpm/SOURCES/*.spec | grep "^Wrote: " | awk '{print $2}'` 71 | 72 | echo "Copying to $OUTDIR" 73 | cp $NEW_SRPM $OUTDIR/ 74 | 75 | echo "Wrote file to $OUTDIR/`basename $NEW_SRPM`" 76 | 77 | # cleanup 78 | cd $STARTPWD 79 | rm -Rf $TEMPDIR 80 | -------------------------------------------------------------------------------- /timeout: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Execute a command with a timeout 4 | 5 | # Author: 6 | # http://www.pixelbeat.org/ 7 | # Notes: 8 | # Note there is a timeout command packaged with coreutils since v7.0 9 | # If the timeout occurs the exit status is 124. 10 | # There is an asynchronous (and buggy) equivalent of this 11 | # script packaged with bash (under /usr/share/doc/ in my distro), 12 | # which I only noticed after writing this. 13 | # I noticed later again that there is a C equivalent of this packaged 14 | # with satan by Wietse Venema, and copied to forensics by Dan Farmer. 15 | # Changes: 16 | # V1.0, Nov 3 2006, Initial release 17 | # V1.1, Nov 20 2007, Brad Greenlee 18 | # Make more portable by using the 'CHLD' 19 | # signal spec rather than 17. 20 | # V1.3, Oct 29 2009, Ján Sáreník 21 | # Even though this runs under dash,ksh etc. 22 | # it doesn't actually timeout. So enforce bash for now. 23 | # Also change exit on timeout from 128 to 124 24 | # to match coreutils. 
25 | # V2.0, Oct 30 2009, Ján Sáreník 26 | # Rewritten to cover compatibility with other 27 | # Bourne shell implementations (pdksh, dash) 28 | 29 | if [ "$#" -lt "2" ]; then 30 | echo "Usage: `basename $0` timeout_in_seconds command" >&2 31 | echo "Example: `basename $0` 2 sleep 3 || echo timeout" >&2 32 | exit 1 33 | fi 34 | 35 | cleanup() 36 | { 37 | trap - ALRM #reset handler to default 38 | kill -ALRM $a 2>/dev/null #stop timer subshell if running 39 | kill $! 2>/dev/null && #kill last job 40 | exit 124 #exit with 124 if it was running 41 | } 42 | 43 | watchit() 44 | { 45 | trap "cleanup" ALRM 46 | sleep $1& wait 47 | kill -ALRM $$ 48 | } 49 | 50 | watchit $1& a=$! #start the timeout 51 | shift #first param was timeout for sleep 52 | trap "cleanup" ALRM INT #cleanup after timeout 53 | "$@"& wait $!; RET=$? #start the job wait for it and save its return value 54 | kill -ALRM $a #send ALRM signal to watchit 55 | wait $a #wait for watchit to finish cleanup 56 | exit $RET #return the value 57 | -------------------------------------------------------------------------------- /reconcile_git_repos.html.tmpl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |

<title>git repository reconcile as of {{ date }}</title>
5 | </head><body>
6 | <h1>Empty Repositories:</h1>
7 | <ul>
8 | {% for repo in repos %}
9 | {% if repos[repo]['is_empty'] %}
10 | <li>{{ repos[repo]['name'] }}</li>
11 | {% endif %}
12 | {% endfor %}
13 | </ul>
14 | <h1>Repositories with Less Than 10 Commits</h1>
15 | <ul>
16 | {% for repo in repos %}
17 | {% if not repos[repo]['is_empty'] and 'num_commits' in repos[repo] and repos[repo]['num_commits'] < 10 %}
18 | <li>{{ repos[repo]['name'] }} ({{ repos[repo]['num_commits'] }})</li>
19 | {% endif %}
20 | {% endfor %}
21 | </ul>
22 | <h1>Repositories with Newest Commit More than One Year Ago</h1>
23 | <ul>
24 | {% for repo in repo_sorted_timestamp %}
25 | {% if 'newest_timestamp' in repos[repo] and repos[repo]['newest_timestamp'] < cutoff_ts %}
26 | <li>{{ repos[repo]['name'] }} ({{ repos[repo]['newest_timestamp']|ts2str }})</li>
27 | {% endif %}
28 | {% endfor %}
29 | </ul>
30 | <h1>Similar Repositories</h1>
31 | {% for rdict in similar_repos %}
32 | <div>
33 | <ul>
34 | {% for reponame in rdict['repo_paths'] %}
35 | <li>{{ reponame }} - {{ repos[reponame]|repostats }}</li>
36 | {% endfor %}
37 | </ul>
38 | {% for comparison in rdict['comparisons'] %}
39 | <h2>{{ comparison['pathA'] }} vs {{ comparison['pathB'] }}</h2>
40 | <ul>
41 | <li>Branches: A +{{ comparison['branchA'] }} / ~{{ comparison['branchDiff'] }} / B +{{ comparison['branchB'] }}</li>
42 | <li>Tags: A +{{ comparison['tagA'] }} / ~{{ comparison['tagDiff'] }} / B +{{ comparison['tagB'] }}</li>
43 | <li>Commits ({{ comparison['Abranch'] }} / {{ comparison['Bbranch'] }}): A +{{ comparison['commitA'] }} / B +{{ comparison['commitB'] }}</li>
44 | </ul>
45 | {% endfor %}
46 | </div>
47 | {% endfor %} 48 | 49 | 50 | -------------------------------------------------------------------------------- /.github/workflows/bgw210-docker.yml: -------------------------------------------------------------------------------- 1 | name: Build and Push BGW210 Docker Image 2 | 3 | on: 4 | push: 5 | paths: 6 | - 'bgw210-700_prom_collector.py' 7 | - 'Dockerfile.bgw210' 8 | - 'requirements-bgw210.txt' 9 | - '.github/workflows/bgw210-docker.yml' 10 | pull_request: 11 | paths: 12 | - 'bgw210-700_prom_collector.py' 13 | - 'Dockerfile.bgw210' 14 | - 'requirements-bgw210.txt' 15 | - '.github/workflows/bgw210-docker.yml' 16 | 17 | env: 18 | REGISTRY: ghcr.io 19 | IMAGE_NAME: bgw210-prom-collector 20 | 21 | jobs: 22 | build-and-push: 23 | runs-on: ubuntu-latest 24 | permissions: 25 | contents: read 26 | packages: write 27 | 28 | steps: 29 | - name: Checkout repository 30 | uses: actions/checkout@v4 31 | 32 | - name: Set up Docker Buildx 33 | uses: docker/setup-buildx-action@v3 34 | 35 | - name: Log in to Container Registry 36 | uses: docker/login-action@v3 37 | with: 38 | registry: ${{ env.REGISTRY }} 39 | username: ${{ github.actor }} 40 | password: ${{ secrets.GITHUB_TOKEN }} 41 | 42 | - name: Extract metadata 43 | id: meta 44 | uses: docker/metadata-action@v5 45 | with: 46 | images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.IMAGE_NAME }} 47 | tags: | 48 | type=ref,event=branch 49 | type=ref,event=pr 50 | type=sha,prefix={{branch}}- 51 | type=raw,value=latest,enable={{is_default_branch}} 52 | 53 | - name: Build and push Docker image 54 | uses: docker/build-push-action@v5 55 | with: 56 | context: . 57 | file: ./Dockerfile.bgw210 58 | push: true 59 | tags: ${{ steps.meta.outputs.tags }} 60 | labels: ${{ steps.meta.outputs.labels }} 61 | platforms: linux/amd64,linux/arm64 62 | cache-from: type=gha 63 | cache-to: type=gha,mode=max 64 | 65 | - name: Test Docker image 66 | if: github.event_name == 'pull_request' 67 | run: | 68 | docker run --rm -d --name bgw210-test -p 8000:8000 \ 69 | ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}:${{ steps.meta.outputs.version }} 70 | sleep 10 71 | curl -f http://localhost:8000/metrics || exit 1 72 | docker stop bgw210-test -------------------------------------------------------------------------------- /cmd-wrapper.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | /******************************************** 9 | * Wrapper - Secure Yourself 10 | * 11 | * 2007 - Mike Golvach - eggi@comcast.net 12 | * Modified 2012 by Jason Antman 13 | * - configured for use as pre- and post-backup script wrapper 14 | * 15 | * USAGE: cmd-wrapper [pre|post] 16 | * 17 | * The latest version of this script can be found at: 18 | * 19 | * 20 | ********************************************/ 21 | 22 | /* Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States License */ 23 | 24 | /* Define global variables */ 25 | 26 | int gid; 27 | 28 | /* main(int argc, char **argv) - main process loop */ 29 | 30 | int main(int argc, char **argv, char **envp) 31 | { 32 | char *origcmd; 33 | 34 | origcmd = getenv("SSH_ORIGINAL_COMMAND"); 35 | 36 | /* printf ("Original Command:%s\n", origcmd); */ 37 | 38 | /* Set euid and egid to actual user */ 39 | 40 | gid = getgid(); 41 | setegid(getgid()); 42 | seteuid(getuid()); 43 | 44 | /* Confirm user is in GROUP(502) group */ 45 | 46 | if ( gid != 502 ) { 47 | printf("User Not 
Authorized! Exiting...\n"); 48 | exit(1); 49 | } 50 | 51 | /* Check argc count only at this point */ 52 | 53 | if ( argc != 1 ) { 54 | printf("Usage: cmd-wrapper [pre|post]\n"); 55 | exit(1); 56 | } 57 | 58 | /* Set uid, gid, euid and egid to root */ 59 | 60 | setegid(0); 61 | seteuid(0); 62 | setgid(0); 63 | setuid(0); 64 | 65 | /* Check argv for proper arguments and run 66 | * the corresponding script, if invoked. 67 | */ 68 | 69 | if ( strncmp(origcmd, "pre", 3) == 0 ) { 70 | if (execl("/root/bin/rsnapshot-pre.sh", "rsnapshot-pre.sh", NULL) < 0) { 71 | perror("Execl:"); 72 | } 73 | } else if ( strncmp(origcmd, "post", 4) == 0 ) { 74 | if (execl("/root/bin/rsnapshot-post.sh", "rsnapshot-post.sh", NULL) < 0) { 75 | perror("Execl:"); 76 | } 77 | } else { 78 | printf("ERROR: Invalid command: %s\n", origcmd); 79 | printf("Usage: COMMAND [pre|post]\n"); 80 | exit(1); 81 | } 82 | exit(0); 83 | } 84 | -------------------------------------------------------------------------------- /kickRsyslog.php: -------------------------------------------------------------------------------- 1 | #!/usr/bin/php 2 | , 9 | * on behalf of the taxpayers of the State of New Jersey and/or the students of Rutgers University, 10 | * The State University of New Jersey. 11 | * 12 | * The latest version of this script can be found at: 13 | * 14 | * 15 | */ 16 | 17 | require_once('collectRsyslogInfo.php'); 18 | 19 | $mail_to = array('jantman@oit.rutgers.edu'); 20 | 21 | if(! file_exists(PID_FILE)) 22 | { 23 | $host = trim(shell_exec("hostname")); 24 | $subj = "RSYSLOG ERROR - Not running on $host"; 25 | 26 | $body = "ERROR - '".PID_FILE."' does not exist on host '$host'.\n\nSomeone should start rsyslogd, unless it is supposed to be stopped.\n\n"; 27 | $body .= "mail sent by ".__FILE__." on $host\n"; 28 | 29 | foreach($mail_to as $addr) 30 | { 31 | mail($addr, $subj, $body); 32 | } 33 | 34 | openlog("kickRsyslog.php", LOG_NDELAY | LOG_PERROR | LOG_PID, LOG_DAEMON); 35 | syslog(LOG_EMERG, __FILE__." thinks rsyslog has died. If you actually see this log message in a file, the script is broken (rsyslog is actually running)."); 36 | closelog(); 37 | 38 | exit(1); 39 | } 40 | 41 | $mtime = filemtime(AGE_CHECK_FILE); 42 | $age = time() - $mtime; 43 | if($age >= THRESHOLD_SEC) 44 | { 45 | $body = collectRsyslogInfo(true, true); 46 | 47 | $cmd = "/sbin/service rsyslog restart"; 48 | $host = trim(shell_exec("hostname")); 49 | $start = microtime(true); 50 | exec($cmd); 51 | $foo = microtime(true) - $start; 52 | 53 | $body .= "\nIssuing command '$cmd' as root.... "; 54 | $body .= "Command ran in ".round($foo, 3)." seconds
"; 55 | 56 | $headers = "Content-type: text/html\r\n"; 57 | foreach($mail_to as $addr) 58 | { 59 | mail($addr, $subj, $body, $headers); 60 | } 61 | 62 | openlog("kickRsyslog.php", LOG_NDELAY | LOG_PERROR | LOG_PID, LOG_DAEMON); 63 | syslog(LOG_EMERG, __FILE__." thinks rsyslog is hanging. If you actually see this log message in a file, the script is broken (rsyslog is actually running)."); 64 | closelog(); 65 | exit(1); 66 | } 67 | 68 | fwrite(STDERR, __FILE__." - rsyslog appears to be running normally.\n"); 69 | 70 | ?> 71 | -------------------------------------------------------------------------------- /linode_ddns_update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This is a very simple script to use Linode's HTTP API to update 4 | # dynamic DNS. I use it on my Vyatta CE router to maintain dynamic 5 | # dns for my home (dynamic IP) internet connection. 6 | # 7 | # Credit for this script goes to "Guspaz" on the Linode forums, who published it 8 | # in his post at: 9 | # 10 | ################## 11 | # Copyright 2014 Jason Antman 12 | # Free for any use provided that patches are submitted back to me. 13 | # 14 | # The most recent version of this script is available at: 15 | # 16 | # 17 | # Please read the inline comments for configuration. 18 | # 19 | # Running the script: 20 | # the nicest way would be to run it via a hook for your WAN interface, 21 | # or dhcp client. Personally I just cron it every 15 minutes and consider 22 | # that to be acceptable enough. The script caches the current WAN IP on disk, 23 | # so it will only call out to Linode's API when it changes. 24 | # 25 | 26 | # this command should return a string of only your WAN IP. I currently use a 27 | # simple PHP script on my web server, but you can modify as needed (or use this 28 | # service of mine, so long as it doesn't become too popular...) 29 | WAN_IP=`wget -O - -U wget/linode_ddns.sh/iponly http://whatismyip.jasonantman.com` 30 | 31 | # Set LINODE_API_KEY to your API key, found on the Linode Manager site - 32 | # click "my profile" at the top right, scroll down to "API Key" 33 | LINODE_API_KEY="Put your API key here" 34 | 35 | # the following are the domain and resource IDs for the record you want to update, 36 | # as used in Linode's API. You can get these using the linode_list_records.py script 37 | # in my same git repo, 38 | DOMAIN_ID=0 39 | RESOURCE_ID=0 40 | 41 | # Get your old WAN IP 42 | OLD_WAN_IP=`cat /var/CURRENT_WAN_IP.txt` 43 | 44 | # See if the new IP is the same as the old IP. 45 | if [ "$WAN_IP" = "$OLD_WAN_IP" ]; then 46 | echo "IP Unchanged" 47 | # Don't do anything if th eIP didn't change 48 | else 49 | # The IP changed. Update Linode's DNS to show the new IP 50 | echo $WAN_IP > /var/CURRENT_WAN_IP.txt 51 | wget -qO- https://api.linode.com/?api_key="$LINODE_API_KEY"\&api_action=domain.resource.update\&DomainID="$DOMAIN_ID"\&ResourceID="$RESOURCE_ID"\&Target="$WAN_IP" 52 | fi 53 | 54 | -------------------------------------------------------------------------------- /watch_all_my_github_repos.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Script using PyGithub to make sure you're watching all of your own GitHub 4 | repos. 5 | 6 | Copyright 2015 Jason Antman 7 | Free for any use provided that patches are submitted back to me. 
8 | 9 | The latest version of this script can be found at: 10 | 11 | 12 | Requires PyGithub - `pip install PyGithub` (tested against 1.23.0) 13 | tested with py27 and py32 14 | 15 | Assumes you have a GitHub API Token, either in ~/.ssh/apikeys.py or 16 | in a GITHUB_TOKEN environment variable. 17 | 18 | CHANGELOG: 19 | 20 | 2015-07-28 Jason Antman 21 | - initial script 22 | """ 23 | 24 | from github import Github 25 | import os 26 | import sys 27 | 28 | import logging 29 | logging.basicConfig(level=logging.DEBUG) 30 | logger = logging.getLogger() 31 | 32 | # suppress github internal logging 33 | github_log = logging.getLogger("github") 34 | github_log.setLevel(logging.WARNING) 35 | github_log.propagate = True 36 | 37 | TOKEN = None 38 | try: 39 | # look for GITHUB_TOKEN defined in ~/.ssh/apikeys.py 40 | sys.path.append(os.path.abspath(os.path.join(os.path.expanduser('~'), '.ssh'))) 41 | from apikeys import GITHUB_TOKEN 42 | TOKEN = GITHUB_TOKEN 43 | logger.debug("Using GITHUB_TOKEN from ~/.ssh/apikeys") 44 | except ImportError: 45 | pass 46 | 47 | if TOKEN is None: 48 | try: 49 | TOKEN = os.environ['GITHUB_TOKEN'] 50 | logger.debug("Using GITHUB_TOKEN env var") 51 | except KeyError: 52 | sys.stderr.write("ERROR: you must either set GITHUB_TOKEN in ~/.ssh/apikeys.py or export it as an env variable.\n") 53 | raise SystemExit(1) 54 | 55 | logger.debug("Connecting to GitHub") 56 | g = Github(login_or_token=TOKEN) 57 | user = g.get_user() 58 | logger.info("Connected to GitHub API as %s (%s)", user.login, user.name) 59 | 60 | repos = user.get_repos() 61 | for repo in repos: 62 | if repo.owner.login != user.login: 63 | logger.debug("Skipping repo %s owned by %s", repo.name, repo.owner.login) 64 | continue 65 | watched = False 66 | for watcher in repo.get_subscribers(): 67 | if watcher.login == user.login: 68 | watched = True 69 | break 70 | if watched: 71 | logger.debug("Repo %s is already watched by %s", repo.name, user.login) 72 | continue 73 | logger.info("Watching repo %s", repo.name) 74 | user.add_to_subscriptions(repo) 75 | -------------------------------------------------------------------------------- /pushover: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ################################################################################### 3 | # 4 | # Notify command completion and exit status via pushover and notify-send 5 | # 6 | # uses pushover.sh from https://raw.githubusercontent.com/jnwatts/pushover.sh/master/pushover.sh 7 | # 8 | ################################################################################### 9 | # 10 | # Copyright 2015 Jason Antman 11 | # Free for any use provided that patches are submitted back to me. 12 | # 13 | # The most recent version of this script is available at: 14 | # 15 | # 16 | ################################################################################### 17 | # 18 | # EXAMPLES: 19 | # 20 | # pushover /bin/false 21 | # (sends failure notification) 22 | # 23 | # pushover /bin/true foo bar baz 24 | # (sends success notification) 25 | # 26 | ################################################################################### 27 | # 28 | # Version 1.0.1 29 | # 30 | # CHANGELOG: 31 | # 32 | # * 1.0.1 2015-05-11 Jason Antman 33 | # * swith from env vars for pushover keys to ~/.config/pushover.conf 34 | # 35 | # * 1.0.0 2015-05-11 Jason Antman 36 | # * Initial public version 37 | # 38 | ################################################################################### 39 | 40 | 41 | if ! 
which pushover.sh > /dev/null 2>&1; then
42 |     >&2 echo "ERROR: pushover.sh not found; please put in path (download from https://raw.githubusercontent.com/jnwatts/pushover.sh/master/pushover.sh)"
43 |     exit 1
44 | fi
45 | 
46 | stime=$(date '+%s')
47 | "$@"
48 | exitcode=$?
49 | # timer
50 | etime=$(date '+%s')
51 | dt=$((etime - stime))
52 | ds=$((dt % 60))
53 | dm=$(((dt / 60) % 60))
54 | dh=$((dt / 3600))
55 | times=$(printf '%d:%02d:%02d' $dh $dm $ds)
56 | # end timer
57 | if [ "$exitcode" -eq 0 ]
58 | then
59 |     pushover.sh -p 0 -t "Command Succeeded" "succeeded in ${times} on $(hostname): $@ (in $(pwd))"
60 |     echo "(sent pushover success notification)"
61 |     if which notify-send > /dev/null 2>&1; then
62 |         notify-send "Command Succeeded" "succeeded in ${times}: $@ (in $(pwd))"
63 |     fi
64 | else
65 |     pushover.sh -p 0 -s falling -t "Command Failed" "failed in ${times} (exit $exitcode) on $(hostname): $@ (in $(pwd))"
66 |     echo "(sent pushover failure notification)"
67 |     if which notify-send > /dev/null 2>&1; then
68 |         notify-send "Command Failed" "failed in ${times} (exit $exitcode): $@ (in $(pwd))"
69 |     fi
70 | fi
71 | 
-------------------------------------------------------------------------------- /github_find_member_with_key.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Script using PyGithub to list an organization's members, and then find who
4 | # has a specified public key.
5 | #
6 | # Copyright 2016 Jason Antman 
7 | # Free for any use provided that patches are submitted back to me.
8 | #
9 | # The latest version of this script can be found at:
10 | # 
11 | #
12 | # Requires PyGithub - `pip install PyGithub` (tested against 1.23.0)
13 | # tested with py27 and py32
14 | #
15 | # Assumes you have a GitHub API Token, either in ~/.ssh/apikeys.py or
16 | # in a GITHUB_TOKEN environment variable.
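#
# Example invocation (the org name and key path are illustrative):
#
#   ./github_find_member_with_key.py myorg ~/.ssh/id_rsa.pub
#
# Each member's keys are checked in turn; the script prints "MATCH" and
# exits as soon as the key's owner is found.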
17 | # 18 | # CHANGELOG: 19 | # 20 | # * 2016-11-23 Jason Antman 21 | # - initial script 22 | # 23 | 24 | from github import Github 25 | import os 26 | import sys 27 | import argparse 28 | 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument('orgname', type=str, help='github org name') 31 | parser.add_argument('KEY_FILE_PATH', type=str, help='path to public key') 32 | args = parser.parse_args() 33 | 34 | TOKEN = None 35 | try: 36 | # look for GITHUB_TOKEN defined in ~/.ssh/apikeys.py 37 | sys.path.append(os.path.abspath(os.path.join(os.path.expanduser('~'), '.ssh'))) 38 | from apikeys import GITHUB_TOKEN 39 | TOKEN = GITHUB_TOKEN 40 | except ImportError: 41 | pass 42 | 43 | if TOKEN is None: 44 | try: 45 | TOKEN = os.environ['GITHUB_TOKEN'] 46 | except KeyError: 47 | sys.stderr.write("ERROR: you must either set GITHUB_TOKEN in ~/.ssh/apikeys.py or export it as an env variable.\n") 48 | sys.exit(1) 49 | 50 | print("Logging in to github") 51 | g = Github(login_or_token=TOKEN) 52 | with open(args.KEY_FILE_PATH, 'r') as fh: 53 | priv_key = fh.read().strip() 54 | # strip the title 55 | priv_key = ' '.join(priv_key.split(' ')[0:2]).strip() 56 | 57 | 58 | members = [m for m in g.get_organization(args.orgname).get_members()] 59 | print("Organization %s has %d members; searching their keys" % (args.orgname, len(members))) 60 | for m in members: 61 | keys = [k for k in m.get_keys()] 62 | print("Checking %s (%d keys)" % (m.login, len(keys))) 63 | for k in keys: 64 | try: 65 | title = k.title 66 | except AttributeError: 67 | title = '' 68 | key_s = "Key %d %s" % (k.id, title) 69 | if k.key == priv_key: 70 | print("\tMATCH: %s" % key_s) 71 | raise SystemExit(0) 72 | else: 73 | print("\tno match: %s" % key_s) 74 | print("NO MATCH!") 75 | -------------------------------------------------------------------------------- /ubiquiti-mac-acl/wireless.sql: -------------------------------------------------------------------------------- 1 | -- MySQL dump 10.10 2 | -- 3 | -- Host: localhost Database: wireless 4 | -- ------------------------------------------------------ 5 | -- Server version 5.0.26-log 6 | -- ------------------------------------------------------ 7 | -- SQL schema for updateAPconfigs.php.inc database 8 | -- Functions for working with MAC authentication in Ubiquiti Networks AirOS v2 configs. 9 | -- 10 | -- Copyright 2010, 2011 Jason Antman, All Rights Reserved. 11 | -- 12 | -- These functions may be used for any purpose provided that: 13 | -- 1) This copyright notice is kept intact. 14 | -- 2) You send back to me any changes/modifications/bugfixes that you make. 15 | -- 3) This may not be included in commercial software which is sold for a fee, unless you discuss this with me first. 
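--
-- Example row for the `macs` table defined below (all values illustrative):
--
--   INSERT INTO `macs` (`mac`, `EMTid`, `username`, `userDN`, `alias`)
--   VALUES ('00:15:6d:aa:bb:cc', '1234', 'jsmith',
--           'uid=jsmith,ou=people,dc=example,dc=com', 'Laptop');
--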
16 | -- 17 | -- @author Jason Antman 18 | -- 19 | -- Announcement post: 20 | -- 21 | -- The canonical current version of this script lives at: 22 | -- $HeadURL$ 23 | -- $LastChangedRevision$ 24 | -- 25 | 26 | /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; 27 | /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; 28 | /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; 29 | /*!40101 SET NAMES utf8 */; 30 | /*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; 31 | /*!40103 SET TIME_ZONE='+00:00' */; 32 | /*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; 33 | /*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; 34 | /*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; 35 | /*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; 36 | 37 | -- 38 | -- Table structure for table `macs` 39 | -- 40 | 41 | DROP TABLE IF EXISTS `macs`; 42 | CREATE TABLE `macs` ( 43 | `mac` varchar(20) NOT NULL, 44 | `EMTid` varchar(10) default NULL, 45 | `username` varchar(30) default NULL, 46 | `userDN` varchar(100) default NULL, 47 | `alias` varchar(100) default NULL, 48 | PRIMARY KEY (`mac`) 49 | ) ENGINE=MyISAM DEFAULT CHARSET=latin1; 50 | /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; 51 | 52 | /*!40101 SET SQL_MODE=@OLD_SQL_MODE */; 53 | /*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; 54 | /*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; 55 | /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; 56 | /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; 57 | /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; 58 | /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; 59 | 60 | -- Dump completed on 2011-01-06 21:09:02 61 | -------------------------------------------------------------------------------- /list_github_org_repos.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Script using PyGithub to list an organization's repos and some info about them. 4 | # 5 | # Copyright 2014 Jason Antman 6 | # Free for any use provided that patches are submitted back to me. 7 | # 8 | # The latest version of this script can be found at: 9 | # 10 | # 11 | # Requires PyGithub - `pip install PyGithub` (tested against 1.23.0) 12 | # tested with py27 and py32 13 | # 14 | # Assumes you have a GitHub API Token, either in ~/.ssh/apikeys.py or 15 | # in a GITHUB_TOKEN environment variable. 
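#
# Example invocation (the org name is illustrative):
#
#   ./list_github_org_repos.py --csv myorg > repos.csv
#
# Without --csv, one human-readable line is printed per repo instead.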
16 | #
17 | # CHANGELOG:
18 | #
19 | # * 2015-07-07 Jason Antman 
20 | #   - use argparse, add csv output option
21 | #
22 | # * 2014-02-14 Jason Antman 
23 | #   - initial script
24 | #
25 | 
26 | from github import Github
27 | import os
28 | import sys
29 | import argparse
30 | 
31 | parser = argparse.ArgumentParser()
32 | parser.add_argument('--csv', dest='csv', action='store_true', default=False,
33 |                     help='output as CSV')
34 | parser.add_argument('orgname', type=str, help='github org name')
35 | args = parser.parse_args()
36 | 
37 | TOKEN = None
38 | try:
39 |     # look for GITHUB_TOKEN defined in ~/.ssh/apikeys.py
40 |     sys.path.append(os.path.abspath(os.path.join(os.path.expanduser('~'), '.ssh')))
41 |     from apikeys import GITHUB_TOKEN
42 |     TOKEN = GITHUB_TOKEN
43 | except ImportError:
44 |     pass
45 | 
46 | if TOKEN is None:
47 |     try:
48 |         TOKEN = os.environ['GITHUB_TOKEN']
49 |     except KeyError:
50 |         sys.stderr.write("ERROR: you must either set GITHUB_TOKEN in ~/.ssh/apikeys.py or export it as an env variable.\n")
51 |         sys.exit(1)
52 | 
53 | g = Github(login_or_token=TOKEN)
54 | 
55 | if args.csv:
56 |     print("repo_name,private_or_public,fork_of,forks,url")
57 | 
58 | for repo in g.get_organization(args.orgname).get_repos():
59 |     p = 'private' if repo.private else 'public'
60 |     fork_of = ''
61 |     if repo.fork:
62 |         fork_of = '%s/%s' % (repo.parent.owner.login, repo.parent.name)
63 |     if args.csv:
64 |         print("{name},{p},{fork_of},{forks},{url}".format(
65 |             name=repo.name,
66 |             p=p,
67 |             fork_of=fork_of,
68 |             forks=repo.forks_count,
69 |             url=repo.html_url
70 |         ))
71 |     else:
72 |         f = " fork of %s" % fork_of if repo.fork else ''
73 |         fc = "; %d forks" % (repo.forks_count) if (repo.forks_count > 0) else ''
74 |         print("%s (%s%s%s) %s" % (repo.name, p, f, fc, repo.html_url))
75 | 
-------------------------------------------------------------------------------- /show_dhcp_fixed_ACKs.pl: --------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | #
3 | # show_dhcp_fixed_ACKs.pl - script to show the most recent DHCP ACKs per IP address for ISC DHCPd,
4 | # from a log file. Originally written for Vyatta routers that just show the dynamic leases.
5 | #
6 | # To use this, you need to have dhcpd logging to syslog, and your syslog server putting the log file at
7 | # /var/log/user/dhcpd (or a file path specified by the $logfile variable below).
8 | #
9 | # To accomplish this on Vyatta 6.3, run:
10 | #   set service dhcp-server global-parameters "log-facility local2;"
11 | #   set system syslog file dhcpd facility local2 level debug
12 | #   set system syslog file dhcpd archive files 5
13 | #   set system syslog file dhcpd archive size 3000
14 | #   commit
15 | #
16 | # Copyright 2011 Jason Antman All Rights Reserved.
17 | # This script is free for use by anyone anywhere, provided that you comply with the following terms:
18 | # 1) Keep this notice and copyright statement intact.
19 | # 2) Send any substantial changes, improvements or bug fixes back to me at the above address.
20 | # 3) If you include this in a product or redistribute it, you notify me, and include my name in the credits or changelog.
21 | #
22 | # The following URL always points to the newest version of this script. If you obtained it from another source, you should
23 | # check here:
24 | # 
25 | #
26 | # CHANGELOG:
27 | #   2011-12-24 jason@jasonantman.com:
28 | #     initial version of script
29 | #
30 | #
31 | 
32 | use strict;
33 | use warnings;
34 | 
35 | my $logfile = "/var/log/user/dhcpd";
36 | 
37 | my %data = ();
38 | 
39 | open DF, $logfile or die $!;
40 | while ( my $line = <DF> ) {
41 |     if ( $line !~ m/dhcpd: DHCPACK/) { next;}
42 |     $line =~ m/([A-Za-z]+ [0-9]+ [0-9]{1,2}:[0-9]{2}:[0-9]{2}) [^\/x]+ dhcpd: DHCPACK on (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) to ((?:[0-9a-f]{2}[:-]){5}[0-9a-f]{2}) via (.+)/;
43 |     #print "$1==$2==$3==$4==\n" ;
44 |     $data{"$2"}->{'mac'} = "$3";
45 |     $data{"$2"}->{'date'} = "$1";
46 |     $data{"$2"}->{'if'} = "$4";
47 |     $data{"$2"}->{'ip'} = "$2";
48 | }
49 | 
50 | printf("%-18s %-20s %-18s %-10s\n", "IP Address", "Hardware Address", "Date", "Interface");
51 | printf("%-18s %-20s %-18s %-10s\n", "----------", "----------------", "----", "---------");
52 | 
53 | # begin sort by IP address
54 | my @keys =
55 |   map substr($_, 4) =>
56 |   sort
57 |   map pack('C4' =>
58 |     /(\d+)\.(\d+)\.(\d+)\.(\d+)/)
59 |     . $_ => (keys %data);
60 | # end sort by IP address
61 | 
62 | foreach my $key (@keys) {
63 |     printf("%-18s %-20s %-18s %-10s\n", $data{$key}{'ip'}, $data{$key}{'mac'}, $data{$key}{'date'}, $data{$key}{'if'});
64 | }
65 | 
-------------------------------------------------------------------------------- /libvirt_csv.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Test of using the LibVirt Python bindings to gather
4 | information about libvirt (qemu/KVM) guests.
5 | 
6 | test on opskvmtie13
7 | 
8 | ##################
9 | Copyright 2014 Jason Antman 
10 | Free for any use provided that patches are submitted back to me.
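Example invocation (hostnames are illustrative; each must be reachable
via qemu+ssh as configured below):

    ./libvirt_csv.py kvm01 kvm02

Output is CSV, one row per domain: host,name,ID,state,UUID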
11 | 
12 | The latest version of this script can be found at:
13 | 
14 | 
15 | CHANGELOG:
16 | - initial script
17 | """
18 | 
19 | import libvirt
20 | import sys
21 | 
22 | if len(sys.argv) > 1:
23 |     hostname = sys.argv[1]
24 | else:
25 |     print("USAGE: libvirt_csv.py <...>")
26 |     sys.exit(1)
27 | 
28 | DOM_STATES = {
29 |     libvirt.VIR_DOMAIN_NOSTATE: 'no state',
30 |     libvirt.VIR_DOMAIN_RUNNING: 'running',
31 |     libvirt.VIR_DOMAIN_BLOCKED: 'blocked on resource',
32 |     libvirt.VIR_DOMAIN_PAUSED: 'paused by user',
33 |     libvirt.VIR_DOMAIN_SHUTDOWN: 'being shut down',
34 |     libvirt.VIR_DOMAIN_SHUTOFF: 'shut off',
35 |     libvirt.VIR_DOMAIN_CRASHED: 'crashed',
36 |     libvirt.VIR_DOMAIN_PMSUSPENDED: 'suspended by guest power mgmt',
37 | }
38 | 
39 | # bitwise or of all possible flags to virConnectListAllDomains
40 | ALL_OPTS = 16383
41 | 
42 | def bool(a):
43 |     if a == 0:
44 |         return False
45 |     return True
46 | 
47 | def get_domains(conn):
48 |     """
49 |     Takes a libvirt connection object,
50 |     returns a list of all domains, each element
51 |     being a dict with items "name", "ID", "UUID",
52 |     """
53 |     domains = conn.listAllDomains(ALL_OPTS)
54 |     ret = []
55 |     for d in domains:
56 |         foo = {}
57 |         foo['name'] = d.name()
58 |         foo['ID'] = d.ID()
59 |         foo['UUID'] = d.UUIDString().upper()
60 |         [state, maxmem, mem, ncpu, cputime] = d.info()
61 |         foo['state'] = DOM_STATES.get(state, state)
62 |         ret.append(foo)
63 |     return ret
64 | 
65 | hosts = sys.argv
66 | hosts.pop(0)
67 | 
68 | print("host,name,ID,state,UUID")
69 | 
70 | for h in hosts:
71 |     uri = "qemu+ssh://%s/system" % h
72 |     #print("Using hostname: %s (URI: %s)" % (h, uri))
73 | 
74 |     try:
75 |         conn = libvirt.openReadOnly(uri)
76 |     except libvirt.libvirtError as e:
77 |         print("ERROR connecting to %s: %s" % (uri, e.message))
78 |         continue
79 | 
80 |     # some code examples imply that older versions
81 |     # returned None instead of raising an exception
82 |     if conn is None:
83 |         print("ERROR connecting to %s: openReadOnly returned None" % uri)
84 |         continue
85 | 
86 |     doms = get_domains(conn)
87 |     for d in doms:
88 |         print("{host},{name},{ID},{state},{UUID}".format(host=h, name=d['name'], ID=d['ID'], UUID=d['UUID'], state=d['state']))
89 | 
-------------------------------------------------------------------------------- /wiki-to-deckjs.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | """
3 | simple, awful script to change markdown-like (very restricted markup set) markup to deck.js-ready html
4 | 
5 | ##################
6 | Copyright 2014 Jason Antman 
7 | Free for any use provided that patches are submitted back to me.
8 | 
9 | The latest version of this script can be found at:
10 | https://github.com/jantman/misc-scripts/blob/master/wiki-to-deckjs.py
11 | 
12 | CHANGELOG:
13 | - initial version
14 | """
15 | 
16 | import sys
17 | import re
18 | 
19 | ol_re = re.compile(r"^\d+\.\s(.*)")
20 | 
21 | in_slide = False
22 | in_ul = False
23 | in_2ul = False
24 | in_ol = False
25 | for line in sys.stdin:
26 |     if line.strip() == "":
27 |         if in_slide:
28 |             if in_2ul:
29 |                 print "\t\t</ul>"
30 |                 in_2ul = False
31 |             if in_ul:
32 |                 print "\t</ul>"
33 |                 in_ul = False
34 |             if in_ol:
35 |                 print "\t</ol>"
36 |                 in_ol = False
37 |             print '</section>'
38 |             in_slide = False
39 |         continue
40 |     else:
41 |         if not in_slide:
42 |             in_slide = True
43 |             print '<section class="slide">'
44 | 
45 |     if in_2ul and not line.startswith("** "):
46 |         print "\t\t</ul>"
47 |         in_2ul = False
48 |     if in_ul and not line.startswith("* ") and not line.startswith("** ") and not in_2ul:
49 |         print "\t</ul>"
50 |         in_ul = False
51 |     if in_ol and not ol_re.match(line):
52 |         print "\t</ol>"
53 |         in_ol = False
54 | 
55 |     if not in_slide:
56 |         continue
57 | 
58 |     if line.startswith("# "):
59 |         line = line[2:].strip()
60 |         print "\t<h1>%s</h1>" % line
61 |     elif line.startswith("## "):
62 |         line = line[2:].strip()
63 |         print "\t<h2>%s</h2>" % line
64 |     elif line.startswith("* "):
65 |         if not in_ul:
66 |             print "\t<ul>"
67 |             in_ul = True
68 |         line = line[2:].strip()
69 |         print "\t\t<li>%s</li>" % line
70 |     elif line.startswith("** "):
71 |         if not in_2ul:
72 |             print "\t\t<ul>"
73 |             in_2ul = True
74 |         line = line[3:].strip()
75 |         print "\t\t\t<li>%s</li>" % line
76 |     elif ol_re.match(line):
77 |         m = ol_re.match(line)
78 |         if not in_ol:
79 |             print "\t<ol>"
80 |             in_ol = True
81 |         print "\t\t<li>%s</li>" % m.group(1)
82 |     else:
83 |         #sys.stderr.write("UNKNOWN LINE: %s\n" % line)
84 |         print "\t<p>%s</p>" % line.strip()
85 | 
86 | if in_2ul:
87 |     print "\t\t</ul>"
88 |     in_2ul = False
89 | if in_ul:
90 |     print "\t</ul>"
91 |     in_ul = False
92 | 
93 | print '</section>'
94 | # done
95 | 
-------------------------------------------------------------------------------- /increment_zone_serial: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Michael Vallaly (Mar '10) Ver 2.5
4 | # from: 
5 | # 
6 | # Modified by Jason Antman 
7 | 
8 | # This script updates/increments the bind zone file serial number
9 | 
10 | SED_BIN="/bin/sed"
11 | AWK_BIN="/usr/bin/awk"
12 | DATE_BIN="/bin/date"
13 | BC_BIN="/usr/bin/bc"
14 | GREP_BIN="/bin/grep"
15 | 
16 | ###########################################################################################
17 | 
18 | # Get the serial number from a bind zonefile
19 | get_serial () {
20 | 
21 |     local target=$1
22 | 
23 |     # Grab the SOA DNS Zone Header
24 |     SOA_HEADER=`cat $target |$SED_BIN 's/;.*$//' |$AWK_BIN '/^[A-Za-z0-9\-\.]*[\t \.\@]+IN[\t ]+SOA[\t ]+[A-Za-z0-9\-\. \t]+\(/,/\)/' |tr -d '\n' |awk '{print $7}'`
25 | 
26 |     # Get the currently set serial number
27 |     echo "$SOA_HEADER" |tr -t '[:lower:]' '[:upper:]' |$GREP_BIN -E -e '[[:digit:]]{10}' -e '%SERIAL%'
28 | 
29 | }
30 | 
31 | # Check for required binaries
32 | for req_bin in $AWK_BIN $SED_BIN $DATE_BIN $BC_BIN $GREP_BIN; do
33 |     if [ ! -x "$req_bin" ]; then
34 |         echo "Can't execute ${req_bin}! Aborting.."
35 |         exit 1
36 |     fi
37 | done
38 | 
39 | # Check for needed commandline arguments
40 | usage="$0 <zonefile>"
41 | if [ $# -le 0 ]; then
42 |     echo "$usage"
43 |     exit 1
44 | else
45 |     FILE=`echo $1`
46 | fi
47 | 
48 | if [ ! -e "${FILE}" ]; then
49 |     echo "ERROR: file $FILE does not exist, exiting."
50 |     exit 1
51 | fi
52 | 
53 | # Generate todays base serial number
54 | DATE_NOW=`$DATE_BIN +%Y%m%d`
55 | 
56 | # Current serial
57 | OLD_SERIAL=`get_serial ${FILE}`
58 | 
59 | # Check that we got something back for a serial
60 | if [ "${OLD_SERIAL}" == "" ]; then
61 |     echo "Cannot find zone file ${FILE} serial number.. Exiting!"
62 |     exit 1
63 | fi
64 | 
65 | # Split up the serial
66 | OLD_DATE=`echo $OLD_SERIAL |cut -c 1-8`
67 | OLD_REV=`echo $OLD_SERIAL |cut -c 9-10`
68 | 
69 | if [ "${OLD_REV}x" == "x" ]; then
70 |     OLD_REV=01
71 | fi
72 | 
73 | # If the current date number is greater just use it
74 | if [ "$(echo "$DATE_NOW > $OLD_DATE" |$BC_BIN)" -eq "1" ]; then
75 |     NEW_SERIAL="${DATE_NOW}01"
76 | 
77 | # If we have the same date number increment the revision number
78 | elif [ "$(echo "$DATE_NOW == $OLD_DATE" |$BC_BIN)" -eq "1" ] ; then
79 | 
80 |     # Increment the revision number
81 |     if [ "$(echo "$OLD_REV <= 98" |$BC_BIN)" -eq "1" ]; then
82 |         NEW_REV=`printf "%02d\n" "$(echo "$OLD_REV + 1" |$BC_BIN)"`
83 |         NEW_SERIAL="${DATE_NOW}${NEW_REV}"
84 |     else
85 |         echo "Too many revisions for today; wait until tomorrow.. Exiting!"
86 |         exit 1
87 |     fi
88 | 
89 | else
90 |     echo "Current zonefile serial is in the future.. Time Skew? Exiting!"
91 |     exit 1
92 | fi
93 | 
94 | # Replace the serial # and install the new zonefile
95 | $SED_BIN -e "s/${OLD_SERIAL}/${NEW_SERIAL}/" -i $FILE
96 | replace_status=$?
-------------------------------------------------------------------------------- /bigipcookie.pl: --------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | 
3 | #
4 | # Perl script to de/encode F5 BigIp persistence cookies.
5 | #
6 | # The latest version of this script can always be obtained from:
7 | # 
8 | #
9 | # Update information and description can be found at:
10 | # 
11 | #
12 | # Copyright 2012 Jason Antman .
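#
# Worked example (illustrative address): for 10.1.1.100:8080 the IP encodes as
# 10 + 1*256 + 1*65536 + 100*16777216 = 1677787402 and the port 8080 (0x1f90)
# byte-swaps to 0x901f = 36895, so:
#
#   $ bigipcookie.pl 10.1.1.100:8080
#   1677787402.36895.0000
#   $ bigipcookie.pl 1677787402.36895.0000
#   10.1.1.100:8080
#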
13 | 
14 | #########################################################################################
15 | #
16 | # LICENSE: AGPLv3
17 | #
18 | # This program is free software: you can redistribute it and/or modify
19 | # it under the terms of the GNU Affero General Public License as published by
20 | # the Free Software Foundation, either version 3 of the License, or
21 | # (at your option) any later version.
22 | #
23 | # This program is distributed in the hope that it will be useful,
24 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
25 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 | # GNU Affero General Public License for more details.
27 | #
28 | # You should have received a copy of the GNU Affero General Public License
29 | # along with this program. If not, see <http://www.gnu.org/licenses/>.
30 | #
31 | # If you make any modifications/fixes/feature additions, it would be greatly appreciated
32 | # if you send them back to me at the above email address.
33 | #
34 | #########################################################################################
35 | #
36 | # CREDITS:
37 | # - F5 itself for the formula: 
38 | # - Tyler Krpata 
39 | # for the Perl one-liner that this logic is based on.
40 | #
41 | # Changelog:
42 | #
43 | # 2012-02-02 Jason Antman :
44 | # - initial version
45 | #
46 | 
47 | use strict;
48 | use warnings;
49 | 
50 | if ( $#ARGV < 0 ) {
51 |     print "USAGE: bigipcookie.pl <ip:port or encoded cookie value>\n";
52 |     exit 1;
53 | }
54 | 
55 | if ($ARGV[0] =~ m/^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):(\d+)$/) {
56 |     my $ipEnc = $1 + ($2*256) + ($3 * (256**2)) + ($4 * (256**3));
57 |     my $portEnc = hex(join "", reverse ((sprintf "%04x", $5) =~ /../g));
58 |     print "$ipEnc.$portEnc.0000\n";
59 | }
60 | elsif ($ARGV[0] =~ m/^(\d+)\.(\d+)\.0000$/){
61 |     # decode a cookie value
62 |     my $ipEnc = $1;
63 |     my $portEnc = $2;
64 |     my $ip = join ".", map {hex} reverse ((sprintf "%08x", split /\./, $ipEnc) =~ /../g);
65 |     my $portDec = hex(join "", reverse ((sprintf "%04x", $portEnc) =~ /../g));
66 |     print "$ip:$portDec\n";
67 | }
68 | else {
69 |     print "USAGE: bigipcookie.pl <ip:port or encoded cookie value>\n";
70 |     exit 1;
71 | }
72 | 
-------------------------------------------------------------------------------- /asg_instances.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Script to list instances in an ASG
4 | 
5 | Copyright 2014 Jason Antman 
6 | Free for any use provided that patches are submitted back to me.
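Example invocation (the ASG name is illustrative):

    ./asg_instances.py my-app-asg

One line is printed per instance: instance ID, AZ, health status, lifecycle
state, then its private (and, if any, public) IP and DNS name.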
7 | 
8 | CHANGELOG:
9 | 2016-06-07 Jason Antman :
10 | - initial version of script
11 | """
12 | 
13 | import sys
14 | import argparse
15 | import boto3
16 | import logging
17 | 
18 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
19 | logging.basicConfig(level=logging.WARN, format=FORMAT)
20 | logger = logging.getLogger(__name__)
21 | 
22 | 
23 | class ASGInstances:
24 | 
25 |     def __init__(self):
26 |         self.autoscale = boto3.client('autoscaling')
27 |         self.ec2 = boto3.resource('ec2')
28 | 
29 |     def run(self, asg_name):
30 |         instances = self.get_instances(asg_name)
31 |         for i in instances:
32 |             self.show_instance(i)
33 | 
34 |     def show_instance(self, asg_dict):
35 |         inst = self.ec2.Instance(asg_dict['InstanceId'])
36 |         pub_info = ''
37 |         if inst.public_ip_address is not None:
38 |             pub_info = ' %s (%s)' % (
39 |                 inst.public_ip_address, inst.public_dns_name
40 |             )
41 | 
42 |         print('%s (%s; %s; %s) %s (%s)%s' % (
43 |             asg_dict['InstanceId'],
44 |             asg_dict['AvailabilityZone'],
45 |             asg_dict['HealthStatus'],
46 |             asg_dict['LifecycleState'],
47 |             inst.private_ip_address,
48 |             inst.private_dns_name,
49 |             pub_info
50 |         ))
51 | 
52 |     def get_instances(self, asg_name):
53 |         res = self.autoscale.describe_auto_scaling_groups(
54 |             AutoScalingGroupNames=[asg_name]
55 |         )
56 |         if 'AutoScalingGroups' not in res or len(res['AutoScalingGroups']) < 1:
57 |             raise SystemExit("Error: ASG %s not found." % asg_name)
58 |         asg = res['AutoScalingGroups'][0]
59 |         print('Found ASG %s (%s)' % (asg_name, asg['AutoScalingGroupARN']))
60 |         return asg['Instances']
61 | 
62 | 
63 | def parse_args(argv):
64 |     """
65 |     parse arguments/options
66 | 
67 |     this uses the new argparse module instead of optparse
68 |     see: 
69 |     """
70 |     p = argparse.ArgumentParser(description='List the instances in an AutoScaling Group.')
71 |     p.add_argument('-v', '--verbose', dest='verbose', action='count', default=0,
72 |                    help='verbose output. specify twice for debug-level output.')
73 |     p.add_argument('ASG_NAME', action='store', type=str,
74 |                    help='ASG name')
75 |     args = p.parse_args(argv)
76 | 
77 |     return args
78 | 
79 | if __name__ == "__main__":
80 |     args = parse_args(sys.argv[1:])
81 |     if args.verbose > 1:
82 |         logger.setLevel(logging.DEBUG)
83 |     elif args.verbose > 0:
84 |         logger.setLevel(logging.INFO)
85 |     script = ASGInstances()
86 |     script.run(args.ASG_NAME)
87 | 
-------------------------------------------------------------------------------- /git_repo_diff.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Python script to tell what branches differ between two git repos,
4 | both by existence and head commit.
5 | 
6 | Requires GitPython>=0.3.2.RC1
7 | 
8 | ##################
9 | Copyright 2014 Jason Antman 
10 | Free for any use provided that patches are submitted back to me.
11 | 
12 | The latest version of this script can be found at:
13 | 
14 | 
15 | CHANGELOG:
16 | - initial script
17 | """
18 | 
19 | import os
20 | import sys
21 | 
22 | import git
23 | 
24 | def main():
25 |     if len(sys.argv) < 3:
26 |         print("USAGE: git_repo_diff.py path1 path2")
27 |         sys.exit(2)
28 | 
29 |     repos = {sys.argv[1]: {'other': sys.argv[2]}, sys.argv[2]: {'other': sys.argv[1]}}
30 |     for p in repos:
31 |         if not os.path.exists(p) or not os.path.isdir(p):
32 |             print("ERROR: %s is not a directory or does not exist." % p)
33 |             sys.exit(1)
34 |         d = os.path.join(p, '.git')
35 |         if not os.path.exists(d) or not os.path.isdir(d):
36 |             print("ERROR: %s does not appear to be a git clone" % p)
37 |             sys.exit(1)
38 |         o = git.Repo(p)
39 |         if o.bare:
40 |             print("ERROR: repo in %s is bare." % p)
41 |             sys.exit(1)
42 |         repos[p]['obj'] = o
43 |         # find the branches and their commits
44 |         origin = o.remotes.origin
45 |         origin.fetch()
46 |         refs = {}
47 |         for b in origin.refs:
48 |             foo = {}
49 |             foo['hexsha'] = b.commit.hexsha
50 |             foo['author'] = b.commit.author
51 |             foo['date'] = b.commit.authored_date
52 |             foo['message'] = b.commit.message
53 |             refs[b] = foo
54 |         repos[p]['refs'] = refs
55 | 
56 |     # now compare them
57 |     reported = []
58 |     for p in repos:
59 |         other = repos[p]['other']
60 |         for ref in repos[p]['refs']:
61 |             if ref not in repos[other]['refs']:
62 |                 print("ref %s in %s but not %s" % (ref, p, other))
63 |             else:
64 |                 if repos[p]['refs'][ref]['hexsha'] != repos[other]['refs'][ref]['hexsha']:
65 |                     if ref in reported:
66 |                         continue
67 |                     print("ref %s differs between repos:" % ref)
68 |                     print("\t%s: sha=%s author=%s date=%s" % (p, repos[p]['refs'][ref]['hexsha'],
69 |                                                               repos[p]['refs'][ref]['author'],
70 |                                                               repos[p]['refs'][ref]['date']))
71 |                     print("\t%s: sha=%s author=%s date=%s" % (other, repos[other]['refs'][ref]['hexsha'],
72 |                                                               repos[other]['refs'][ref]['author'],
73 |                                                               repos[other]['refs'][ref]['date']))
74 |                     reported.append(ref)
75 | 
76 | if __name__ == "__main__":
77 |     main()
78 | 
-------------------------------------------------------------------------------- /sync_git_clones.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 | #
3 | # WARNING - WARNING - WARNING this is alpha code, and probably buggy. be very careful about using it.
4 | #
5 | # I *wanted* to use Python and GitPython for this, but due to
6 | # https://github.com/gitpython-developers/GitPython/issues/28
7 | # GitPython dies on any repo with the refs/pull fetch ref.
8 | #
9 | # Script to sync all local git clones in a list of paths with
10 | # origin (and upstream, if configured). If present, uses
11 | # github_clone_setup.py to setup upstream branches for any
12 | # GitHub forks, and set refs to check out pull requests from
13 | # origin and upstream.
14 | #
15 | # Copyright 2014 Jason Antman 
16 | # Free for any use provided that patches are submitted back to me.
17 | #
18 | # The canonical version of this script lives at:
19 | # https://github.com/jantman/misc-scripts/blob/master/sync_git_clones.sh
20 | #
21 | # Changelog:
22 | #
23 | # 2014-04-27 jantman (Jason Antman) 
24 | # - initial version
25 | #
26 | #####################################################################
27 | 
28 | # Configuration:
29 | source sync_git_clones.conf || { echo "ERROR: could not read config file: sync_git_clones.conf"; exit 1; }
30 | 
31 | if (( $DO_GITHUB_SETUP == 1 )); then
32 |     [[ -x $PYTHON_BIN ]] || { echo "ERROR: DO_GITHUB_SETUP==1 but PYTHON_BIN ${PYTHON_BIN} not found." ; exit 1; }
33 |     [[ -e $GITHUB_CLONE_SETUP ]] || { echo "ERROR: DO_GITHUB_SETUP==1 but GITHUB_CLONE_SETUP ${GITHUB_CLONE_SETUP} not found." ; exit 1; }
34 | fi
35 | 
36 | #####################################################################
37 | 
38 | if (( $REQUIRE_SSH_AGENT == 1 )); then
39 |     if [[ -z "$SSH_AGENT_PID" ]]
40 |     then
41 |         # ssh agent isn't running
42 |         exit 1
43 |     fi
44 |     # make sure the agent process is actually alive
45 |     kill -0 $SSH_AGENT_PID &>/dev/null || exit 1
46 | fi
47 | 
48 | # make sure we can get to vcs
49 | $($REQUIRE_COMMAND) || exit 1
50 | 
51 | for dir in $GIT_DIRS ; do
52 |     echo $dir
53 |     for i in $(find ${dir} -maxdepth 1 -type d) ; do
54 |         if [[ -d $i/.git ]] ; then
55 |             pushd $i
56 |             if (( $DO_GITHUB_SETUP == 1 )); then
57 |                 grep -iq "github.com" "${i}/.git/config" && $PYTHON_BIN $GITHUB_CLONE_SETUP -d $i
58 |             fi
59 |             grep -iq 'remote "upstream"' "${i}/.git/config" && git fetch upstream
60 |             git fetch || echo "ERROR fetching $i"
61 |             branch_name=$(git symbolic-ref -q HEAD)
62 |             branch_name=${branch_name##refs/heads/}
63 |             branch_name=${branch_name:-HEAD}
64 |             # TODO: stash any changes if dirty
65 |             if (( $PULL_MASTER == 1 )); then
66 |                 if [[ $branch_name != "master" ]] ; then
67 |                     git checkout master
68 |                     git pull
69 |                     git checkout $branch_name
70 |                     if (( $SYNC_PUSH_MASTER == 1 )) ; then
71 |                         grep -iq 'remote "upstream"' "${i}/.git/config" && git merge upstream/master && git push origin master
72 |                     fi
73 |                 fi
74 |             fi
75 |             git pull
76 |             # TODO: pop if stashed
77 |             popd
78 |         fi
79 |     done
80 | done
81 | 
-------------------------------------------------------------------------------- /linodeDnsToCsv.php: --------------------------------------------------------------------------------
1 | <?php
2 | /**
3 |  * linodeDnsToCsv.php
4 |  *
5 |  * Pull all domains and resource records from the Linode DNS API and output them as CSV.
6 |  *
7 |  * Uses the Services_Linode PEAR package by Kerem Durmus (krmdrms), many thanks to him for releasing this.
8 | *
9 | * INSTALLATION (as per krmdrms README):
10 | * pear install Net_URL2-0.3.1
11 | * pear install HTTP_Request2-0.5.2
12 | * pear channel-discover pear.keremdurmus.com
13 | * pear install krmdrms/Services_Linode
14 | *
15 | * Also requires php-openssl / php5-openssl
16 | *
17 | * USAGE: php linodeDnsToCsv.php
18 | *
19 | * Copyright 2011 Jason Antman , all rights reserved.
20 | * This script is free for use by anyone anywhere, provided that you comply with the following terms:
21 | * 1) Keep this notice and copyright statement intact.
22 | * 2) Send any substantial changes, improvements or bug fixes back to me at the above address.
23 | * 3) If you include this in a product or redistribute it, you notify me, and include my name in the credits or changelog.
24 | *
25 | * The following URL always points to the newest version of this script.
If you obtained it from another source, you should 26 | * check here: 27 | * 28 | * 29 | * CHANGELOG: 30 | * 2011-12-17 Jason Antman : 31 | * merged into my svn repo 32 | * 2011-09-12 Jason Antman : 33 | * initial version of script 34 | * 35 | */ 36 | 37 | require_once("/var/www/linode_apikey.php"); // PHP file containing: define("API_KEY_LINODE", "myApiKeyHere"); 38 | require_once('Services/Linode.php'); 39 | 40 | // get list of all domains 41 | $domains = array(); // DOMAINID => domain.tld 42 | try { 43 | $linode = new Services_Linode(API_KEY_LINODE); 44 | $result = $linode->domain_list(); 45 | 46 | foreach($result['DATA'] as $domain) 47 | { 48 | $domains[$domain['DOMAINID']] = $domain["DOMAIN"]; 49 | } 50 | } 51 | catch (Services_Linode_Exception $e) 52 | { 53 | echo $e->getMessage(); 54 | } 55 | 56 | $records = array(); // array of resource records 57 | $linode->batching = true; 58 | foreach($domains as $id => $name) 59 | { 60 | $linode->domain_resource_list(array('DomainID' => $id)); 61 | } 62 | 63 | try { 64 | $result = $linode->batchFlush(); 65 | 66 | foreach($result as $batchPart) 67 | { 68 | foreach($batchPart['DATA'] as $rrec) 69 | { 70 | if(! isset($records[$rrec['DOMAINID']])){ $records[$rrec['DOMAINID']] = array();} 71 | $records[$rrec['DOMAINID']][$rrec['RESOURCEID']] = array('name' => $rrec['NAME'], 'type' => $rrec['TYPE'], 'target' => $rrec['TARGET']); 72 | } 73 | } 74 | } 75 | catch (Services_Linode_Exception $e) 76 | { 77 | echo $e->getMessage(); 78 | } 79 | 80 | echo '"recid","domain","name","type","target"'."\n"; 81 | foreach($domains as $id => $name) 82 | { 83 | foreach($records[$id] as $recid => $arr) 84 | { 85 | echo '"'.$recid.'","'.$name.'","'.$arr['name'].'","'.$arr['type'].'","'.$arr['target']."\"\n"; 86 | } 87 | } 88 | 89 | 90 | ?> 91 | -------------------------------------------------------------------------------- /jenkins_plugins_to_puppet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Python script using python-jenkins (https://pypi.python.org/pypi/python-jenkins) 4 | to query Jenkins for all installed plugins, and generate a block of Puppet code 5 | for the [puppet-jenkins](https://github.com/jenkinsci/puppet-jenkins) module. 6 | 7 | requirements: 8 | - pip install python-jenkins>=0.4.9 9 | 10 | ################## 11 | NOTE: This script is deprecated in favor of: 12 | 13 | ################## 14 | 15 | Copyright 2015 Jason Antman 16 | Free for any use provided that patches are submitted back to me. 
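Example of the generated output (plugin names and versions illustrative):

    jenkins::plugin {'ant': version => '1.2'}
    jenkins::plugin {'git': version => '2.4.0'}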
17 | 18 | The latest version of this script can be found at: 19 | https://github.com/jantman/misc-scripts/blob/master/jenkins_plugins_to_puppet.py 20 | 21 | CHANGELOG: 22 | 23 | 2015-11-16 jantman: 24 | - initial script 25 | """ 26 | 27 | import sys 28 | import argparse 29 | import logging 30 | import re 31 | import time 32 | import os 33 | import datetime 34 | import getpass 35 | from io import StringIO 36 | 37 | from jenkins import Jenkins, JenkinsException, NotFoundException 38 | 39 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 40 | logging.basicConfig(level=logging.ERROR, format=FORMAT) 41 | logger = logging.getLogger(__name__) 42 | 43 | def main(jenkins_url, user=None, password=None): 44 | if user is not None: 45 | logger.debug("Connecting to Jenkins as user %s ...", user) 46 | j = Jenkins(jenkins_url, user, password) 47 | else: 48 | logger.debug("Connecting to Jenkins anonymously...") 49 | j = Jenkins(jenkins_url) 50 | logger.debug("Connected.") 51 | p = j.get_plugins() 52 | plugins = {} 53 | namelen = 0 54 | for k, v in p.items(): 55 | plugins[k[0]] = v['version'] 56 | if len(k[0]) > namelen: 57 | namelen = len(k[0]) 58 | # format 59 | for name, ver in sorted(plugins.items()): 60 | print(" jenkins::plugin {'%s': version => '%s'}" % (name, ver)) 61 | 62 | def parse_args(argv): 63 | """ parse arguments/options """ 64 | p = argparse.ArgumentParser() 65 | 66 | p.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, 67 | help='verbose (debugging) output') 68 | p.add_argument('-u', '--user', dest='user', action='store', type=str, 69 | default=None, help='Jenkins username (optional)') 70 | p.add_argument('-p', '--password', dest='password', action='store', type=str, 71 | default=None, help='Jenkins password (optional; if -u/--user' 72 | ' is specified and this is not, you will be interactively ' 73 | 'prompted') 74 | p.add_argument('JENKINS_URL', action='store', type=str, 75 | help='Base URL to access Jenkins instance') 76 | args = p.parse_args(argv) 77 | if args.user is not None and args.password is None: 78 | args.password = getpass.getpass("Password for %s Jenkins user: " % args.user) 79 | return args 80 | 81 | 82 | if __name__ == "__main__": 83 | args = parse_args(sys.argv[1:]) 84 | 85 | if args.verbose: 86 | logger.setLevel(logging.DEBUG) 87 | else: 88 | logger.setLevel(logging.INFO) 89 | 90 | main(args.JENKINS_URL, user=args.user, password=args.password) 91 | -------------------------------------------------------------------------------- /simpleLCDproc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Simple LCDproc replacement in Python. Uses LCDd server. 5 | Shows hostname and time on first line, load avg on second, CPU usage on third, memory usage on fourth. 6 | 7 | By Jason Antman 2011. 8 | 9 | Free for all use, provided that you send any changes you make back to me. 
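Example of the four rendered 20x4 display lines (values illustrative):

    myhost    12:34:56
    Load:0.15/0.20/0.18
    CPU:12%u 3%i 85%s
    free:M:42.3% S:99.9%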
10 | 11 | This requires JingleManSweep's Python lcdproc package: 12 | developed against version 0.02 13 | 14 | The canonical source of this script is: 15 | 16 | """ 17 | 18 | import time, datetime, os, commands 19 | 20 | from lcdproc.server import Server 21 | 22 | def main(): 23 | 24 | lcd = Server("localhost", debug=False) 25 | lcd.start_session() 26 | 27 | screen1 = lcd.add_screen("Screen1") 28 | screen1.set_heartbeat("off") 29 | 30 | # hostname/time 31 | uname = os.uname()[1] 32 | uname = uname.ljust(10) 33 | text1 = uname + time.strftime("%H:%M:%S") 34 | line1 = screen1.add_string_widget("String1", text=text1, x=1, y=1) 35 | 36 | # load 37 | load = os.getloadavg() 38 | text2 = "Load %.2f/%.2f/%.2f" % (load[0], load[1], load[2]) 39 | line2 = screen1.add_string_widget("String2", text=text2, x=1, y=2) 40 | 41 | # CPU usage 42 | text3 = "CPU " 43 | usage = commands.getoutput("vmstat | tail -1 | awk '{print $15 \" \" $14 \" \" $13}'") 44 | usage = usage.split(" ") 45 | text3 = "CPU %s%%u %s%%i %s%%s" % (usage[2], usage[0], usage[1]) 46 | line3 = screen1.add_string_widget("String3", text=text3, x=1, y=3) 47 | 48 | # mem/swap 49 | mem = commands.getoutput("free | grep '^Mem:' | awk '{print $4 \" \" $2}'") 50 | mem = mem.split(" ") # 0 = free 1 = total 51 | mem = (float(mem[0]) / float(mem[1])) * 100.0 52 | swap = commands.getoutput("free | grep '^Swap:' | awk '{print $4 \" \" $2}'") 53 | swap = swap.split(" ") # 0 = free 1 = total 54 | swap = (float(swap[0]) / float(swap[1])) * 100.0 55 | text4 = "free M:%.1f S:%.1f" % (mem, swap) 56 | line4 = screen1.add_string_widget("String4", text=text4, x=1, y=4) 57 | 58 | sep = ":" 59 | 60 | while True: 61 | text1 = uname + time.strftime("%H:%M:%S") 62 | line1.set_text(text1) 63 | 64 | load = os.getloadavg() 65 | text2 = "Load" + sep + "%.2f/%.2f/%.2f" % (load[0], load[1], load[2]) 66 | line2.set_text(text2) 67 | 68 | usage = commands.getoutput("vmstat | tail -1 | awk '{print $15 \" \" $14 \" \" $13}'") 69 | usage = usage.split(" ") 70 | text3 = "CPU" + sep + "%s%%u %s%%i %s%%s" % (usage[2], usage[0], usage[1]) 71 | line3.set_text(text3) 72 | 73 | mem = commands.getoutput("free | grep '^Mem:' | awk '{print $4 \" \" $2}'") 74 | mem = mem.split(" ") # 0 = free 1 = total 75 | mem = (float(mem[0]) / float(mem[1])) * 100.0 76 | swap = commands.getoutput("free | grep '^Swap:' | awk '{print $4 \" \" $2}'") 77 | swap = swap.split(" ") # 0 = free 1 = total 78 | swap = (float(swap[0]) / float(swap[1])) * 100.0 79 | text4 = "free" + sep + "M:%.1f%% S:%.1f%%" % (mem, swap) 80 | line4.set_text(text4) 81 | 82 | if sep == ":": 83 | sep = " " 84 | else: 85 | sep = ":" 86 | 87 | time.sleep(1) 88 | 89 | 90 | # Run 91 | 92 | if __name__ == "__main__": 93 | main() 94 | -------------------------------------------------------------------------------- /ubiquiti-mac-acl/README.txt: -------------------------------------------------------------------------------- 1 | /** 2 | * README.txt for updateAPconfigs.php.inc 3 | * 4 | * Copyright 2010, 2011 Jason Antman, All Rights Reserved. 5 | * 6 | * These functions may be used for any purpose provided that: 7 | * 1) This copyright notice is kept intact. 8 | * 2) You send back to me any changes/modifications/bugfixes that you make. 9 | * 3) This may not be included in commercial software which is sold for a fee, unless you discuss this with me first. 
10 | *
11 | * @author Jason Antman 
12 | *
13 | * Announcement post: 
14 | *
15 | * The canonical current version of this script lives at:
16 | * $HeadURL$
17 | * $LastChangedRevision$
18 | */
19 | 
20 | updateAPconfigs.php.inc is a set of PHP functions for manipulating the MAC ACL
21 | on Ubiquiti AirOSv2 APs, specifically for programmatically changing the MAC
22 | list, and managing the full range of 32 possible MAC addresses.
23 | 
24 | This is by no means a complete system. I've included some example code
25 | (wirelessTools.php) from my setup, which is a self-service PHP page for my
26 | users, allowing them to maintain a list of their MAC addresses, and have new
27 | additions pushed out to the APs.
28 | 
29 | For my purpose (an organization's private WLAN), I store the list of MACs in a
30 | MySQL table (schema in 'wireless.sql'), where each user has a username, an ID
31 | number, an LDAP DN, and MAC addresses, each of which has an alias that is just
32 | used for display purposes (e.g. Laptop, iPhone, etc.). The tool is pretty
33 | specific to my client, since auth is handled via LDAP and MACs are stored in
34 | MySQL. But I'm including the code as a starting point for you.
35 | 
36 | ========WARNINGS=======
37 | 1) There's little to no error checking in these functions. As you can see, I
38 | do most of the checking that files exist, are writable, etc. in the
39 | wirelessTools.php script. You should be WARNED that just calling the functions
40 | could push out an empty or bad config to your APs. I have no idea what that
41 | would do, but since I'm directly running `cfgmtd`, I assume it would be bad.
42 | 
43 | 2) The APs must reboot to reload the configuration. For my purpose, all of the
44 | people with access to the self-serve page are "trusted" users. Be careful here
45 | - when a user adds a MAC address, the AP will reload config and then
46 | restart. It would be bad to let someone just sit there clicking the button...
47 | 
48 | 3) The AP will only handle 32 MACs. If you give it an array longer than that,
49 | it will only add the first 32 to the config.
50 | 
51 | =====USAGE====
52 | Be sure to change the global variables at the top of updateAPconfigs.php.inc
53 | to suit your environment. You must already have pubkey-based SSH
54 | authentication setup on the APs; this script uses SSH and SCP with pubkey
55 | auth.
56 | 
57 | Functions Provided:
58 | getUbntConfig($hostname, $filePath)
59 | Copies (scp) the config from AP ($hostname) to $filePath on the local machine
60 | putUbntConfig($hostname, $filePath)
61 | Copies (scp) the config from $filePath on the local machine to AP ($hostname)
62 | then runs (ssh) `cfgmtd` to persist the configuration and then reboots the AP
63 | makeNewConfigFile($oldPath, $newPath, $arr)
64 | reads an existing configuration file ($oldPath), changes the MAC ACL to
65 | include an array of MAC addresses ($arr), writes new config to $newPath
66 | 
-------------------------------------------------------------------------------- /aws_region_stats.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Python script to print a table with some statistics from each AWS region. Stats
4 | include number of RDS instances, EC2 instances, volumes, snapshots, VPCs,
5 | AMIs, ECS clusters, ELBs and ASGs.
6 | 
7 | Should work with Python 2.7-3.x. Requires ``boto3`` and ``terminaltables`` from PyPI.
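Example invocation and (abbreviated, illustrative) output:

    $ ./aws_region_stats.py
    Found Account ID as: 123456789012
    Checking region: ap-northeast-1
    ...
    +----------------+------+------+--------------+-----+-------+-----
    | REGION         | AMIs | ASGs | ECS Clusters | ELB | ELBv2 | ...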
8 | 9 | The latest version of this script can be found at: 10 | https://github.com/jantman/misc-scripts/blob/master/aws_region_stats.py 11 | 12 | Copyright 2018 Jason Antman 13 | Free for any use provided that patches are submitted back to me. 14 | 15 | CHANGELOG: 16 | 2018-09-07 Jason Antman : 17 | - initial version of script 18 | """ 19 | 20 | import sys 21 | 22 | try: 23 | import boto3 24 | except ImportError: 25 | sys.stderr.write("ERROR: you must 'pip install boto3'.\n") 26 | raise SystemExit(1) 27 | 28 | try: 29 | from terminaltables import AsciiTable 30 | except ImportError: 31 | sys.stderr.write("ERROR: you must 'pip install terminaltables'.\n") 32 | raise SystemExit(1) 33 | 34 | 35 | RESULT_KEYS = [ 36 | 'AMIs', 37 | 'ASGs', 38 | 'ECS Clusters', 39 | 'ELB', 40 | 'ELBv2', 41 | 'Instances', 42 | 'RDS Inst', 43 | 'Snapshots', 44 | 'VPCs', 45 | 'Volumes' 46 | ] 47 | 48 | 49 | def get_region_names(): 50 | ec2 = boto3.client('ec2', region_name='us-east-1') 51 | return sorted([x['RegionName'] for x in ec2.describe_regions()['Regions']]) 52 | 53 | 54 | def get_account_id(): 55 | client = boto3.client('sts') 56 | cid = client.get_caller_identity() 57 | return cid['Account'] 58 | 59 | 60 | def do_region(rname, acct_id): 61 | print('Checking region: %s' % rname) 62 | res = {x: 0 for x in RESULT_KEYS} 63 | # RDS 64 | rds = boto3.client('rds', region_name=rname) 65 | for r in rds.get_paginator('describe_db_instances').paginate(): 66 | res['RDS Inst'] += len(r['DBInstances']) 67 | # ELBv2 68 | elbv2 = boto3.client('elbv2', region_name=rname) 69 | for r in elbv2.get_paginator('describe_load_balancers').paginate(): 70 | res['ELBv2'] += len(r['LoadBalancers']) 71 | # ELB 72 | elb = boto3.client('elb', region_name=rname) 73 | for r in elb.get_paginator('describe_load_balancers').paginate(): 74 | res['ELB'] += len(r['LoadBalancerDescriptions']) 75 | # ECS 76 | ecs = boto3.client('ecs', region_name=rname) 77 | for r in ecs.get_paginator('list_clusters').paginate(): 78 | res['ECS Clusters'] += len(r['clusterArns']) 79 | # EC2 80 | ec2 = boto3.resource('ec2', region_name=rname) 81 | res['VPCs'] = len(list(ec2.vpcs.all())) 82 | res['Volumes'] = len(list(ec2.volumes.all())) 83 | res['Snapshots'] = len(list(ec2.snapshots.filter(OwnerIds=[acct_id]))) 84 | res['Instances'] = len(list(ec2.instances.all())) 85 | res['AMIs'] = len(list(ec2.images.filter(Owners=['self']))) 86 | # AutoScaling 87 | autoscaling = boto3.client('autoscaling', region_name=rname) 88 | for r in autoscaling.get_paginator('describe_auto_scaling_groups').paginate(): 89 | res['ASGs'] += len(r['AutoScalingGroups']) 90 | return res 91 | 92 | headers = [k for k in RESULT_KEYS] 93 | headers.insert(0, 'REGION') 94 | tdata = [headers] 95 | acct_id = get_account_id() 96 | print('Found Account ID as: %s' % acct_id) 97 | for rname in get_region_names(): 98 | res = do_region(rname, acct_id) 99 | tmp = [rname] 100 | for k in RESULT_KEYS: 101 | tmp.append(res[k]) 102 | tdata.append(tmp) 103 | table = AsciiTable(tdata) 104 | print(table.table) 105 | -------------------------------------------------------------------------------- /find_dupes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | CHANGELOG 4 | --------- 5 | 6 | 2017-05-12 Jason Antman : 7 | - initial version of script 8 | """ 9 | 10 | import sys 11 | import argparse 12 | import logging 13 | from collections import defaultdict 14 | 15 | FORMAT = "[%(asctime)s %(levelname)s] %(message)s" 16 | 
logging.basicConfig(level=logging.WARNING, format=FORMAT)
17 | logger = logging.getLogger()
18 | 
19 | 
20 | class DupeFinder(object):
21 |     """ might as well use a class. It'll make things easier later. """
22 | 
23 |     def __init__(self, sums_file):
24 |         """ init method, run at class creation """
25 |         self.sums_file = sums_file
26 | 
27 |     def run(self):
28 |         """ do stuff here """
29 |         sums = self._read_sums()
30 |         count = 0
31 |         for md5sum, paths in sums.items():
32 |             if len(paths) < 2:
33 |                 continue
34 |             count += 1
35 |             print('# %s' % md5sum)
36 |             for p in paths:
37 |                 print(p)
38 |         logger.info('Found %d sets of duplicate files', count)
39 | 
40 |     def _read_sums(self):
41 |         sums = defaultdict(list)
42 |         count = 0
43 |         logger.debug('Reading SUMS_FILE: %s', self.sums_file)
44 |         with open(self.sums_file, 'r') as fh:
45 |             for line in fh.readlines():
46 |                 line = line.strip()
47 |                 if line == '':
48 |                     continue
49 |                 count += 1
50 |                 parts = line.split()
51 |                 sums[parts[0]].append(parts[1])
52 |         logger.debug('Read %d file sums', count)
53 |         return sums
54 | 
55 | def parse_args(argv):
56 |     epil = "Generate SUMS_FILE using a command like 'md5sum /foo/* > sums' " \
57 |            "or: '" \
58 |            "find $(pwd) -type f | while read -r fname; do md5sum \"$fname\" " \
59 |            ">> sums; done'"
60 |     p = argparse.ArgumentParser(description='Find dupe files based on md5sum',
61 |                                 epilog=epil)
62 |     p.add_argument('SUMS_FILE', action='store', type=str,
63 |                    help="md5sum output file - must be manually generated using"
64 |                    " the 'md5sum' command")
65 |     p.add_argument('-v', '--verbose', dest='verbose', action='count', default=0,
66 |                    help='verbose output. specify twice for debug-level output.')
67 |     args = p.parse_args(argv)
68 |     return args
69 | 
70 | def set_log_info():
71 |     """set logger level to INFO"""
72 |     set_log_level_format(logging.INFO,
73 |                          '%(asctime)s %(levelname)s:%(name)s:%(message)s')
74 | 
75 | 
76 | def set_log_debug():
77 |     """set logger level to DEBUG, and debug-level output format"""
78 |     set_log_level_format(
79 |         logging.DEBUG,
80 |         "%(asctime)s [%(levelname)s %(filename)s:%(lineno)s - "
81 |         "%(name)s.%(funcName)s() ] %(message)s"
82 |     )
83 | 
84 | 
85 | def set_log_level_format(level, format):
86 |     """
87 |     Set logger level and format.
88 | 
89 |     :param level: logging level; see the :py:mod:`logging` constants.
90 |     :type level: int
91 |     :param format: logging formatter format string
92 |     :type format: str
93 |     """
94 |     formatter = logging.Formatter(fmt=format)
95 |     logger.handlers[0].setFormatter(formatter)
96 |     logger.setLevel(level)
97 | 
98 | if __name__ == "__main__":
99 |     args = parse_args(sys.argv[1:])
100 | 
101 |     # set logging level
102 |     if args.verbose > 1:
103 |         set_log_debug()
104 |     elif args.verbose == 1:
105 |         set_log_info()
106 | 
107 |     script = DupeFinder(args.SUMS_FILE)
108 |     script.run()
109 | 
-------------------------------------------------------------------------------- /jenkins_list_plugins.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Python script using python-jenkins (https://pypi.python.org/pypi/python-jenkins)
4 | to query Jenkins for all installed plugins, and list them. Optionally output as
5 | a block of Puppet code for the
6 | [puppet-jenkins](https://github.com/jenkinsci/puppet-jenkins) module.
7 | 
8 | requirements:
9 | - pip install python-jenkins>=0.4.9
10 | 
11 | ##################
12 | 
13 | Copyright 2015, 2016 Jason Antman 
14 | Free for any use provided that patches are submitted back to me.
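Example output (plugin names and versions illustrative); with -P/--puppet
the same data is emitted as jenkins::plugin Puppet resources instead:

    ant:1.2
    git:2.4.0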
15 | 16 | The latest version of this script can be found at: 17 | https://github.com/jantman/misc-scripts/blob/master/jenkins_list_plugins.py 18 | 19 | CHANGELOG: 20 | 21 | 2016-10-17 jantman: 22 | - migrate from jenkins_plugins_to_puppet.py; default to just listing them, 23 | optional Puppet output. 24 | 25 | 2015-11-16 jantman: 26 | - initial script 27 | """ 28 | 29 | import sys 30 | import argparse 31 | import logging 32 | import re 33 | import time 34 | import os 35 | import datetime 36 | import getpass 37 | from io import StringIO 38 | 39 | from jenkins import Jenkins, JenkinsException, NotFoundException 40 | 41 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 42 | logging.basicConfig(level=logging.ERROR, format=FORMAT) 43 | logger = logging.getLogger(__name__) 44 | 45 | def get_plugins_dict(jenkins_url, puppet=False, user=None, password=None): 46 | if user is not None: 47 | logger.debug("Connecting to Jenkins (<%s>) as user %s ...", 48 | jenkins_url, user) 49 | j = Jenkins(jenkins_url, user, password) 50 | else: 51 | logger.debug("Connecting to Jenkins (<%s>) anonymously...", jenkins_url) 52 | j = Jenkins(jenkins_url) 53 | logger.debug("Connected.") 54 | p = j.get_plugins() 55 | plugins = {} 56 | for k, v in p.items(): 57 | plugins[k[0]] = v['version'] 58 | return plugins 59 | 60 | def main(jenkins_url, puppet=False, user=None, password=None): 61 | plugins = get_plugins_dict(jenkins_url, puppet=puppet, user=user, password=password) 62 | for name, ver in sorted(plugins.items()): 63 | if puppet: 64 | print(" jenkins::plugin {'%s': version => '%s'}" % (name, ver)) 65 | else: 66 | print('%s:%s' % (name, ver)) 67 | 68 | def parse_args(argv): 69 | """ parse arguments/options """ 70 | p = argparse.ArgumentParser() 71 | 72 | p.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, 73 | help='verbose (debugging) output') 74 | p.add_argument('-u', '--user', dest='user', action='store', type=str, 75 | default=None, help='Jenkins username (optional)') 76 | p.add_argument('-p', '--password', dest='password', action='store', type=str, 77 | default=None, help='Jenkins password (optional; if -u/--user' 78 | ' is specified and this is not, you will be interactively ' 79 | 'prompted') 80 | p.add_argument('-P', '--puppet', dest='puppet', action='store_true', 81 | default=False, help='output as Puppet code') 82 | p.add_argument('JENKINS_URL', action='store', type=str, 83 | help='Base URL to access Jenkins instance') 84 | args = p.parse_args(argv) 85 | if args.user is not None and args.password is None: 86 | args.password = getpass.getpass("Password for %s Jenkins user: " % args.user) 87 | return args 88 | 89 | 90 | if __name__ == "__main__": 91 | args = parse_args(sys.argv[1:]) 92 | 93 | if args.verbose: 94 | logger.setLevel(logging.DEBUG) 95 | else: 96 | logger.setLevel(logging.INFO) 97 | 98 | main(args.JENKINS_URL, puppet=args.puppet, user=args.user, password=args.password) 99 | -------------------------------------------------------------------------------- /route53_ddns_update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ################################################# 3 | # Bash script to update Route53 dynamic DNS 4 | # 5 | # Assumes you already have cli53 () 6 | # installed and properly configured. 7 | # 8 | # Export ROUTE53_ZONE and ROUTE53_RR_NAME environment variables 9 | # as your route53 Zone Id and Record Set name, respectively. 
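#
# Example (values are illustrative):
#
#   export ROUTE53_ZONE=example.com
#   export ROUTE53_RR_NAME=home.example.com
#   ./route53_ddns_update.sh
#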
10 | # 11 | # 12 | ################################################# 13 | # Copyright 2014 Jason Antman 14 | # Free for any use provided that patches are submitted back to me. 15 | # 16 | # The latest version of this script can be found at: 17 | # 18 | # 19 | # CHANGELOG: 20 | # 21 | # * 2018-06-27 Jason Antman 22 | # - validate that new WAN_IP is not empty and looks like an IP address 23 | # - cli53 replace instead of delete and create 24 | # 25 | # * 2018-06-13 Jason Antman 26 | # - update for new Go-based cli53 27 | # 28 | # * 2017-09-29 Jason Antman 29 | # - switch from whatismyip.jasonantman.com to api.ipify.org 30 | # 31 | # * 2015-06-15 Jason Antman 32 | # - get config from env vars instead of hard-coded 33 | # - get OLD_WAN_IP from cli53 instead of local cache file 34 | # - drop TTL to 60s 35 | # 36 | # * 2015-05-31 Jason Antman 37 | # - update for new whatismyip.jasonantman.com 38 | # 39 | # * 2015-05-20 Jason Antman 40 | # - fix bug in WAN_IP 41 | # - add logging 42 | # 43 | # * 2014-12-26 Jason Antman 44 | # - initial script 45 | # 46 | ################################################# 47 | 48 | LOG_TAG=$(basename "$0") 49 | 50 | log () { 51 | logger -p local7.info -t "$LOG_TAG" "$1" 52 | } 53 | 54 | log_err () { 55 | logger -p local7.notice -t "$LOG_TAG" "$1" 56 | } 57 | 58 | if [ -z ${ROUTE53_ZONE+x} ] 59 | then 60 | >&2 echo "${LOG_TAG} - ERROR - ROUTE53_ZONE environment variable not set" 61 | log_err "ERROR - ROUTE53_ZONE environment variable not set" 62 | exit 1 63 | fi 64 | 65 | if [ -z ${ROUTE53_RR_NAME+x} ] 66 | then 67 | >&2 echo "${LOG_TAG} - ERROR - ROUTE53_RR_NAME environment variable not set" 68 | log_err "ERROR - ROUTE53_RR_NAME environment variable not set" 69 | exit 1 70 | fi 71 | 72 | log "Running with ZONE=${ROUTE53_ZONE} RR=${ROUTE53_RR_NAME}" 73 | 74 | # get WAN IP and trim whitespace 75 | WAN_IP=$(wget -q -O - --no-check-certificate https://api.ipify.org/ | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//') 76 | if [[ -z "$WAN_IP" ]]; then 77 | log_err "ERROR - WAN IP from https://api.ipify.org/ is empty! Failing." 78 | exit 1 79 | fi 80 | if [[ ! "$WAN_IP" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then 81 | log_err "ERROR - WAN IP from https://api.ipify.org/ does not look like an IP address: $WAN_IP" 82 | exit 1 83 | fi 84 | log "Found current WAN IP as ${WAN_IP}" 85 | 86 | # Get your old WAN IP 87 | OLD_WAN_IP=$(cli53 export $ROUTE53_ZONE | grep "^${ROUTE53_RR_NAME}[[:space:]]" | awk '{print $5}') 88 | log "Found old WAN IP as ${OLD_WAN_IP}" 89 | 90 | # See if the new IP is the same as the old IP. 91 | if [ "$WAN_IP" = "$OLD_WAN_IP" ]; then 92 | echo "IP Unchanged" 93 | log "IP is unchanged - exiting" 94 | # Don't do anything if the IP didn't change 95 | else 96 | # The IP changed 97 | log "Replacing current A record" 98 | set -o pipefail 99 | cli53 rrcreate --replace $ROUTE53_ZONE "$ROUTE53_RR_NAME 60 A $WAN_IP" 2>&1 | logger -p local7.info -t "${LOG_TAG}-rrcreate" && echo $WAN_IP > /var/CURRENT_WAN_IP.txt || logger -p local7.notice -t $LOG_TAG "cli53 rrcreate FAILED." 100 | fi 101 | -------------------------------------------------------------------------------- /syslogDatesToArray.php: -------------------------------------------------------------------------------- 1 | #!/usr/bin/php 2 | 17 | * 18 | * LICENSE: 19 | * This script may be used, modified and distributed provided that the following terms are met: 20 | * 1) This notice, license, copyright statement, and all URLs, names, email addresses, etc. are left intact. 
21 | * 2) Any and all substantive changes/feature additions/bug fixes are sent back to me for inclusion in the next version. 22 | * 3) The below changelog is kept intact and up-to-date. 23 | ******************************************************************************************************************** 24 | * The canonical source of the latest version of this script is: 25 | * https://github.com/jantman/misc-scripts/blob/master/syslogDatesToArray.php 26 | ******************************************************************************************************************** 27 | * CHANGELOG: 28 | * 2014-12-26 jason@jasonantman.com: 29 | * - update with GitHub URL for script 30 | * 2011-09-28 jason@jasonantman.com: 31 | * - first version of script 32 | ******************************************************************************************************************** 33 | */ 34 | 35 | $fh = fopen("php://stdin", 'r') or die("Unable to open STDIN for reading."); 36 | 37 | $dates = array(); // array to hold our syslog data, ts => count, where TS is integer timestamp of the bottom of the minute, and count is number of lines. 38 | $failed = 0; 39 | $count = 0; 40 | while(($line = fgets($fh)) !== false) 41 | { 42 | // read a line... 43 | $foo = dateFromSyslog($line); 44 | $count++; 45 | if(! $foo){ $failed++; continue;} 46 | $date = strtotime(date("Y-m-d H:i", $foo).":00"); 47 | if(! isset($dates[$date])){ $dates[$date] = 1;} else { $dates[$date]++;} 48 | } 49 | 50 | // sum up all the values in the array as a count 51 | $sum = array_sum($dates); 52 | 53 | if( ($sum + $failed) != $count){ die("ERROR: Sum appears wrong.\n");} 54 | 55 | echo serialize(array('dates' => $dates, 'failed' => $failed, 'totalLines' => $count, 'sum' => $sum)); 56 | 57 | /** 58 | * Parse the date out of a syslog line, return as an integer timestamp. 59 | * 60 | * Returns boolean False on error. Expects date to be at the beginning of the syslog line, 61 | * in standard (traditional) syslog date format, that is, matching: 62 | * "Mmm [D]D HH:MM:SS" (e.g. "Sep 28 01:23:45"), optionally preceded by a day-of-week name. 63 | * @param string $line the full syslog line 64 | * @return integer or False on error 65 | */ 66 | function dateFromSyslog($line) 67 | { 68 | // "((Sun|Mon|Tue|Wed|Thu|Fri|Sat) )?" is to cope with software that writes logs directly, starting with day of week (FreeRADIUS radiusd.log) 69 | $ptn = "/^((Sun|Mon|Tue|Wed|Thu|Fri|Sat) )?(\S{3}\s{1,2}\d{1,2} \d{2}:\d{2}:\d{2})/"; 70 | $matches = array(); 71 | $foo = preg_match($ptn, $line, $matches); 72 | if(! $foo){ return false;} 73 | return strtotime($matches[0]); 74 | } 75 | 76 | ?> 77 | -------------------------------------------------------------------------------- /find_outdated_puppets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | """ 3 | Script to look at a Puppet Dashboard unhidden-nodes.csv and extract the latest report time for each node. 4 | Optionally, list only nodes whose last report time is BEFORE a given date string. 5 | 6 | ABANDONED - no longer used or maintained. 7 | 8 | """ 9 | import csv 10 | from datetime import datetime 11 | import optparse 12 | import os 13 | from dateutil import parser as dateutil_parser 14 | import sys 15 | import pytz 16 | 17 | # for column output formatting, max len of a node name 18 | NODE_NAME_MAXLEN = 40 19 | 20 | def read_node_csv(fname): 21 | """ 22 | Read in the nodes CSV and return a hash of node name to latest line for each node. 
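    The CSV is expected to start with a header row; the column whose header
    is 'time' is used as the report timestamp, and column 0 is treated as
    the node name.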
23 | """ 24 | 25 | # our final hash of nodename => row 26 | nodes = {} 27 | 28 | # read in the CSV file 29 | f = open(fname, 'rb') 30 | cr = csv.reader(f, delimiter=',') 31 | 32 | # iterate the rows. first row is headers. 33 | rownum = 0 34 | time_col = 0 35 | for row in cr: 36 | # Save header row. 37 | if rownum == 0: 38 | header = row 39 | colnum = 0 40 | for col in row: 41 | if row[colnum] == "time": 42 | time_col = colnum 43 | colnum += 1 44 | else: 45 | date_str = row[time_col] 46 | # sometimes the date is missing. in that case, it should be OLD... 47 | if date_str == '': 48 | date_str = '1970-01-01 01:00 UTC' 49 | try: 50 | date_dt = dateutil_parser.parse(date_str) 51 | except: 52 | print "Error converting time string '%s' for node '%s'" % (date_str, row[0]) 53 | date_dt = datetime(1970, 1, 1) 54 | temp = {} 55 | colnum = 0 56 | for col in row: 57 | temp[header[colnum]] = col 58 | colnum += 1 59 | temp['date'] = date_dt 60 | if row[0] not in nodes: 61 | nodes[row[0]] = temp 62 | if date_dt > nodes[row[0]]['date']: 63 | nodes[row[0]] = temp 64 | rownum += 1 65 | f.close() 66 | return nodes 67 | 68 | if __name__ == '__main__': 69 | # if the program is executed directly parse the command line options 70 | # and read the text to paste from stdin 71 | 72 | parser = optparse.OptionParser() 73 | parser.add_option('-f', '--file', dest='fname', default='unhidden-nodes.csv', 74 | help='path to unhidden-nodes.csv file (default unhidden-nodes.csv)') 75 | 76 | parser.add_option('-b', '--before', dest='before_str', default='', 77 | help='show only nodes last reported before this date string (optional)') 78 | 79 | parser.add_option('-c', '--csv', dest='csv', default=False, action='store_true', 80 | help='output as CSV rather than columns (optional)') 81 | 82 | options, args = parser.parse_args() 83 | 84 | if options.before_str: 85 | try: 86 | before_dt = pytz.UTC.localize(dateutil_parser.parse(options.before_str)) 87 | except: 88 | print "Error converting time string '%s' to datetime - failing." % options.before_str 89 | sys.exit(2) 90 | 91 | if not os.path.exists(options.fname): 92 | print "ERROR: Unable to open file %s" % options.fname 93 | 94 | # parse CSV and get back a dict 95 | nodes = read_node_csv(options.fname) 96 | 97 | # get a list of key/value pairs 98 | node_dates = {} 99 | for node in nodes: 100 | node_dates[node] = nodes[node]['date'] 101 | 102 | # sort by date 103 | nodes_sorted = sorted(node_dates.iteritems(), key=lambda (k,v): (v,k), reverse=True) 104 | 105 | # format string for column output 106 | fmt_str = "{:%ds}{:^16s}{:>20s}" % NODE_NAME_MAXLEN 107 | 108 | # output 109 | for node, date in nodes_sorted: 110 | if options.before_str and date >= before_dt: 111 | continue 112 | if options.csv: 113 | print "{:s},{:s},{:s}".format(node, date.strftime("%Y-%m-%d %H:%M"), nodes[node]['status']) 114 | else: 115 | print fmt_str.format(node, date.strftime("%Y-%m-%d %H:%M"), nodes[node]['status']) 116 | -------------------------------------------------------------------------------- /pacman_compare.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | pacman_compare.py 4 | 5 | Compare packages in two files containing ``pacman -Q`` output. Ignores 6 | versions. 7 | 8 | If you have ideas for improvements, or want the latest version, it's at: 9 | 10 | 11 | Copyright 2015 Jason Antman 12 | Free for any use provided that patches are submitted back to me. 
13 | 14 | CHANGELOG: 15 | 2015-09-23 Jason Antman : 16 | - add option to include package description in output 17 | 2015-09-22 Jason Antman : 18 | - initial version of script 19 | """ 20 | 21 | import sys 22 | import os 23 | import argparse 24 | import subprocess 25 | import re 26 | 27 | 28 | class PacmanCompare: 29 | """compare packages in two ``pacman -Q`` outputs, ignoring versions""" 30 | 31 | desc_re = re.compile(r'^Description\s*: (.+)$') 32 | 33 | def read_packages(self, fpath): 34 | packages = [] 35 | with open(fpath, 'r') as fh: 36 | for line in fh.readlines(): 37 | if line.strip() == '': 38 | continue 39 | packages.append(line.split(' ')[0]) 40 | return sorted(packages) 41 | 42 | def get_package_desc(self, pkgname): 43 | """get the package description string""" 44 | try: 45 | p = subprocess.check_output(['pacman', '-Qi', pkgname], universal_newlines=True) 46 | except subprocess.CalledProcessError: 47 | return '' 48 | for line in p.split('\n'): 49 | m = self.desc_re.match(line) 50 | if m is None: 51 | continue 52 | return m.group(1) 53 | return '' 54 | 55 | def run(self, fileA, fileB, description=False): 56 | """ compare the two package lists and print the differences """ 57 | if not os.path.exists(fileA): 58 | raise SystemExit("ERROR: FILEA_PATH %s does not exist" % fileA) 59 | if not os.path.exists(fileB): 60 | raise SystemExit("ERROR: FILEB_PATH %s does not exist" % fileB) 61 | info = [ 62 | { 63 | 'packages': self.read_packages(fileA), 64 | 'fname': os.path.basename(fileA), 65 | 'only': [], 66 | }, 67 | { 68 | 'packages': self.read_packages(fileB), 69 | 'fname': os.path.basename(fileB), 70 | 'only': [], 71 | }, 72 | ] 73 | if info[0]['packages'] == info[1]['packages']: 74 | print("Package lists identical (ignoring versions)") 75 | raise SystemExit(0) 76 | for pkname in info[0]['packages']: 77 | if pkname not in info[1]['packages']: 78 | info[0]['only'].append(pkname) 79 | for pkname in info[1]['packages']: 80 | if pkname not in info[0]['packages']: 81 | info[1]['only'].append(pkname) 82 | for idx in [0, 1]: 83 | print("%d packages only in %s of %d total" % ( 84 | len(info[idx]['only']), 85 | info[idx]['fname'], 86 | len(info[idx]['packages'])) 87 | ) 88 | if len(info[idx]['only']) > 0: 89 | for x in info[idx]['only']: 90 | if not description: 91 | print(x) 92 | else: 93 | print("%s : %s" % (x, self.get_package_desc(x))) 94 | print("") 95 | 96 | def parse_args(argv): 97 | """ 98 | parse arguments/options 99 | 100 | this uses the new argparse module instead of optparse 101 | see: 102 | """ 103 | p = argparse.ArgumentParser(description='Compare packages in two pacman -Q output files, ignoring versions.') 104 | p.add_argument('-D', '--description', dest='description', action='store_true', 105 | default=False, 106 | help='include package description in output') 107 | p.add_argument('FILEA_PATH', type=str) 108 | p.add_argument('FILEB_PATH', type=str) 109 | args = p.parse_args(argv) 110 | return args 111 | 112 | if __name__ == "__main__": 113 | args = parse_args(sys.argv[1:]) 114 | script = PacmanCompare() 115 | script.run(args.FILEA_PATH, args.FILEB_PATH, description=args.description) 116 | -------------------------------------------------------------------------------- /trello_board_to_text.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | trello_board_to_text.py - Print to STDOUT the names of the columns on the 4 | specified trello board and the titles of the cards in each column, in order. 
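Example usage -- a sketch assuming hypothetical credentials already exported:

    export TRELLO_APP_KEY=<your app key>
    export TRELLO_TOKEN=<your token>
    trello_board_to_text.py 'My Board Name'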
5 | 6 | If you have ideas for improvements, or want the latest version, it's at: 7 | 8 | 9 | Copyright 2023 Jason Antman 10 | Free for any use provided that patches are submitted back to me. 11 | 12 | REQUIREMENTS: 13 | trello package 14 | """ 15 | 16 | import sys 17 | import os 18 | import logging 19 | import argparse 20 | from collections import defaultdict 21 | 22 | try: 23 | from trello import TrelloApi 24 | except ImportError: 25 | sys.stderr.write( 26 | "trello package not found; please 'pip install trello'\n" 27 | ) 28 | raise SystemExit(1) 29 | 30 | logger = logging.getLogger(__name__) 31 | 32 | # suppress logging from requests, used internally by TrelloApi 33 | requests_log = logging.getLogger("requests") 34 | requests_log.setLevel(logging.WARNING) 35 | requests_log.propagate = True 36 | 37 | 38 | class TrelloBoardToText: 39 | 40 | board_get_kwargs = { 41 | 'cards': 'visible', 42 | 'card_fields': 'all', 43 | 'lists': 'all', 44 | 'list_fields': 'all', 45 | 'labels': 'all', 46 | } 47 | 48 | def __init__(self): 49 | """get credentials and connect""" 50 | app_key = os.environ.get('TRELLO_APP_KEY', None) 51 | if app_key is None: 52 | raise SystemExit('Please export your Trello application key as the ' 53 | 'TRELLO_APP_KEY environment variable.') 54 | token = os.environ.get('TRELLO_TOKEN', None) 55 | if token is None: 56 | raise SystemExit('Please export your Trello API token as the ' 57 | 'TRELLO_TOKEN environment variable.') 58 | logger.debug('Initializing TrelloApi') 59 | self.trello = TrelloApi(app_key, token) 60 | logger.debug('TrelloApi initialized') 61 | 62 | def run(self, board_name): 63 | """main entry point""" 64 | board_id = self.get_board_id(board_name) 65 | board = self.trello.boards.get(board_id, **self.board_get_kwargs) 66 | cards_per_col = defaultdict(list) 67 | for card in board['cards']: 68 | if card['closed']: 69 | continue 70 | cards_per_col[card['idList']].append(card) 71 | for column in sorted(board['lists'], key=lambda x: x['pos']): 72 | if column['closed']: 73 | continue 74 | print(f"\n* {column['name']}\n") 75 | for card in sorted( 76 | cards_per_col[column['id']], key=lambda x: x['pos'] 77 | ): 78 | print(card['name']) 79 | 80 | def get_board_id(self, board_name): 81 | """get the ID for a board name""" 82 | logger.debug('Getting boards') 83 | boards = self.trello.members.get_board('me') 84 | logger.debug('Found %d boards', len(boards)) 85 | for b in boards: 86 | if b['name'] == board_name: 87 | logger.info('Board "%s" id=%s', board_name, b['id']) 88 | return b['id'] 89 | raise SystemExit('Error: could not find board with name "%s"' 90 | % board_name) 91 | 92 | 93 | def parse_args(argv): 94 | """ 95 | parse arguments/options 96 | """ 97 | p = argparse.ArgumentParser( 98 | description='Script to print all columns and cards on a Trello board ' 99 | 'to STDOUT' 100 | ) 101 | p.add_argument('-v', '--verbose', dest='verbose', action='count', default=0, 102 | help='verbose output. 
specify twice for debug-level output.') 103 | p.add_argument('BOARD_NAME', type=str, help='board name') 104 | return p.parse_args(argv) 105 | 106 | 107 | if __name__ == "__main__": 108 | FORMAT = ("[%(levelname)s %(filename)s:%(lineno)s - " 109 | "%(funcName)20s() ] %(message)s") 110 | logging.basicConfig(level=logging.INFO, format=FORMAT) 111 | logger = logging.getLogger() 112 | args = parse_args(sys.argv[1:]) 113 | if args.verbose > 1: 114 | logger.setLevel(logging.DEBUG) 115 | elif args.verbose > 0: 116 | logger.setLevel(logging.INFO) 117 | TrelloBoardToText().run(args.BOARD_NAME) 118 | -------------------------------------------------------------------------------- /avery5160_code128_numeric.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script to generate PDFs for numerically sequenced Code128 barcode labels on 3 | Avery 5160 / 18260 label sheets. 4 | 5 | When printing, force rasterization and fit to full page. 6 | 7 | Tested with Python 3.10.5 and the following dependencies: 8 | 9 | autopep8==1.7.0 10 | Pillow==9.2.0 11 | pycodestyle==2.9.1 12 | reportlab==3.6.11 13 | toml==0.10.2 14 | 15 | """ 16 | import sys 17 | import argparse 18 | from typing import Tuple, Optional 19 | 20 | from reportlab.pdfgen import canvas 21 | from reportlab.lib.pagesizes import LETTER 22 | from reportlab.lib.units import inch, mm 23 | from reportlab.graphics.barcode import code128 24 | 25 | 26 | class Avery5160Code128Numeric(): 27 | 28 | def __init__( 29 | self, output_filename: str, padlen: int = 6, 30 | prefix: Optional[str] = None 31 | ): 32 | self.output_filename: str = output_filename 33 | self.padlen: int = padlen 34 | self.prefix: Optional[str] = prefix 35 | self.canv: canvas.Canvas = canvas.Canvas(self.output_filename, pagesize=LETTER) 36 | self.canv.setPageCompression(0) 37 | self.bottom_margin: float = 0.5 * inch 38 | self.left_margin: float = (3/16) * inch 39 | self.row_spacing: float = 0.0 40 | self.col_spacing: float = (1/8) * inch 41 | self.cell_height: float = 1 * inch 42 | self.cell_width: float = (2 + (5/8)) * inch 43 | self.num_cols: int = 3 44 | self.num_rows: int = 10 45 | 46 | def _xy_for_cell(self, colnum: int, rownum: int) -> Tuple[float, float]: 47 | """ 48 | Given a cell number, return the (x, y) coords for its bottom left corner 49 | 50 | :param rownum: zero-base row number, left to right 51 | :param colnum: zero-base colnum number, top down. NOTE this is the opposite of reportlab PDF coordinates, which have zero on the bottom of the page. 
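        Worked example with the default geometry: the top-left cell
        (colnum=0, rownum=0) gives x = (3/16) inch (the left margin) and
        y = 0.5 inch + (10 - 1) * 1 inch = 9.5 inches, i.e. the bottom
        edge of the top label row, measured from the bottom of the page.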
52 | """ 53 | x: float = self.left_margin + (rownum * (self.cell_width + self.col_spacing)) 54 | y: float = self.bottom_margin + ((self.num_rows - (colnum + 1)) * (self.cell_height + self.row_spacing)) 55 | return x, y 56 | 57 | def _generate_cell(self, col: int, row: int, value: int): 58 | s: str = f'{value:0{self.padlen}}' 59 | if self.prefix is not None: 60 | s = f'{self.prefix}{value:0{self.padlen}}' 61 | x: float 62 | y: float 63 | x, y = self._xy_for_cell(row, col) 64 | # temporary outline for label cell, for testing 65 | # self.canv.rect(x, y, self.cell_width, self.cell_height, stroke=1, fill=0) 66 | self.canv.drawCentredString( 67 | x + (self.cell_width / 2), 68 | y + (0.20 * inch), 69 | s 70 | ) 71 | barcode = code128.Code128(s, barHeight=10*mm, barWidth=1) 72 | barcode.drawOn( 73 | self.canv, 74 | x + ((self.cell_width - barcode.width) / 2), 75 | y + 34 76 | ) 77 | 78 | def run(self, start_num: int, num_pages: int): 79 | count: int = start_num 80 | for pagenum in range(0, num_pages): 81 | for colnum in range(0, 3): 82 | for rownum in range(0, 10): 83 | self._generate_cell(colnum, rownum, count) 84 | count += 1 85 | self.canv.showPage() 86 | self.canv.save() 87 | 88 | 89 | if __name__ == "__main__": 90 | p = argparse.ArgumentParser( 91 | description='Script to generate PDFs for numerically sequenced Code128 ' 92 | 'barcode labels on Avery 5160 / 18260 label sheets.' 93 | ) 94 | p.add_argument( 95 | '-p', '--pad-length', action='store', type=int, dest='padlen', 96 | default=6, help='Padding length for barcode; default: 6' 97 | ) 98 | p.add_argument( 99 | '-P', '--prefix', action='store', type=str, dest='prefix', 100 | default=None, help='Prefix to add to barcodes; default None' 101 | ) 102 | p.add_argument( 103 | 'START_NUMBER', action='store', type=int, 104 | help='Starting number for first barcode label' 105 | ) 106 | p.add_argument( 107 | 'NUM_PAGES', action='store', type=int, 108 | help='Number of pages of barcodes to generate' 109 | ) 110 | args = p.parse_args(sys.argv[1:]) 111 | Avery5160Code128Numeric( 112 | 'labels.pdf', padlen=args.padlen, prefix=args.prefix 113 | ).run( 114 | start_num=args.START_NUMBER, num_pages=args.NUM_PAGES 115 | ) 116 | -------------------------------------------------------------------------------- /wordpress_daily_post.php: -------------------------------------------------------------------------------- 1 | #!/usr/bin/php 2 | 10 | * 11 | * Licensed under the Apache License, Version 2.0 12 | * 13 | * use it anywhere you want, however you want, provided that this header is left intact, 14 | * and that if redistributed, credit is given to me. 15 | * 16 | * It is strongly requested, but not technically required, that any changes/improvements 17 | * be emailed to the above address. 
18 | * 19 | * The latest version of this script will always be available at: 20 | * https://github.com/jantman/misc-scripts/blob/master/wordpress_daily_post.php 21 | * 22 | * Changelog: 23 | * 2014-12-26 Jason Antman 24 | * - GitHub script URL 25 | * 2012-09-03 Jason Antman - 1.0 26 | * - first version 27 | */ 28 | 29 | # BEGIN CONFIGURATION 30 | define('WP_LOAD_LOC', '/var/www/vhosts/blog.jasonantman.com/wp-load.php'); // Configure this to the full path of your Wordpress wp-load.php 31 | define('SOURCE_POST_STATUS', 'pending'); // post status to publish 32 | # END CONFIGURATION 33 | 34 | $VERBOSE = false; 35 | $DRY_RUN = false; 36 | array_shift($argv); 37 | while(count($argv) > 0) { 38 | if(isset($argv[0]) && $argv[0] == "-d" || $argv[0] == "--dry-run"){ 39 | $DRY_RUN = true; 40 | fwrite(STDERR, "DRY RUN ONLY - NOT ACTUALLY PUBLISHING.\n"); 41 | } 42 | if(isset($argv[0]) && $argv[0] == "-v" || $argv[0] == "--verbose"){ 43 | $VERBOSE = true; 44 | fwrite(STDERR, "WP_LOAD_LOC=".WP_LOAD_LOC."\n"); 45 | fwrite(STDERR, "SOURCE_POST_STATUS=".SOURCE_POST_STATUS."\n"); 46 | } 47 | array_shift($argv); 48 | } 49 | 50 | $_SERVER['HTTP_HOST'] = 'localhost'; // needed for wp-includes/ms-settings.php:100 51 | require_once(WP_LOAD_LOC); 52 | 53 | # check that we're running on a weekday 54 | if(date('N') >= 6) { 55 | # if($VERBOSE){ fwrite(STDERR, "today is a saturday or sunday, dieing.\n"); } 56 | # exit(1); 57 | } 58 | 59 | # find the publish date/time of the last published post 60 | $published = get_posts(array('numberposts' => 1, 'orderby' => 'post_date', 'order' => 'DESC', 'post_status' => 'publish')); 61 | $post = $published[0]; 62 | $pub_date = $post->post_date; 63 | $pub_id = $post->ID; 64 | 65 | if(strtotime($pub_date) >= (time() - 86400)) { 66 | if($VERBOSE){ fwrite(STDERR, "last post (ID $pub_id) within last day ($pub_date). Nothing to do. Exiting.\n"); } 67 | exit(0); 68 | } else { 69 | if($VERBOSE){ fwrite(STDERR, "Found last post (ID $pub_id) with post date $pub_date.\n"); } 70 | } 71 | 72 | 73 | # find the earliest post of status SOURCE_POST_STATUS, if there is one. 74 | $to_post = get_posts(array('numberposts' => 1, 'orderby' => 'post_date', 'order' => 'ASC', 'post_status' => SOURCE_POST_STATUS)); 75 | if(count($to_post) < 1) { 76 | if($VERBOSE) { fwrite(STDERR, "No posts found with status '".SOURCE_POST_STATUS."'. Nothing to do. Exiting.\n"); } 77 | exit(0); 78 | } 79 | 80 | $post = $to_post[0]; 81 | $to_pub_id = $post->ID; 82 | $to_pub_date = $post->post_date; 83 | $to_pub_title = $post->post_title; 84 | $now = time(); 85 | $new_date = date("Y-m-d H:i:s", $now); 86 | $new_date_gmt = gmdate("Y-m-d H:i:s", $now); 87 | 88 | if($VERBOSE){ fwrite(STDERR, "Post to publish: ID=$to_pub_id DATE=$to_pub_date NEW_DATE=$new_date TITLE=$to_pub_title\n"); } 89 | 90 | # actually publish it 91 | if(! $DRY_RUN){ 92 | $arr = array('ID' => $to_pub_id, 'post_status' => 'publish', 'post_date' => $new_date, 'post_date_gmt' => $new_date_gmt); 93 | $ret = wp_update_post($arr); // publish the post 94 | if($ret == 0) { 95 | fwrite(STDERR, "ERROR: Post $to_pub_id was not successfully published."); 96 | exit(1); 97 | } 98 | if($VERBOSE){ fwrite(STDERR, "Published post. 
New ID: $ret\n"); } 99 | } 100 | else { 101 | fwrite(STDERR, "Dry run only, not publishing post.\n"); 102 | } 103 | 104 | # check that the post really was published 105 | $published = get_posts(array('numberposts' => 1, 'orderby' => 'post_date', 'order' => 'DESC', 'post_status' => 'publish')); 106 | $post = $published[0]; 107 | $pub_date = $post->post_date; 108 | $pub_id = $post->ID; 109 | $pub_title = $post->post_title; 110 | $pub_guid = $post->guid; 111 | 112 | if($pub_title != $to_pub_title) { 113 | fwrite(STDERR, "ERROR: title of most recent post does not match title of what we wanted to post."); 114 | exit(1); 115 | } 116 | 117 | fwrite(STDOUT, "Published post $pub_id at $pub_date\n"); 118 | fwrite(STDOUT, "Title: $pub_title\n"); 119 | fwrite(STDOUT, "\n\n\n GUID/Link: $pub_guid\n"); 120 | fwrite(STDOUT, "\n\n".__FILE__." on ".trim(shell_exec('hostname --fqdn'))." running as ".get_current_user()."\n"); 121 | 122 | ?> 123 | -------------------------------------------------------------------------------- /dot_find_cycles.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | dot_find_cycles.py - uses Pydot and NetworkX to find cycles in a dot file directed graph. 5 | 6 | Very helpful for Puppet stuff. 7 | 8 | By Jason Antman 2012. 9 | 10 | Free for all use, provided that you send any changes you make back to me, update the changelog, and keep this comment intact. 11 | 12 | REQUIREMENTS: 13 | Python 14 | python-networkx - 15 | graphviz-python - 16 | pydot - 17 | pydotplus - 18 | 19 | To install requirements: 20 | 21 | pip install networkx graphviz pydot pydotplus 22 | 23 | Last Test Requirement Versions: 24 | 25 | decorator==4.0.10 26 | graphviz==0.5.1 27 | networkx==1.11 28 | pydot==1.2.2 29 | pydotplus==2.0.2 30 | pyparsing==2.1.9 31 | 32 | USAGE: 33 | dot_find_cycles.py /path/to/file.dot 34 | 35 | The canonical source of this script can always be found from: 36 | 37 | 38 | CHANGELOG: 39 | 2018-05-23 Nikolaus Wittenstein : 40 | - add Python 3 support 41 | 42 | 2017-04-20 Frank Kusters : 43 | - added support for stdin 44 | - add option for only showing shortest cycles 45 | 46 | 2016-09-24 Jason Antman : 47 | - update docs to clarify the below 48 | 49 | 2016-09-24 jrk07 : 50 | - add pydotplus and fix read_dot import to work with modern networkx versions 51 | 52 | 2012-03-28 Jason Antman : 53 | - initial script creation 54 | """ 55 | 56 | import sys 57 | from os import path, access, R_OK 58 | import argparse 59 | import networkx as nx 60 | from networkx.drawing.nx_pydot import read_dot 61 | 62 | 63 | def main(): 64 | parser = argparse.ArgumentParser(description="Finds cycles in dot file graphs, such as those from Puppet. " 65 | "By Jason Antman ") 66 | parser.add_argument('dotfile', metavar='DOTFILE', nargs='?', type=argparse.FileType('r'), default=sys.stdin, 67 | help="the dotfile to process. Uses standard input if argument is '-' or not present") 68 | parser.add_argument("--only-shortest", action='store_true', 69 | help="only show the shortest cycles. Example: if both A->C and A->B->C exist, only show the former. 
" 70 | "This vastly reduces the amount of output when analysing dependency issues.") 71 | parser.add_argument("--print-labels", action='store_true', 72 | help="print the node labels instead of their ids.") 73 | args = parser.parse_args() 74 | 75 | # read in the specified file, create a networkx DiGraph 76 | G = nx.DiGraph(read_dot(args.dotfile)) 77 | 78 | C = nx.simple_cycles(G) 79 | if args.only_shortest: 80 | C = remove_super_cycles(C) 81 | 82 | if args.print_labels: 83 | C = extract_node_labels(C, G) 84 | 85 | for i in C: 86 | # append the first node again so that the cycle is complete 87 | i.append(i[0]) 88 | print(" -> ".join(i)) 89 | 90 | def remove_super_cycles(cycle_list): 91 | # sorting by length makes the search easier, because shorter cycles cannot be supercycles of longer ones 92 | cycle_list = sorted(cycle_list, key=len) 93 | forward_index = 0 94 | while forward_index < len(cycle_list): 95 | backward_index = len(cycle_list) - 1 96 | while backward_index > forward_index: 97 | # when comparing two cycles, remove all elements that are not in the shortest one 98 | filtered_list = [x for x in cycle_list[backward_index] if x in cycle_list[forward_index]] 99 | # double the cycle length, to account for cycles shifted over the end of the list 100 | full_cycle = filtered_list + filtered_list 101 | # find the matching start position 102 | while full_cycle and full_cycle[0] != cycle_list[forward_index][0]: 103 | del full_cycle[0] 104 | # matching start position found, now compare the rest 105 | if cycle_list[forward_index] == full_cycle[:len(cycle_list[forward_index])]: 106 | # cycle matches, remove supercycle from end result 107 | del cycle_list[backward_index] 108 | backward_index = backward_index - 1 109 | forward_index = forward_index + 1 110 | return cycle_list 111 | 112 | def extract_node_labels(C, G): 113 | C_labels = [] 114 | for cycle in C: 115 | cycle_labels = [] 116 | for node_id in cycle: 117 | cycle_labels.append(G.nodes[node_id]['label'].replace('"','')) 118 | 119 | C_labels.append(cycle_labels) 120 | return C_labels 121 | 122 | if __name__ == "__main__": 123 | try: 124 | main() 125 | except KeyboardInterrupt: 126 | pass # eat CTRL+C so it won't show an exception 127 | -------------------------------------------------------------------------------- /twitter_find_followed_not_in_list.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Terribly simple script using python-twitter to 4 | list any users you're following but whom aren't 5 | in any of your lists. 6 | 7 | ################## 8 | 9 | Copyright 2014 Jason Antman 10 | Free for any use provided that patches are submitted back to me. 11 | 12 | The latest version of this script can be found at: 13 | https://github.com/jantman/misc-scripts/blob/master/twitter_find_followed_not_in_list.py 14 | """ 15 | 16 | import sys 17 | import optparse 18 | import logging 19 | import os 20 | import time 21 | 22 | try: 23 | import twitter 24 | except ImportError: 25 | raise SystemExit("ERROR: could not import twitter. 
Please `pip install python-twitter` and run script again.") 26 | 27 | from twitter.error import TwitterError 28 | 29 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 30 | logging.basicConfig(level=logging.ERROR, format=FORMAT) 31 | logger = logging.getLogger(__name__) 32 | 33 | 34 | class FindFollowedNotInList: 35 | 36 | credentials = {'api_key': None, 'api_secret': None, 'access_token': None, 'access_secret': None} 37 | 38 | def get_credentials(self): 39 | """ 40 | Get all credentials or error out 41 | """ 42 | cred_env_vars = {'api_key': 'TWITTER_API_KEY', 43 | 'api_secret': 'TWITTER_API_SECRET', 44 | 'access_token': 'TWITTER_ACCESS_TOKEN', 45 | 'access_secret': 'TWITTER_ACCESS_SECRET', 46 | } 47 | error = False 48 | for varname in cred_env_vars: 49 | val = os.getenv(cred_env_vars[varname], None) 50 | if val is None: 51 | error = True 52 | logger.error("Please export the '{name}' environment variable.".format(name=cred_env_vars[varname])) 53 | else: 54 | self.credentials[varname] = val 55 | if error: 56 | raise SystemExit("ERROR: incomplete credentials") 57 | return True 58 | 59 | def main(self, dry_run=False): 60 | """ do something """ 61 | self.get_credentials() 62 | self.api = twitter.Api(consumer_key=self.credentials['api_key'], 63 | consumer_secret=self.credentials['api_secret'], 64 | access_token_key=self.credentials['access_token'], 65 | access_token_secret=self.credentials['access_secret']) 66 | try: 67 | foo = self.api.VerifyCredentials() 68 | self.my_user_id = foo.id 69 | except: 70 | raise SystemExit("Invalid credentials / auth failed") 71 | # might need to pass owner_id or something 72 | list_members = [] 73 | lists = invoke_with_throttling_retries(self.api.GetLists, user_id=self.my_user_id) 74 | for l in lists: 75 | print("{id} {name}".format(id=l.id, name=l.name)) 76 | m = invoke_with_throttling_retries(self.api.GetListMembers, l.id, l.slug) 77 | for member in m: 78 | list_members.append(member.id) 79 | followed = invoke_with_throttling_retries(self.api.GetFriends) 80 | for u in followed: 81 | if u.id not in list_members: 82 | print("user {sn} not in any lists (id={id} name={name})".format( 83 | id=u.id, 84 | name=u.name.encode('utf-8'), 85 | sn=u.screen_name.encode('utf-8')) 86 | ) 87 | return True 88 | 89 | def invoke_with_throttling_retries(function_ref, *argv, **kwargs): 90 | MAX_RETRIES = 6 91 | SLEEP_BASE_SECONDS = 5 92 | 93 | retries = 0 94 | while True: 95 | try: 96 | retval = function_ref(*argv, **kwargs) 97 | return retval 98 | except TwitterError as e: 99 | if e[0]['message'] != 'Rate limit exceeded': 100 | raise e 101 | if retries == MAX_RETRIES: 102 | logger.error("Reached maximum number of retries; raising error") 103 | raise e 104 | stime = SLEEP_BASE_SECONDS * (2**retries) 105 | logger.info("Call of %s got throttled; sleeping %s seconds before " 106 | "retrying", function_ref, stime) 107 | time.sleep(stime) 108 | retries += 1 109 | 110 | def parse_args(argv): 111 | """ parse arguments/options """ 112 | p = optparse.OptionParser() 113 | 114 | p.add_option('-v', '--verbose', dest='verbose', action='count', default=0, 115 | help='verbose output. 
specify twice for debug-level output.') 116 | 117 | options, args = p.parse_args(argv) 118 | 119 | return options 120 | 121 | 122 | if __name__ == "__main__": 123 | opts = parse_args(sys.argv[1:]) 124 | 125 | if opts.verbose > 1: 126 | logger.setLevel(logging.DEBUG) 127 | elif opts.verbose > 0: 128 | logger.setLevel(logging.INFO) 129 | 130 | cls = FindFollowedNotInList() 131 | cls.main() 132 | -------------------------------------------------------------------------------- /trello_copy_checklist.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | trello_copy_checklist.py - Script to copy a checklist from one Trello card 4 | to another. 5 | 6 | If you have ideas for improvements, or want the latest version, it's at: 7 | 8 | 9 | Copyright 2018 Jason Antman 10 | Free for any use provided that patches are submitted back to me. 11 | 12 | REQUIREMENTS: 13 | `requests` (pip install requests) 14 | 15 | CHANGELOG: 16 | 2018-04-30 Jason Antman : 17 | - initial version of script 18 | """ 19 | 20 | import sys 21 | import os 22 | import logging 23 | import argparse 24 | import requests 25 | import re 26 | 27 | try: 28 | from urlparse import urlparse 29 | except ImportError: 30 | from urllib.parse import urlparse 31 | 32 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 33 | logging.basicConfig(level=logging.INFO, format=FORMAT) 34 | logger = logging.getLogger() 35 | 36 | # suppress logging from the requests package 37 | requests_log = logging.getLogger("requests") 38 | requests_log.setLevel(logging.WARNING) 39 | requests_log.propagate = True 40 | 41 | 42 | class TrelloCopyChecklist(): 43 | 44 | def __init__(self): 45 | """get credentials and connect""" 46 | self._app_key = os.environ.get('TRELLO_APP_KEY', None) 47 | if self._app_key is None: 48 | raise SystemExit('Please export your Trello application key as the ' 49 | 'TRELLO_APP_KEY environment variable.') 50 | self._token = os.environ.get('TRELLO_TOKEN', None) 51 | if self._token is None: 52 | raise SystemExit('Please export your Trello API token as the ' 53 | 'TRELLO_TOKEN environment variable.') 54 | 55 | def run(self, source_url, checklist_name, dest_url): 56 | """main entry point""" 57 | src_id = self._get_card_id_from_url(source_url) 58 | dest_id = self._get_card_id_from_url(dest_url) 59 | logger.info( 60 | 'source card ID: %s destination card ID: %s', src_id, dest_id 61 | ) 62 | checklists = self._get_card_checklists(src_id) 63 | clist = None 64 | for l in checklists: 65 | if l['name'] == checklist_name: 66 | clist = l 67 | break 68 | if clist is None: 69 | raise RuntimeError( 70 | 'ERROR: No checklist named "%s" found on card %s' % ( 71 | checklist_name, src_id 72 | ) 73 | ) 74 | logger.debug('Source checklist: %s', clist) 75 | logger.info('Found source checklist with ID: %s', clist['id']) 76 | res = requests.post( 77 | 'https://api.trello.com/1/cards/%s/checklists?name=%s&' 78 | 'idChecklistSource=%s&pos=bottom&key=%s&token=%s' % ( 79 | dest_id, checklist_name, clist['id'], self._app_key, self._token 80 | ) 81 | ) 82 | res.raise_for_status() 83 | resp = res.json() 84 | logger.info('Created checklist ID %s', resp['id']) 85 | 86 | def _get_card_checklists(self, card_id): 87 | logger.debug('GET checklists for card ID %s', card_id) 88 | res = requests.get( 89 | 'https://api.trello.com/1/cards/%s/checklists?key=%s&token=%s' % ( 90 | card_id, self._app_key, self._token 91 | ) 92 | ) 93 | res.raise_for_status() 94 | j = 
res.json() 95 | logger.debug('Response JSON: %s', j) 96 | return j 97 | 98 | def _get_card_id_from_url(self, url): 99 | parsed = urlparse(url) 100 | m = re.match(r'^/c/([^/]+)/.*', parsed.path) 101 | if not m: 102 | raise RuntimeError('ERROR: Invalid card URL: %s' % url) 103 | return m.group(1) 104 | 105 | 106 | def parse_args(argv): 107 | """ 108 | parse arguments/options 109 | """ 110 | p = argparse.ArgumentParser( 111 | description='Script to copy checklist from one Trello card to another.') 112 | p.add_argument('-v', '--verbose', dest='verbose', action='count', default=0, 113 | help='verbose output. specify twice for debug-level output.') 114 | p.add_argument('SOURCE_CARD_URL', action='store', type=str, 115 | help='URL to source Trello card') 116 | p.add_argument('CHECKLIST_NAME', action='store', type=str, 117 | help='name of checklist to copy') 118 | p.add_argument('DEST_CARD_URL', action='store', type=str, 119 | help='URL to destination Trello card') 120 | 121 | args = p.parse_args(argv) 122 | return args 123 | 124 | if __name__ == "__main__": 125 | args = parse_args(sys.argv[1:]) 126 | if args.verbose > 1: 127 | logger.setLevel(logging.DEBUG) 128 | elif args.verbose > 0: 129 | logger.setLevel(logging.INFO) 130 | script = TrelloCopyChecklist() 131 | script.run(args.SOURCE_CARD_URL, args.CHECKLIST_NAME, args.DEST_CARD_URL) 132 | -------------------------------------------------------------------------------- /dump_sphinx_objects_inventory.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Process URL for intersphinx targets and emit html or text 4 | 5 | originally retrieved from: 6 | https://gist.github.com/gmr/11359058 7 | 8 | fixed for python3 and pep8 compliance by jantman 9 | """ 10 | 11 | from sphinx.ext.intersphinx import read_inventory_v2 12 | from posixpath import join 13 | import pprint 14 | import argparse 15 | import locale 16 | import os 17 | import sys 18 | import tempfile 19 | import urllib.request as urllib2  # py3: urllib2 was replaced by urllib.request 20 | 21 | 22 | def validuri(string): 23 | return string 24 | 25 | 26 | parser = argparse.ArgumentParser(description='Process intersphinx link library') 27 | 28 | parser.add_argument('--url', type=validuri, 29 | help="URL to retrieve objects.inv from") 30 | parser.add_argument('--file', help="objects.inv format file") 31 | 32 | group = parser.add_mutually_exclusive_group(required=False) 33 | 34 | group.add_argument('--html', action='store_true', help="Output HTML") 35 | group.add_argument('--terse', action='store_true', 36 | help="Output terse text list") 37 | group.add_argument('--rst', action='store_true', 38 | help="Output ReStructuredText") 39 | group.add_argument('--rewrite', action='store_true', 40 | help="Output short form and correct form of each link.") 41 | 42 | args = parser.parse_args() 43 | 44 | 45 | def start_role(role): 46 | if args.terse: 47 | return 48 | elif args.rewrite: 49 | return 50 | elif args.rst: 51 | print(role) 52 | else: 53 | print("
<h1>Role: {}</h1>\n<hr/>\n<dl>\n".format(role)) 54 | 55 | 56 | def start_item(role, item): 57 | if args.terse: 58 | return 59 | elif args.rewrite: 60 | return 61 | elif args.rst: 62 | print("\t:{}:{}:".format(role, item)) 63 | elif args.html: 64 | print("<dt>{}:{}</dt>\n".format(role, item)) 65 | print("<dd>") 66 | print("\n") 67 | 68 | 69 | def end_item(role, item): 70 | if args.html: 71 | print("</dd>") 72 | print("<br/>\n") 73 | 74 | 75 | def print_link(role, item, domain, title): 76 | """Return the correct link form, if no title then extended form.""" 77 | domain = domain.lower() 78 | if title == '' or title == '-': 79 | linkStr = ":{}:`{} <{}:{}>`".format(role, item, domain, item) 80 | else: 81 | linkStr = ":{}:`{}:{}`".format(role, domain, item) 82 | 83 | if args.terse: 84 | print(linkStr) 85 | if args.rewrite: 86 | print(":{}:`{}:{}`".format(role, domain, item), "\t{}".format(linkStr)) 87 | elif args.rst: 88 | print("\t\t:Link:\t{}".format(linkStr)) 89 | elif args.html: 90 | print("Link:{}".format(linkStr)) 91 | 92 | 93 | def end_role(): 94 | if args.html: 95 | print("</dl>\n") 96 | print("<hr/>\n") 97 | 98 | 99 | def print_meta(role, item, domain, version, url, title): 100 | if args.terse: 101 | return 102 | elif args.rewrite: 103 | return 104 | elif args.rst: 105 | print("\t\t:Domain:\t{}".format(domain)) 106 | print("\t\t:Version:\t{}".format(version)) 107 | print("\t\t:URL:\t{}".format(url)) 108 | print("\t\t:Title:\t{}".format(title)) 109 | elif args.html: 110 | print("Domain:{}".format(domain)) 111 | print("Version:{}".format(version)) 112 | print("URL:{}".format(url)) 113 | print("Title:{}".format(title)) 114 | return 115 | 116 | 117 | def fetch_data(url, inv): 118 | f = open(inv, 'rb') 119 | line = f.readline() # burn a line 120 | invdata = read_inventory_v2(f, url or '', join) 121 | if args.html: 122 | print("<html><body>") 123 | for role in invdata: 124 | start_role(role) 125 | for item in invdata[role]: 126 | (domain, version, url, title) = invdata[role][item] 127 | start_item(role, item) 128 | print_link(role, item, domain, title) 129 | print_meta(role, item, domain, version, url, title) 130 | end_item(role, item) 131 | if args.html: 132 | print("</body></html>
\n") 133 | 134 | 135 | if __name__ == "__main__": 136 | if args.file: 137 | inv = args.file 138 | else: 139 | inv = False 140 | if args.url: 141 | url = args.url 142 | else: 143 | url = False 144 | 145 | if inv is False and url is False: 146 | raise Exception("need to specify a file or URL") 147 | 148 | if inv and url != '': 149 | fetch_data(url, inv) 150 | elif url: 151 | # fetch URL into inv 152 | if url.rfind('objects.inv') > 5: 153 | invdata = urllib2.urlopen(url) 154 | else: 155 | invdata = urllib2.urlopen(url + '/objects.inv') 156 | sys.stderr.write('URL resolved to: {}\n '.format(invdata.geturl())) 157 | f = tempfile.NamedTemporaryFile() 158 | f.write(invdata.read()) 159 | sys.stderr.write("objects.inv written to: {}\n".format(f.name)) 160 | sys.stderr.write("Using: {} as base HREF\n".format(url)) 161 | fetch_data(url, f.name) 162 | else: 163 | raise Exception("You need to specify a --URL") 164 | -------------------------------------------------------------------------------- /ubiquiti-mac-acl/updateAPconfigs.php.inc: -------------------------------------------------------------------------------- 1 | 14 | * 15 | * Announcement post: 16 | * 17 | * The canonical current version of this script lives at: 18 | * $HeadURL$ 19 | * $LastChangedRevision$ 20 | */ 21 | 22 | # 23 | # BEGIN CONFIGURATION OF GLOBALS 24 | # 25 | 26 | /** 27 | * @global string $file_path 28 | * @name $pubkey 29 | */ 30 | $file_path = "/var/lib/wwwrun/"; 31 | 32 | /** 33 | * @global string $pubkey absolute path to ssh public key for auth to the APs 34 | * @name $pubkey 35 | */ 36 | $pubkey = "/var/lib/wwwrun/.ssh/id_dsa"; 37 | 38 | /** 39 | * @global string $APusername the username for logging in to the APs 40 | * @name $APusername 41 | */ 42 | $APusername = "ubnt"; 43 | 44 | /** 45 | * @global bool $AP_DEBUG whether or not to show debugging output for communication with the APs 46 | * @name $AP_DEBUG 47 | */ 48 | $AP_DEBUG = false; 49 | 50 | # 51 | # END CONFIGURATION OF GLOBALS 52 | # 53 | 54 | /** 55 | * Gets the system.cfg from an AP and saves it as a local file 56 | * 57 | * @author jantman 58 | * 59 | * @TODO - no error checking here 60 | * 61 | * @param $hostname hostname of the AP 62 | * @param $filePath absolute path to where the local copy should be saved 63 | * 64 | * @global string $pubkey 65 | * @global string $APusername 66 | * @global bool $AP_DEBUG 67 | * 68 | * @return none 69 | */ 70 | function getUbntConfig($hostname, $filePath) 71 | { 72 | global $pubkey, $APusername, $AP_DEBUG; 73 | if(file_exists($filePath.".BAK")){ rename($filePath.".BAK", $filePath.".BAK".date("Y-M-D_H-i-s"));} 74 | if(file_exists($filePath)){ rename($filePath, $filePath.".BAK");} 75 | $cmd = "scp -i $pubkey $APusername@$hostname:/tmp/system.cfg $filePath"; 76 | if($AP_DEBUG){ echo "Command: ".$cmd."\n";} 77 | $foo = shell_exec($cmd); 78 | } 79 | 80 | /** 81 | * Put a local configuration file on the AP, run cfgmtd to load the configuration, then reboot the AP 82 | * 83 | * @author jantman 84 | * 85 | * @TODO - no error checking here 86 | * 87 | * @param $hostname hostname of the AP 88 | * @param $filePath absolute path to the local config file to upload 89 | * 90 | * @global string $pubkey 91 | * @global string $APusername 92 | * @global bool $AP_DEBUG 93 | * 94 | * @return none 95 | */ 96 | function putUbntConfig($hostname, $filePath) 97 | { 98 | global $pubkey, $APusername, $AP_DEBUG; 99 | $cmd = "scp -i $pubkey $filePath $APusername@$hostname:/tmp/system.cfg"; 100 | $foo = shell_exec($cmd); 101 | $cmd = "ssh -i $pubkey 
$APusername@$hostname 'cfgmtd -w -p /etc/'"; 102 | $foo = shell_exec($cmd); 103 | $cmd = "ssh -i $pubkey $APusername@$hostname 'reboot'"; 104 | $foo = shell_exec($cmd); 105 | } 106 | 107 | /** 108 | * Update an existing AP config file to include the specified MAC addresses, write out to a new file. 109 | * 110 | * @author jantman 111 | * 112 | * @TODO: there's little to no error checking here, even for file permissions or simple stuff like that 113 | * 114 | * @param string $oldPath the absolute path to the existing configuration file 115 | * @param string $newPath the absolute path to where the new config should be written 116 | * @param array $arr array of MAC addresses to allow, MACs should be all upper case of the form "00:00:00:00:00:00" 117 | * 118 | * @return string the full text of the new configuration that was written 119 | */ 120 | function makeNewConfigFile($oldPath, $newPath, $arr) 121 | { 122 | $fh = fopen($oldPath, "r"); 123 | $pre = ""; 124 | $post = ""; 125 | $inMAC = false; 126 | while(! feof($fh)) 127 | { 128 | $line = trim(fgets($fh)); 129 | if($line == ""){ continue;} // skip blank lines 130 | 131 | if(substr($line, 0, 18) == "wireless.1.mac_acl") 132 | { 133 | // MAC line, ignore it 134 | $inMAC = true; 135 | } 136 | elseif($inMAC == false) 137 | { 138 | $pre .= $line."\n"; 139 | } 140 | else 141 | { 142 | $post .= $line."\n"; 143 | } 144 | } 145 | 146 | $acl = ""; 147 | $count = 1; 148 | foreach($arr as $mac) 149 | { 150 | $acl .= "wireless.1.mac_acl.$count.mac=".$mac."\n"; 151 | $acl .= "wireless.1.mac_acl.$count.status=enabled\n"; 152 | $count++; 153 | } 154 | 155 | if($count < 32) 156 | { 157 | for($i = $count; $i < 33; $i++) 158 | { 159 | $acl .= "wireless.1.mac_acl.$i.mac=\n"; 160 | $acl .= "wireless.1.mac_acl.$i.status=disabled\n"; 161 | } 162 | } 163 | 164 | $acl .= "wireless.1.mac_acl.policy=allow\n"; 165 | $acl .= "wireless.1.mac_acl.status=enabled\n"; 166 | 167 | 168 | $out = $pre.$acl.$post; 169 | 170 | fclose($fh); 171 | 172 | $fh = fopen($newPath, "w"); 173 | fwrite($fh, $out); 174 | fclose($fh); 175 | return $out; 176 | } 177 | 178 | ?> 179 | -------------------------------------------------------------------------------- /watch_cloudformation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Python script to watch a CloudFormation stack's events, 4 | and exit/notify when the CF stack update or create finishes. 5 | 6 | requirements: 7 | pip install boto 8 | pip install python-pushover (optional) 9 | 10 | for pushover configuration, see the section on ~/.pushoverrc in the Configuration section: 11 | http://pythonhosted.org/python-pushover/#configuration 12 | 13 | for boto configuration, see the section on ~/.boto in the Getting Started guide: 14 | http://docs.pythonboto.org/en/latest/getting_started.html#configuring-boto-credentials 15 | 16 | ################## 17 | 18 | Copyright 2014 Jason Antman 19 | Free for any use provided that patches are submitted back to me. 
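Example usage -- a sketch with a hypothetical stack name and region:

    watch_cloudformation.py -r us-west-2 my-stack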
20 | 21 | The latest version of this script can be found at: 22 | https://github.com/jantman/misc-scripts/blob/master/watch_cloudformation.py 23 | 24 | CHANGELOG: 25 | 26 | 2014-12-12 jantman: 27 | - initial script 28 | 2014-12-14 jantman: 29 | - add better links to config docs 30 | """ 31 | 32 | import sys 33 | import optparse 34 | import logging 35 | import re 36 | import time 37 | import os 38 | import datetime 39 | 40 | import boto.cloudformation 41 | import boto.ec2 42 | 43 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 44 | logging.basicConfig(level=logging.ERROR, format=FORMAT) 45 | logger = logging.getLogger(__name__) 46 | 47 | reported_events = set() 48 | 49 | try: 50 | from pushover import init, Client, get_sounds 51 | have_pushover = True 52 | except ImportError: 53 | logger.warning("Pushover support disabled; `pip install python-pushover` to enable it") 54 | have_pushover = False 55 | 56 | def main(stack_name, region, sleeptime=10, pushover=False): 57 | global reported_events 58 | if pushover and not have_pushover: 59 | raise SystemExit("ERROR: to use pushover notifications, please `pip install python-pushover` and configure it.") 60 | 61 | cf = boto.cloudformation.connect_to_region(region) 62 | stack = cf.describe_stacks(stack_name)[0] 63 | while True: 64 | report_events(stack.describe_events()) 65 | stack.update() 66 | if 'IN_PROGRESS' not in stack.stack_status: 67 | break 68 | time.sleep(sleeptime) 69 | 70 | if 'FAILED' in stack.stack_status: 71 | logger.error("Stack Events end - {s}".format(s=stack.stack_status)) 72 | if pushover: 73 | notify_pushover(False, stack.stack_status, stack_name) 74 | raise SystemExit(1) 75 | else: 76 | logger.info("Stack Events end - {s}".format(s=stack.stack_status)) 77 | if pushover: 78 | notify_pushover(True, stack.stack_status, stack_name) 79 | raise SystemExit(0) 80 | 81 | def report_events(events): 82 | global reported_events 83 | """Gets a list of events for an AWS stack and logs the ones that have not already been logged""" 84 | events = set(map(str, events)) 85 | # if it's our first run, don't report anything already existing 86 | if len(reported_events) == 0: 87 | logger.info("Skipping {e} existing events.".format(e=len(reported_events))) 88 | reported_events = events 89 | to_report = events.difference(reported_events) 90 | for event in to_report: 91 | logger.info(event) 92 | 93 | reported_events = events.union(reported_events) 94 | 95 | def notify_pushover(is_success, status, stack_name): 96 | """ send notification via pushover """ 97 | msg = 'Operation on stack {n} finished with status {s}'.format(n=stack_name, 98 | s=status) 99 | title = '{n}: {s}'.format(n=stack_name, 100 | s=status) 101 | if is_success: 102 | req = Client().send_message(msg, title=title, priority=0) 103 | else: 104 | req = Client().send_message(msg, title=title, priority=0, sound='falling') 105 | 106 | def parse_args(argv): 107 | """ parse arguments/options """ 108 | p = optparse.OptionParser(usage="usage: %prog [options] stack_name") 109 | 110 | p.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False, 111 | help='verbose (debugging) output') 112 | p.add_option('-s', '--sleep-time', dest='sleeptime', action='store', type=int, default=10, 113 | help='time in seconds to sleep between status checks; default 10') 114 | p.add_option('-r', '--region', dest='region', action='store', default='us-east-1', 115 | help='AWS region, default=us-east-1') 116 | push_default = False 117 | if 
os.path.exists(os.path.expanduser('~/.watch_jenkins_pushover')): 118 | push_default = True 119 | p.add_option('-p', '--pushover', dest='pushover', action='store_true', default=push_default, 120 | help='notify on completion via pushover (default {p}; touch ~/.watch_jenkins_pushover to default to True)'.format(p=push_default)) 121 | 122 | options, args = p.parse_args(argv) 123 | 124 | return options, args 125 | 126 | 127 | if __name__ == "__main__": 128 | opts, args = parse_args(sys.argv[1:]) 129 | 130 | if opts.verbose: 131 | logger.setLevel(logging.DEBUG) 132 | else: 133 | logger.setLevel(logging.INFO) 134 | 135 | if len(args) < 1: 136 | raise SystemExit("ERROR: you must specify a stack name") 137 | 138 | main(args[0], opts.region, sleeptime=opts.sleeptime, pushover=opts.pushover) 139 | -------------------------------------------------------------------------------- /disqus_backup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Simple script to back up Disqus comments/threads to 4 | a specified JSON file. 5 | 6 | To use this, you'll need to register an Application for the Disqus API, 7 | and get its secret and public keys. Do so at http://disqus.com/api/applications/ 8 | 9 | Requirements: 10 | disqus-python (the official API client) 11 | anyjson 12 | > pip install disqus-python anyjson 13 | 14 | Copyright 2014 Jason Antman 15 | Free for any use provided that patches are submitted back to me. 16 | 17 | The latest version of this script can be found at: 18 | 19 | """ 20 | 21 | import json 22 | import optparse 23 | import sys 24 | import os 25 | 26 | from disqusapi import DisqusAPI, Paginator, APIError 27 | 28 | def backup_disqus(short_name, key, secret, outfile, min_comments=5, verbose=False): 29 | """ 30 | backup disqus threads and comments for a given forum shortname 31 | 32 | :param short_name: Disqus forum short name / ID 33 | :type short_name: string 34 | :param key: Disqus API public key 35 | :type key: string 36 | :param secret: Disqus API secret key 37 | :type secret: string 38 | :param outfile: path to the file to write JSON output to 39 | :type outfile: string 40 | :param min_comments: minimum number of posts to have, else error and exit 41 | :type min_comments: integer (default 5) 42 | :param verbose: whether to write verbose output 43 | :type verbose: boolean 44 | """ 45 | result = {} 46 | disqus = DisqusAPI(secret, key) 47 | 48 | if verbose: 49 | print("Connected to Disqus API") 50 | try: 51 | details = disqus.forums.details(forum=short_name) 52 | except APIError: 53 | sys.stderr.write("ERROR: unable to find forum '%s'\n" % short_name) 54 | sys.exit(1) 55 | result['forum_details'] = details 56 | if verbose: 57 | print("Got forum details for '%s': %s" % (short_name, str(details))) 58 | 59 | try: 60 | threads = Paginator(disqus.forums.listThreads, forum=short_name) 61 | except APIError: 62 | sys.stderr.write("ERROR listing threads for forum '%s'\n" % short_name) 63 | sys.exit(1) 64 | thread_count = 0 65 | all_threads = [] 66 | for t in threads: 67 | thread_count = thread_count + 1 68 | all_threads.append(t) 69 | if verbose: 70 | print("Found %d threads" % thread_count) 71 | 72 | result['threads'] = all_threads 73 | 74 | try: 75 | posts = Paginator(disqus.forums.listPosts, forum=short_name, include=['unapproved','approved']) 76 | except APIError: 77 | sys.stderr.write("ERROR listing posts for forum '%s'\n" % short_name) 78 | sys.exit(1) 79 | post_count = 0 80 | all_posts = [] 81 | for p in posts: 82 
82 |         post_count = post_count + 1
83 |         all_posts.append(p)
84 |     if verbose:
85 |         print("Found %d posts" % post_count)
86 | 
87 |     result['posts'] = all_posts
88 |     if post_count < min_comments:
89 |         raise SystemExit("ERROR: forum has %d posts; minimum required is %d" % (post_count, min_comments))
90 |     with open(outfile, 'w') as fh:
91 |         json.dump(result, fh)
92 |     sys.stderr.write("Output written to %s\n" % outfile)
93 |     return True
94 | 
95 | def parse_options(argv):
96 |     """ parse command line options """
97 |     parser = optparse.OptionParser()
98 | 
99 |     parser.add_option('-n', '--short-name', dest='short_name', action='store', type='string',
100 |                       help='forum short name / ID')
101 | 
102 |     parser.add_option('-o', '--outfile', dest='outfile', action='store', type='string', default="disqus_backup.json",
103 |                       help='output filename')
104 | 
105 |     parser.add_option('-m', '--minimum-comments', dest='min_comments', action='store', type='int', default=5,
106 |                       help='error if less than this number of comments')
107 | 
108 |     parser.add_option('--secret', dest='secret', action='store', type='string',
109 |                       help="Disqus API Secret Key - will try to read from DISQUS_SECRET env var if option not specified")
110 | 
111 |     parser.add_option('--key', dest='key', action='store', type='string',
112 |                       help="Disqus API Public Key - will try to read from DISQUS_KEY env var if option not specified")
113 | 
114 |     parser.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False,
115 |                       help='verbose output')
116 | 
117 |     options, args = parser.parse_args(argv)
118 | 
119 |     if not options.short_name:
120 |         sys.stderr.write("ERROR: you must specify forum short name (ID) with -n|--short-name\n")
121 |         sys.exit(1)
122 | 
123 |     if not options.secret:
124 |         try:
125 |             options.secret = os.environ['DISQUS_SECRET']
126 |         except KeyError:
127 |             sys.stderr.write("ERROR: Disqus API secret key must be passed with --secret, or defined in DISQUS_SECRET env variable.\n")
128 |             sys.exit(1)
129 | 
130 |     if not options.key:
131 |         try:
132 |             options.key = os.environ['DISQUS_KEY']
133 |         except KeyError:
134 |             sys.stderr.write("ERROR: Disqus API Public key must be passed with --key, or defined in DISQUS_KEY env variable.\n")
135 |             sys.exit(1)
136 | 
137 |     return options
138 | 
139 | if __name__ == "__main__":
140 |     opts = parse_options(sys.argv)
141 |     backup_disqus(opts.short_name, opts.key, opts.secret, opts.outfile, min_comments=opts.min_comments, verbose=opts.verbose)
142 | 
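143 | # Example usage (a sketch, not part of the original script; "myforum" and the
144 | # key values are hypothetical placeholders for your own forum ID and API keys):
145 | #   DISQUS_KEY=pubkey DISQUS_SECRET=secretkey ./disqus_backup.py -n myforum -o myforum.json -v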
-------------------------------------------------------------------------------- /gist.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Python script to post a Gist of a file from
4 | a common/shared computer. Prompts for auth
5 | interactively.
6 | 
7 | This is largely based on Soren Bleikertz' simple
8 | example at:
9 | 
10 | 
11 | ##################
12 | 
13 | Copyright 2015 Jason Antman 
14 | Free for any use provided that patches are submitted back to me.
15 | 
16 | The latest version of this script can be found at:
17 | 
18 | 
19 | CHANGELOG:
20 | 
21 | 2015-02-05 jantman:
22 | - initial script
23 | 
24 | 2015-04-15 jantman:
25 | - catch error on ssl import and disable no_verify option
26 | """
27 | 
28 | import httplib
29 | import urllib
30 | import re
31 | import os.path
32 | from optparse import OptionParser
33 | import platform
34 | import sys
35 | import json
36 | import logging
37 | from copy import deepcopy
38 | 
39 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
40 | logging.basicConfig(level=logging.ERROR, format=FORMAT)
41 | logger = logging.getLogger(__name__)
42 | 
43 | try:
44 |     from ssl import _create_unverified_context
45 |     have_ssl = True
46 | except ImportError:
47 |     logger.error("ERROR - could not import ssl._create_unverified_context; unable to disable SSL cert verification")
48 |     have_ssl = False
49 | 
50 | def debug_response(response):
51 |     logger.debug("Response status {s}".format(s=response.status))
52 |     logger.debug("Response: {d}".format(d=response.read()))
53 |     logger.debug("Headers: \n{h}".format(
54 |         h='\n'.join(['{k}: {v}\n'.format(k=i[0], v=i[1]) for i in response.getheaders()])
55 |     ))
56 | 
57 | def gist_write(name, content, token=None, prefix=False, no_verify=False):
58 |     if prefix:
59 |         name = '{n}_{name}'.format(n=platform.node(), name=name)
60 |         logger.debug("Setting name to: {n}".format(n=name))
61 | 
62 |     data = {
63 |         'public': False,
64 |         'files': {
65 |             name: {
66 |                 'content': content
67 |             }
68 |         }
69 |     }
70 | 
71 |     # data debug
72 |     d = deepcopy(data)
73 |     if len(d['files'][name]['content']) > 800:
74 |         tmp = d['files'][name]['content']
75 |         d['files'][name]['content'] = tmp[:200] + "\n...\n" + tmp[-200:]
76 |     logger.debug("POST data: {d}".format(d=d))
77 |     headers = {'User-Agent': 'https://github.com/jantman/misc-scripts/blob/master/gist.py'}
78 |     if token is not None:
79 |         headers['Authorization'] = 'token {t}'.format(t=token)
80 |         logger.debug("Setting Authorization header to: {h}".format(h=headers['Authorization']))
81 | 
82 |     if no_verify:
83 |         conn = httplib.HTTPSConnection("api.github.com", context=_create_unverified_context())
84 |     else:
85 |         conn = httplib.HTTPSConnection("api.github.com")
86 |     logger.debug("Opened connection to https://api.github.com")
87 |     logger.debug("POSTing to /gists")
88 |     conn.request("POST", "/gists", json.dumps(data), headers)
89 |     response = conn.getresponse()
90 |     debug_response(response)
91 |     if response.status == 201:
92 |         data = response.read()
93 |         conn.close()
94 |         try:
95 |             d = json.loads(data)
96 |             return(d['html_url'])
97 |         except (ValueError, KeyError):
98 |             pass
99 |         logger.error("Got 201 status but no JSON response")
100 |         logger.debug("Response: \n{d}".format(d=data))
101 |         h = response.getheaders()
102 |         for header in h:
103 |             if header[0] == 'location':
104 |                 url = header[1].replace('api.github.com/gists/', 'gist.github.com/')
105 |                 return url
106 |         return ''
107 |     logger.error("ERROR - got response code {s}".format(s=response.status))
108 |     conn.close()
109 |     raise SystemExit(1)
110 | 
111 | usage = 'USAGE: gist.py [options] filename'
112 | parser = OptionParser(usage=usage)
113 | parser.add_option('-d', '--description', dest='description', action='store',
114 |                   type=str, help='Gist description')
115 | parser.add_option('-p', '--prefix', dest='prefix', action='store_false',
116 |                   default=True,
117 |                   help='do NOT prefix gist filename with hostname (default: prefix with hostname)')
118 | parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
119 |                   help='verbose output')
120 | parser.add_option('-V', '--no-verify', dest='no_verify', action='store_true',
121 |                   default=False, help='do not verify SSL')
122 | (options, args) = parser.parse_args()
123 | 
124 | if options.verbose:
125 |     logger.setLevel(logging.DEBUG)
126 | 
127 | if options.no_verify and not have_ssl:
128 |     logger.error("ERROR: could not import ssl._create_unverified_context; therefore unable to disable SSL cert verification")
129 |     raise SystemExit(1)
130 | 
131 | if len(args) < 1:
132 |     sys.stderr.write(usage + "\n")
133 |     raise SystemExit(1)
134 | 
135 | if not os.path.exists(args[0]):
136 |     logger.error("ERROR: {f} does not exist".format(f=args[0]))
137 |     raise SystemExit(1)
138 | 
139 | token = raw_input("GitHub API Token: ").strip()
140 | if token == '':
141 |     logger.error("ERROR: empty token")
142 |     raise SystemExit(1)
143 | 
144 | with open(args[0], 'r') as fh:
145 |     content = fh.read()
146 | 
147 | name = args[0]
148 | url = gist_write(name, content, token=token, prefix=options.prefix, no_verify=options.no_verify)
149 | print("Created: {u}".format(u=url))
150 | 
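151 | # Example usage (a sketch, not part of the original script; "notes.txt" is a
152 | # hypothetical file, and the script prompts interactively for a GitHub API token):
153 | #   ./gist.py -v notes.txt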
-------------------------------------------------------------------------------- /jenkins_node_labels.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Python script using python-jenkins (https://pypi.python.org/pypi/python-jenkins)
4 | to list all nodes on a Jenkins master, and their labels.
5 | 
6 | requirements:
7 | - pip install python-jenkins
8 | - lxml
9 | 
10 | NOTICE: this assumes that you have unauthenticated read access enabled for Jenkins.
11 | If you need to authenticate to Jenkins in order to read job status, see the comment
12 | in the main() function.
13 | 
14 | ##################
15 | 
16 | Copyright 2015 Jason Antman 
17 | Free for any use provided that patches are submitted back to me.
18 | 
19 | The latest version of this script can be found at:
20 | https://github.com/jantman/misc-scripts/blob/master/jenkins_node_labels.py
21 | 
22 | CHANGELOG:
23 | 
24 | 2015-10-06 jantman:
25 | - initial script
26 | """
27 | 
28 | import sys
29 | import argparse
30 | import logging
31 | import re
32 | import time
33 | import os
34 | import datetime
35 | import getpass
36 | from io import StringIO
37 | 
38 | try:
39 |     from lxml import etree
40 | except ImportError:
41 |     try:
42 |         # normal cElementTree install
43 |         import cElementTree as etree
44 |     except ImportError:
45 |         try:
46 |             # normal ElementTree install
47 |             import elementtree.ElementTree as etree
48 |         except ImportError:
49 |             raise SystemExit("Failed to import ElementTree from any known place")
50 | 
51 | from jenkins import Jenkins, JenkinsException, NotFoundException
52 | 
53 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
54 | logging.basicConfig(level=logging.ERROR, format=FORMAT)
55 | logger = logging.getLogger(__name__)
56 | 
57 | def main(jenkins_url, user=None, password=None, csv=False):
58 |     """
59 |     NOTE: this is using unauthenticated / anonymous access.
60 |     If that doesn't work for you, change this to something like:
61 |     j = Jenkins(jenkins_url, 'username', 'password')
62 |     """
63 |     if user is not None:
64 |         logger.debug("Connecting to Jenkins as user %s ...", user)
65 |         j = Jenkins(jenkins_url, user, password)
66 |     else:
67 |         logger.debug("Connecting to Jenkins anonymously...")
68 |         j = Jenkins(jenkins_url)
69 |     logger.debug("Connected.")
70 |     labels = {}
71 |     nodes = j.get_nodes()
72 |     for node in nodes:
73 |         try:
74 |             config = j.get_node_config(node['name'])
75 |             logger.debug("got config for node %s", node['name'])
76 |             root = etree.fromstring(config.encode('UTF-8'))
77 |             label = root.xpath('//label')[0].text
78 |             if label is not None and label != '':
79 |                 labels[node['name']] = label.split(' ')
80 |         except NotFoundException:
81 |             logger.error("Could not get config for node %s", node['name'])
82 |             continue
83 |     if 'master' in labels:
84 |         tmp = labels['master']
85 |         labels[''] = tmp
86 |     else:
87 |         labels[''] = ''
88 |     if not csv:
89 |         print(dict2cols(labels))
90 |         return
91 |     # csv
92 |     for sname, lbls in labels.items():
93 |         print('%s,%s' % (sname, ','.join(lbls)))
94 | 
95 | def dict2cols(d, spaces=2, separator=' '):
96 |     """
97 |     Code taken from awslimitchecker
98 | 
99 |     Take a dict of string keys and string values, and return a string with
100 |     them formatted as two columns separated by at least ``spaces`` number of
101 |     ``separator`` characters.
102 | 
103 |     :param d: dict of string keys, string values
104 |     :type d: dict
105 |     :param spaces: number of spaces to separate columns by
106 |     :type spaces: int
107 |     :param separator: character to fill in between columns
108 |     :type separator: string
109 |     """
110 |     if len(d) == 0:
111 |         return ''
112 |     s = ''
113 |     maxlen = max([len(k) for k in d.keys()])
114 |     fmt_str = '{k:' + separator + '<' + str(maxlen + spaces) + '}{v}\n'
115 |     for k in sorted(d.keys()):
116 |         s += fmt_str.format(
117 |             k=k,
118 |             v=d[k],
119 |         )
120 |     return s
121 | 
122 | def parse_args(argv):
123 |     """ parse arguments/options """
124 |     p = argparse.ArgumentParser()
125 | 
126 |     p.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False,
127 |                    help='verbose (debugging) output')
128 |     p.add_argument('-u', '--user', dest='user', action='store', type=str,
129 |                    default=None, help='Jenkins username (optional)')
130 |     p.add_argument('-p', '--password', dest='password', action='store', type=str,
131 |                    default=None, help='Jenkins password (optional; if -u/--user '
132 |                    'is specified and this is not, you will be interactively '
133 |                    'prompted)')
134 |     p.add_argument('-c', '--csv', dest='csv', action='store_true', default=False,
135 |                    help='output in CSV')
136 |     p.add_argument('JENKINS_URL', action='store', type=str,
137 |                    help='Base URL to access Jenkins instance')
138 |     args = p.parse_args(argv)
139 |     if args.user is not None and args.password is None:
140 |         args.password = getpass.getpass("Password for %s Jenkins user: " % args.user)
141 |     return args
142 | 
143 | 
144 | if __name__ == "__main__":
145 |     args = parse_args(sys.argv[1:])
146 | 
147 |     if args.verbose:
148 |         logger.setLevel(logging.DEBUG)
149 |     else:
150 |         logger.setLevel(logging.INFO)
151 | 
152 |     main(args.JENKINS_URL, user=args.user, password=args.password, csv=args.csv)
153 | 
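154 | # Example usage (a sketch, not part of the original script; the URL and
155 | # username are hypothetical; -u prompts for the password interactively):
156 | #   ./jenkins_node_labels.py -u admin http://jenkins.example.com:8080/
157 | #   ./jenkins_node_labels.py -c http://jenkins.example.com:8080/ > labels.csv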
-------------------------------------------------------------------------------- /toxit.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | toxit.py - script to parse a tox.ini file in cwd and run the test commands
4 | for a specified environment against the already-existing virtualenv (i.e.
5 | re-run only the test commands).
6 | 
7 | If you have ideas for improvements, or want the latest version, it's at:
8 | 
9 | 
10 | Copyright 2016-2019 Jason Antman 
11 | Free for any use provided that patches are submitted back to me.
12 | 
13 | CHANGELOG:
14 | 2019-07-17 Jason Antman :
15 | - BREAKING CHANGE: refactor to only support running one environment
16 | at a time, but support specifying a single test script to run with py.test.
17 | 2017-03-22 Jason Antman :
18 | - support passenv and setenv
19 | - major internal refactor
20 | 2016-10-28 Jason Antman :
21 | - ignore 'env' in commands
22 | 2016-07-05 Jason Antman :
23 | - gut the whole script and use tox's own parseconfig()
24 | 2016-07-05 Jason Antman :
25 | - bug fixes
26 | 2016-07-03 Jason Antman :
27 | - initial version of script
28 | """
29 | 
30 | import sys
31 | import os
32 | import argparse
33 | import logging
34 | import subprocess
35 | try:
36 |     from tox.config import parseconfig
37 | except ImportError:
38 |     sys.stderr.write("ERROR: Could not import tox - is it installed?\n")
39 |     raise SystemExit(1)
40 | 
41 | FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
42 | logging.basicConfig(level=logging.INFO, format=FORMAT)
43 | logger = logging.getLogger(__name__)
44 | 
45 | 
46 | class ToxIt(object):
47 |     """re-run tox commands against an existing environment"""
48 | 
49 |     ignore_commands = [
50 |         ['python', '--version'],
51 |         ['virtualenv', '--version'],
52 |         ['pip', '--version'],
53 |         ['pip', 'freeze'],
54 |         ['env']
55 |     ]
56 | 
57 |     def __init__(self):
58 |         self.env_config = self.parse_toxini()
59 | 
60 |     def parse_toxini(self):
61 |         """parse the tox ini, return dict of environments to list of commands"""
62 |         logger.debug('Calling tox.config.parseconfig()')
63 |         config = parseconfig(args=[])
64 |         logger.debug('Config parsed; envlist: %s', config.envlist)
65 |         env_config = {}
66 |         for envname in config.envlist:
67 |             bindir = os.path.join(
68 |                 config.envconfigs[envname].envdir.strpath,
69 |                 'bin'
70 |             )
71 |             env_config[envname] = {
72 |                 'commands': [],
73 |                 'passenv': [x for x in config.envconfigs[envname].passenv],
74 |                 'setenv': {
75 |                     a: config.envconfigs[envname].setenv.get(a) for a in
76 |                     config.envconfigs[envname].setenv.keys()
77 |                 }
78 |             }
79 |             for cmd in config.envconfigs[envname].commands:
80 |                 if cmd in self.ignore_commands:
81 |                     logger.debug('%s - skipping ignored command: %s',
82 |                                  envname, cmd)
83 |                     continue
84 |                 cmd[0] = os.path.join(bindir, cmd[0])
85 |                 env_config[envname]['commands'].append(cmd)
86 |             logger.debug('env %s: %s', envname, env_config[envname])
87 |         return env_config
88 | 
89 |     def run_env(self, env_config):
90 |         """run a single env; return True (success) or False (failure)"""
91 |         for cmd in env_config['commands']:
92 |             logger.info('Running command: %s', cmd)
93 |             rcode = subprocess.call(cmd, env=self._make_env(env_config))
94 |             logger.info('Command exited %s', rcode)
95 |             if rcode != 0:
96 |                 return False
97 |         return True
98 | 
99 |     def _make_env(self, env_config):
100 |         e = {}
101 |         for n in env_config['passenv']:
102 |             if n in os.environ:
103 |                 e[n] = os.environ[n]
104 |         for k, v in env_config['setenv'].items():
105 |             e[k] = v
106 |         return e
107 | 
108 |     def run(self, envname, test_script=None):
109 |         """run selected env"""
110 |         if test_script is not None:
111 |             for idx, cmd in enumerate(self.env_config[envname]['commands']):
112 |                 if not cmd[0].endswith('py.test'):
113 |                     continue
114 |                 cmd[-1] = test_script
115 |                 self.env_config[envname]['commands'][idx] = cmd
116 |         res = self.run_env(self.env_config[envname])
117 |         if not res:
118 |             print('Some commands failed.')
119 |             raise SystemExit(1)
120 |         print('All commands succeeded.')
121 |         raise SystemExit(0)
122 | 
123 | 
124 | def parse_args(argv):
125 |     """
126 |     parse arguments/options
127 | 
128 |     this uses the new argparse module instead of optparse
129 |     see: 
130 |     """
131 |     p = argparse.ArgumentParser(description='Re-run tox test commands for a '
132 |                                 'given environment against the '
133 |                                 'already-existing and installed virtualenv.')
134 |     p.add_argument('-v', '--verbose', dest='verbose', action='store_true',
135 |                    default=False,
136 |                    help='verbose output')
137 |     p.add_argument('TOXENV', type=str, help='Tox environment name to run')
138 |     p.add_argument('TEST_FILE', type=str, nargs='?', default=None,
139 |                    help='Optional - path to one test file to run in place of '
140 |                    'running all tests (replaces last argument on the '
141 |                    'pytest command line)')
142 |     return p.parse_args(argv)
143 | 
144 | 
145 | if __name__ == "__main__":
146 |     args = parse_args(sys.argv[1:])
147 |     if args.verbose:
148 |         logger.setLevel(logging.DEBUG)
149 |     script = ToxIt()
150 |     script.run(args.TOXENV, args.TEST_FILE)
151 | 
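152 | # Example usage (a sketch, not part of the original script; assumes tox has
153 | # already built a "py37" env, and tests/test_foo.py is a hypothetical file):
154 | #   ./toxit.py py37
155 | #   ./toxit.py py37 tests/test_foo.py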
""" 48 | 49 | def __init__(self, apitoken, server, port, nick, password): 50 | """ init method, run at class creation """ 51 | logger.debug("Connecting to GitHub") 52 | self.gh = login(token=apitoken) 53 | logger.info("Connected to GitHub API") 54 | self.server = server 55 | self.port = port 56 | self.nick = nick 57 | self.password = password 58 | 59 | def get_config(self, channel, branches): 60 | config = { 61 | 'notice': '0', 62 | 'branches': branches, 63 | 'room': channel, 64 | 'ssl': '1', 65 | 'no_colors': '0', 66 | 'server': self.server, 67 | 'nick': self.nick, 68 | 'nickserv_password': '', 69 | 'message_without_join': '1', 70 | 'long_url': '0', 71 | 'password': self.password, 72 | 'port': self.port, 73 | } 74 | return config 75 | 76 | def add_hook(self, repo, channel, branches): 77 | config = self.get_config(channel, branches) 78 | logger.info("Adding IRC hook to repo {r}; config: {c}".format( 79 | c=config, 80 | r=repo.name 81 | )) 82 | hook = repo.create_hook( 83 | 'irc', 84 | config, 85 | events=['push', 'pull_request'], 86 | active=True, 87 | ) 88 | if hook is None: 89 | logger.error("Error creating hook.") 90 | raise SystemExit(1) 91 | logger.info("Added hook to repository.") 92 | 93 | def run(self, orgname, reponame, channel, branches): 94 | """ do stuff here """ 95 | repo = self.gh.repository(orgname, reponame) 96 | num_hooks = 0 97 | for hook in repo.iter_hooks(): 98 | num_hooks += 1 99 | if hook.name == 'irc': 100 | logger.error("ERROR: repository already has an IRC hook") 101 | raise SystemExit(1) 102 | logger.debug("Repository has %d hooks, no IRC hooks yet.", num_hooks) 103 | self.add_hook(repo, channel, branches) 104 | 105 | def parse_args(argv): 106 | """ 107 | parse arguments/options 108 | 109 | this uses the new argparse module instead of optparse 110 | see: 111 | """ 112 | p = argparse.ArgumentParser(description='Add IRC notifications to a GitHub repo') 113 | p.add_argument('-v', '--verbose', dest='verbose', action='count', default=0, 114 | help='verbose output. 
115 |     BRANCHES_DEFAULT = ''
116 |     p.add_argument('-b', '--branches', dest='branches', action='store',
117 |                    default=BRANCHES_DEFAULT,
118 |                    help='comma-separated list of branch names to notify for'
119 |                    ' (default: %s)' % BRANCHES_DEFAULT)
120 |     p.add_argument('-o', '--orgname', dest='orgname', action='store',
121 |                    required=True, help='repository owner name')
122 |     p.add_argument('-s', '--server', action='store', required=True,
123 |                    help='IRC server hostname/IP')
124 |     p.add_argument('-p', '--port', action='store', required=True,
125 |                    help='IRC server port')
126 |     p.add_argument('-n', '--nick', action='store', required=True,
127 |                    help='IRC nick')
128 |     p.add_argument('-P', '--password', action='store', required=True,
129 |                    default='',
130 |                    help='IRC server (connection) password for the nick')
131 |     p.add_argument('reponame', action='store', help='repository name')
132 |     p.add_argument('channel', action='store', help='channel name')
133 |     args = p.parse_args(argv)
134 | 
135 |     return args
136 | 
137 | def get_api_token():
138 |     """ get GH api token """
139 |     apikey = subprocess.check_output(['git', 'config', '--global',
140 |                                       'github.token']).strip()
141 |     if len(apikey) != 40:
142 |         raise SystemExit("ERROR: invalid github api token from `git config "
143 |                          "--global github.token`: '%s'" % apikey)
144 |     return apikey
145 | 
146 | if __name__ == "__main__":
147 |     args = parse_args(sys.argv[1:])
148 |     if args.verbose > 1:
149 |         logger.setLevel(logging.DEBUG)
150 |     elif args.verbose > 0:
151 |         logger.setLevel(logging.INFO)
152 |     try:
153 |         token = os.environ['GITHUB_TOKEN']
154 |         logger.debug("Using API token from GITHUB_TOKEN environment variable")
155 |     except KeyError:
156 |         token = get_api_token()
157 |         logger.debug("Using API token from git config 'github.token'")
158 |     script = GitHubIRCHooker(
159 |         token,
160 |         args.server,
161 |         args.port,
162 |         args.nick,
163 |         args.password,
164 |     )
165 |     script.run(args.orgname, args.reponame, args.channel, args.branches)
166 | 
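167 | # Example usage (a sketch, not part of the original script; the server, nick,
168 | # password, repo, and channel are all hypothetical values):
169 | #   ./github_irc_hooks.py -o jantman -s irc.example.com -p 6697 -n ghbot -P sekrit myrepo '#mychannel'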
-------------------------------------------------------------------------------- /github_clone_setup.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | github_clone_setup.py
4 | ---------------------
5 | 
6 | Simple script to use on clones of GitHub repositories. Sets fetch refs for pull
7 | requests and, if `git config --global github.token` returns a valid API token,
8 | sets an 'upstream' remote if the repository is a fork.
9 | 
10 | Note - I *was* going to use ConfigParser to interact with .git/config instead
11 | of shelling out like a bad person. Then I found that ConfigParser barfs on any
12 | lines with leading space, like .git/config. Oh well. I can't fix *every* upstream
13 | bug.
14 | 
15 | The canonical version of this script lives at:
16 | https://github.com/jantman/misc-scripts/blob/master/github_clone_setup.py
17 | 
18 | Copyright 2014 Jason Antman 
19 | Free for any use provided that patches are submitted back to me.
20 | 
21 | Requirements
22 | ============
23 | * Python 2.7+ (uses subprocess.check_output)
24 | * github3.py>=0.8.2 (if using GitHub integration; tested with 0.8.2)
25 | 
26 | Changelog
27 | =========
28 | 2014-04-27 jantman (Jason Antman) 
29 | - initial version
30 | 
31 | """
32 | 
33 | import sys
34 | import os.path
35 | import subprocess
36 | import optparse
37 | import re
38 | 
39 | from github3 import login, GitHub
40 | 
41 | def get_api_token():
42 |     """ get GH api token """
43 |     apikey = subprocess.check_output(['git', 'config', '--global', 'github.token']).strip()
44 |     if len(apikey) != 40:
45 |         raise SystemExit("ERROR: invalid github api token from `git config --global github.token`: '%s'" % apikey)
46 |     return apikey
47 | 
48 | 
49 | def get_config_value(confpath, key):
50 |     """ gets a git config value using `git config` """
51 |     output = subprocess.check_output(['git', 'config', '--file=%s' % confpath, '--get-all', '%s' % key])
52 |     return output.strip()
53 | 
54 | 
55 | def add_config_value(confpath, key, value):
56 |     """ adds a git config value using `git config` """
57 |     cmd = ['git', 'config', '--file=%s' % confpath, '--add', '%s' % key, value]
58 |     output = subprocess.check_output(cmd)
59 |     print(" ".join(cmd))
60 |     return output
61 | 
62 | 
63 | def get_remotes(confpath, gitdir):
64 |     """ list all remotes for a repo """
65 |     remotes_str = subprocess.check_output(['git', '--work-tree=%s' % gitdir, '--git-dir=%s' % (os.path.join(gitdir, '.git')), 'remote'])
66 |     remotes = remotes_str.splitlines()
67 |     return remotes
68 | 
69 | 
70 | def set_pull_fetches(confpath, gitdir, remotes):
71 |     """ set fetch refs for pulls if not already there """
72 |     for rmt in remotes:
73 |         fetches_str = get_config_value(confpath, 'remote.%s.fetch' % rmt)
74 |         fetches = fetches_str.splitlines()
75 |         pull_fetch = '+refs/pull/*/head:refs/pull/%s/*' % rmt
76 |         if pull_fetch not in fetches:
77 |             add_config_value(confpath, 'remote.%s.fetch' % rmt, pull_fetch)
78 |     return True
79 | 
80 | 
81 | def get_owner_reponame_from_url(url):
82 |     """
83 |     parse a github repo URL into (owner, reponame)
84 | 
85 |     patterns:
86 |     git@github.com:jantman/misc-scripts.git
87 |     https://github.com/jantman/misc-scripts.git
88 |     http://github.com/jantman/misc-scripts.git
89 |     git://github.com/jantman/misc-scripts.git
90 |     """
91 |     m = re.match(r'^.+[/:]([^/]+)/([^/]+?)(\.git)?$', url)
92 |     if not m:
93 |         raise SystemExit("ERROR: unable to parse URL '%s'" % url)
94 |     if len(m.groups()) < 3:
95 |         raise SystemExit("ERROR: unable to parse URL '%s'" % url)
96 |     return (m.group(1), m.group(2))
97 | 
98 | 
99 | def setup_upstream(confpath, gitdir):
100 |     """ use GH API to find parent/upstream, and set remote for it """
101 |     # see if upstream is set
102 |     try:
103 |         upstream = get_config_value(confpath, 'remote.upstream.url')
104 |         return True
105 |     except subprocess.CalledProcessError:
106 |         pass
107 |     origin_url = get_config_value(confpath, 'remote.origin.url')
108 |     (owner, reponame) = get_owner_reponame_from_url(origin_url)
109 |     apikey = get_api_token()
110 |     gh = login(token=apikey)
111 |     repo = gh.repository(owner, reponame)
112 |     if repo.fork:
113 |         upstream_url = repo.parent.ssh_url
114 |         cmd = ['git', '--work-tree=%s' % gitdir, '--git-dir=%s' % (os.path.join(gitdir, '.git')), 'remote', 'add', 'upstream', upstream_url]
115 |         subprocess.check_call(cmd)
116 |         print(" ".join(cmd))
117 |     return True
118 | 
119 | 
120 | def is_github_repo(confpath, gitdir):
121 |     """ return true if this repo origin is on GitHub, False otherwise """
122 |     origin_url = get_config_value(confpath, 'remote.origin.url')
123 |     if 'github.com' in origin_url:
124 |         return True
125 |     return False
126 | 
127 | 
128 | def main(gitdir):
129 |     """ main entry point """
130 |     gitdir = os.path.abspath(os.path.expanduser(gitdir))
131 |     if not os.path.exists(gitdir):
132 |         raise SystemExit("ERROR: path does not exist: %s" % gitdir)
133 |     confpath = os.path.join(gitdir, '.git', 'config')
134 |     if not os.path.exists(confpath):
135 |         raise SystemExit("ERROR: does not appear to be a valid git repo - path does not exist: %s" % confpath)
136 | 
137 |     if not is_github_repo(confpath, gitdir):
138 |         print("%s is not a clone of a github repo" % gitdir)
139 |         raise SystemExit(0)
140 |     remotes = get_remotes(confpath, gitdir)
141 |     if 'upstream' not in remotes:
142 |         setup_upstream(confpath, gitdir)
143 |         remotes = get_remotes(confpath, gitdir)
144 |         if 'upstream' not in remotes:
145 |             raise SystemExit("Error: upstream not successfully added to remotes")
146 | 
147 |     set_pull_fetches(confpath, gitdir, remotes)
148 | 
149 | 
150 | def parse_args(argv):
151 |     """ parse arguments with OptionParser """
152 |     parser = optparse.OptionParser(usage='github_clone_setup.py -d ')
153 | 
154 |     parser.add_option('-d', '--dir', dest='gitdir', action='store', type='string',
155 |                       help='path to the local clone of the repository')
156 | 
157 |     options, args = parser.parse_args(argv)
158 |     return (options, args)
159 | 
160 | 
161 | if __name__ == "__main__":
162 |     opts, args = parse_args(sys.argv)
163 |     main(opts.gitdir)
164 | 
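165 | # Example usage (a sketch, not part of the original script; the clone path is
166 | # a hypothetical value):
167 | #   ./github_clone_setup.py -d ~/git/misc-scripts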
-------------------------------------------------------------------------------- /dynamodb_to_csv.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | dynamodb_to_csv.py
4 | ==================
5 | 
6 | Python3 / boto3 script to dump all data in a DynamoDB table to CSV (or JSON).
7 | 
8 | Requirements
9 | ------------
10 | 
11 | * Python 3.4+
12 | * boto3
13 | 
14 | Canonical Source
15 | ----------------
16 | 
17 | https://github.com/jantman/misc-scripts/blob/master/dynamodb_to_csv.py
18 | 
19 | License
20 | -------
21 | 
22 | Copyright 2019 Jason Antman 
23 | Free for any use provided that patches are submitted back to me.
24 | 
25 | CHANGELOG
26 | ---------
27 | 
28 | 2019-08-01 Jason Antman :
29 | - serialize Decimals as floats
30 | 
31 | 2019-03-18 Jason Antman :
32 | - fix bug in limiting fields
33 | 
34 | 2019-03-03 Jason Antman :
35 | - ability to output to JSON instead
36 | - ability to load to DynamoDB from JSON
37 | - handle dynamodb-local via ``DYNAMO_ENDPOINT`` environment variable
38 | 
39 | 2019-02-20 Jason Antman :
40 | - initial version of script
41 | """
42 | 
43 | import sys
44 | import os
45 | import argparse
46 | import logging
47 | import csv
48 | import io
49 | import json
50 | from decimal import Decimal
51 | 
52 | try:
53 |     import boto3
54 | except ImportError:
55 |     sys.stderr.write("ERROR: You must 'pip install boto3'\n")
56 |     raise SystemExit(1)
57 | 
58 | 
59 | FORMAT = '%(asctime)s %(levelname)s:%(name)s:%(message)s'
60 | logging.basicConfig(level=logging.INFO, format=FORMAT)
61 | logger = logging.getLogger()
62 | 
63 | for lname in ['urllib3', 'boto3', 'botocore']:
64 |     l = logging.getLogger(lname)
65 |     l.setLevel(logging.WARNING)
66 |     l.propagate = True
67 | 
68 | 
69 | class CustomEncoder(json.JSONEncoder):
70 | 
71 |     def default(self, o):
72 |         if isinstance(o, Decimal):
73 |             return float(str(o))
74 |         # Let the base class default method raise the TypeError
75 |         return json.JSONEncoder.default(self, o)
76 | 
77 | 
78 | class DynamoDumper(object):
79 | 
80 |     def __init__(self):
81 |         logger.debug('Connecting to DynamoDB')
82 |         kwargs = {}
83 |         if 'AWS_DEFAULT_REGION' in os.environ:
84 |             kwargs['region_name'] = os.environ['AWS_DEFAULT_REGION']
85 |         if 'DYNAMO_ENDPOINT' in os.environ:
86 |             kwargs['endpoint_url'] = os.environ['DYNAMO_ENDPOINT']
87 |         self._dynamo = boto3.resource('dynamodb', **kwargs)
88 | 
89 |     def run(self, table_name, fields=None, sort_field=None, as_json=False):
90 |         records, all_fields = self._get_data(table_name)
91 |         if fields is None:
92 |             fields = sorted(all_fields)
93 |         if as_json:
94 |             print(json.dumps(records, cls=CustomEncoder))
95 |         else:
96 |             print(self._to_csv(records, fields, sort_field))
97 | 
98 |     def load_from_json(self, table_name, fname):
99 |         table = self._dynamo.Table(table_name)
100 |         with open(fname, 'r') as fh:
101 |             records = json.loads(fh.read())
102 |         count = 0
103 |         for r in records:
104 |             table.put_item(Item=r)
105 |             count += 1
106 |         print('Loaded %d items into DynamoDB table %s' % (count, table_name))
107 | 
108 |     def _to_csv(self, records, fields, sort_field):
109 |         output = io.StringIO()
110 |         writer = csv.DictWriter(output, fieldnames=fields, restval='')
111 |         writer.writeheader()
112 |         rownum = 0
113 |         if sort_field is not None:
114 |             records = sorted(records, key=lambda x: x[sort_field])
115 |         for r in records:
116 |             rownum += 1
117 |             for k, v in r.items():
118 |                 # format lists nicely
119 |                 if isinstance(v, type([])):
120 |                     r[k] = ', '.join(v)
121 |             writer.writerow({
122 |                 x: r[x] for x in fields
123 |             })
124 |         return output.getvalue()
125 | 
126 |     def _get_data(self, table_name):
127 |         table = self._dynamo.Table(table_name)
128 |         logger.info('Scanning DynamoDB table: %s', table_name)
129 |         resp = table.scan()
130 |         records = resp['Items']
131 |         while 'LastEvaluatedKey' in resp:  # Scan is paginated; each response is at most 1MB
132 |             resp = table.scan(ExclusiveStartKey=resp['LastEvaluatedKey'])
133 |             records.extend(resp['Items'])
134 |         all_fields = set(k for r in records for k in r.keys())
135 |         logger.debug('Retrieved %d records from DynamoDB', len(records))
136 |         return records, list(all_fields)
137 | 
138 | 
139 | def parse_args(argv):
140 |     p = argparse.ArgumentParser(description='Dump DynamoDB table to CSV')
141 |     p.add_argument('-f', '--field-order', dest='field_order', action='store',
142 |                    type=str, default=None,
143 |                    help='CSV list of field names, to output columns in this '
144 |                    'order. Fields not listed will not be output. If not '
145 |                    'specified, will output all fields in alphabetical '
146 |                    'order.')
147 |     p.add_argument('-s', '--sort-field', dest='sort_field', type=str,
148 |                    action='store', default=None,
149 |                    help='Optional, name of field to sort on')
150 |     p.add_argument('-j', '--json', dest='json', action='store_true',
151 |                    default=False,
152 |                    help='dump to JSON instead of CSV '
153 |                    '(ignores -f/--field-order)')
154 |     p.add_argument('-r', '--reverse', dest='reverse', action='store', type=str,
155 |                    default=False,
156 |                    help='reverse - load FROM json file (filename specified in '
157 |                    'this option) TO dynamodb table')
158 |     p.add_argument('TABLE_NAME', action='store', type=str,
159 |                    help='DynamoDB table name to dump')
160 |     args = p.parse_args(argv)
161 |     return args
162 | 
163 | 
164 | if __name__ == "__main__":
165 |     args = parse_args(sys.argv[1:])
166 |     if args.field_order is not None:
167 |         args.field_order = args.field_order.split(',')
168 |     if args.reverse:
169 |         DynamoDumper().load_from_json(args.TABLE_NAME, args.reverse)
170 |     else:
171 |         DynamoDumper().run(
172 |             args.TABLE_NAME, fields=args.field_order,
173 |             sort_field=args.sort_field, as_json=args.json
174 |         )
175 | 
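176 | # Example usage (a sketch, not part of the original script; the table name,
177 | # sort field, and region are hypothetical values):
178 | #   AWS_DEFAULT_REGION=us-east-1 ./dynamodb_to_csv.py -s id MyTable > MyTable.csv
179 | #   ./dynamodb_to_csv.py -j MyTable > MyTable.json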
--------------------------------------------------------------------------------