├── .coveragerc
├── .gitignore
├── .travis.yml
├── README.md
├── chef
├── cookbooks
│ └── datadesk
│ │ ├── definitions
│ │ └── virtualenv.rb
│ │ ├── files
│ │ └── default
│ │ │ ├── apache
│ │ │ └── apache2.conf
│ │ │ ├── init.d
│ │ │ └── pgpool2
│ │ │ ├── memcached
│ │ │ └── memcached.conf
│ │ │ ├── motd
│ │ │ └── caw.sh
│ │ │ ├── munin
│ │ │ └── varnish4_
│ │ │ ├── pgpool
│ │ │ ├── pg_hba.conf
│ │ │ └── pgpool.conf
│ │ │ ├── users
│ │ │ └── bash_profile
│ │ │ └── varnish
│ │ │ └── varnish
│ │ ├── recipes
│ │ ├── apache.rb
│ │ ├── apps.rb
│ │ ├── cron.rb
│ │ ├── default.rb
│ │ ├── memcached.rb
│ │ ├── motd.rb
│ │ ├── munin.rb
│ │ ├── newrelic.rb
│ │ ├── pgpool.rb
│ │ ├── postgresql.rb
│ │ ├── python.rb
│ │ └── varnish.rb
│ │ └── templates
│ │ └── default
│ │ ├── apache
│ │ ├── ports.conf.erb
│ │ └── vhost.erb
│ │ ├── munin
│ │ ├── munin-node.conf.erb
│ │ ├── munin.conf.erb
│ │ ├── pgstats.erb
│ │ └── varnish4_.erb
│ │ ├── users
│ │ └── sudoers.erb
│ │ └── varnish
│ │ ├── default.vcl.erb
│ │ └── esi.vcl.erb
├── node.json
└── solo.rb
├── fabfile
├── __init__.py
├── alertthemedia.py
├── bigfiles.py
├── clean.py
├── collectstatic.py
├── cook.py
├── createserver.py
├── deploy.py
├── env.py
├── hampsterdance.py
├── installchef.py
├── load.py
├── makesecret.py
├── manage.py
├── migrate.py
├── pep8.py
├── pipinstall.py
├── ps.py
├── pull.py
├── restartapache.py
├── restartvarnish.py
├── rmpyc.py
├── rs.py
├── sh.py
├── ssh.py
├── tabnanny.py
├── updatetemplates.py
└── venv.py
├── manage.py
├── project_name
├── __init__.py
├── newrelic.ini
├── settings.py
├── settings_dev.template
├── settings_prod.py
├── settings_test.template
├── urls.py
├── wsgi_dev.py
└── wsgi_prod.py
├── requirements.txt
├── setup.py
├── templates
├── 404.html
├── 500.html
├── 503.html
└── static
│ └── .touch
└── toolbox
├── __init__.py
├── apps.py
├── context_processors
├── __init__.py
├── env.py
└── sites.py
├── management
├── __init__.py
└── commands
│ ├── __init__.py
│ ├── backupdb.py
│ ├── generatesecretkey.py
│ └── loadbackupdb.py
├── models.py
├── mrss.py
├── templatetags
├── __init__.py
└── toolbox_tags.py
├── tests.py
├── unicodecsv.py
└── views.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | include =
3 | toolbox/*
4 |
5 | omit =
6 | toolbox/tests.py
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .coverage
2 | *.pyc
3 | django.log
4 | django.log*
5 | {{ project_name }}/settings_dev.py
chef/cookbooks/datadesk/files/default/id_rsa
chef/cookbooks/datadesk/files/default/id_rsa.pub
chef/cookbooks/datadesk/files/default/authorized_keys
9 | *.egg-*
10 | .static/*
11 | .media/*
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 | - '2.7'
4 | install:
5 | - pip install django
6 | - mkdir repo
7 | - django-admin.py startproject --extension=py,.gitignore --template=https://github.com/datadesk/django-project-template/archive/master.zip project repo
8 | - cd repo
9 | - pip install -r requirements.txt
10 | - cp project/settings_test.template project/settings_dev.py
11 | script:
12 | - flake8 toolbox
13 | - coverage run setup.py test
14 | after_success:
15 | - coveralls
16 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
_ _ _ _ _ _ _
2 | __| |(_)__ _ _ _ __ _ ___ _ __ _ _ ___ (_)___ __| |_ | |_ ___ _ __ _ __| |__ _| |_ ___
3 | / _` || / _` | ' \/ _` / _ \ | '_ \ '_/ _ \| / -_) _| _| | _/ -_) ' \| '_ \ / _` | _/ -_)
4 | \__,_|/ \__,_|_||_\__, \___/ | .__/_| \___// \___\__|\__| \__\___|_|_|_| .__/_\__,_|\__\___|
5 | |__/ |___/ |_| |__/ |_|
6 |
7 |
8 | A custom template for initializing a new Django project the Data Desk way.
9 |
10 | Uses the [built-in](https://docs.djangoproject.com/en/dev/ref/django-admin/#startproject-projectname-destination) Django ``startproject`` templating system. Includes a number of small modifications favored by the [Los Angeles Times Data Desk](http://datadesk.latimes.com). Assumes you already have experience hacking around on Django and PostGIS.
11 |
12 | Still experimental, so don't get your hopes up.
13 |
14 | [](https://travis-ci.org/datadesk/django-project-template)
15 | [](https://coveralls.io/r/datadesk/django-project-template?branch=master)
16 |
17 | * Issues: [github.com/datadesk/django-project-template/issues](https://github.com/datadesk/django-project-template/issues)
18 | * Testing: [travis-ci.org/datadesk/django-project-template](https://travis-ci.org/datadesk/django-project-template)
19 | * Coverage: [coveralls.io/r/datadesk/django-project-template](https://coveralls.io/r/datadesk/django-project-template)
20 |
21 | Features
22 | --------
23 |
24 | * A split of ``settings.py`` that allows for different values in [development](https://github.com/datadesk/django-project-template/blob/master/project_name/settings_dev.template) versus [production](https://github.com/datadesk/django-project-template/blob/master/project_name/settings_prod.py)
25 | * Preinstallation of Django's [automatic administration panel](https://docs.djangoproject.com/en/dev/ref/contrib/admin/)
26 | * Preconfiguration of [urls.py](https://github.com/datadesk/django-project-template/blob/master/project_name/urls.py) to serve static, media and Munin files
27 | * Preconfiguration of [logging options](https://github.com/datadesk/django-project-template/blob/master/project_name/settings.py#L104)
28 | * Preconfiguration of [GeoDjango](https://docs.djangoproject.com/en/dev/ref/contrib/gis/) for [PostGIS](http://postgis.net/)
29 | * Preinstallation of [django-debug-toolbar](https://github.com/django-debug-toolbar/django-debug-toolbar)
30 | * Preinstallation of [django-greeking](https://github.com/palewire/django-greeking)
31 | * [Fabric functions](https://github.com/datadesk/django-project-template/blob/master/fabfile/) for local development and production deployment
32 | * Preinstallation of [tools for interacting with Amazon Web Services](https://code.google.com/p/boto/)
33 | * Preconfiguration of [New Relic server and Python monitoring services](https://github.com/datadesk/django-project-template/blob/master/chef/cookbooks/datadesk/recipes/newrelic.rb)
34 | * Preconfiguration of our preferred caching options for [development](https://github.com/datadesk/django-project-template/blob/master/project_name/settings_dev.template#L14) and [production](https://github.com/datadesk/django-project-template/blob/master/project_name/settings_prod.py#L14)
35 | * [Chef cookbook](https://github.com/datadesk/django-project-template/tree/master/chef) with scripted production server configuration routines
36 | * Management commands for scheduling [database backups](https://github.com/datadesk/django-project-template/blob/master/toolbox/management/commands/backupdb.py) to be stored in a bucket on Amazon S3 and [retrieving them](https://github.com/datadesk/django-project-template/blob/master/toolbox/management/commands/loadbackupdb.py) for local installation.
37 | * Custom context processors that provide the [current site](https://github.com/datadesk/django-project-template/blob/master/toolbox/context_processors/sites.py) and [environment](https://github.com/datadesk/django-project-template/blob/master/toolbox/context_processors/env.py).
38 | * A number of goofball utilities, like a [unicode CSV reader](https://github.com/datadesk/django-project-template/blob/master/toolbox/unicodecsv.py)
39 |
40 | Requirements
41 | ------------
42 |
43 | * [Django](https://www.djangoproject.com/download/)
44 | * [PostGIS](https://docs.djangoproject.com/en/dev/ref/contrib/gis/install/#installation)
45 | * [virtualenv](http://www.virtualenv.org/en/latest/)
46 |
47 | Getting started
48 | ---------------
49 |
Create a virtual environment to work inside.
51 |
52 | ```bash
53 | $ virtualenv my-environment
54 | ```
55 |
56 | Jump in and turn it on.
57 |
58 | ```bash
59 | $ cd my-environment
60 | $ . bin/activate
61 | ```
62 |
63 | Install Django.
64 |
65 | ```bash
66 | $ pip install "django"
67 | ```
68 |
69 | Create a new Git repository.
70 |
71 | ```bash
72 | $ git init repo
73 | ```
74 |
75 | Download and install a project in there using this template.
76 |
77 | ```bash
78 | $ django-admin.py startproject --extension=py,.gitignore --template=https://github.com/datadesk/django-project-template/archive/master.zip project repo
79 | ```
80 |
81 | Now that the template has landed, jump in and install the project's Python dependencies.
82 |
83 | ```bash
84 | $ cd repo
85 | $ pip install -r requirements.txt
86 | ```
87 |
88 | Generate a secret key.
89 |
90 | ```bash
91 | $ fab makesecret
92 | ```
93 |
94 | Copy the key. Open the settings file and drop it near the top. While you're there, you can also customize any of the other top level configuration options.
95 |
96 | ```bash
97 | $ vim project/settings.py
98 | ```
99 |
100 | Create a PostGIS database to connect with. This may vary depending on your PostGIS configuration.
101 |
102 | The command below assumes you have it running and want to make the database with a user named ``postgres``. Please modify it to suit your needs. If you don't have PostGIS installed, try following [the GeoDjango installation instructions](https://docs.djangoproject.com/en/dev/ref/contrib/gis/install/#installation).
103 |
104 | ```bash
105 | $ createdb -U postgres -E UTF8 -T template_postgis mydatabasename
106 | ```
107 |
108 | Make a copy of the development settings template.
109 |
110 | ```bash
111 | $ cp project/settings_dev.template project/settings_dev.py
112 | ```
113 |
114 | Open it and put in the credentials for the database you just made.
115 |
116 | ```bash
117 | $ vim project/settings_dev.py
118 | ```
119 |
120 | Sync the database.
121 |
122 | ```bash
123 | $ python manage.py syncdb
124 | ```
125 |
126 | Fire up the test server.
127 |
128 | ```bash
129 | $ fab rs
130 | ```
131 |
132 | Get to work. Once you have something worth saving you can replace this README with a description of your new project.
133 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/definitions/virtualenv.rb:
--------------------------------------------------------------------------------
# Definition: virtualenv
#
# Create (or delete) a Python virtualenv and pin a set of pip packages
# inside it.
#
# Parameters:
#   :path     - filesystem location of the env; defaults to the resource name
#   :action   - :create (default) or :delete
#   :owner    - owner of the directory and user the commands run as ("root")
#   :group    - group for the directory and commands ("root")
#   :mode     - directory mode (0755)
#   :packages - hash of { "package" => "version" } to pip-install
define :virtualenv, :action => :create, :owner => "root", :group => "root", :mode => 0755, :packages => {} do
  venv_root = params[:path] || params[:name]

  case params[:action]
  when :create
    # Ensure the target directory exists with the requested ownership/mode.
    directory venv_root do
      owner params[:owner]
      group params[:group]
      mode params[:mode]
    end

    # Build the env itself; skipped once bin/python already exists.
    execute "create-virtualenv-#{venv_root}" do
      user params[:owner]
      group params[:group]
      command "virtualenv --no-site-packages #{venv_root}"
      not_if "test -f #{venv_root}/bin/python"
    end

    # Install each requested package at its pinned version, skipping any
    # that `pip freeze` already reports at exactly that version.
    pip_bin = "#{venv_root}/bin/pip"
    params[:packages].each_pair do |pkg, pinned|
      execute "install-#{pkg}-#{venv_root}" do
        user params[:owner]
        group params[:group]
        command "#{pip_bin} install #{pkg}==#{pinned}"
        not_if "[ `#{pip_bin} freeze | grep #{pkg} | cut -d'=' -f3` = '#{pinned}' ]"
      end
    end
  when :delete
    # Tear the whole env tree down.
    directory venv_root do
      action :delete
      recursive true
    end
  end
end
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/apache/apache2.conf:
--------------------------------------------------------------------------------
# Apache global configuration installed by the datadesk Chef cookbook.
ServerName datadesk
# Mutex/PidFile paths come from the APACHE_* envvars (set in envvars).
Mutex file:${APACHE_LOCK_DIR} default
PidFile ${APACHE_PID_FILE}
Timeout 60
KeepAlive Off
# Worker/thread sizing (threaded MPM).
StartServers 2
MinSpareThreads 2
MaxSpareThreads 4
ThreadLimit 10
ThreadsPerChild 10
MaxRequestWorkers 160
# Recycle worker processes after 10k connections.
MaxConnectionsPerChild 10000
User ${APACHE_RUN_USER}
Group ${APACHE_RUN_GROUP}
AccessFileName .htaccess
# Skip reverse-DNS in logs.
HostnameLookups Off
ErrorLog ${APACHE_LOG_DIR}/error.log
LogLevel warn
# Pull in module loads, listener ports and enabled vhosts.
Include mods-enabled/*.load
Include mods-enabled/*.conf
Include ports.conf
Include sites-enabled/*
# Named log formats for use by CustomLog directives in the vhosts.
LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %b" common
# NOTE(review): "comonvhost" looks like a typo for "commonvhost"; left
# unchanged in case a vhost references the nickname as spelled.
LogFormat "%v %l %u %t \"%r\" %>s %b" comonvhost
LogFormat "%{Referer}i -> %U" referer
LogFormat "%{User-agent}i" agent
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/init.d/pgpool2:
--------------------------------------------------------------------------------
#! /bin/sh

### BEGIN INIT INFO
# Provides: pgpool2
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Should-Start: postgresql
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start pgpool-II
# Description: pgpool-II is a connection pool server and replication
# proxy for PostgreSQL.
### END INIT INFO

# System V init script for pgpool-II. Supports
# start/stop/status/restart/try-restart/reload per the LSB conventions.

PATH=/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/usr/sbin/pgpool
#PIDFILE=/var/run/postgresql/pgpool.pid
PIDFILE=/var/run/pgpool/pgpool.pid

# LSB status 5: program is not installed.
test -x $DAEMON || exit 5

# Include pgpool defaults if available
if [ -f /etc/default/pgpool2 ] ; then
    . /etc/default/pgpool2
fi

# Pass -d (debug logging) when enabled in /etc/default/pgpool2.
OPTS=""
if [ x"$PGPOOL_LOG_DEBUG" = x"yes" ]; then
    OPTS="$OPTS -d"
fi

. /lib/lsb/init-functions


# True when the pidfile points at a live pgpool process.
is_running() {
    pidofproc -p $PIDFILE $DAEMON >/dev/null
}


# Start pgpool (no-op when already running) as the postgres user.
d_start() {
    if is_running; then
        :
    else
        # BUGFIX: was `$DAEMON -n $OPTS 2>&1 /dev/null 2>&1 &`, which
        # handed /dev/null to pgpool as a stray positional argument and
        # never discarded stdout. Redirect both streams properly.
        su -c "$DAEMON -n $OPTS >/dev/null 2>&1 &" - postgres
    fi
}


# Stop pgpool with SIGINT (its "smart" shutdown). killproc exit status 3
# means "not running", which we also treat as success.
d_stop() {
    killproc -p $PIDFILE $DAEMON -INT
    status=$?
    [ $status -eq 0 ] || [ $status -eq 3 ]
    return $?
}


case "$1" in
    start)
        log_daemon_msg "Starting pgpool-II" pgpool
        d_start
        log_end_msg $?
        ;;
    stop)
        log_daemon_msg "Stopping pgpool-II" pgpool
        d_stop
        log_end_msg $?
        ;;
    status)
        is_running
        status=$?
        if [ $status -eq 0 ]; then
            log_success_msg "pgpool-II is running."
        else
            log_failure_msg "pgpool-II is not running."
        fi
        exit $status
        ;;
    restart|force-reload)
        log_daemon_msg "Restarting pgpool-II" pgpool
        d_stop && sleep 1 && d_start
        log_end_msg $?
        ;;
    try-restart)
        # Restart only when currently running; success otherwise.
        if $0 status >/dev/null; then
            $0 restart
        else
            exit 0
        fi
        ;;
    reload)
        # Online reload is unsupported; LSB status 3 "unimplemented".
        exit 3
        ;;
    *)
        log_failure_msg "Usage: $0 {start|stop|status|restart|try-restart|reload|force-reload}"
        exit 2
        ;;
esac
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/memcached/memcached.conf:
--------------------------------------------------------------------------------
# memcached options installed by the datadesk Chef cookbook.
# Run as a daemon.
-d
# Log file location.
logfile /var/log/memcached.log
# Memory cap, in megabytes.
-m 256
# Listen port.
-p 11211
# Drop privileges to this user.
-u nobody
# Bind to localhost only (not reachable from other hosts).
-l 127.0.0.1
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/motd/caw.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | printf '
4 | _..
5 | ____ _ _____ _ ___..-"""-. `)^| .-"""-..___ ____ _____ ____ _ __
6 | | _ \ / \ |_ _| / \ `-...___ `=.`-.` \-`.=` ___...-` | _ \ | ____|/ ___| | |/ /
7 | | | | | / _ \ | | / _ \ `\ ` ##### ` /` | | | || _| \___ \ | ` /
8 | | |_| |/ ___ \ | | / ___ \ `--;|||||;--` | |_| || |___ ___) || . \
9 | |____//_/ \_\|_|/_/ \_\ /\|||/\ |____/ |_____||____/ |_|\_\
10 | _______________________________________ ( /;-;\ ) _________________________________________
11 | `-...-`
12 | '
13 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/munin/varnish4_:
--------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | # -*- perl -*-
3 | #
4 | # varnish4_ - Munin plugin to for Varnish 4.x
5 | # Copyright (C) 2009 Redpill Linpro AS
6 | #
7 | # Author: Kristian Lyngstol
8 | #
9 | # This program is free software; you can redistribute it and/or modify
10 | # it under the terms of the GNU General Public License as published by
11 | # the Free Software Foundation; either version 2 of the License, or
12 | # (at your option) any later version.
13 | #
14 | # This program is distributed in the hope that it will be useful,
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 | # GNU General Public License for more details.
18 | #
19 | # You should have received a copy of the GNU General Public License along
20 | # with this program; if not, write to the Free Software Foundation, Inc.,
21 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 |
23 | =head1 NAME
24 |
25 | varnish4_ - Munin plugin to monitor various aspects of varnish
26 |
27 | =head1 APPLICABLE SYSTEMS
28 |
29 | Varnish 4.x with varnishstat
30 |
31 | =head1 CONFIGURATION
32 |
33 | The plugin needs to be able to execute varnishstat.
34 |
35 | The configuration section shows the defaults
36 | [varnish4_*]
37 | env.varnishstat varnishstat
38 | env.name
39 |
40 | env.varnishstat can be a full path to varnishstat if it's
41 | not in the path already.
42 |
43 | env.name is blank (undefined) by default and can be used to specify a -n
44 | name argument to varnish if multiple instances are running on the same
45 | server.
46 |
47 | A few aspects are not linked by default. They are marked as
48 | 'DEBUG' => 'yes' (or any other value). They are:
49 |
50 | vcl, bans, bans_lurker, lru, objects_per_objhead,
51 | losthdr, esi, hcb, shm, shm_writes, overflow,
52 | session, session_herd, gzip
53 |
54 | You can link them yourself with something like this:
55 |
56 | ln -s @@LIBDIR@@/plugins/varnish4_ \
57 | @@CONFDIR@@/plugins/varnish4_data_structures
58 |
59 | =head1 INTERPRETATION
60 |
61 | Each graph uses data from varnishstat.
62 |
63 | =head1 MAGIC MARKERS
64 |
65 | #%# family=auto
66 | #%# capabilities=autoconf suggest
67 |
68 | =head1 VERSION
69 |
70 | $Id$
71 |
72 | =head1 BUGS
73 |
74 | The hit_rate graph requires munin r2040 or newer to display
75 | correctly.
76 |
77 | =head1 PATCHES-TO
78 |
79 | Please send patches to Kristian Lyngstol
80 | and/or varnish-misc@varnish-cache.org for significant changes. Munin SVN
81 | is the authoritative repository for this plugin.
82 |
83 | =head1 AUTHOR
84 |
85 | Kristian Lyngstol
86 |
87 | =head1 MODIFICATIONS
88 |
89 | Ingo Oppermann
90 |
91 | =head1 LICENSE
92 |
93 | GPLv2
94 |
95 | =cut
96 |
97 |
98 | use XML::Parser;
99 | use strict;
100 |
101 | # Set to 1 to enable output when a variable is defined in a graph but
102 | # omitted because it doesn't exist in varnishstat.
103 | my $DEBUG = 0;
104 |
105 | # Set to 1 to ignore 'DEBUG' and suggest all available aspects.
106 | my $FULL_SUGGEST = 0;
107 |
108 | # Varnishstat executable. Include full path if it's not in your path.
109 | my $varnishstatexec = exists $ENV{'varnishstat'} ? $ENV{'varnishstat'} : "varnishstat";
110 |
111 | # For multiple instances
112 | my $varnishname = exists $ENV{'name'} ? $ENV{'name'} : undef;
113 |
114 | my $self; # Haha, myself, what a clever pun.
115 |
116 | # Parameters that can be defined on top level of a graph. Config will print
117 | # them as "graph_$foo $value\n"
118 | my @graph_parameters = ('title','total','order','scale','vlabel','args');
119 |
120 | # Parameters that can be defined on a value-to-value basis and will be
121 | # blindly passed to config. Printed as "$fieldname.$param $value\n".
122 | #
123 | # 'label' is hardcoded as it defaults to a varnishstat-description if not
124 | # set.
125 | my @field_parameters = ('graph', 'min', 'max', 'draw', 'cdef', 'warning',
126 | 'colour', 'info', 'type');
127 |
128 | # Varnishstat data is stored here. Example
129 | # ('n_vbe' => { 'value' => '124', 'description'=>...,'flag'=>... }, SMA =>
130 | # { s0 => { 'value' => '...', 'flag'=> '...' },'Transient' => ...})
131 | # Both dynamic and static counters are kept here.
132 | #
133 | # Notes:
134 | # - The 'flag' field for a counter is in RRD-dialect, not varnishstat
135 | my %data;
136 |
137 | # Data structure that defines all possible graphs (aspects) and how they
138 | # are to be plotted. Every top-level entry is a graph/aspect. Each
139 | # top-level graph MUST have title set and 'values'.
140 | #
141 | # The 'values' hash must have at least one value definition. The actual
142 | # value used is either fetched from varnishstat based on the value-name, or
143 | # if 'rpn' is defined: calculated. 'type' SHOULD be set.
144 | #
145 | # Graphs with 'DEBUG' set to anything is omitted from 'suggest'.
146 | #
147 | # 'rpn' on values allows easy access to graphs consisting of multiple
148 | # values from varnishstat. (Reverse polish notation). The RPN
149 | # implementation only accepts +-*/ and varnishstat-values.
150 | #
151 | # With the exception of 'label', which is filled with the
152 | # varnishstat-description if left undefined, any value left undefined will
153 | # be left up to Munin to define/ignore/yell about.
154 | #
155 | # For dynamic counters, the values listed need to specify a counter and
156 | # family. This will plot the specified counter for each identity within
157 | # that family. Example: family of SMA, counter c_fail. This will create a
158 | # c_fail-counter for each of the SMA-identities (e.g: Transient, s0, etc).
159 | # For dynamic graphs, the value-name is only used to identify the data
160 | # point, and does not relate to any varnishstat data as that is set by
161 | # family/counter.
162 | #
163 | # Note that dynamic counters fetch the type from the XML and things like
164 | # min/max are currently not supported (and silently ignored).
165 | #
166 | # See munin documentation or rrdgraph/rrdtool for more information.
167 | my %ASPECTS = (
168 | 'request_rate' => {
169 | 'title' => 'Request rates',
170 | 'order' => 'cache_hit cache_hitpass cache_miss '
171 | . 'backend_conn backend_unhealthy '
172 | . 'client_req client_conn' ,
173 | 'values' => {
174 | 'sess_conn' => {
175 | 'type' => 'DERIVE',
176 | 'min' => '0',
177 | 'colour' => '444444',
178 | 'graph' => 'ON'
179 | },
180 | 'client_req' => {
181 | 'type' => 'DERIVE',
182 | 'colour' => '111111',
183 | 'min' => '0'
184 | },
185 | 'cache_hit' => {
186 | 'type' => 'DERIVE',
187 | 'draw' => 'AREA',
188 | 'colour' => '00FF00',
189 | 'min' => '0'
190 | },
191 | 'cache_hitpass' => {
192 | 'info' => 'Hitpass are cached passes: An '
193 | . 'entry in the cache instructing '
194 | . 'Varnish to pass. Typically '
195 | . 'achieved after a pass in '
196 | . 'vcl_fetch.',
197 | 'type' => 'DERIVE',
198 | 'draw' => 'STACK',
199 | 'colour' => 'FFFF00',
200 | 'min' => '0'
201 | },
202 | 'cache_miss' => {
203 | 'type' => 'DERIVE',
204 | 'colour' => 'FF0000',
205 | 'draw' => 'STACK',
206 | 'min' => '0'
207 | },
208 | 'backend_conn' => {
209 | 'type' => 'DERIVE',
210 | 'colour' => '995599',
211 | 'min' => '0'
212 | },
213 | 'backend_unhealthy' => {
214 | 'type' => 'DERIVE',
215 | 'min' => '0',
216 | 'colour' => 'FF55FF'
217 | },
218 | 's_pipe' => {
219 | 'type' => 'DERIVE',
220 | 'min' => '0',
221 | 'colour' => '1d2bdf'
222 | },
223 | 's_pass' => {
224 | 'type' => 'DERIVE',
225 | 'min' => '0',
226 | 'colour' => '785d0d'
227 | }
228 | }
229 | },
230 | 'hit_rate' => {
231 | 'title' => 'Hit rates',
232 | 'order' => 'client_req cache_hit cache_miss '
233 | . 'cache_hitpass' ,
234 | 'vlabel' => '%',
235 | 'args' => '-u 100 --rigid',
236 | 'scale' => 'no',
237 | 'values' => {
238 | 'client_req' => {
239 | 'type' => 'DERIVE',
240 | 'min' => '0',
241 | 'graph' => 'off',
242 | 'rpn' => [ 'cache_hit' , 'cache_miss' , 'cache_hitpass' , '+' , '+' ]
243 | },
244 | 'cache_hit' => {
245 | 'type' => 'DERIVE',
246 | 'min' => '0',
247 | 'draw' => 'AREA',
248 | 'cdef' => 'cache_hit,client_req,/,100,*'
249 | },
250 | 'cache_miss' => {
251 | 'type' => 'DERIVE',
252 | 'draw' => 'STACK',
253 | 'min' => '0',
254 | 'cdef' => 'cache_miss,client_req,/,100,*'
255 | },
256 | 'cache_hitpass' => {
257 | 'type' => 'DERIVE',
258 | 'draw' => 'STACK',
259 | 'min' => '0',
260 | 'cdef' => 'cache_hitpass,client_req,/,100,*'
261 | },
262 | }
263 | },
264 | 'backend_traffic' => {
265 | 'title' => 'Backend traffic',
266 | 'values' => {
267 | 'backend_conn' => {
268 | 'type' => 'DERIVE',
269 | 'min' => '0'
270 | },
271 | 'backend_unhealthy' => {
272 | 'type' => 'DERIVE',
273 | 'min' => '0',
274 | 'warning' => ':1'
275 | },
276 | 'backend_busy' => {
277 | 'type' => 'DERIVE',
278 | 'min' => '0'
279 | },
280 | 'backend_fail' => {
281 | 'type' => 'DERIVE',
282 | 'min' => '0'
283 | },
284 | 'backend_reuse' => {
285 | 'type' => 'DERIVE',
286 | 'min' => 0
287 | },
288 | 'backend_recycle' => {
289 | 'type' => 'DERIVE',
290 | 'min' => 0
291 | },
292 | 'backend_toolate' => {
293 | 'type' => 'DERIVE',
294 | 'min' => '0'
295 | },
296 | 'backend_retry' => {
297 | 'type' => 'DERIVE',
298 | 'min' => '0'
299 | },
300 | 'backend_req' => {
301 | 'type' => 'DERIVE',
302 | 'min' => '0'
303 | }
304 | }
305 | },
306 | 'objects' => {
307 | 'title' => 'Number of objects',
308 | 'order' => 'n_object n_objectcore n_vampireobject n_objecthead',
309 | 'values' => {
310 | 'n_object' => {
311 | 'type' => 'GAUGE',
312 | 'label' => 'Number of objects'
313 | },
314 | 'n_objectcore' => {
315 | 'type' => 'GAUGE',
316 | 'label' => 'Number of object cores'
317 | },
318 | 'n_vampireobject' => {
319 | 'type' => 'GAUGE',
320 | 'label' => 'Number of unresurrected objects'
321 | },
322 | 'n_objecthead' => {
323 | 'type' => 'GAUGE',
324 | 'label' => 'Number of object heads',
325 | 'info' => 'Each object head can have one '
326 | . 'or more object attached, '
327 | . 'typically based on the Vary: header'
328 | }
329 | }
330 | },
331 | 'transfer_rates' => {
332 | 'title' => 'Transfer rates',
333 | 'order' => 's_resp_bodybytes s_resp_hdrbytes',
334 | 'args' => '-l 0',
335 | 'vlabel' => 'bit/s',
336 | 'values' => {
337 | 's_resp_hdrbytes' => {
338 | 'type' => 'DERIVE',
339 | 'label' => 'Header traffic',
340 | 'draw' => 'STACK',
341 | 'min' => '0',
342 | 'info' => 'HTTP Header traffic. TCP/IP '
343 | . 'overhead is not included.',
344 | 'cdef' => 's_resp_hdrbytes,8,*'
345 | },
346 | 's_resp_bodybytes' => {
347 | 'type' => 'DERIVE',
348 | 'draw' => 'AREA',
349 | 'label' => 'Body traffic',
350 | 'min' => '0',
351 | 'cdef' => 's_resp_bodybytes,8,*'
352 | }
353 | }
354 | },
355 | 'threads' => {
356 | 'title' => 'Thread status',
357 | 'values' => {
358 | 'threads' => {
359 | 'type' => 'GAUGE',
360 | 'min' => '0',
361 | 'warning' => '1:'
362 | },
363 | 'threads_created' => {
364 | 'type' => 'DERIVE',
365 | 'min' => '0'
366 | },
367 | 'threads_failed' => {
368 | 'type' => 'DERIVE',
369 | 'min' => '0',
370 | 'warning' => ':1'
371 | },
372 | 'threads_limited' => {
373 | 'type' => 'DERIVE',
374 | 'min' => '0'
375 | },
376 | 'threads_destroyed' => {
377 | 'type' => 'DERIVE',
378 | 'min' => '0',
379 | 'warning' => ':1'
380 | }
381 | }
382 | },
383 | 'memory_usage' => {
384 | 'title' => 'Memory usage',
385 | 'args' => '--base 1024',
386 | 'vlabel' => 'bytes',
387 | 'values' => {
388 | 'sms_balloc' => {
389 | 'type' => 'GAUGE',
390 | },
391 | 'sms_nbytes' => {
392 | 'type' => 'GAUGE',
393 | },
394 | 'SMA_1' => {
395 | 'counter' => 'g_bytes',
396 | 'family' => 'SMA',
397 | },
398 | 'SMA_2' => {
399 | 'counter' => 'g_space',
400 | 'family' => 'SMA',
401 | },
402 | 'SMA_3' => {
403 | 'counter' => 'c_bytes',
404 | 'family' => 'SMA'
405 | },
406 | 'SMF_1' => {
407 | 'counter' => 'g_bytes',
408 | 'family' => 'SMF',
409 | },
410 | 'SMF_2' => {
411 | 'counter' => 'g_space',
412 | 'family' => 'SMF',
413 | }
414 | }
415 | },
416 | 'uptime' => {
417 | 'title' => 'Varnish uptime',
418 | 'vlabel' => 'days',
419 | 'scale' => 'no',
420 | 'values' => {
421 | 'uptime' => {
422 | 'type' => 'GAUGE',
423 | 'cdef' => 'uptime,86400,/'
424 | }
425 | }
426 | },
427 | 'objects_per_objhead' => {
428 | 'title' => 'Objects per objecthead',
429 | 'DEBUG' => 'yes',
430 | 'values' => {
431 | 'obj_per_objhead' => {
432 | 'type' => 'GAUGE',
433 | 'label' => 'Objects per object heads',
434 | 'rpn' => [ 'n_object','n_objecthead','/' ]
435 | }
436 | }
437 | },
438 | 'losthdr' => {
439 | 'title' => 'HTTP Header overflows',
440 | 'DEBUG' => 'yes',
441 | 'values' => {
442 | 'losthdr' => {
443 | 'type' => 'DERIVE',
444 | 'min' => '0'
445 | }
446 | }
447 | },
448 | 'hcb' => {
449 | 'title' => 'Critbit data',
450 | 'DEBUG' => 'yes',
451 | 'values' => {
452 | 'hcb_nolock' => {
453 | 'type' => 'DERIVE',
454 | 'min' => '0'
455 | },
456 | 'hcb_lock' => {
457 | 'type' => 'DERIVE',
458 | 'min' => '0'
459 | },
460 | 'hcb_insert' => {
461 | 'type' => 'DERIVE',
462 | 'min' => '0'
463 | }
464 | }
465 | },
466 | 'esi' => {
467 | 'title' => 'ESI',
468 | 'DEBUG' => 'yes',
469 | 'values' => {
470 | 'esi_parse' => {
471 | 'type' => 'DERIVE',
472 | 'min' => '0'
473 | },
474 | 'esi_errors' => {
475 | 'type' => 'DERIVE',
476 | 'min' => '0'
477 | },
478 | 'esi_warnings' => {
479 | 'type' => 'DERIVE',
480 | 'min' => '0'
481 | }
482 | }
483 | },
484 | 'session' => {
485 | 'title' => 'Sessions',
486 | 'DEBUG' => 'yes',
487 | 'values' => {
488 | 'sess_conn' => {
489 | 'type' => 'DERIVE',
490 | 'min' => '0'
491 | },
492 | 'sess_drop' => {
493 | 'type' => 'DERIVE',
494 | 'min' => '0'
495 | },
496 | 'sess_fail' => {
497 | 'type' => 'DERIVE',
498 | 'min' => '0'
499 | },
500 | 'sess_pipe_overflow' => {
501 | 'type' => 'DERIVE',
502 | 'min' => '0'
503 | },
504 | 'sess_queued' => {
505 | 'type' => 'DERIVE',
506 | 'min' => '0'
507 | },
508 | 'sess_dropped' => {
509 | 'type' => 'DERIVE',
510 | 'min' => '0'
511 | },
512 | 'sess_closed' => {
513 | 'type' => 'DERIVE',
514 | 'min' => '0'
515 | },
516 | 'sess_pipeline' => {
517 | 'type' => 'DERIVE',
518 | 'min' => '0'
519 | },
520 | 'sess_readahead' => {
521 | 'type' => 'DERIVE',
522 | 'min' => '0'
523 | }
524 | }
525 | },
526 | 'session_herd' => {
527 | 'title' => 'Session herd',
528 | 'DEBUG' => 'yes',
529 | 'values' => {
530 | 'sess_herd' => {
531 | 'type' => 'DERIVE',
532 | 'min' => '0'
533 | }
534 | }
535 | },
536 | 'shm_writes' => {
537 | 'title' => 'SHM writes and records',
538 | 'DEBUG' => 'yes',
539 | 'values' => {
540 | 'shm_records' => {
541 | 'type' => 'DERIVE',
542 | 'min' => '0'
543 | },
544 | 'shm_writes' => {
545 | 'type' => 'DERIVE',
546 | 'min' => '0'
547 | }
548 | }
549 | },
550 | 'shm' => {
551 | 'title' => 'Shared memory activity',
552 | 'DEBUG' => 'yes',
553 | 'values' => {
554 | 'shm_flushes' => {
555 | 'type' => 'DERIVE',
556 | 'min' => '0'
557 | },
558 | 'shm_cont' => {
559 | 'type' => 'DERIVE',
560 | 'min' => '0'
561 | },
562 | 'shm_cycles' => {
563 | 'type' => 'DERIVE',
564 | 'min' => '0'
565 | }
566 | }
567 | },
568 | 'allocations' => {
569 | 'title' => 'Memory allocation requests',
570 | 'DEBUG' => 'yes',
571 | 'values' => {
572 | 'sm_nreq' => {
573 | 'type' => 'DERIVE',
574 | 'min' => '0'
575 | },
576 | 'sma_nreq' => {
577 | 'type' => 'DERIVE',
578 | 'min' => '0'
579 | },
580 | 'sms_nreq' => {
581 | 'type' => 'DERIVE',
582 | 'min' => '0'
583 | }
584 | }
585 | },
586 | 'vcl' => {
587 | 'title' => 'VCL',
588 | 'DEBUG' => 'yes',
589 | 'values' => {
590 | 'n_backend' => {
591 | 'type' => 'GAUGE'
592 | },
593 | 'n_vcl' => {
594 | 'type' => 'DERIVE',
595 | 'min' => '0'
596 | },
597 | 'n_vcl_avail' => {
598 | 'type' => 'DERIVE',
599 | 'min' => '0'
600 | },
601 | 'n_vcl_discard' => {
602 | 'type' => 'DERIVE',
603 | 'min' => '0'
604 | }
605 | }
606 | },
607 | 'bans' => {
608 | 'title' => 'Bans',
609 | 'DEBUG' => 'yes',
610 | 'values' => {
611 | 'bans' => {
612 | 'type' => 'GAUGE'
613 | },
614 | 'bans_added' => {
615 | 'type' => 'DERIVE',
616 | 'min' => '0'
617 | },
618 | 'bans_deleted' => {
619 | 'type' => 'DERIVE',
620 | 'min' => '0'
621 | },
622 | 'bans_completed' => {
623 | 'type' => 'GAUGE'
624 | },
625 | 'bans_obj' => {
626 | 'type' => 'GAUGE'
627 | },
628 | 'bans_req' => {
629 | 'type' => 'GAUGE'
630 | },
631 | 'bans_tested' => {
632 | 'type' => 'DERIVE',
633 | 'min' => '0'
634 | },
635 | 'bans_obj_killed' => {
636 | 'type' => 'DERIVE',
637 | 'min' => '0'
638 | },
639 | 'bans_tests_tested' => {
640 | 'type' => 'DERIVE',
641 | 'min' => '0'
642 | },
643 | 'bans_dups' => {
644 | 'type' => 'GAUGE'
645 | },
646 | 'bans_persisted_bytes' => {
647 | 'type' => 'GAUGE'
648 | },
649 | 'bans_persisted_fragmentation' => {
650 | 'type' => 'GAUGE'
651 | }
652 | }
653 | },
654 | 'bans_lurker' => {
655 | 'title' => 'Ban Lurker',
656 | 'DEBUG' => 'yes',
657 | 'values' => {
658 | 'bans_lurker_tested' => {
659 | 'type' => 'DERIVE',
660 | 'min' => '0'
661 | },
662 | 'bans_lurker_tests_tested' => {
663 | 'type' => 'DERIVE',
664 | 'min' => '0'
665 | },
666 | 'bans_lurker_obj_killed' => {
667 | 'type' => 'DERIVE',
668 | 'min' => '0'
669 | },
670 | 'bans_lurker_contention' => {
671 | 'type' => 'DERIVE',
672 | 'min' => '0'
673 | }
674 | }
675 | },
676 | 'expunge' => {
677 | 'title' => 'Object expunging',
678 | 'order' => 'n_expired n_lru_nuked',
679 | 'values' => {
680 | 'n_expired' => {
681 | 'type' => 'DERIVE',
682 | 'min' => '0'
683 | },
684 | 'n_lru_nuked' => {
685 | 'type' => 'DERIVE',
686 | 'min' => '0'
687 | }
688 | }
689 | },
690 | 'lru' => {
691 | 'title' => 'LRU activity',
692 | 'DEBUG' => 'yes',
693 | 'values' => {
694 | 'n_lru_nuked' => {
695 | 'type' => 'DERIVE',
696 | 'min' => '0'
697 | },
698 | 'n_lru_moved' => {
699 | 'type' => 'DERIVE',
700 | 'min' => '0'
701 | }
702 | }
703 | },
704 | 'bad' => {
705 | 'title' => 'Misbehavior',
706 | 'values' => {
707 | 'SMA_1' => {
708 | 'counter' => 'c_fail',
709 | 'family' => 'SMA',
710 | },
711 | 'SMF_1' => {
712 | 'counter' => 'c_fail',
713 | 'family' => 'SMF',
714 | },
715 | 'sess_drop' => {
716 | 'type' => 'DERIVE'
717 | },
718 | 'backend_unhealthy' => {
719 | 'type' => 'DERIVE'
720 | },
721 | 'fetch_failed' => {
722 | 'type' => 'DERIVE'
723 | },
724 | 'backend_busy' => {
725 | 'type' => 'DERIVE'
726 | },
727 | 'threads_failed' => {
728 | 'type' => 'DERIVE'
729 | },
730 | 'threads_limited' => {
731 | 'type' => 'DERIVE'
732 | },
733 | 'threads_destroyed' => {
734 | 'type' => 'DERIVE'
735 | },
736 | 'thread_queue_len' => {
737 | 'type' => 'GAUGE'
738 | },
739 | 'losthdr' => {
740 | 'type' => 'DERIVE'
741 | },
742 | 'esi_errors' => {
743 | 'type' => 'DERIVE'
744 | },
745 | 'esi_warnings' => {
746 | 'type' => 'DERIVE'
747 | },
748 | 'sess_fail' => {
749 | 'type' => 'DERIVE'
750 | },
751 | 'sess_pipe_overflow' => {
752 | 'type' => 'DERIVE'
753 | }
754 | }
755 | },
756 | 'gzip' => {
757 | 'title' => 'GZIP activity',
758 | 'DEBUG' => 'yes',
759 | 'values' => {
760 | 'n_gzip' => {
761 | 'type' => 'DERIVE',
762 | 'min' => '0'
763 | },
764 | 'n_gunzip' => {
765 | 'type' => 'DERIVE',
766 | 'min' => '0'
767 | }
768 | }
769 | }
770 | );
771 |
772 | ################################
773 | # Various helper functions #
774 | ################################
775 |
# Map a varnishstat flag character onto the matching munin/rrd data
# source type: 'i' becomes GAUGE, 'a' becomes DERIVE, and anything
# else is passed through unchanged.
sub translate_type
{
    my ($flag) = @_;
    return "GAUGE"  if $flag eq "i";
    return "DERIVE" if $flag eq "a";
    return $flag;
}
788 |
# Print the value of a two-dimensional hash if it exist.
# Returns false if non-existent.
#
# Output is formatted for plugins if arg4 is blank, otherwise arg4 is used
# as the title/name of the field (ie: arg4=graph_title).
sub print_if_exist
{
    my ($hashref, $value, $field, $title) = @_;
    my %values = %{$hashref};
    # Default to munin's "value.field" form when no explicit title given.
    $title = "$value.$field" unless defined $title;
    return 0 unless defined $values{$value}{$field};
    print "$title $values{$value}{$field}\n";
}
809 |
# Create a output-friendly name: any character that is not a plain
# ASCII letter or digit becomes an underscore.
sub normalize_name
{
    my ($raw) = @_;
    (my $clean = $raw) =~ tr/a-zA-Z0-9/_/c;
    return $clean;
}
817 |
# Braindead RPN: +,-,/,* will pop two items from @stack, and perform
# the relevant operation on the items. If the item in the array isn't one
# of the 4 basic math operations, a value from varnishstat is pushed on to
# the stack. IE: 'client_req','client_conn','/' will leave the value of
# "client_req/client_conn" on the stack.
#
# If only one item is left on the stack, it is printed. Otherwise, an error
# message is printed.
sub rpn
{
    my @stack;
    my $left;
    my $right;
    foreach my $item (@{$_[0]}) {
        if ($item eq "+") {
            $right = pop(@stack);
            $left = pop(@stack);
            push(@stack,$left+$right);
        } elsif ($item eq "-") {
            $right = pop(@stack);
            $left = pop(@stack);
            push(@stack,$left-$right);
        } elsif ($item eq "/") {
            $right = pop(@stack);
            $left = pop(@stack);
            # Guard against a fatal "Illegal division by zero", which
            # would kill the plugin when the denominator counter is
            # still 0 (e.g. a ratio right after varnish starts).
            # Report 0 instead so munin still gets a data point.
            push(@stack, $right == 0 ? 0 : $left/$right);
        } elsif ($item eq "*") {
            $right = pop(@stack);
            $left = pop(@stack);
            push(@stack,$left*$right);
        } else {
            # Not an operator: push the varnishstat counter value.
            push(@stack,int($data{$item}{'value'}));
        }
    }
    if (@stack > 1)
    {
        print STDERR "RPN error: Stack has more than one item left.\n";
        print STDERR "@stack\n";
        exit 255;
    }
    print "@stack";
    print "\n";
}
861 |
# Bail out with an optional error message followed by a short usage
# summary, then exit non-zero.
sub usage
{
    print STDERR "@_" . "\n\n" if @_;
    print STDERR "Known arguments: suggest, config, autoconf.\n";
    print STDERR "Run with suggest to get a list of known aspects.\n";
    exit 1;
}
872 |
873 | ################################
874 | # XML Parsing #
875 | ################################
876 | # The following code is for parsing varnishstat -x. While %data should be
877 | # stable, the following bits can easily be replaced with anything (json, an
878 | # other xml-parser, magic, etc)
879 | #
# The basic concept is simple enough. Only worry about stuff inside
# <stat>. Updating %state on each new data field, and commit it to %data
# when </stat> is seen.
883 | #
884 | # We do use translate_type() on the 'flag' field.
885 |
886 |
# Internal state for the XML parsing
my %state = (
    'stat' => 0, # inside a <stat> element or not
    'field' => 'none', # current child tag: <name>, <value>, <flag>, etc.
    );
892 |
# Reset the state of XML, mainly used for end-elements.
sub xml_reset_state() {
    $state{'stat'} = '0';
    $state{'field'} = 'none';
    # Assigning an empty list to a scalar slot leaves it undef, which
    # discards the values collected for the previous <stat> element.
    $state{'values'} = ();
}
899 |
# Callback for data entry. Cleans leading whitespace and updates state.
sub xml_characters {
    my $d = $_[1];
    # Ignore character data that is not inside a <stat> element.
    if ($state{'stat'} == 0) {
        return;
    }
    # Skip the "MAIN" type marker: counters without a recorded type end
    # up stored without a type/ident prefix (see xml_commit_state).
    if ($state{'field'} eq "type" && $d eq "MAIN") {
        return;
    }
    # Drop whitespace-only data (indentation between tags).
    $d =~ s/^\s*$//g;
    if ($d eq "") {
        return;
    }
    $state{'values'}{$state{'field'}} = $d;
}
915 |
# Store the current state in %data. Issued at </stat>.
# Note that 'flag' is translated to RRD-equivalents here.
sub xml_commit_state
{
    my $name = $state{'values'}{'name'};
    my $type = $state{'values'}{'type'};
    my $ident = $state{'values'}{'ident'};

    foreach my $key (keys %{$state{'values'}}) {
        my $data = $state{'values'}{$key};
        # Translate varnish's flag character to the rrd type name.
        if ($key eq 'flag') {
            $data = translate_type($data);
        }
        # Counters that carry both a type and an ident (e.g. per-storage
        # stats) are stored one level deeper than the plain counters.
        if (defined($type) and $type ne '' and defined($ident) and $ident ne '') {
            $data{$type}{$ident}{$name}{$key} = $data;
        } else {
            $data{$name}{$key} = $data
        }
    }
}
936 |
# Callback for end tag. E.g: </stat>
# End-of-element callback: when a stat element closes, commit the
# collected values to %data and reset the scratch state.
sub xml_end_elem {
    my ($parser, $element) = @_;
    return unless $element eq "stat";
    xml_commit_state();
    xml_reset_state();
}
947 |
# Callback for opening tag. E.g: <stat>
# Start-of-element callback: remember which tag we are inside, and
# flag entry into a stat element.
sub xml_start_elem {
    my ($parser, $tag) = @_;
    $state{'field'} = $tag;
    $state{'stat'} = 1 if $tag eq "stat";
}
955 |
956 | ################################
957 | # Internal API #
958 | ################################
959 |
960 |
# Populate %data, includes both values and descriptions and more.
# Currently driven by XML, but that could change.
#
# Runs "varnishstat -x" (plus -n <name> when configured) and feeds its
# output through the XML callbacks above. Dies with a meaningful
# message when the command cannot be started.
sub populate_stats
{
    my $arg = "-x";
    my $parser = new XML::Parser(Handlers => {Start => \&xml_start_elem,
                     End => \&xml_end_elem,
                     Char => \&xml_characters} );

    if ($varnishname) {
        $arg .= " -n $varnishname";
    }

    # Lexical handle + 3-arg pipe-open instead of a global bareword
    # filehandle; die with a message that names the failing command
    # and errno instead of the original unhelpful 'die "meh"'.
    open(my $xmlfh, '-|', "$varnishstatexec $arg")
        or die "Unable to execute '$varnishstatexec $arg': $!";
    $parser->parse($xmlfh, ProtocolEncoding => 'ISO-8859-1');
    close($xmlfh);
}
978 |
# Prints the fields in the list in $_[2] (e.g: 'value'/'description') for
# each identity of the varnish counter/family combination as defined by
# the $_[0]-counter on the aspect definition. Err, that's jibberish, so
# an example:
#
# e.g: dynamic_print('SMA_1','',('value'))
# e.g: dynamic_print('SMA_2','.label',('ident','description'))
# SMA_1 is the counter-value. If it is a dynamic counter, it has a counter
# and family-member (e.g: counter: c_req, family: SMA) and print_dynamic
# will print c_req for each SMA-identity.
#
# Note that the variables to print is a list. This is to allow printing a
# single item with multiple fields. Typically for identity+description so
# you can distinguish between different data points.
#
# Returns true if it was a dynamic counter.
sub print_dynamic
{
    my $name = $_[0];
    shift;
    my $suffix = $_[0];
    shift;
    my @field = @_;
    # Only aspect values that define both 'counter' and 'family' are
    # dynamic; anything else is a plain counter, so bail out early.
    if (!defined($ASPECTS{$self}{'values'}{$name}{'counter'})) {
        return 0;
    }
    if (!defined($ASPECTS{$self}{'values'}{$name}{'family'})) {
        return 0;
    }
    my $counter = $ASPECTS{$self}{'values'}{$name}{'counter'};
    my $type = $ASPECTS{$self}{'values'}{$name}{'family'};

    # One output line per identity within the family (e.g. one line per
    # storage backend), each named "<family>_<ident>_<counter><suffix>".
    foreach my $key (keys %{$data{$type}}) {
        my $pname = normalize_name($type . "_" . $key . "_" . $counter);
        print $pname . $suffix . " ";
        my $i = 0;
        foreach my $f (@field) {
            # Space-separate multiple requested fields.
            if ($i != 0) {
                print " ";
            }
            $i += 1;
            print $data{$type}{$key}{$counter}{$f};
        }
        print "\n";
    }
    return 1;
}
1026 |
# Read and verify the aspect ($self): it is derived from the symlink
# name this plugin was invoked through (everything after "varnishN_").
sub set_aspect
{
    ($self = $0) =~ s/^.*\/varnish[0-9]?_//;
    usage "No such aspect" if !defined($ASPECTS{$self}) && @ARGV == 0;
}
1036 |
# Print 'yes' if it's reasonable to use this plugin, or 'no' with a
# human-readable error message. Always exit true, even if the response
# is 'no'.
sub autoconf
{
    # XXX: Solaris outputs errors to stderr and always returns true.
    # XXX: See #873
    my $path = `which $varnishstatexec 2>/dev/null`;
    if ($path =~ m{^/}) {
        print "yes\n";
    } else {
        print "no ($varnishstatexec could not be found)\n";
    }
    exit 0;
}
1051 |
# Suggest relevant aspects/values of $self.
# 'DEBUG'-graphs are excluded unless $FULL_SUGGEST is set.
sub suggest
{
    foreach my $aspect (keys %ASPECTS) {
        next if defined($ASPECTS{$aspect}{'DEBUG'}) && $FULL_SUGGEST != 1;
        print "$aspect\n";
    }
}
1063 |
# Walk through the relevant aspect and print all top-level configuration
# values and value-definitions.
sub get_config
{
    my $graph = $_[0];

    # Need to double-check since set_aspect only checks this if there
    # is no argument (suggest/autoconf doesn't require a valid aspect)
    if (!defined($ASPECTS{$graph})) {
        usage "No such aspect";
    }
    my %values = %{$ASPECTS{$graph}{'values'}};

    print "graph_category Varnish\n";
    # Emit the top-level graph_* directives (title, order, args, ...).
    foreach my $field (@graph_parameters) {
        print_if_exist(\%ASPECTS,$graph,$field,"graph_$field");
    }

    foreach my $value (keys %values) {
        # Just print the description/type if it's a dynamic
        # counter. It'll be silent if it isn't.
        if(print_dynamic($value,'.label',('description','type','ident'))) {
            print_dynamic($value,'.type',('flag'));
            next;
        }

        # Need either RPN definition or a varnishstat value.
        if (!defined($data{$value}{'value'}) &&
            !defined($values{$value}{'rpn'})) {
            if ($DEBUG) {
                print "ERROR: $value not part of varnishstat.\n"
            }
            next;
        }

        # Fall back to varnishstat's own description when the aspect
        # does not define an explicit label.
        if (!print_if_exist(\%values,$value,'label')) {
            print "$value.label $data{$value}{'description'}\n";
        }
        # Per-value directives (type, min, max, ...).
        foreach my $field (@field_parameters) {
            print_if_exist(\%values,$value,$field);
        }
    }
}
1107 |
# Handle arguments (config, autoconf, suggest)
# Populate stats for config is necessary, but we want to avoid it for
# autoconf as it would generate a nasty error.
sub check_args
{
    # Munin can invoke the plugin with a single empty argument; treat
    # that the same as no argument at all.
    if (@ARGV && $ARGV[0] eq '') {
        shift @ARGV;
    }
    if (@ARGV == 1) {
        if ($ARGV[0] eq "config") {
            populate_stats;
            get_config($self);
            exit 0;
        } elsif ($ARGV[0] eq "autoconf") {
            autoconf($self);
            exit 0;
        } elsif ($ARGV[0] eq "suggest") {
            suggest;
            exit 0;
        }
        usage "Unknown argument";
    }
    # Fall through with no arguments: the main body prints values.
}
1131 |
1132 | ################################
1133 | # Execution starts here #
1134 | ################################
1135 |
set_aspect;
check_args;
populate_stats;

# We only get here if we're supposed to.
# Walks through the relevant values and either prints the varnishstat, or
# if the 'rpn' variable is set, calls rpn() to execute ... the rpn.
#
# NOTE: Due to differences in varnish-versions, this checks if the value
# actually exist before using it.
foreach my $value (keys %{$ASPECTS{$self}{'values'}}) {
    if (defined($ASPECTS{$self}{'values'}{$value}{'rpn'})) {
        print "$value.value ";
        rpn($ASPECTS{$self}{'values'}{$value}{'rpn'});
    } else {
        # Dynamic counters print one line per identity and are handled
        # entirely inside print_dynamic.
        if (print_dynamic($value,'.value',('value'))) {
            next;
        }

        if (!defined($data{$value}{'value'})) {
            if ($DEBUG) {
                print STDERR "Error: $value not part of "
                    . "varnishstat.\n";
            }
            next;
        }
        print "$value.value ";
        print "$data{$value}{'value'}\n";
    }
}
1166 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/pgpool/pg_hba.conf:
--------------------------------------------------------------------------------
1 | # TYPE DATABASE USER ADDRESS METHOD
2 | # Database administrative login by Unix domain socket
3 | local all postgres trust
4 | local all all md5
5 | # IPv4 local connections:
6 | host all all 127.0.0.1/32 md5
7 | # IPv6 local connections:
8 | host all all ::1/128 md5
9 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/pgpool/pgpool.conf:
--------------------------------------------------------------------------------
1 | listen_addresses = '*'
2 | port = 5433
3 | socket_dir = '/var/run/postgresql'
4 | backend_host_name = ''
5 | backend_port = 5432
6 | secondary_backend_host_name = ''
7 | secondary_backend_port = 0
8 |
9 | num_init_children = 32
10 | max_pool = 8
11 | child_life_time = 300
12 | connection_life_time = 60
13 | child_max_connections = 0
14 |
15 | logdir = '/var/run/postgresql'
16 |
17 | # Replication mode
18 | replication_mode = false
19 | replication_strict = true
20 | replication_timeout = 5000
21 | load_balance_mode = false
22 | weight_master = 0.5
23 | weight_secondary = 0.5
24 | replication_stop_on_mismatch = false
25 | replicate_select = false
26 |
27 | reset_query_list = 'ABORT; DISCARD ALL'
28 |
29 | master_slave_mode = false
30 | connection_cache = true
31 |
32 | health_check_timeout = 20
33 | health_check_period = 0
34 | health_check_user = 'nobody'
35 |
36 | insert_lock = false
37 | ignore_leading_white_space = false
38 |
39 | print_timestamp = true
40 | log_statement = false
41 | log_connections = false
42 | log_hostname = false
43 |
44 | enable_pool_hba = false
45 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/users/bash_profile:
--------------------------------------------------------------------------------
# Pull in the interactive shell settings so login shells match.
. .bashrc

# Convenience aliases.
alias l='ls -alFh'
alias c='clear'

# Shared pip download cache and default Django settings for app work.
export PIP_DOWNLOAD_CACHE=/tmp/pip
export DJANGO_SETTINGS_MODULE=project.settings
export EDITOR=vim
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/files/default/varnish/varnish:
--------------------------------------------------------------------------------
# Start varnish at boot.
START=yes
# NFILES/MEMLOCK are resource limits consumed by the init script —
# confirm exact semantics against /etc/init.d/varnish.
NFILES=131072
MEMLOCK=82000
# Use the hostname so each instance gets its own storage file path.
INSTANCE=$(uname -n)
# Listen on :80, management interface on :6082, our VCL, and a
# 1.25G file-backed storage.
DAEMON_OPTS="-a :80 \
             -T :6082 \
             -f /etc/varnish/default.vcl \
             -s file,/var/lib/varnish/$INSTANCE/varnish_storage.bin,1.25G"
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/apache.rb:
--------------------------------------------------------------------------------
# Install apache
package "apache2" do
  # A bare `:upgrade` symbol inside the block is a no-op, so the
  # resource silently fell back to the default :install action;
  # spell out the intended action.
  action :upgrade
end

# Install mod-wsgi so apache can talk to Django
package "libapache2-mod-wsgi" do
  action :upgrade
end

# install mod-rpaf so apache can use the X-Forwarded-For
# header to see the real incoming IP addresses. This prevents server-status
# from being publicly available
package "libapache2-mod-rpaf" do
  action :upgrade
end

# Set the port for Apache since Varnish will be on :80
template "/etc/apache2/ports.conf" do
  source "apache/ports.conf.erb"
  mode 0640
  owner "root"
  group "root"
  variables({
    :apache_port => node[:apache_port]
  })
end

# Set a virtual host file for each app
node[:apps].each do |app|
  template "/etc/apache2/sites-enabled/#{app[:name]}" do
    source "apache/vhost.erb"
    mode 0640
    owner "root"
    group "root"
    variables({
      :apache_port => node[:apache_port],
      :server_name => app[:apache_server_name],
      :app_name => app[:name],
      :apps_user => node[:apps_user]
    })
  end
end

# Ship the cookbook's main apache configuration.
cookbook_file "/etc/apache2/apache2.conf" do
  source "apache/apache2.conf"
  mode 0640
  owner "root"
  group "root"
end

# The stock default vhost would shadow ours; removal is best-effort
# since the file is gone on subsequent runs.
bash "Remove default apache config" do
  user "root"
  group "root"
  code "rm /etc/apache2/sites-enabled/000-default.conf"
  ignore_failure true
end

# Pick up all of the configuration changes above.
script "restart-apache" do
  interpreter "bash"
  user "root"
  code <<-EOH
  apachectl restart
  EOH
end
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/apps.rb:
--------------------------------------------------------------------------------
# Create the apps directory where everything will go
directory "/apps/" do
  owner node[:apps_user]
  group node[:apps_group]
  mode 0775
end

# Load the authorized keys for the root user
directory "/home/#{node[:apps_user]}/.ssh" do
  mode 0700
  owner node[:apps_user]
  group node[:apps_group]
end

cookbook_file "/home/#{node[:apps_user]}/.ssh/authorized_keys" do
  source "users/authorized_keys"
  mode 0640
  owner node[:apps_user]
  group node[:apps_group]
end

# Load the SSH keys
# NOTE(review): this installs a private key shared across all nodes
# from the cookbook — confirm that is intended.
cookbook_file "/home/#{node[:apps_user]}/.ssh/id_rsa" do
  source "users/id_rsa"
  mode 0600
  owner node[:apps_user]
  group node[:apps_group]
end

cookbook_file "/home/#{node[:apps_user]}/.ssh/id_rsa.pub" do
  source "users/id_rsa.pub"
  mode 0644
  owner node[:apps_user]
  group node[:apps_group]
end

# Pre-seed GitHub's host key so the git clones below never stall on an
# interactive host-key prompt. (Appends on every run.)
script "Add GitHub to known hosts" do
  interpreter "bash"
  user node[:apps_user]
  group node[:apps_group]
  code <<-EOH
  echo "|1|nFPVjT+tJlghvwL9SqJmckclSkI=|5HR4LAIxnl3I3cl40j5GIy+Qbwk= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" >> /home/#{node[:apps_user]}/.ssh/known_hosts
  echo "|1|LiSuPv5jaL9TCd9Tgue5BiGAJtE=|KYW9Uqo+gzE+Z3O/0uE8d9kadm0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==" >> /home/#{node[:apps_user]}/.ssh/known_hosts
  EOH
end

# Loop through all the apps we want to configure
node[:apps].each do |app|

  # Make the directory for the app (as a virtualenv)
  virtualenv "/apps/#{app[:name]}" do
    owner node[:apps_user]
    group node[:apps_group]
    mode 0775
  end

  # Make the directory for the repo
  directory "/apps/#{app[:name]}/repo" do
    owner node[:apps_user]
    group node[:apps_group]
    mode 0775
  end

  # Pull the git repo at the configured branch
  git "/apps/#{app[:name]}/repo" do
    repository app[:repo]
    reference "HEAD"
    revision app[:branch]
    user node[:apps_user]
    group node[:apps_group]
    action :sync
  end

  # Install the virtualenv requirements
  script "Install Requirements" do
    interpreter "bash"
    user node[:apps_user]
    group node[:apps_group]
    code "/apps/#{app[:name]}/bin/pip install -r /apps/#{app[:name]}/repo/requirements.txt"
  end

  # Create the database user; ignore_failure makes the run idempotent
  # when the user already exists.
  # NOTE(review): SUPERUSER is a very broad grant for an app user —
  # confirm it is required.
  script "Create database user" do
    interpreter "bash"
    user "postgres"
    code <<-EOH
    psql -c "CREATE USER #{app[:db_user]} WITH INHERIT SUPERUSER CREATEDB PASSWORD '#{app[:db_password]}'";
    EOH
    ignore_failure true
  end

  # Create the database (GIS-enabled via template_postgis); failure is
  # ignored so re-runs don't abort on an existing database.
  script "Create database" do
    interpreter "bash"
    user "postgres"
    code <<-EOH
    createdb -T template_postgis #{app[:db_name]} -E UTF8 -O #{app[:db_user]}
    EOH
    ignore_failure true
  end

  # Run any management commands inside the app's virtualenv
  app[:management].each do |command|
    script "Running #{command}" do
      interpreter "bash"
      user node[:apps_user]
      code <<-EOH
      cd /apps/#{app[:name]} && . bin/activate && cd repo && /apps/#{app[:name]}/bin/python /apps/#{app[:name]}/repo/manage.py #{command}
      EOH
    end
  end

end
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/cron.rb:
--------------------------------------------------------------------------------
# Create one cron resource per entry defined on the node; any omitted
# schedule field defaults to "*", and the job runs as the apps user
# unless a user is given explicitly.
node[:crons].each_pair do |name, opts|
  cron name do
    minute  opts[:minute]  || "*"
    hour    opts[:hour]    || "*"
    day     opts[:day]     || "*"
    month   opts[:month]   || "*"
    weekday opts[:weekday] || "*"
    command opts[:command]
    user    opts[:user] || node[:apps_user]
  end
end
12 |
13 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/default.rb:
--------------------------------------------------------------------------------
# Fix the locale: generate the UTF-8 locale definition...
execute "create-locale" do
  command %Q{
    locale-gen en_US.UTF-8
  }
end

# ...and make it the system-wide default.
execute "set-locale" do
  command %Q{
    update-locale LANG=en_US.UTF-8
  }
end
13 |
# Load any base system wide packages
node[:base_packages].each do |pkg|
  package pkg do
    # A bare `:upgrade` symbol inside the block is a no-op, so the
    # resource silently ran the default :install action; state the
    # intended action explicitly.
    action :upgrade
  end
end
20 |
# Loop through the user list, create the user, load the authorized_keys
# and mint a bash_profile
node[:users].each_pair do |username, info|
  # One personal group per user, sharing the user's numeric id.
  group username do
    gid info[:id]
  end

  user username do
    comment info[:full_name]
    uid info[:id]
    gid info[:id]
    # Disabled accounts keep their files but cannot log in.
    shell info[:disabled] ? "/sbin/nologin" : "/bin/bash"
    supports :manage_home => true
    home "/home/#{username}"
  end

  directory "/home/#{username}/.ssh" do
    owner username
    group username
    mode 0700
  end

  # NOTE(review): every user receives the same shared authorized_keys
  # file from the cookbook — confirm that is intended.
  cookbook_file "/home/#{username}/.ssh/authorized_keys" do
    source "users/authorized_keys"
    mode 0640
    owner username
    group username
  end

  cookbook_file "/home/#{username}/.bash_profile" do
    source "users/bash_profile"
    owner username
    group username
    mode 0755
  end

end
58 |
# Create each configured group with its fixed gid and member list.
node[:groups].each_pair do |groupname, details|
  group groupname do
    gid details[:gid]
    members details[:members]
  end
end
66 |
# Load the authorized keys for the root user
directory "/root/.ssh" do
  owner "root"
  group "root"
  mode 0700
end

cookbook_file "/root/.ssh/authorized_keys" do
  source "users/authorized_keys"
  mode 0640
  owner "root"
  group "root"
end

# Load the SSH keys
# NOTE(review): a private key distributed via the cookbook is shared
# by every node that runs this recipe — confirm that is acceptable.
cookbook_file "/root/.ssh/id_rsa" do
  source "users/id_rsa"
  mode 0600
  owner "root"
  group "root"
end

cookbook_file "/root/.ssh/id_rsa.pub" do
  source "users/id_rsa.pub"
  mode 0644
  owner "root"
  group "root"
end
95 |
# Install the sudoers policy; mode 0440 matches sudo's expected
# permissions for this file.
template "/etc/sudoers" do
  source "users/sudoers.erb"
  mode 0440
  owner "root"
  group "root"
  variables({
    :apps_user => node[:apps_user]
  })
end
105 |
# On multiarch Ubuntu these shared libraries live in an
# architecture-specific subdirectory; symlink each into /usr/lib where
# build tooling expects them. The three previously copy-pasted script
# resources are collapsed into one loop (resource names are unchanged),
# and the deprecated File.exists? is replaced with File.exist?.
%w[libfreetype.so libz.so libjpeg.so].each do |lib|
  script "Fix #{lib}" do
    interpreter "bash"
    user "root"
    group "root"
    code <<-EOH
    ln -s /usr/lib/`uname -i`-linux-gnu/#{lib} /usr/lib/
    EOH
    not_if do
      File.exist?("/usr/lib/#{lib}")
    end
  end
end
141 |
142 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/memcached.rb:
--------------------------------------------------------------------------------
# Install memcached.
package "memcached" do
  # A bare `:upgrade` symbol inside the block was a no-op (the default
  # :install action ran); spell out the intended action.
  action :upgrade
end

# Make sure the service is enabled at boot and running now.
service "memcached" do
  enabled true
  running true
  supports :status => true, :restart => true
  action [:enable, :start]
end

# Drop in our configuration and bounce the service when it changes.
cookbook_file "/etc/memcached.conf" do
  source "memcached/memcached.conf"
  mode 0640
  owner "root"
  group "root"
  # String notification syntax replaces the deprecated
  # resources(:service => "memcached") lookup.
  notifies :restart, "service[memcached]"
end
19 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/motd.rb:
--------------------------------------------------------------------------------
# CAW — install the custom message-of-the-day script.
cookbook_file "/etc/update-motd.d/01-caw" do
  source "motd/caw.sh"
  owner "root"
  group "root"
  # Set the executable bit directly on the resource instead of the
  # previous follow-up `bash "chmod motd"` resource that shelled out
  # to chmod; update-motd.d scripts must be executable to run.
  mode 0755
end
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/munin.rb:
--------------------------------------------------------------------------------
# Install munin and some extras
%w[munin munin-node munin-plugins-extra].each do |pkg|
  package pkg do
    # A bare `:upgrade` symbol inside the block was a no-op (the
    # default :install action ran); state the intended action, and
    # collapse the three copy-pasted resources into one loop.
    action :upgrade
  end
end
13 |
# Do the basic config for the master
template "/etc/munin/munin.conf" do
  source "munin/munin.conf.erb"
  # Mode "777" made the config world-writable; the daemons only need
  # to read it, so install it root-owned and world-readable.
  mode "0644"
  owner "root"
  group "root"
  variables({
    :name => node[:munin_name]
  })
end

# Do the basic config for the node
template "/etc/munin/munin-node.conf" do
  source "munin/munin-node.conf.erb"
  mode "0644"
  owner "root"
  group "root"
  variables({
    :name => node[:munin_name],
    :munin_master_ips => node[:munin_master_ips]
  })
end
36 |
# Replace the packaged munin apache.conf with an empty placeholder so
# it does not add its own Apache configuration.
script "Zero out munin apache.conf" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  echo "#nothing to see here" > /etc/munin/apache.conf
  EOH
end

# Install the framework for Python plugins
script "Install PyMunin" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  pip install PyMunin;
  EOH
end
55 |
# A postgresql plugin, first the conf...
template "/etc/munin/plugin-conf.d/pgstats" do
  source "munin/pgstats.erb"
  owner "root"
  group "root"
  variables({
    :munin_db_user => node[:munin_db_user],
    :munin_db_name => node[:munin_db_name],
    :munin_include_db_list => node[:munin_include_db_list]
  })
end

# A postgresql plugin needs the psycopg2 driver available to Python.
script "Install postgresql adaptor for python" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  pip install psycopg2;
  EOH
end

# Activate pgstats by symlinking it into the enabled-plugins directory;
# guarded so re-runs don't fail on an existing link.
script "Install pgstats for PyMunin" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  ln -s /usr/share/munin/plugins/pgstats /etc/munin/plugins/pgstats
  EOH
  not_if do
    # NOTE(review): File.exists? is deprecated (removed in Ruby 3.2);
    # prefer File.exist?.
    File.exists?("/etc/munin/plugins/pgstats")
  end
end
89 |
# Make sure we have the correct Apache plugins; symlinking into
# /etc/munin/plugins enables them, guarded for idempotence.
script "Install Apache2 plugins" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  ln -s /usr/share/munin/plugins/apachestats /etc/munin/plugins/apachestats
  EOH
  not_if do
    # NOTE(review): File.exists? is deprecated; prefer File.exist?.
    File.exists?("/etc/munin/plugins/apachestats")
  end
end

# A memcached plugin
script "Install memcachedstats for PyMunin" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  ln -s /usr/share/munin/plugins/memcachedstats /etc/munin/plugins/memcachedstats
  EOH
  not_if do
    File.exists?("/etc/munin/plugins/memcachedstats")
  end
end
115 |
# Install our modified Varnish plugin for Varnish 4
cookbook_file "/usr/share/munin/plugins/varnish4_" do
  source "munin/varnish4_"
  mode 0640
  owner "root"
  group "root"
end

# Enable one symlink per aspect; the plugin reads the aspect name from
# the suffix of the symlink it is invoked through. The whole batch is
# skipped if any of it appears to be installed already.
script "Install varnish plugin" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  chmod a+rx /usr/share/munin/plugins/varnish4_
  ln -s '/usr/share/munin/plugins/http_loadtime' '/etc/munin/plugins/http_loadtime'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_backend_traffic'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_bad'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_expunge'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_memory_usage'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_objects'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_request_rate'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_threads'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_transfer_rates'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_uptime'
  ln -s '/usr/share/munin/plugins/varnish4_' '/etc/munin/plugins/varnish4_hit_rate'
  EOH
  not_if do
    # NOTE(review): mixes File.exist? and the deprecated File.exists?;
    # both should be File.exist?.
    (File.exist?('/etc/munin/plugins/varnish4_backend_traffic') or File.exists?("/etc/munin/plugins/varnish4_"))
  end
end
152 |
153 | script "Restart Munin" do
154 | interpreter "bash"
155 | user "root"
156 | group "root"
157 | code <<-EOH
158 | chmod a+rx /etc/munin/plugin-conf.d/
159 | service munin-node restart
160 | EOH
161 | end
162 |
163 | script "restart-apache" do
164 | interpreter "bash"
165 | user "root"
166 | code <<-EOH
167 | apachectl restart
168 | EOH
169 | end
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/newrelic.rb:
--------------------------------------------------------------------------------
# Point apt at New Relic's package repository.
script "Add New Relic package source" do
  interpreter "bash"
  user "root"
  group "root"
  # Write (>) instead of append (>>): the file holds only this line, and
  # appending stacked a duplicate entry on every chef run.
  code <<-EOH
  echo deb http://apt.newrelic.com/debian/ newrelic non-free > /etc/apt/sources.list.d/newrelic.list
  EOH
end

script "Trust New Relic GPG key" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  wget -O- https://download.newrelic.com/548C16BF.gpg | apt-key add -
  EOH
end

script "Update package list" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  apt-get update
  EOH
end

script "Install service monitor package" do
  interpreter "bash"
  user "root"
  group "root"
  # -y keeps apt from prompting for confirmation, which would hang an
  # unattended chef-solo run.
  code <<-EOH
  apt-get install -y newrelic-sysmond
  EOH
end

script "Configure server monitor" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  nrsysmond-config --set license_key=#{node['newrelic_license_key']}
  EOH
end

script "Start server monitor" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  /etc/init.d/newrelic-sysmond start
  EOH
end

script "Install Python agent" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  pip install newrelic
  EOH
end

# NOTE(review): this repeats the "Install Python agent" step verbatim —
# presumably it was meant to install/generate newrelic.ini instead.
# Left in place (it is a harmless no-op) until the intent is confirmed.
script "Install Python configuration" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  pip install newrelic
  EOH
end

script "restart-apache" do
  interpreter "bash"
  user "root"
  code <<-EOH
  apachectl restart
  EOH
end
80 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/pgpool.rb:
--------------------------------------------------------------------------------
# Install pgpool and wire it up to PostgreSQL.
package "pgpool2" do
  # `action :upgrade` — the original bare `:upgrade` symbol inside the
  # block was evaluated and discarded, silently leaving the resource on
  # its default :install action.
  action :upgrade
end

cookbook_file "/etc/pgpool2/pgpool.conf" do
  source "pgpool/pgpool.conf"
  user "postgres"
  group "postgres"
  mode 0640
end

cookbook_file "/etc/postgresql/9.5/main/pg_hba.conf" do
  source "pgpool/pg_hba.conf"
  user "postgres"
  group "postgres"
  mode 0640
end

# Register each app's database credentials with pgpool.
# NOTE(review): this append is not idempotent — re-running chef adds
# duplicate lines to pcp.conf. Confirm whether a guard is wanted.
node[:apps].each do |app|
  script "Set pgpool password for #{app[:name]}" do
    interpreter "bash"
    user "root"
    group "root"
    code <<-EOH
    pg_md5 '#{app[:db_password]}' | awk '{print "#{app[:db_user]}:"$1}' >> /etc/pgpool2/pcp.conf;
    EOH
  end
end

script "Set ownership of pgpool conf directory" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  chown postgres -R /etc/pgpool2;
  EOH
end

script "Create pgpool pid directory" do
  interpreter "bash"
  user "root"
  group "root"
  # mkdir -p is idempotent; without -p a second chef run failed here
  # because the directory already existed.
  code <<-EOH
  mkdir -p /var/run/pgpool;
  chown postgres /var/run/pgpool;
  chgrp postgres /var/run/pgpool;
  EOH
end

script "Restart postgres" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  service postgresql restart
  EOH
end

script "Restart pgpool" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  service pgpool2 restart
  EOH
end
67 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/postgresql.rb:
--------------------------------------------------------------------------------
# Intended to work on Ubuntu 12.04
# NOTE(review): postgresql 9.5 / postgis 2.2 are the Ubuntu 16.04 stack —
# confirm which release this really targets.
#
# GIS + PostgreSQL build stack. Each package uses an explicit
# `action :upgrade`; the original bare `:upgrade` symbol inside each
# block was a no-op that left the default :install action in place.
%w[
  binutils
  gdal-bin
  libproj-dev
  postgresql-9.5-postgis-2.2
  postgresql-server-dev-9.5
  python-psycopg2
].each do |pkg|
  package pkg do
    action :upgrade
  end
end
26 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/python.rb:
--------------------------------------------------------------------------------
# System wide python packages via apt
node[:ubuntu_python_packages].each do |pkg|
  package pkg do
    # `action :upgrade` — the original bare `:upgrade` symbol was a no-op
    # that left the resource on its default :install action.
    action :upgrade
  end
end

# System wide python packages via pip, pinned to the requested version.
node[:pip_python_packages].each_pair do |pkg, version|
  execute "install-#{pkg}" do
    command "pip install #{pkg}==#{version}"
    # Skip when `pip freeze` already shows the exact pinned version
    # (field 3 of "pkg==version" split on '=').
    not_if "[ `pip freeze | grep #{pkg} | cut -d'=' -f3` = '#{version}' ]"
  end
end
15 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/recipes/varnish.rb:
--------------------------------------------------------------------------------
# Install Varnish
package "varnish" do
  # `action :upgrade` — the original bare `:upgrade` symbol was a no-op
  # that left the resource on its default :install action.
  action :upgrade
end

# The boot config
cookbook_file "/etc/default/varnish" do
  source "varnish/varnish"
  mode 0640
end

# The rules that regulate how varnish works
template "/etc/varnish/default.vcl" do
  source "varnish/default.vcl.erb"
  mode 0640
  variables({
    :apache_port => node[:apache_port],
    :varnish_whitelist => node[:varnish_whitelist],
    :varnish_ttl => node[:varnish_ttl],
    :varnish_health_url => node[:varnish_health_url],
    :varnish_banned_ips => node[:varnish_banned_ips],
    :varnish_no_cache_urls => node[:varnish_no_cache_urls]
  })
end

script "Restart varnish" do
  interpreter "bash"
  user "root"
  group "root"
  code <<-EOH
  service varnish restart
  EOH
end

script "restart-apache" do
  interpreter "bash"
  user "root"
  code <<-EOH
  apachectl restart
  EOH
end
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/apache/ports.conf.erb:
--------------------------------------------------------------------------------
1 | Listen <%= @apache_port %>
2 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/apache/vhost.erb:
--------------------------------------------------------------------------------
<VirtualHost *:<%= @apache_port %>>
    ServerName <%= @server_name %>
    ServerAlias <%= @server_name %>
    WSGIScriptAlias / /apps/<%= @app_name %>/repo/project/wsgi_prod.py
    WSGIDaemonProcess <%= @app_name %> user=<%= @apps_user %> processes=2 threads=15 display-name=%{GROUP} python-path=/apps/<%= @app_name %>/lib/python2.7/site-packages/:/apps/<%= @app_name %>/repo/
    WSGIProcessGroup <%= @app_name %>
    WSGIApplicationGroup %{GLOBAL}
    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined
    <Directory /apps/<%= @app_name %>/repo/project/>
        Require all granted
    </Directory>
</VirtualHost>
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/munin/munin-node.conf.erb:
--------------------------------------------------------------------------------
1 | #
2 | # Example config-file for munin-node
3 | #
4 |
5 | log_level 4
6 | log_file /var/log/munin/munin-node.log
7 | pid_file /var/run/munin/munin-node.pid
8 |
9 | background 1
10 | setsid 1
11 |
12 | user root
13 | group root
14 |
15 | # Regexps for files to ignore
16 |
17 | ignore_file ~$
18 | #ignore_file [#~]$ # FIX doesn't work. '#' starts a comment
19 | ignore_file DEADJOE$
20 | ignore_file \.bak$
21 | ignore_file %$
22 | ignore_file \.dpkg-(tmp|new|old|dist)$
23 | ignore_file \.rpm(save|new)$
24 | ignore_file \.pod$
25 |
26 | # Set this if the client doesn't report the correct hostname when
27 | # telnetting to localhost, port 4949
28 | #
29 | host_name <%= @name %>
30 |
31 | # A list of addresses that are allowed to connect. This must be a
32 | # regular expression, since Net::Server does not understand CIDR-style
33 | # network notation unless the perl module Net::CIDR is installed. You
34 | # may repeat the allow line as many times as you'd like
35 |
36 | allow ^127\.0\.0\.1$
37 | <% @munin_master_ips.each do |item| -%>
38 | allow <%= item %>
39 | <% end -%>
40 |
41 | # If you have installed the Net::CIDR perl module, you can use one or more
42 | # cidr_allow and cidr_deny address/mask patterns. A connecting client must
43 | # match any cidr_allow, and not match any cidr_deny. Note that a netmask
44 | # *must* be provided, even if it's /32
45 | #
46 | # Example:
47 | #
48 | # cidr_allow 127.0.0.1/32
49 | # cidr_allow 192.0.2.0/24
50 | # cidr_deny 192.0.2.42/32
51 |
52 | # Which address to bind to;
53 | host *
54 | # host 127.0.0.1
55 |
56 | # And which port
57 | port 4949
58 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/munin/munin.conf.erb:
--------------------------------------------------------------------------------
1 | includedir /etc/munin/munin-conf.d
2 |
3 | [<%= @name %>]
4 | address 127.0.0.1
5 | use_node_name yes
6 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/munin/pgstats.erb:
--------------------------------------------------------------------------------
1 | [pgstats]
2 | user <%= @munin_db_user %>
3 | env.database <%= @munin_db_name %>
4 | env.include_db <%= @munin_include_db_list %>
5 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/munin/varnish4_.erb:
--------------------------------------------------------------------------------
1 | [varnish4_*]
2 | env.varnishstat varnishstat
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/users/sudoers.erb:
--------------------------------------------------------------------------------
1 | #
2 | # This file MUST be edited with the 'visudo' command as root.
3 | #
4 | # Please consider adding local content in /etc/sudoers.d/ instead of
5 | # directly modifying this file.
6 | #
7 | # See the man page for details on how to write a sudoers file.
8 | #
9 | Defaults env_reset
10 | Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
11 |
12 | # Host alias specification
13 |
14 | # User alias specification
15 |
16 | # Cmnd alias specification
17 |
18 | # User privilege specification
19 | root ALL=(ALL:ALL) ALL
20 | <%= @apps_user %> ALL=(ALL) NOPASSWD:ALL
21 |
22 | # Members of the admin group may gain root privileges
23 | %admin ALL=(ALL) ALL
24 |
25 | # Allow members of group sudo to execute any command
26 | %sudo ALL=(ALL:ALL) ALL
27 |
28 | # See sudoers(5) for more information on "#include" directives:
29 |
30 | #includedir /etc/sudoers.d
31 |
32 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/varnish/default.vcl.erb:
--------------------------------------------------------------------------------
1 | # Newly updated for Varnish 4
2 | vcl 4.0;
3 |
4 | import std;
5 | import directors;
6 |
7 | backend default {
8 | # Set a host.
9 | .host = "127.0.0.1";
10 | # Set a port. 80 is normal Web traffic.
11 | .port = "<%= @apache_port %>";
12 | .probe = {
13 | .url = "<%= @varnish_health_url %>";
14 | .interval = 5s;
15 | .window = 5;
16 | .threshold = 3;
17 | }
18 | }
19 |
20 | # Whitelist to allow cache purge requests
21 | acl purge_ok {
22 | "127.0.0.1";
23 | "localhost";
24 | "::1";
25 | <% @varnish_whitelist.each do |item| -%>
26 | "<%= item %>";
27 | <% end -%>
28 | }
29 |
30 | acl banned_ips {
31 | "5.61.38.11";
32 | <% @varnish_banned_ips.each do |item| -%>
33 | "<%= item %>";
34 | <% end -%>
35 | }
36 |
37 | # Which URL components are used to look up a match
38 | # and return a cached page.
39 | sub vcl_hash {
40 | # Build a typical hash
41 | hash_data(req.url);
42 | if (req.http.host) {
43 | hash_data(req.http.host);
44 | } else {
45 | hash_data(server.ip);
46 | }
47 | # Pass hash out
48 | return(lookup);
49 | }
50 |
51 | # Taken from: https://www.varnish-software.com/blog/grace-varnish-4-stale-while-revalidate-semantics-varnish
52 | sub vcl_hit {
53 | if (obj.ttl >= 0s) {
54 | # normal hit
55 | return (deliver);
56 | }
57 | # We have no fresh fish. Lets look at the stale ones.
58 | if (std.healthy(req.backend_hint)) {
59 | # Backend is healthy. Limit age to 10s.
60 | if (obj.ttl + 10s > 0s) {
61 | set req.http.grace = "normal(limited)";
62 | return (deliver);
63 | } else {
64 | # No candidate for grace. Fetch a fresh object.
65 | return(fetch);
66 | }
67 | } else {
68 | # backend is sick - use full grace
69 | if (obj.ttl + obj.grace > 0s) {
70 | set req.http.grace = "full";
71 | return (deliver);
72 | } else {
73 | # no graced object.
74 | return (fetch);
75 | }
76 | }
77 | return (fetch);
78 | }
79 |
80 | sub vcl_recv {
81 | if (client.ip ~ banned_ips) {
82 | return(synth(403, "Forbidden."));
83 | }
84 |
85 | set req.backend_hint = default;
86 | set req.http.grace = "none";
87 |
88 | if (req.restarts == 0) {
89 | if (req.http.x-forwarded-for) {
90 | set req.http.X-Forwarded-For =
91 | req.http.X-Forwarded-For + ", " + client.ip;
92 | } else {
93 | set req.http.X-Forwarded-For = client.ip;
94 | }
95 | }
96 |
97 | # If it's not GET or HEAD let it go
98 | if (req.method != "GET" && req.method != "HEAD") {
99 | return (pass);
100 | }
101 |
102 | # Don't cache these pages
103 | if (req.url ~ "^/admin/" ||
104 | <% @varnish_no_cache_urls.each do |item| -%>
105 | req.url ~ "<%= item %>" ||
106 | <% end -%>
107 | req.url ~ "^/munin/" ||
108 | req.url ~ "^/server-status") {
109 | return(pass);
110 | }
111 |
112 | if (req.url ~ "\.(ico)$") {
113 | return(synth(750, "Moved Temporarily"));
114 | }
115 |
116 | # Allow purges
117 | if (req.method == "PURGE") {
118 | if (client.ip ~ purge_ok) {
119 | return(purge);
120 | } else {
121 | return(synth(403, "Access denied."));
122 | }
123 | }
124 |
125 | # Normalize the query arguments
126 | set req.url = std.querysort(req.url);
127 | return (hash);
128 | }
129 |
130 | sub vcl_deliver {
131 | set resp.http.grace = req.http.grace;
132 | if (obj.hits > 0) {
133 | set resp.http.X-Varnish-Cache = "HIT";
134 | }
135 | else {
136 | set resp.http.X-Varnish-Cache = "MISS";
137 | }
138 | return(deliver);
139 | }
140 |
141 | sub vcl_backend_response {
142 | # For grace mode. This is how long Varnish will keep objects in its cache
143 | # after their TTL has expired.
144 | set beresp.grace = 1h;
145 |
146 | # Get the response. Set the cache lifetime.
147 | set beresp.ttl = <%= @varnish_ttl %>;
148 | set beresp.http.X-Varnish-TTL = "<%= @varnish_ttl %>";
149 |
150 | unset beresp.http.Vary;
151 | return (deliver);
152 | }
153 |
--------------------------------------------------------------------------------
/chef/cookbooks/datadesk/templates/default/varnish/esi.vcl.erb:
--------------------------------------------------------------------------------
1 | # Newly updated for Varnish 4
2 | vcl 4.0;
3 |
4 | import std;
5 | import directors;
6 |
7 | backend default {
8 | # Set a host.
9 | .host = "127.0.0.1";
10 | # Set a port. 80 is normal Web traffic.
11 | .port = "<%= @apache_port %>";
12 | .probe = {
13 | .url = "<%= @varnish_health_url %>";
14 | .interval = 5s;
15 | .window = 5;
16 | .threshold = 3;
17 | }
18 | }
19 |
20 | # Whitelist to allow cache purge requests
21 | acl purge_ok {
22 | "127.0.0.1";
23 | "localhost";
24 | "::1";
25 | <% @varnish_whitelist.each do |item| -%>
26 | "<%= item %>";
27 | <% end -%>
28 | }
29 |
30 | acl banned_ips {
31 | "5.61.38.11";
32 | <% @varnish_banned_ips.each do |item| -%>
33 | "<%= item %>";
34 | <% end -%>
35 | }
36 |
37 | # Which URL components are used to look up a match
38 | # and return a cached page.
39 | sub vcl_hash {
40 | # Build a typical hash
41 | hash_data(req.url);
42 | if (req.http.host) {
43 | hash_data(req.http.host);
44 | } else {
45 | hash_data(server.ip);
46 | }
47 | # Pass hash out
48 | return(lookup);
49 | }
50 |
51 | # Taken from: https://www.varnish-software.com/blog/grace-varnish-4-stale-while-revalidate-semantics-varnish
52 | sub vcl_hit {
53 | if (obj.ttl >= 0s) {
54 | # normal hit
55 | return (deliver);
56 | }
57 | # We have no fresh fish. Lets look at the stale ones.
58 | if (std.healthy(req.backend_hint)) {
59 | # Backend is healthy. Limit age to 10s.
60 | if (obj.ttl + 10s > 0s) {
61 | set req.http.grace = "normal(limited)";
62 | return (deliver);
63 | } else {
64 | # No candidate for grace. Fetch a fresh object.
65 | return(fetch);
66 | }
67 | } else {
68 | # backend is sick - use full grace
69 | if (obj.ttl + obj.grace > 0s) {
70 | set req.http.grace = "full";
71 | return (deliver);
72 | } else {
73 | # no graced object.
74 | return (fetch);
75 | }
76 | }
77 | return (fetch);
78 | }
79 |
80 | sub vcl_recv {
81 | if (client.ip ~ banned_ips) {
82 | return(synth(403, "Forbidden."));
83 | }
84 |
85 | set req.backend_hint = default;
86 | set req.http.grace = "none";
87 |
88 | if (req.restarts == 0) {
89 | if (req.http.x-forwarded-for) {
90 | set req.http.X-Forwarded-For =
91 | req.http.X-Forwarded-For + ", " + client.ip;
92 | } else {
93 | set req.http.X-Forwarded-For = client.ip;
94 | }
95 | }
96 |
97 | # If it's not GET or HEAD let it go
98 | if (req.method != "GET" && req.method != "HEAD") {
99 | return (pass);
100 | }
101 |
102 | # Don't cache these pages
103 | if (req.url ~ "^/admin/" ||
104 | <% @varnish_no_cache_urls.each do |item| -%>
105 | req.url ~ "<%= item %>" ||
106 | <% end -%>
107 | req.url ~ "^/munin/" ||
108 | req.url ~ "^/server-status") {
109 | return(pass);
110 | }
111 |
112 | if (req.url ~ "\.(ico)$") {
113 | return(synth(750, "Moved Temporarily"));
114 | }
115 |
116 | # Allow purges
117 | if (req.method == "PURGE") {
118 | if (client.ip ~ purge_ok) {
119 | return(purge);
120 | } else {
121 | return(synth(403, "Access denied."));
122 | }
123 | }
124 |
125 | # Normalize the query arguments
126 | set req.url = std.querysort(req.url);
127 | return (hash);
128 | }
129 |
130 | sub vcl_deliver {
131 | set resp.http.grace = req.http.grace;
132 | if (obj.hits > 0) {
133 | set resp.http.X-Varnish-Cache = "HIT";
134 | unset resp.http.Cookie;
135 | unset resp.http.Set-Cookie;
136 | unset resp.http.cookie;
137 | unset resp.http.set-cookie;
138 | }
139 | else {
140 | set resp.http.X-Varnish-Cache = "MISS";
141 | }
142 | return(deliver);
143 | }
144 |
145 | sub vcl_backend_response {
146 | # For grace mode. This is how long Varnish will keep objects in its cache
147 | # after their TTL has expired.
148 | set beresp.grace = 1h;
149 |
150 | if (beresp.http.X-ESI) {
151 | set beresp.do_esi = true;
152 | }
153 |
154 | if (bereq.url ~ "/esi/" || bereq.url ~ "/my-recipes/") {
155 | set beresp.uncacheable = true;
156 | set beresp.http.X-Varnish-TTL = "uncacheable";
157 | } else {
158 | set beresp.ttl = <%= @varnish_ttl %>;
159 | set beresp.http.X-Varnish-TTL = "<%= @varnish_ttl %>";
160 | }
161 |
162 | unset beresp.http.Vary;
163 | return (deliver);
164 | }
165 |
--------------------------------------------------------------------------------
/chef/node.json:
--------------------------------------------------------------------------------
1 | {
2 | "run_list": [
3 | "datadesk::default",
4 | "datadesk::python",
5 | "datadesk::postgresql",
6 | "datadesk::pgpool",
7 | "datadesk::apps",
8 | "datadesk::apache",
9 | "datadesk::varnish",
10 | "datadesk::memcached",
11 | "datadesk::munin",
12 | "datadesk::newrelic",
13 | "datadesk::cron",
14 | "datadesk::motd"
15 | ],
16 | "base_packages": [
17 | "git-core",
18 | "bash-completion",
19 | "zip",
20 | "unzip",
21 | "libfreetype6-dev",
22 | "libjpeg-dev",
23 | "libxml-parser-perl"
24 | ],
25 |
26 | "users": {
27 | "datadesk": {
28 | "id": 1002,
29 | "full_name": "datadesk"
30 | }
31 | },
32 |
33 | "groups": {
34 | "datadesk": {
35 | "gid": 202,
36 | "members": ["datadesk"]
37 | }
38 | },
39 |
40 | "ubuntu_python_packages": [
41 | "python-setuptools",
42 | "python-pip",
43 | "python-dev",
44 | "libpq-dev",
45 | "python-virtualenv",
46 | "fabric"
47 | ],
48 |
49 | "pip_python_packages": {},
50 |
51 | "apps_user": "datadesk",
52 | "apps_password": "datadesk",
53 |
54 | "apps": [{
55 | "name": "",
56 | "repo": "",
57 | "branch": "master",
58 | "db_user": "",
59 | "db_name": "",
60 | "db_password": "",
61 | "apache_server_name": "",
62 | "management": [""]
63 | }],
64 |
65 | "apache_port": "8008",
66 | "varnish_banned_ips": [],
67 | "varnish_whitelist": [],
68 | "varnish_no_cache_urls": [],
69 | "varnish_ttl": "10m",
70 | "varnish_health_url": "/app_status/",
71 |
72 | "newrelic_license_key": "",
73 |
74 | "munin_name": "",
75 | "munin_db_user": "",
76 | "munin_db_name": "",
77 | "munin_include_db_list": "",
78 | "munin_master_ips": [],
79 | "crons": {}
80 | }
81 |
--------------------------------------------------------------------------------
/chef/solo.rb:
--------------------------------------------------------------------------------
1 | cookbook_path File.expand_path(File.join(File.dirname(__FILE__), "cookbooks"))
2 |
--------------------------------------------------------------------------------
/fabfile/__init__.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from .alertthemedia import alertthemedia
from .bigfiles import bigfiles
from .clean import clean
from .collectstatic import collectstatic
from .cook import cook
from .createserver import createserver
from .deploy import deploy
from .hampsterdance import hampsterdance
from .installchef import installchef
from .load import load
from .makesecret import makesecret
from .manage import manage
from .migrate import migrate
from .migrate import syncdb
from .pep8 import pep8
from .pipinstall import pipinstall
from .ps import ps
from .pull import pull
from .restartapache import restartapache
from .restartvarnish import restartvarnish
from .rmpyc import rmpyc
from .rs import rs
from .sh import sh
from .ssh import ssh
from .tabnanny import tabnanny
from .updatetemplates import updatetemplates

from .env import *

# Tasks exported by `from fabfile import *`. Every name here must be
# imported above; 'pushrawdata' was listed without a matching module or
# import, which made the star-import raise AttributeError.
__all__ = (
    'alertthemedia',
    'bigfiles',
    'clean',
    'collectstatic',
    'cook',
    'createserver',
    'deploy',
    'hampsterdance',
    'installchef',
    'load',
    'makesecret',
    'manage',
    'migrate',
    'syncdb',
    'pep8',
    'pipinstall',
    'ps',
    'pull',
    'restartapache',
    'restartvarnish',
    'rmpyc',
    'rs',
    'sh',
    'ssh',
    'tabnanny',
    'updatetemplates',
)
60 |
--------------------------------------------------------------------------------
/fabfile/alertthemedia.py:
--------------------------------------------------------------------------------
1 | from fabric.api import task, local
2 |
3 |
@task
def alertthemedia():
    """
    Ring the alarm!
    """
    # A HEAD request against the sound system is all it takes.
    cmd = "curl -I http://databank-soundsystem.latimes.com/rollout/"
    local(cmd)
10 |
--------------------------------------------------------------------------------
/fabfile/bigfiles.py:
--------------------------------------------------------------------------------
1 | from fabric.api import local, hide, task
2 |
3 |
@task
def bigfiles(min_size='20000k'):
    """
    List all files in the current directory over the provided size,
    which 20MB by default.

    Example usage:

        $ fab bigfiles

    """
    # find prints the matches, awk reformats them as "path: size".
    cmd = """find ./ -type f -size +%s -exec ls -lh {} \; | \
awk '{ print $NF ": " $5 }'"""
    with hide('everything'):
        # capture=True makes local() return the command's stdout; without
        # it Fabric 1.x returns an empty string, so the branch below always
        # reported "No files" even when matches existed.
        list_ = local(cmd % min_size, capture=True)
    if list_:
        print("Files over %s" % min_size)
        print(list_)
    else:
        print("No files over %s" % min_size)
24 |
--------------------------------------------------------------------------------
/fabfile/clean.py:
--------------------------------------------------------------------------------
1 | from fabric.api import env, cd, sudo, task
2 |
3 |
@task
def clean():
    """
    Erases pyc files from our app code directory.
    """
    # Fabric's default shell differs per host; pin bash for this one.
    env.shell = "/bin/bash -c"
    with cd(env.project_dir):
        # -print0/-0 keeps paths with spaces intact.
        find_cmd = "find . -name '*.pyc' -print0|xargs -0 rm"
        sudo(find_cmd, pty=True)
12 |
--------------------------------------------------------------------------------
/fabfile/collectstatic.py:
--------------------------------------------------------------------------------
1 | from .venv import _venv
2 | from fabric.api import task
3 |
4 |
@task
def collectstatic():
    """
    Roll out the latest static files
    """
    # Wipe the previous build, then regenerate it from scratch.
    for cmd in ("rm -rf ./static", "python manage.py collectstatic --noinput"):
        _venv(cmd)
12 |
--------------------------------------------------------------------------------
/fabfile/cook.py:
--------------------------------------------------------------------------------
1 | from fabric.api import sudo, local, env, task
2 | from fabric.contrib.project import rsync_project
3 |
4 |
@task
def cook():
    """
    Update Chef cookbook and execute it.
    """
    # Make sure the chef directory exists and our user can rsync into it.
    sudo('mkdir -p /etc/chef')
    sudo('chown ubuntu -R /etc/chef')
    # Ship the local cookbook up, then run chef-solo (env.chef) against it.
    rsync_project("/etc/chef/", "./chef/")
    chef_run = 'cd /etc/chef && %s' % env.chef
    sudo(chef_run, pty=True)
14 |
--------------------------------------------------------------------------------
/fabfile/createserver.py:
--------------------------------------------------------------------------------
1 | import boto
2 | import time
3 | import boto.ec2
4 | from fabric.api import env, task
5 |
6 |
@task
def createserver(
    region='us-west-2',
    ami='ami-7c803d1c',
    key_name='datadesk.march.2015',
    instance_type='t2.medium',
    block_gb_size=10,
    volume_type='gp2',
    subnet_id='subnet-3e349e49',
):
    """
    Spin up a new server on Amazon EC2.

    Returns the id and public address.

    By default, we use Ubuntu 12.04 LTS
    """
    # NOTE(review): AMI ids are region-specific and rot over time —
    # confirm ami-7c803d1c still matches the Ubuntu release named above.
    print("Warming up...")
    # Credentials come from the fabric env (see fabfile/env.py).
    conn = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=env.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=env.AWS_SECRET_ACCESS_KEY,
    )
    print("Reserving an instance...")
    # Describe the root EBS volume: size in GB and volume type (e.g. gp2).
    bdt = boto.ec2.blockdevicemapping.BlockDeviceType(
        connection=conn,
        size=block_gb_size,
        volume_type=volume_type,
    )
    bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping(connection=conn)
    # Attach the volume description as the root device.
    bdm['/dev/sda1'] = bdt
    reservation = conn.run_instances(
        ami,
        key_name=key_name,
        instance_type=instance_type,
        block_device_map=bdm,
        subnet_id=subnet_id,
    )
    instance = reservation.instances[0]
    print('Waiting for instance to start...')
    # Check up on its status every so often
    status = instance.update()
    while status == 'pending':
        time.sleep(10)
        status = instance.update()
    if status == 'running':
        print('New instance %s' % instance.id)
        print('Accessible at %s' % instance.public_dns_name)
    else:
        # Anything other than 'running' here (e.g. 'terminated') is a failure.
        print('Instance status: ' + status)
    return (instance.id, instance.public_dns_name)
58 |
--------------------------------------------------------------------------------
/fabfile/deploy.py:
--------------------------------------------------------------------------------
1 | from fabric.api import settings, task
2 | from .pull import pull
3 | from .clean import clean
4 | from .pipinstall import pipinstall
5 | from .restartapache import restartapache
6 |
7 |
@task
def deploy():
    """
    Deploy the latest code and restart everything.
    """
    # Fetch the latest code first.
    pull()
    # Cleanup is best-effort; a failure here should not stop the deploy.
    with settings(warn_only=True):
        clean()
    # Refresh dependencies, then bounce the web server.
    pipinstall()
    restartapache()
18 |
--------------------------------------------------------------------------------
/fabfile/env.py:
--------------------------------------------------------------------------------
from fabric.api import env
from os.path import expanduser

# Shared Fabric settings for every task in this fabfile.
# The empty strings are per-project placeholders meant to be filled in.
env.key_filename = (expanduser(''),)  # path to the SSH private key
env.user = 'datadesk'  # login user on the remote hosts
env.known_hosts = ''
env.chef = '/usr/local/bin/chef-solo -c solo.rb -j node.json'  # run by cook()
env.app_user = 'datadesk'
env.project_dir = ''  # remote code directory; used by clean()
# NOTE(review): looks like a virtualenv prefix is missing before
# /bin/activate — confirm before relying on this.
env.activate = 'source /bin/activate'
env.branch = 'master'  # git branch to deploy
env.AWS_SECRET_ACCESS_KEY = ''  # EC2 credentials; used by createserver()
env.AWS_ACCESS_KEY_ID = ''
env.hosts = ("",)  # remote hosts fabric targets
15 |
--------------------------------------------------------------------------------
/fabfile/hampsterdance.py:
--------------------------------------------------------------------------------
1 | from fabric.api import task, local
2 |
3 |
@task
def hampsterdance():
    """
    The soundtrack of the Internet that once was.
    """
    # A HEAD request cues up the classic.
    cmd = "curl -I http://databank-soundsystem.latimes.com/hampster-dance/"
    local(cmd)
10 |
--------------------------------------------------------------------------------
/fabfile/installchef.py:
--------------------------------------------------------------------------------
1 | from fabric.api import sudo, task
2 |
3 |
@task
def installchef():
    """
    Install all the dependencies to run a Chef cookbook
    """
    # Install dependencies
    sudo('apt-get update', pty=True)
    sudo('apt-get install -y git-core', pty=True)
    # Install Chef
    # NOTE(review): piping a remote script straight into bash trusts
    # opscode.com completely; pin a version/checksum if that is a concern.
    sudo('curl -L https://www.opscode.com/chef/install.sh | bash', pty=True)
    # -f so a re-run does not abort when the symlink already exists.
    sudo('ln -sf /opt/chef/bin/chef-solo /usr/local/bin/chef-solo')
15 |
--------------------------------------------------------------------------------
/fabfile/load.py:
--------------------------------------------------------------------------------
1 | from fabric.api import run, hide, task, env
2 |
3 |
@task
def load():
    """
    Prints the current load values.

    Example usage:

        $ fab stage load
        $ fab prod load

    """
    def _set_color(load):
        """
        Wrap a load-average string in terminal color codes: green under 1,
        yellow under 3, red otherwise. Returns the formatted string.
        """
        value = float(load)
        template = "\033[1m\x1b[%sm%s\x1b[0m\033[0m"
        if value < 1:
            color = 32  # green
        elif value < 3:
            color = 33  # yellow
        else:
            color = 31  # red
        return template % (color, value)

    with hide('everything'):
        # `uptime` ends with "...load average: a, b, c" — grab everything
        # after the final colon.
        averages = run("uptime").split(":")[-1]
        # Colorize the 1, 5 and 15 minute averages.
        one, five, fifteen = [
            _set_color(chunk.strip()) for chunk in averages.split(',')
        ]
        # Prefix with the host currently being polled and print.
        host = env['host']
        print(u'%s: %s' % (host, ", ".join([one, five, fifteen])))
49 |
--------------------------------------------------------------------------------
/fabfile/makesecret.py:
--------------------------------------------------------------------------------
1 | import random
2 | from fabric.api import task
3 |
4 |
@task
def makesecret(
    length=50,
    allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
):
    """
    Generates secret key for use in Django settings.

    Keyword arguments:
        length: how many characters the key should contain.
        allowed_chars: the pool of characters to draw from.
    """
    # The key is a security credential, so draw from the OS-backed
    # CSPRNG instead of the default (predictable) Mersenne Twister.
    rng = random.SystemRandom()
    key = ''.join(rng.choice(allowed_chars) for i in range(length))
    # print() keeps this runnable on Python 3; the old print statement
    # form was a SyntaxError there (sibling tasks already use print()).
    print('SECRET_KEY = "%s"' % key)
15 |
--------------------------------------------------------------------------------
/fabfile/manage.py:
--------------------------------------------------------------------------------
1 | from .venv import _venv
2 | from fabric.api import task
3 |
4 |
@task
def manage(cmd):
    """
    Run the provided Django manage.py command
    """
    # Delegate to the shared virtualenv runner so the project's own
    # interpreter executes the command.
    _venv("python manage.py {0}".format(cmd))
11 |
--------------------------------------------------------------------------------
/fabfile/migrate.py:
--------------------------------------------------------------------------------
1 | from venv import _venv
2 | from fabric.api import task
3 |
4 |
@task
def migrate():
    """
    Apply pending Django database migrations on the remote host.
    """
    # Runs inside the project virtualenv via the shared _venv helper.
    _venv("python manage.py migrate")
11 |
12 |
@task
def syncdb():
    """
    Create any missing database tables with Django's syncdb command.
    """
    # Runs inside the project virtualenv via the shared _venv helper.
    _venv("python manage.py syncdb")
19 |
--------------------------------------------------------------------------------
/fabfile/pep8.py:
--------------------------------------------------------------------------------
1 | import os
2 | from fabric.api import local, hide, task
3 |
4 |
@task
def pep8():
    """
    Flags any violations of the Python style guide.

    Requires that you have the pep8 package installed

    Example usage:

        $ fab pep8

    Documentation:

        http://github.com/jcrocholl/pep8

    """
    print("Checking Python style")
    # Every directory under the current one, skipping hidden trees
    # such as ./.git
    targets = [
        path for path, dirs, files in os.walk('./')
        if not path.startswith('./.')
    ]
    # Run the style checker against each directory, quietly
    with hide('everything'):
        reports = [local("pep8 %s" % target) for target in targets]
    # Print only the reports that actually flagged something
    for report in reports:
        if report:
            print(report)
33 |
--------------------------------------------------------------------------------
/fabfile/pipinstall.py:
--------------------------------------------------------------------------------
1 | from .venv import _venv
2 | from fabric.api import task
3 |
4 |
@task
def pipinstall(package=''):
    """
    Install Python requirements inside a virtualenv.
    """
    # With no package named, install everything pinned in
    # requirements.txt; otherwise install just the requested package.
    if package:
        cmd = "pip install %s" % package
    else:
        cmd = "pip install -r requirements.txt"
    _venv(cmd)
14 |
--------------------------------------------------------------------------------
/fabfile/ps.py:
--------------------------------------------------------------------------------
1 | from fabric.api import run, task
2 |
3 |
@task
def ps(process='all'):
    """
    Reports a snapshot of the current processes.

    With the default ``process='all'`` every running process is listed.
    Any other value is used as a grep filter on the process table.

    Example usage:

        $ fab prod ps:process=all
        $ fab prod ps:process=httpd
        $ fab prod ps:process=postgres

    Documentation::

        "ps":http://unixhelp.ed.ac.uk/CGI/man-cgi?ps

    """
    if process != 'all':
        # Include memory and CPU columns for the matching processes.
        run("ps -e -O rss,pcpu | grep %s" % process)
    else:
        run("ps aux")
29 |
--------------------------------------------------------------------------------
/fabfile/pull.py:
--------------------------------------------------------------------------------
1 | from .venv import _venv
2 | from fabric.api import env, task
3 |
4 |
@task
def pull():
    """
    Pulls the latest code using Git
    """
    # Fetch and merge the branch configured on the fabric env object.
    _venv("git pull origin {0}".format(env.branch))
11 |
--------------------------------------------------------------------------------
/fabfile/restartapache.py:
--------------------------------------------------------------------------------
1 | from fabric.api import sudo, task
2 |
3 |
@task
def restartapache():
    """
    Gracefully reload the Apache configuration on the app servers.

    NOTE(review): despite the task name this issues ``reload``, not
    ``restart``, so in-flight connections are not dropped.
    """
    sudo("/etc/init.d/apache2 reload", pty=True)
10 |
--------------------------------------------------------------------------------
/fabfile/restartvarnish.py:
--------------------------------------------------------------------------------
1 | from fabric.api import sudo, task
2 |
3 |
@task
def restartvarnish():
    """
    Restart the Varnish cache service
    """
    # Bounce the cache daemon; pty keeps sudo happy on the target host.
    sudo("service varnish restart", pty=True)
10 |
--------------------------------------------------------------------------------
/fabfile/rmpyc.py:
--------------------------------------------------------------------------------
1 | from fabric.api import local, hide, task
2 |
3 |
@task
def rmpyc():
    """
    Erases pyc files from current directory.

    Example usage:

        $ fab rmpyc

    """
    print("Removing .pyc files")
    with hide('everything'):
        # find's -delete succeeds even when nothing matches. The old
        # ``-print0 | xargs -0 rm`` form ran ``rm`` with no operands on
        # an empty match list, exited non-zero, and aborted the task.
        local("find . -name '*.pyc' -delete", capture=False)
17 |
--------------------------------------------------------------------------------
/fabfile/rs.py:
--------------------------------------------------------------------------------
1 | from .rmpyc import rmpyc
2 | from fabric.api import local, settings, task
3 |
4 |
@task
def rs(port=8000):
    """
    Fire up the Django test server, after cleaning out any .pyc files.

    Example usage:

        $ fab rs
        $ fab rs:port=9000

    """
    with settings(warn_only=True):
        # Clear stale bytecode first so the reloader serves fresh code.
        rmpyc()
        # Bind on all interfaces so the server is reachable externally.
        local(
            "python manage.py runserver 0.0.0.0:{0}".format(port),
            capture=False
        )
19 |
--------------------------------------------------------------------------------
/fabfile/sh.py:
--------------------------------------------------------------------------------
1 | from .rmpyc import rmpyc
2 | from fabric.api import local, task
3 |
4 |
@task
def sh():
    """
    Fire up the Django shell, after cleaning out any .pyc files.

    Example usage:

        $ fab sh

    """
    # Drop stale bytecode before opening the interactive shell.
    rmpyc()
    local("python manage.py shell", capture=False)
17 |
--------------------------------------------------------------------------------
/fabfile/ssh.py:
--------------------------------------------------------------------------------
1 | from fabric.api import env, local, task
2 |
3 |
@task
def ssh():
    """
    Log into the remote host using SSH
    """
    # Connect as the ubuntu user with the keypair configured on env.
    host, key = env.hosts[0], env.key_filename[0]
    local("ssh ubuntu@%s -i %s" % (host, key))
10 |
--------------------------------------------------------------------------------
/fabfile/tabnanny.py:
--------------------------------------------------------------------------------
1 | from fabric.api import hide, local, task
2 |
3 |
@task
def tabnanny():
    """
    Checks whether any of your files have improper tabs

    Example usage:

        $ fab tabnanny

    """
    print("Running tabnanny")
    # Let the stdlib tabnanny module scan the whole tree quietly.
    with hide('everything'):
        local("python -m tabnanny ./")
17 |
--------------------------------------------------------------------------------
/fabfile/updatetemplates.py:
--------------------------------------------------------------------------------
1 | from fabric.api import lcd, local, task
2 |
3 |
@task
def updatetemplates(template_path='./templates'):
    """
    Download the latest template release and load it into your system.

    It will unzip to "./templates" where you run it.
    """
    url = "http://databank-cookbook.latimes.com/dist/templates/latest.zip"
    with lcd(template_path):
        # Fetch the archive, expand it over what's there, then tidy up.
        for cmd in (
            "curl -O %s" % url,
            "unzip -o latest.zip",
            "rm latest.zip",
        ):
            local(cmd)
16 |
--------------------------------------------------------------------------------
/fabfile/venv.py:
--------------------------------------------------------------------------------
1 | from fabric.api import cd, env, sudo
2 |
3 |
def _venv(cmd):
    """
    Runs the provided command in a remote virtualenv.

    Arguments:
        cmd: the shell command to execute once the environment has
            been activated.
    """
    with cd(env.project_dir):
        # Activate the virtualenv once and then run the command as the
        # application user. The previous template interpolated
        # env.activate twice by mistake, activating the environment
        # back to back before running the command.
        sudo(
            "%s && %s" % (env.activate, cmd),
            user=env.app_user
        )
13 |
--------------------------------------------------------------------------------
/manage.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings module unless the caller
    # has already set DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE",
        "{{ project_name }}.settings"
    )
    # Imported lazily so the environment variable above is set first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
12 |
--------------------------------------------------------------------------------
/project_name/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datadesk/django-project-template/b9eb644f73d30680038c7032b0060ab4e3a4b677/project_name/__init__.py
--------------------------------------------------------------------------------
/project_name/newrelic.ini:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------------------
2 |
3 | #
4 | # This file configures the New Relic Python Agent.
5 | #
6 | # The path to the configuration file should be supplied to the function
7 | # newrelic.agent.initialize() when the agent is being initialized.
8 | #
9 | # The configuration file follows a structure similar to what you would
10 | # find for Microsoft Windows INI files. For further information on the
11 | # configuration file format see the Python ConfigParser documentation at:
12 | #
13 | # http://docs.python.org/library/configparser.html
14 | #
15 | # For further discussion on the behaviour of the Python agent that can
16 | # be configured via this configuration file see:
17 | #
18 | # http://newrelic.com/docs/python/python-agent-configuration
19 | #
20 |
21 | # ---------------------------------------------------------------------------
22 |
23 | # Here are the settings that are common to all environments.
24 |
25 | [newrelic]
26 |
27 | # You must specify the license key associated with your New
28 | # Relic account. This key binds the Python Agent's data to your
29 | # account in the New Relic service.
30 | license_key =
31 |
# The application name. Set this to be the name of your
# application as you would like it to show up in the New Relic UI.
# The UI will then auto-map instances of your application into an
# entry on your home dashboard page.
36 | app_name =
37 |
38 | # When "true", the agent collects performance data about your
39 | # application and reports this data to the New Relic UI at
40 | # newrelic.com. This global switch is normally overridden for
41 | # each environment below.
42 | monitor_mode = true
43 |
44 | # Sets the name of a file to log agent messages to. Useful for
45 | # debugging any issues with the agent. This is not set by
46 | # default as it is not known in advance what user your web
47 | # application processes will run as and where they have
48 | # permission to write to. Whatever you set this to you must
49 | # ensure that the permissions for the containing directory and
50 | # the file itself are correct, and that the user that your web
51 | # application runs as can write to the file. If not able to
52 | # write out a log file, it is also possible to say "stderr" and
53 | # output to standard error output. This would normally result in
54 | # output appearing in your web server log.
55 | #log_file = /tmp/newrelic-python-agent.log
56 |
57 | # Sets the level of detail of messages sent to the log file, if
58 | # a log file location has been provided. Possible values, in
59 | # increasing order of detail, are: "critical", "error", "warning",
60 | # "info" and "debug". When reporting any agent issues to New
61 | # Relic technical support, the most useful setting for the
62 | # support engineers is "debug". However, this can generate a lot
63 | # of information very quickly, so it is best not to keep the
64 | # agent at this level for longer than it takes to reproduce the
65 | # problem you are experiencing.
66 | log_level = info
67 |
68 | # The Python Agent communicates with the New Relic service using
69 | # SSL by default. Note that this does result in an increase in
70 | # CPU overhead, over and above what would occur for a non SSL
71 | # connection, to perform the encryption involved in the SSL
72 | # communication. This work is though done in a distinct thread
73 | # to those handling your web requests, so it should not impact
74 | # response times. You can if you wish revert to using a non SSL
75 | # connection, but this will result in information being sent
76 | # over a plain socket connection and will not be as secure.
77 | ssl = true
78 |
79 | # The Python Agent will attempt to connect directly to the New
80 | # Relic service. If there is an intermediate firewall between
81 | # your host and the New Relic service that requires you to use a
82 | # HTTP proxy, then you should set both the "proxy_host" and
83 | # "proxy_port" settings to the required values for the HTTP
84 | # proxy. The "proxy_user" and "proxy_pass" settings should
85 | # additionally be set if proxy authentication is implemented by
86 | # the HTTP proxy. The "proxy_scheme" setting dictates what
87 | # protocol scheme is used in talking to the HTTP protocol. This
88 | # would normally always be set as "http" which will result in the
89 | # agent then using a SSL tunnel through the HTTP proxy for end to
90 | # end encryption.
91 | # proxy_scheme = http
92 | # proxy_host = hostname
93 | # proxy_port = 8080
94 | # proxy_user =
95 | # proxy_pass =
96 |
97 | # Tells the transaction tracer and error collector (when
98 | # enabled) whether or not to capture the query string for the
99 | # URL and send it as the request parameters for display in the
100 | # UI. When "true", it is still possible to exclude specific
101 | # values from being captured using the "ignored_params" setting.
102 | capture_params = false
103 |
104 | # Space separated list of variables that should be removed from
105 | # the query string captured for display as the request
106 | # parameters in the UI.
107 | ignored_params =
108 |
109 | # The transaction tracer captures deep information about slow
110 | # transactions and sends this to the UI on a periodic basis. The
111 | # transaction tracer is enabled by default. Set this to "false"
112 | # to turn it off.
113 | transaction_tracer.enabled = true
114 |
115 | # Threshold in seconds for when to collect a transaction trace.
116 | # When the response time of a controller action exceeds this
117 | # threshold, a transaction trace will be recorded and sent to
118 | # the UI. Valid values are any positive float value, or (default)
119 | # "apdex_f", which will use the threshold for a dissatisfying
120 | # Apdex controller action - four times the Apdex T value.
121 | transaction_tracer.transaction_threshold = apdex_f
122 |
123 | # When the transaction tracer is on, SQL statements can
124 | # optionally be recorded. The recorder has three modes, "off"
125 | # which sends no SQL, "raw" which sends the SQL statement in its
126 | # original form, and "obfuscated", which strips out numeric and
127 | # string literals.
128 | transaction_tracer.record_sql = obfuscated
129 |
130 | # Threshold in seconds for when to collect stack trace for a SQL
131 | # call. In other words, when SQL statements exceed this
132 | # threshold, then capture and send to the UI the current stack
133 | # trace. This is helpful for pinpointing where long SQL calls
134 | # originate from in an application.
135 | transaction_tracer.stack_trace_threshold = 0.5
136 |
137 | # Determines whether the agent will capture query plans for slow
138 | # SQL queries. Only supported in MySQL and PostgreSQL. Set this
139 | # to "false" to turn it off.
140 | transaction_tracer.explain_enabled = true
141 |
# Threshold for query execution time below which query plans
# will not be captured. Relevant only when "explain_enabled"
# is true.
145 | transaction_tracer.explain_threshold = 0.5
146 |
147 | # Space separated list of function or method names in form
148 | # 'module:function' or 'module:class.function' for which
149 | # additional function timing instrumentation will be added.
150 | transaction_tracer.function_trace =
151 |
152 | # The error collector captures information about uncaught
153 | # exceptions or logged exceptions and sends them to UI for
154 | # viewing. The error collector is enabled by default. Set this
155 | # to "false" to turn it off.
156 | error_collector.enabled = true
157 |
158 | # To stop specific errors from reporting to the UI, set this to
159 | # a space separated list of the Python exception type names to
160 | # ignore. The exception name should be of the form 'module:class'.
161 | error_collector.ignore_errors =
162 |
163 | # Browser monitoring is the Real User Monitoring feature of the UI.
164 | # For those Python web frameworks that are supported, this
165 | # setting enables the auto-insertion of the browser monitoring
166 | # JavaScript fragments.
167 | browser_monitoring.auto_instrument = true
168 |
169 | # A thread profiling session can be scheduled via the UI when
170 | # this option is enabled. The thread profiler will periodically
171 | # capture a snapshot of the call stack for each active thread in
172 | # the application to construct a statistically representative
173 | # call tree.
174 | thread_profiler.enabled = true
175 |
176 | # ---------------------------------------------------------------------------
177 |
178 | #
179 | # The application environments. These are specific settings which
180 | # override the common environment settings. The settings related to a
181 | # specific environment will be used when the environment argument to the
182 | # newrelic.agent.initialize() function has been defined to be either
183 | # "development", "test", "staging" or "production".
184 | #
185 |
186 | [newrelic:development]
187 | monitor_mode = false
188 |
189 | [newrelic:test]
190 | monitor_mode = false
191 |
192 | [newrelic:staging]
193 | app_name = Python Application (Staging)
194 | monitor_mode = true
195 |
196 | [newrelic:production]
197 | monitor_mode = true
198 |
199 | # ---------------------------------------------------------------------------
200 |
--------------------------------------------------------------------------------
/project_name/settings.py:
--------------------------------------------------------------------------------
1 | import os
2 | from django.core.exceptions import SuspiciousOperation
3 |
# Placeholder only — generate a real key and keep it in local settings.
SECRET_KEY = "RUN fab generatesecret to get a real secret key!"

# Amazon Web Services
AWS_ACCESS_KEY_ID = ''  # The shorter one
AWS_SECRET_ACCESS_KEY = ''  # The longer one
AWS_BUCKET_NAME = ''  # For your static files
AWS_BACKUP_BUCKET_NAME = ''  # For database backups
AWS_BACKUP_BUCKET_DIRECTORY = ''  # A prefix for the database backup key

# Settings paths that are handy to use other places
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__))
# The repository root, one directory above this settings package.
BASE_DIR = os.path.join(
    os.path.abspath(
        os.path.join(SETTINGS_DIR, os.path.pardir),
    ),
)

# Email
ADMINS = [
    ('', ''),
]
MANAGERS = ADMINS

EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# Localization
TIME_ZONE = 'America/Los_Angeles'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Media and static files
MEDIA_ROOT = os.path.join(BASE_DIR, '.media')
STATIC_ROOT = os.path.join(BASE_DIR, '.static')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'templates', 'static'),
]
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]

# Templates
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # django.core.context_processors was removed in Django
                # 1.10 (this file already uses the 1.10+ MIDDLEWARE
                # setting); the template backend provides it now.
                'django.template.context_processors.request',
                'toolbox.context_processors.env.environment',
                'toolbox.context_processors.sites.current_site',
            ],
        },
    },
]

# Web request stuff
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
# Varnish sits in front of the app servers, so trust the forwarded host.
USE_X_FORWARDED_HOST = True


# Installed apps
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    'django.contrib.admin',
    'django.contrib.humanize',
    'django.contrib.sitemaps',
    'toolbox',
    'greeking',
]

# Logging
MUNIN_ROOT = '/var/cache/munin/www/'
110 |
111 |
def skip_suspicious_operations(record):
    """
    Logging filter callback that drops SuspiciousOperation records.

    Returns False when the record carries a SuspiciousOperation
    exception, True for everything else.
    """
    exc_info = record.exc_info
    if not exc_info:
        return True
    return not isinstance(exc_info[1], SuspiciousOperation)
118 |
# Logging: ERROR-level request failures are emailed to ADMINS when
# DEBUG is off; DisallowedHost noise is routed to a null handler.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        # Keep SuspiciousOperation (e.g. host-header probes) out of the
        # admin error emails.
        'skip_suspicious_operations': {
            '()': 'django.utils.log.CallbackFilter',
            'callback': skip_suspicious_operations,
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false', 'skip_suspicious_operations'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        'logfile': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'django.log'),
            'maxBytes': 1024*1024*5,  # 5MB
            'backupCount': 0,
            'formatter': 'verbose',
        },
    },
    'formatters': {
        'verbose': {
            # The comma after the 'format' value was missing, which was
            # a SyntaxError that stopped this module importing at all.
            'format': ('%(levelname)s|%(asctime)s|%(module)s'
                       '|%(process)d|%(thread)d|%(message)s'),
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s|%(message)s'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django.security.DisallowedHost': {
            'handlers': ['null'],
            'propagate': False,
        },
    }
}
177 |
# Local settings: prefer the developer's settings_dev module and fall
# back to the production settings when it is not present.
# NOTE(review): these implicit relative star imports only resolve on
# Python 2; Python 3 would need ``from .settings_dev import *`` —
# confirm the target interpreter before changing.
try:
    from settings_dev import *
except ImportError:
    from settings_prod import *
TEMPLATE_DEBUG = DEBUG

# Django debug toolbar configuration
if DEBUG_TOOLBAR:
    # Debugging toolbar middleware. This file defines MIDDLEWARE (not
    # the old MIDDLEWARE_CLASSES), so extend that list; the previous
    # name raised a NameError whenever the toolbar was switched on.
    MIDDLEWARE += [
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    ]
    # JavaScript panels for the development debugging toolbar
    DEBUG_TOOLBAR_PANELS = (
        'debug_toolbar.panels.versions.VersionsPanel',
        'debug_toolbar.panels.timer.TimerPanel',
        'debug_toolbar.panels.settings.SettingsPanel',
        'debug_toolbar.panels.headers.HeadersPanel',
        'debug_toolbar.panels.request.RequestPanel',
        'debug_toolbar.panels.profiling.ProfilingPanel',
        'debug_toolbar.panels.sql.SQLPanel',
        'debug_toolbar.panels.staticfiles.StaticFilesPanel',
        'debug_toolbar.panels.templates.TemplatesPanel',
        'debug_toolbar.panels.cache.CachePanel',
        'debug_toolbar.panels.signals.SignalsPanel',
        'debug_toolbar.panels.logging.LoggingPanel',
        'debug_toolbar.panels.redirects.RedirectsPanel',
    )
    # Debug toolbar app (INSTALLED_APPS is a list, so extend in kind).
    INSTALLED_APPS += ['debug_toolbar']
    CONFIG_DEFAULTS = {
        'INTERCEPT_REDIRECTS': False,
    }
212 |
--------------------------------------------------------------------------------
/project_name/settings_dev.template:
--------------------------------------------------------------------------------
DEBUG = True
DEVELOPMENT, PRODUCTION = True, False
DEBUG_TOOLBAR = False
# Local PostGIS database; fill in NAME/USER/PASSWORD for your machine.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Caching is a no-op in development; swap in the database backend
# below if you need to exercise real cache behavior.
CACHES = {
    'default': {
        'LOCATION': 'my_cache_table',
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        #'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
    }
}
STATIC_URL = '/static/'
WSGI_APPLICATION = '{{ project_name }}.wsgi_dev.application'
23 |
--------------------------------------------------------------------------------
/project_name/settings_prod.py:
--------------------------------------------------------------------------------
import os

DEBUG = False
DEVELOPMENT, PRODUCTION = False, True
DEBUG_TOOLBAR = False

# The project root, one level above this settings package. This module
# is executed in its own namespace before settings.py star-imports it,
# so it cannot see the BASE_DIR defined there; without this line (and
# the ``import os`` above) the TEMPLATES block below raised a
# NameError on import.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

DATABASES = {
    'default': {
        # 'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': 'localhost',
        # NOTE(review): 5433 is presumably the pgpool frontend rather
        # than PostgreSQL's default 5432 — confirm against the pgpool
        # cookbook before changing.
        'PORT': '5433',
    }
}
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
        'TIMEOUT': 60 * 30,  # 30 minutes
        'OPTIONS': {
            'MAX_ENTRIES': 1500
        }
    }
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        # APP_DIRS must be omitted when OPTIONS['loaders'] is set —
        # Django raises ImproperlyConfigured if both are present. The
        # app_directories loader below covers app templates instead.
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # django.core.context_processors was removed in Django
                # 1.10; the template backend provides it now.
                'django.template.context_processors.request',
                'toolbox.context_processors.env.environment',
                'toolbox.context_processors.sites.current_site',
            ],
            # Cache compiled templates in production.
            'loaders': [
                ('django.template.loaders.cached.Loader', [
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader',
                ]),
            ],
        },
    },
]
STATIC_URL = ''
WSGI_APPLICATION = '{{ project_name }}.wsgi_prod.application'
ALLOWED_HOSTS = ()
56 | ALLOWED_HOSTS = ()
57 |
--------------------------------------------------------------------------------
/project_name/settings_test.template:
--------------------------------------------------------------------------------
DEBUG = True
DEVELOPMENT, PRODUCTION = True, False
DEBUG_TOOLBAR = True
# A local SQLite file keeps the test run self-contained.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db'
    }
}
STATIC_URL = '/static/'
11 |
--------------------------------------------------------------------------------
/project_name/urls.py:
--------------------------------------------------------------------------------
1 | from django.conf import settings
2 | from django.contrib import admin
3 | from toolbox import views as toolbox_views
4 | from django.conf.urls import include, url
5 | from django.views.static import serve as static_serve
6 | from django.contrib.admin.views.decorators import staff_member_required
7 |
8 |
urlpatterns = [
    # The Django admin.
    url(r'^admin/', include(admin.site.urls)),
    # This is the URL Varnish will ping to check the server health.
    url(r'^app_status/$', toolbox_views.app_status, name='status'),
]
14 |
if settings.DEBUG:
    # In development, let Django itself serve static and media files.
    # The capture group must be named ``path`` because that is the
    # keyword argument django.views.static.serve expects; the bare
    # ``(?P.*)`` pattern was invalid regex syntax and raised an error
    # at import time.
    urlpatterns += [
        url(r'^static/(?P<path>.*)$', static_serve, {
            'document_root': settings.STATIC_ROOT,
            'show_indexes': True,
        }),
        url(r'^media/(?P<path>.*)$', static_serve, {
            'document_root': settings.MEDIA_ROOT,
            'show_indexes': True,
        }),
    ]
26 |
--------------------------------------------------------------------------------
/project_name/wsgi_dev.py:
--------------------------------------------------------------------------------
import os
from django.core.wsgi import get_wsgi_application

# Development WSGI entry point: point Django at the project settings
# and expose the application object for the server to call.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
application = get_wsgi_application()
6 |
--------------------------------------------------------------------------------
/project_name/wsgi_prod.py:
--------------------------------------------------------------------------------
import os
from django.core.wsgi import get_wsgi_application

# Production WSGI entry point.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{ project_name }}.settings")
application = get_wsgi_application()

# Wrap the WSGI app with New Relic instrumentation when the agent is
# installed; running without it is fine, so ImportError is ignored.
# NOTE(review): New Relic's docs recommend calling initialize() before
# the application is created — confirm whether this ordering matters
# for the features in use here.
try:
    import newrelic.agent
    this_dir = os.path.dirname(os.path.realpath(__file__))
    newrelic.agent.initialize(os.path.join(this_dir, 'newrelic.ini'))
    application = newrelic.agent.wsgi_application()(application)
except ImportError:
    pass
14 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Django
2 | Fabric
3 | argparse
4 | boto
5 | coverage
6 | django-debug-toolbar
7 | greeking
8 | flake8
9 | psycopg2
10 | pycrypto
11 | pyflakes
12 | python-coveralls
13 | python-memcached
14 | pytz
15 | six
16 | sqlparse
17 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from distutils.core import Command
3 |
4 |
class TestCommand(Command):
    """
    Custom ``python setup.py test`` command.

    Configures a minimal in-memory SQLite Django environment and runs
    the toolbox app's test suite.
    """
    # distutils requires this attribute even when no options are taken.
    user_options = []

    def initialize_options(self):
        # No options to initialize.
        pass

    def finalize_options(self):
        # No options to finalize.
        pass

    def run(self):
        # Imported here so Django is only required when tests run.
        from django.conf import settings
        settings.configure(
            DATABASES={
                'default': {
                    'NAME': ':memory:',
                    'ENGINE': 'django.db.backends.sqlite3'
                }
            },
            INSTALLED_APPS=(
                'django.contrib.auth',
                'django.contrib.contenttypes',
                'django.contrib.sessions',
                'django.contrib.admin',
                'toolbox',
            )
        )
        # In Django > 1.7 you have to run setup to get app registry in order
        import django
        django.setup()
        from django.core.management import call_command
        call_command('test', 'toolbox.tests')
36 |
37 |
# Register the custom runner above so `python setup.py test` works.
setup(
    name='django-project-template',
    cmdclass={'test': TestCommand}
)
42 |
--------------------------------------------------------------------------------
/templates/404.html:
--------------------------------------------------------------------------------
1 | Page Not Found (404)
2 |
--------------------------------------------------------------------------------
/templates/500.html:
--------------------------------------------------------------------------------
1 | Internal Server Error (500)
2 |
--------------------------------------------------------------------------------
/templates/503.html:
--------------------------------------------------------------------------------
1 | Service Unavailable (503)
2 |
--------------------------------------------------------------------------------
/templates/static/.touch:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datadesk/django-project-template/b9eb644f73d30680038c7032b0060ab4e3a4b677/templates/static/.touch
--------------------------------------------------------------------------------
/toolbox/__init__.py:
--------------------------------------------------------------------------------
# Tell Django which AppConfig to load for this app so its verbose name
# (and any future ready() hooks) take effect -- pre-Django 3.2 convention.
default_app_config = 'toolbox.apps.ToolboxConfig'
2 |
--------------------------------------------------------------------------------
/toolbox/apps.py:
--------------------------------------------------------------------------------
1 | from django.apps import AppConfig
2 |
3 |
class ToolboxConfig(AppConfig):
    """App configuration for the reusable ``toolbox`` utilities app."""
    name = 'toolbox'
    verbose_name = "Toolbox"
--------------------------------------------------------------------------------
/toolbox/context_processors/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datadesk/django-project-template/b9eb644f73d30680038c7032b0060ab4e3a4b677/toolbox/context_processors/__init__.py
--------------------------------------------------------------------------------
/toolbox/context_processors/env.py:
--------------------------------------------------------------------------------
1 | from django.conf import settings
2 |
3 |
def environment(request):
    """
    Expose the deployment environment flags to every template context.

    Pulls the DEVELOPMENT and PRODUCTION booleans straight from the
    project settings.
    """
    context = {}
    for flag in ('DEVELOPMENT', 'PRODUCTION'):
        context[flag] = getattr(settings, flag)
    return context
12 |
--------------------------------------------------------------------------------
/toolbox/context_processors/sites.py:
--------------------------------------------------------------------------------
1 | from django.contrib.sites.models import Site
2 |
3 |
def current_site(request):
    """
    Pass the "current site" to the template context.

    Falls back to an empty string when no Site record exists.
    """
    try:
        site = Site.objects.get_current()
    except Site.DoesNotExist:
        site = ''
    return {'current_site': site}
16 |
--------------------------------------------------------------------------------
/toolbox/management/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datadesk/django-project-template/b9eb644f73d30680038c7032b0060ab4e3a4b677/toolbox/management/__init__.py
--------------------------------------------------------------------------------
/toolbox/management/commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datadesk/django-project-template/b9eb644f73d30680038c7032b0060ab4e3a4b677/toolbox/management/commands/__init__.py
--------------------------------------------------------------------------------
/toolbox/management/commands/backupdb.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import boto
4 | from django.conf import settings
5 | from django.core.management.base import BaseCommand
6 |
# Scratch directory that holds the upload chunks for the S3 multipart push.
CHUNK_DIR = '/tmp/chunk_dir'
if not os.path.exists(CHUNK_DIR):
    os.mkdir(CHUNK_DIR)
9 |
10 |
class Command(BaseCommand):
    help = 'Back up the database to an archive on Amazon S3'

    def split_file(self, input_file, chunk_size=900000000):
        """
        Split input_file into sequentially numbered chunk files in CHUNK_DIR.

        Returns the chunk file names in file order so the caller can upload
        the multipart pieces in the correct sequence.
        """
        with open(input_file, 'rb') as f:
            data = f.read()

        if not os.path.exists(CHUNK_DIR):
            os.makedirs(CHUNK_DIR)

        chunk_names = []
        # range(0, len(data) + 1, ...) so a zero-byte dump still yields one
        # (empty) chunk, matching the original behavior.
        for offset in range(0, len(data) + 1, chunk_size):
            chunk_name = "chunk%s" % offset
            chunk_names.append(chunk_name)
            with open(os.path.join(CHUNK_DIR, chunk_name), 'wb') as f:
                f.write(data[offset:offset + chunk_size])
        return chunk_names

    def set_options(self, *args, **kwargs):
        """
        Initialize all the settings we'll use to backup the database.
        """
        # Set the database credentials; PGPASSWORD lets pg_dump run
        # non-interactively.
        self.db_user = settings.DATABASES['default']['USER']
        self.db_name = settings.DATABASES['default']['NAME']
        self.db_pass = settings.DATABASES['default']['PASSWORD']
        os.environ['PGPASSWORD'] = self.db_pass

        # Set up the file name, e.g. prod_2015-01-31.sql.gz
        if settings.PRODUCTION:
            prefix = 'prod'
        else:
            prefix = 'dev'
        now = time.strftime('%Y-%m-%d')
        self.filename = '%s_%s.sql.gz' % (prefix, now)
        self.cmd = 'pg_dump -U %s -Fc %s > %s' % (
            self.db_user,
            self.db_name,
            self.filename
        )

        # Set up everything we need on S3
        self.bucket_name = settings.AWS_BACKUP_BUCKET_NAME
        self.boto_conn = boto.connect_s3(
            settings.AWS_ACCESS_KEY_ID,
            settings.AWS_SECRET_ACCESS_KEY
        )
        self.bucket = self.boto_conn.get_bucket(self.bucket_name)
        if hasattr(settings, 'AWS_BACKUP_BUCKET_DIRECTORY'):
            self.key_path = '%s/%s' % (
                settings.AWS_BACKUP_BUCKET_DIRECTORY,
                self.filename
            )
        else:
            self.key_path = self.filename
        # A parallel key that always points at the most recent backup
        self.latest_key_path = self.key_path.replace(now, "latest")

    def handle(self, *args, **options):
        """Dump the database, upload it to S3 in parts, then clean up."""
        # Initialize all the settings we'll need
        self.set_options(*args, **options)

        # Create the backup locally
        print('Backing up PostgreSQL: %s' % self.cmd)
        os.system(self.cmd)

        # Upload it to S3 in pieces. Multipart part numbers must follow
        # file order, so iterate the ordered list from split_file() rather
        # than os.listdir(), whose ordering is arbitrary and could
        # reassemble a multi-chunk dump out of order.
        print("Uploading to %s" % self.key_path)
        mp = self.bucket.initiate_multipart_upload(
            self.key_path,
            reduced_redundancy=False,
            policy='bucket-owner-full-control'
        )
        chunk_names = self.split_file(self.filename)
        for i, chunk_name in enumerate(chunk_names, start=1):
            chunk_path = os.path.join(CHUNK_DIR, chunk_name)
            with open(chunk_path, 'rb') as f:
                mp.upload_part_from_file(f, i)
            os.remove(chunk_path)
        mp.complete_upload()

        # Create a 'latest' copy alongside the dated key
        print("Copying as %s" % self.latest_key_path)
        key = self.bucket.lookup(mp.key_name)
        key.copy(self.bucket, self.latest_key_path)

        # Delete the local file
        print("Deleting %s" % self.filename)
        os.remove(self.filename)
107 |
--------------------------------------------------------------------------------
/toolbox/management/commands/generatesecretkey.py:
--------------------------------------------------------------------------------
1 | from django.utils.crypto import get_random_string
2 | from django.core.management.base import BaseCommand
3 |
4 |
class Command(BaseCommand):
    help = "Generates a secret key for use in project settings"

    def handle(self, *args, **options):
        """Print a ready-to-paste SECRET_KEY assignment to stdout."""
        # Same alphabet Django's own startproject uses for secret keys.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
        key = get_random_string(50, chars)
        # print() with a single argument works on both Python 2 and 3;
        # the original `print 'x'` statement is a SyntaxError on Python 3.
        print('SECRET_KEY = "%s"' % key)
12 |
--------------------------------------------------------------------------------
/toolbox/management/commands/loadbackupdb.py:
--------------------------------------------------------------------------------
1 | import os
2 | import boto
3 | from datetime import datetime
4 | from django.conf import settings
5 | from django.core.management.base import BaseCommand, CommandError
6 |
7 |
class Command(BaseCommand):
    args = ''
    help = 'Load a database snapshot from our nightly archive. Pulls latest' \
        + ' by default. Specify date for an older one.'

    def add_arguments(self, parser):
        # Declare the snapshot date as an optional positional argument.
        # Under argparse-based commands (Django >= 1.8) undeclared
        # positionals never reach handle(), so without this the date
        # feature promised in `help` is unreachable.
        parser.add_argument(
            "date",
            nargs="?",
            default=None,
            help="Date (YYYY-MM-DD) of the snapshot to pull. Default: latest."
        )
        parser.add_argument(
            "--name",
            action="store",
            dest="name",
            default='',
            help="A custom name for the database we're creating locally"
        )
        parser.add_argument(
            "--env",
            action="store",
            dest="env",
            default='prod',
            help=("The deployment environment you want pull the database "
                  " from. By default it's prod.")
        )

    def set_options(self, *args, **kwargs):
        """
        Resolve the S3 key to download and the local database credentials.

        Raises CommandError when the supplied date is not YYYY-MM-DD.
        """
        # The date may arrive as a declared option (Django >= 1.8) or as a
        # legacy positional (older Django passed it through *args).
        date = kwargs.get('date') or (args[0] if args else None)
        if date:
            try:
                dt = datetime.strptime(date, '%Y-%m-%d').strftime("%Y-%m-%d")
            except ValueError:
                raise CommandError("The date you submitted is not valid.")
        # Otherwise fall back to the most recent snapshot
        else:
            dt = 'latest'
        self.filename = '%s_%s.sql.gz' % (kwargs['env'], dt)

        # Get all our S3 business straight
        self.bucket_name = settings.AWS_BACKUP_BUCKET_NAME
        self.boto_conn = boto.connect_s3(
            settings.AWS_ACCESS_KEY_ID,
            settings.AWS_SECRET_ACCESS_KEY
        )
        self.bucket = self.boto_conn.get_bucket(self.bucket_name)
        if hasattr(settings, 'AWS_BACKUP_BUCKET_DIRECTORY'):
            self.key_path = '%s/%s' % (
                settings.AWS_BACKUP_BUCKET_DIRECTORY,
                self.filename
            )
        else:
            self.key_path = self.filename

        # Set local database settings; PGPASSWORD lets the pg_* tools
        # authenticate non-interactively.
        db = settings.DATABASES['default']
        os.environ['PGPASSWORD'] = db['PASSWORD']
        self.db_user = db['USER']
        self.db_name = kwargs.get('name') or db['NAME']

    def handle(self, *args, **options):
        """Download the requested snapshot and restore it locally."""
        # Initialize the options
        self.set_options(*args, **options)

        # Download the snapshot
        self.download(self.key_path)

        # Load the snapshot into the database
        self.load(self.filename)

    def load(self, source):
        """
        Load a database snapshot into our postgres installation.
        """
        print("Loading to new database: %s" % self.db_name)
        # Drop any existing copy first. dropdb exits nonzero (os.system
        # never raises) when the database is absent, so the old bare
        # `except: pass` was dead code and has been removed.
        os.system("dropdb -U %s %s" % (self.db_user, self.db_name))
        # Create the database
        os.system("createdb -U %s %s" % (self.db_user, self.db_name))
        # Load the data
        os.system(
            "pg_restore -U %s -Fc -d %s ./%s" % (
                self.db_user,
                self.db_name,
                source
            )
        )
        # Delete the snapshot without shelling out to rm
        os.remove("./%s" % source)

    def download(self, dt):
        """
        Download a database snapshot from S3 to the working directory.

        Raises CommandError when the key is missing from the archive.
        """
        print("Downloading database: %s" % self.key_path)
        self.key = self.bucket.get_key(self.key_path)
        if not self.key:
            raise CommandError("%s does not exist in the database archive." % (
                self.key_path
            ))
        self.key.get_contents_to_filename(self.filename)
108 |
--------------------------------------------------------------------------------
/toolbox/models.py:
--------------------------------------------------------------------------------
1 | # Nothing to see here!
2 |
--------------------------------------------------------------------------------
/toolbox/mrss.py:
--------------------------------------------------------------------------------
1 | from django.utils.feedgenerator import Rss201rev2Feed
2 |
3 |
class MediaRSSFeed(Rss201rev2Feed):
    """Basic implementation of Yahoo Media RSS (mrss)
    http://video.search.yahoo.com/mrss

    Emits these optional elements on each feed item:
        media:content (url, width, height)
        media:thumbnail (url, width, height)
        media:description
        media:title
        media:keywords
    """
    def rss_attributes(self):
        """Declare the media and atom XML namespaces on the root element."""
        attrs = super(MediaRSSFeed, self).rss_attributes()
        attrs.update({
            'xmlns:media': 'http://search.yahoo.com/mrss/',
            'xmlns:atom': 'http://www.w3.org/2005/Atom',
        })
        return attrs

    def add_item_elements(self, handler, item):
        """Callback to add elements to each item (item/entry) element."""
        super(MediaRSSFeed, self).add_item_elements(handler, item)

        # Plain text elements, passed straight through when present.
        if 'media:title' in item:
            handler.addQuickElement(u"media:title", item['media:title'])
        if 'media:description' in item:
            handler.addQuickElement(
                u"media:description",
                item['media:description']
            )

        # media:content and media:thumbnail share the same attribute shape:
        # a required url plus optional width/height (stringified).
        for tag, prefix in ((u"media:content", 'content'),
                            (u"media:thumbnail", 'thumbnail')):
            url_key = '%s_url' % prefix
            if url_key not in item:
                continue
            attrs = {'url': item[url_key]}
            for dim in ('width', 'height'):
                dim_key = '%s_%s' % (prefix, dim)
                if dim_key in item:
                    attrs[dim] = str(item[dim_key])
            handler.addQuickElement(tag, '', attrs)

        if 'keywords' in item:
            handler.addQuickElement(u"media:keywords", item['keywords'])
53 |
--------------------------------------------------------------------------------
/toolbox/templatetags/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/datadesk/django-project-template/b9eb644f73d30680038c7032b0060ab4e3a4b677/toolbox/templatetags/__init__.py
--------------------------------------------------------------------------------
/toolbox/templatetags/toolbox_tags.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 | from django import template
3 | from django.template.defaultfilters import stringfilter
4 | register = template.Library()
5 |
6 |
@register.filter(is_safe=True)
@stringfilter
def dropcap(value):
    """
    Upper-case the first character of ``value``.

    NOTE: despite the name, no wrapping markup is added -- the filter only
    capitalizes the leading character. Pair the output with CSS (e.g. a
    ``.dropcap`` class on the surrounding element) to style the capital.
    Empty input is returned unchanged.
    """
    return value and "%s%s" % (
        value[0].upper(),
        value[1:]
    )
18 |
19 |
@register.filter(is_safe=True)
@stringfilter
def emdashes(html):
    """
    Swap every double hyphen for an em dash in the given string.
    """
    cleaned = html.replace("--", "—")
    return cleaned
27 |
--------------------------------------------------------------------------------
/toolbox/tests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | from __future__ import absolute_import
4 | from django.test import TestCase
5 | from .templatetags.toolbox_tags import dropcap
6 | from .templatetags.toolbox_tags import emdashes
7 |
8 |
class ToolboxTest(TestCase):

    def test_unicodecsv(self):
        """
        Test simple usage of the unicodecsv toy.
        """
        from toolbox import unicodecsv
        from cStringIO import StringIO
        d = StringIO("""Name,Type,County
La Cañada Flintridge,Neighborhood,L.A.County
Downtown,Neighborhood,L.A.County
""")
        reader = unicodecsv.UnicodeDictReader(d)
        reader.next()
        reader.__iter__()
        self.assertEqual(type(list(reader)), type([]))

    def test_dropcap_filter(self):
        """
        Test simple usage of the dropcap templatetag.
        """
        before = "I love dropcaps."
        after = dropcap(before)
        # assertEqual replaces the long-deprecated failUnlessEqual alias.
        self.assertEqual(
            after,
            "I love dropcaps."
        )

    def test_emdashes_filter(self):
        """
        Test simple usage of the emdashes templatetag.
        """
        # NOTE(review): these literals were garbled in the dump; the "\n"
        # escapes are reconstructed -- confirm against the original file.
        before = "The end.\n-- Ruben Salazar\n"
        after = emdashes(before)
        self.assertEqual(
            after,
            "The end.\n— Ruben Salazar\n"
        )
47 |
--------------------------------------------------------------------------------
/toolbox/unicodecsv.py:
--------------------------------------------------------------------------------
1 | """
2 | Support for reading CSV as Unicode objects.
3 |
4 | This module is necessary because Python's csv library doesn't support reading
5 | Unicode strings.
6 |
7 | This code is mostly copied from the Python documentation:
8 | http://www.python.org/doc/2.5.2/lib/csv-examples.html
9 | """
10 | import csv
11 | import codecs
12 |
13 |
class UTF8Recoder:
    """
    Reencodes input to UTF-8.

    Wraps a file-like object in any codec and yields its lines as UTF-8
    byte strings. Uses the Python 2 iterator protocol (``next``), so this
    class is not iterable under Python 3 without modification.
    """
    def __init__(self, f, encoding):
        # codecs.getreader returns a StreamReader class for the codec;
        # instantiating it with f decodes the stream on the fly.
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    def next(self):
        # Decode one line via the codec reader, then re-encode as UTF-8 so
        # the csv module (bytes-only in Python 2) can parse it.
        return self.reader.next().encode('utf-8')
26 |
27 |
class UnicodeDictReader:
    """
    Like the standard csv.DictReader, except it actually works with unicode.

    Python 2 only: relies on the ``unicode`` builtin and the ``next``
    iterator protocol.
    """
    def __init__(self, f, encoding='utf-8', **kwargs):
        # Recode the input stream to UTF-8 bytes before handing it to the
        # bytes-only Python 2 csv module.
        f = UTF8Recoder(f, encoding)
        self.reader = csv.DictReader(f, **kwargs)

    def next(self):
        # Decode every key and value of the parsed row back to unicode.
        row = self.reader.next()
        keys = [unicode(i, 'utf-8') for i in row.keys()]
        values = [unicode(i, 'utf-8') for i in row.values()]
        return dict(zip(keys, values))

    def __iter__(self):
        return self
44 |
--------------------------------------------------------------------------------
/toolbox/views.py:
--------------------------------------------------------------------------------
1 | from django.http import HttpResponse
2 |
3 |
def app_status(request):
    """
    Health-check page for Varnish to confirm the server/app is up.
    """
    status = "ok"
    return HttpResponse(status)
9 |
--------------------------------------------------------------------------------