├── .gitignore
├── Gemfile
├── LICENSE
├── README.md
├── bin
│   └── pd2pg
├── examples
│   ├── escalation-rate-per-service.sql
│   ├── incidents-per-service.sql
│   ├── individual-detail.sql
│   ├── interrupted-hours-per-week.sql
│   ├── off-hours-incidents-per-service.sql
│   └── team-detail.sql
├── pd2pg.gemspec
└── schema.sql

/.gitignore:
--------------------------------------------------------------------------------
Gemfile.lock
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
source "https://rubygems.org"

gemspec
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2014- Stripe, Inc. (https://stripe.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# pd2pg

pd2pg imports data from the PagerDuty API into a Postgres database for
easy querying and analysis.

It helps you:

* Collect summary statistics about on-call activity.
* Calculate per-user, per-service, per-escalation-policy on-call metrics.
* Determine the frequency of on-hours vs. off-hours pages.
* Produce custom on-call reports with incident-level detail.
* Back-test proposed on-call changes.
* Perform one-off queries against historical pager data.

## Importing

pd2pg imports user, service, escalation policy, incident, and log entry
(incident event) data from the PagerDuty API into a specified Postgres
database. The import is incremental and idempotent, so you can run it as
often as you'd like to refresh your database.

You'll need the following config set in environment variables:

* `PAGERDUTY_API_KEY`: a read-only API key from `https://api.pagerduty.com/api_keys`.
* `DATABASE_URL`: URL to a Postgres database, e.g.
  `postgres://127.0.0.1:5432/pagerduty`.
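
For example (the values below are placeholders; substitute your own):

```
$ export PAGERDUTY_API_KEY=your-read-only-api-key
$ export DATABASE_URL=postgres://127.0.0.1:5432/pagerduty
```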

Perform a one-time schema load with:

```
$ psql $DATABASE_URL < schema.sql
```

Then refresh the database as often as desired with:

```
$ bundle exec pd2pg
```

## Querying

pd2pg makes PagerDuty data available as regular Postgres data, so you
can query it in the usual way, e.g. with:

```
$ psql $DATABASE_URL
```

For example, to count the number of incidents per service over the past
28 days:

```sql
select
  services.name,
  count(incidents.id)
from
  incidents,
  services
where
  incidents.created_at > now() - '28 days'::interval and
  incidents.service_id = services.id
group by
  services.name
order by
  count(incidents.id) desc
```

Or show all incidents that notified a specific user over the past week:

```sql
select
  log_entries.created_at as notification_time,
  incidents.html_url as incident_url,
  incidents.trigger_summary_subject,
  services.name as service_name
from
  users,
  log_entries,
  incidents,
  services
where
  users.email = 'mark@stripe.com' and
  log_entries.user_id = users.id and
  log_entries.type = 'notify' and
  log_entries.created_at > now() - '7 days'::interval and
  incidents.id = log_entries.incident_id and
  incidents.service_id = services.id
order by
  incidents.created_at desc
```
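
Or list the users attached to a schedule. This is a sketch against the
tables in `schema.sql`, where `'Primary On-Call'` stands in for one of
your own schedule names:

```sql
select
  users.name,
  users.email
from
  schedules,
  user_schedule,
  users
where
  schedules.name = 'Primary On-Call' and
  user_schedule.schedule_id = schedules.id and
  user_schedule.user_id = users.id
order by
  users.name
```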

See `schema.sql` for details of the data model and `examples/` for
example SQL queries.
--------------------------------------------------------------------------------
/bin/pd2pg:
--------------------------------------------------------------------------------
#!/usr/bin/env ruby

require "json"
require "time"
require "excon"
require "sequel"

# Ensure all data processing and storage is in UTC.
ENV["TZ"] = "UTC"

class PG2PD
  # Largest page size allowed.
  PAGINATION_LIMIT = 100

  # Rewind by ~1h when doing incremental updates, to ensure we don't
  # miss anything.
  INCREMENTAL_BUFFER = 60*60

  # Apply incremental updates atomically for ~24 hour windows, instead
  # of trying to fetch all of history and apply it at once.
  INCREMENTAL_WINDOW = 60*60*24

  # Earliest time PagerDuty data could be available.
  PAGERDUTY_EPOCH = Time.parse("2009-01-01T00:00Z")

  # Reads required config from environment variables.
  def env!(k)
    v = ENV[k]
    if !v
      $stderr.puts("Must set #{k} in environment")
      Kernel.exit(1)
    end
    v
  end

  # Logs a key-prefixed, name=value line.
  def log(key, data={})
    data_str = data.map { |(k,v)| "#{k}=#{v}" }.join(" ")
    $stdout.puts("#{key}#{" " + data_str unless data_str.empty?}")
  end

  # Connections to the Postgres DB and PagerDuty API respectively.
  attr_accessor :db, :api

  # Initialize by reading config and establishing API and DB
  # connections.
  def initialize
    # Read config.
    database_url = env!("DATABASE_URL")
    pagerduty_api_token = env!("PAGERDUTY_API_KEY")

    # Establish API connection.
    self.api = Excon::Connection.new(
      :scheme => "https",
      :host => "api.pagerduty.com",
      :port => 443,
      :headers => {
        "Authorization" => "Token token=#{pagerduty_api_token}",
        "Accept" => "application/vnd.pagerduty+json;version=2"
      })

    # Establish DB connection.
    self.db = Sequel.connect(database_url)
  end

  # Send all service records from PagerDuty to the database.
  def services_to_db(items)
    columns = [:id, :name, :status, :type]
    records = items.map do |i|
      [i['id'],
       i['name'],
       i['status'],
       i['type']]
    end
    database_replace(:services, columns, records)
  end

  # Adds users to the user_schedule table, associated with the given
  # schedules.
  def user_schedules_to_db(schedule_items)
    columns = [:id, :user_id, :schedule_id]
    all_records = []
    schedule_items.each do |s|
      users = get_bulk(:users,
                       "schedules/#{s['id']}/users",
                       { since: Time.now.strftime('%Y-%m-%d') },
                       false)
      users.each do |u|
        all_records << ["#{u['id']}_#{s['id']}", u['id'], s['id']]
      end
    end
    database_replace(:user_schedule, columns, all_records)
  end

  # Send all schedule records from PagerDuty to the database.
  def schedules_to_db(items)
    user_schedules_to_db(items)

    columns = [:id, :name]
    records = items.map do |i|
      [i['id'],
       i['name']]
    end
    database_replace(:schedules, columns, records)
  end

  # Send all escalation policy records from PagerDuty to the database.
  def escalation_policies_to_db(items)
    ep_columns = [:id, :name, :num_loops]
    ep_records = items.map do |i|
      [i['id'],
       i['name'],
       i['num_loops']]
    end

    er_columns = [:id, :escalation_policy_id, :escalation_delay_in_minutes, :level_index]
    eru_columns = [:id, :escalation_rule_id, :user_id]
    ers_columns = [:id, :escalation_rule_id, :schedule_id]
    er_records = []
    eru_records = []
    ers_records = []
    items.each do |ep|
      ep['escalation_rules'].each.with_index do |er, i|
        er_records << [
          er['id'],
          ep['id'],
          er['escalation_delay_in_minutes'],
          i + 1
        ]
        er['targets'].each do |t|
          if t['type'] == 'user'
            eru_records << [
              "#{er['id']}_#{t['id']}",
              er['id'],
              t['id']
            ]
          else
            ers_records << [
              "#{er['id']}_#{t['id']}",
              er['id'],
              t['id']
            ]
          end
        end
      end
    end
    database_replace(:escalation_rules, er_columns, er_records)
    database_replace(:escalation_rule_users, eru_columns, eru_records)
    database_replace(:escalation_rule_schedules, ers_columns, ers_records)
    database_replace(:escalation_policies, ep_columns, ep_records)
  end

  # Send all user records to the database.
  def users_to_db(items)
    columns = [:id, :name, :email]
    records = items.map do |i|
      [i['id'],
       i['name'],
       i['email']]
    end
    database_replace(:users, columns, records)
  end

  # Convert a log entry API value into a DB record.
  def convert_log_entry(le)
    {
      id: le["id"],
      type: le["type"],
      created_at: Time.parse(le["created_at"]),
      incident_id: le["incident"]["id"],
      agent_type: le["agent"] && le["agent"]["type"],
      agent_id: le["agent"] && le["agent"]["id"],
      channel_type: le["channel"] && le["channel"]["type"],
      user_id: le["user"] && le["user"]["id"],
      notification_type: le["notification"] && le["notification"]["type"],
      assigned_user_id: le["assigned_user"] && le["assigned_user"]["id"]
    }
  end

  # Convert an incident API value into a DB record.
  def convert_incident(i)
    {
      id: i["id"],
      incident_number: i["incident_number"],
      created_at: i["created_at"],
      html_url: i["html_url"],
      incident_key: i["incident_key"],
      service_id: i["service"] && i["service"]["id"],
      escalation_policy_id: i["escalation_policy"] && i["escalation_policy"]["id"],
      trigger_summary_subject: i["summary"],
      trigger_summary_description: i["description"],
      # This column is no longer supported in PagerDuty's v2 API;
      # we're leaving it in for backwards compatibility.
      trigger_type: "DEPRECATED"
    }
  end
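
  # Both the bulk and incremental fetches below assume the v2 REST
  # API's offset-based pagination: each request passes offset/limit
  # (plus total=true to ask for a count) as query parameters, and
  # paging continues while the response's "more" flag is set.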

  # Returns a list of raw values from the API for the given collection
  # or endpoint.
  def get_bulk(collection, endpoint=nil, additional_params={}, should_log=true)
    if endpoint.nil?
      endpoint = collection
    end

    offset = 0
    total = nil
    records = []
    loop {
      if should_log
        log('get_bulk.page', collection: collection, offset: offset, total: total || '?')
      end
      response = api.request(
        method: :get,
        path: "/#{endpoint}",
        query: {
          'total' => true,
          'offset' => offset,
          'limit' => PAGINATION_LIMIT
        }.merge(additional_params),
        expects: [200]
      )
      data = JSON.parse(response.body)
      total = data["total"] || data[collection.to_s].length
      offset += PAGINATION_LIMIT
      records.concat(data[collection.to_s])
      break if !data["more"]
    }
    if should_log
      log('get_bulk.update', collection: collection, total: records.length)
    end
    return records
  end

  # Atomically update the given table: delete the contents of the
  # table, then insert the new records, whose values are ordered to
  # match the given columns.
  def database_replace(table_name, columns, records)
    db.transaction do
      table = db[table_name]
      table.delete
      records.each do |record|
        table.insert(columns, record)
      end
    end
  end
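
  # As a worked example (timestamps hypothetical): if the newest stored
  # record is from 2015-06-10T12:00Z, the incremental refresh below
  # resumes at 2015-06-10T11:00Z (one INCREMENTAL_BUFFER earlier) and
  # walks forward in 24-hour INCREMENTAL_WINDOW chunks until it passes
  # the current time.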

  # Update database state for the given table by fetching relevant new
  # values from the API. Determines the point from which to resume
  # based on existing records, which are assumed to be complete up to
  # the most recent record. Yields each API value to a block that
  # should convert the API value to a DB record for subsequent
  # insertion.
  def refresh_incremental(collection, query_params={})
    # Calculate the point from which we should resume incremental
    # updates. Allow a bit of overlap to ensure we don't miss anything.
    last_record = db[collection].reverse_order(:created_at).first
    latest = (last_record && last_record[:created_at]) || PAGERDUTY_EPOCH
    log("refresh_incremental.check", collection: collection, latest: latest.iso8601)

    # Update data in windowed time chunks. This keeps the amount of
    # data requested from the API manageable and lets each window be
    # applied coherently.
    since = latest - INCREMENTAL_BUFFER
    while since < Time.now
      through = since + INCREMENTAL_WINDOW
      log("refresh_incremental.window", collection: collection, since: since.iso8601, through: through.iso8601)

      # Fetch all values from the API and apply the conversion block to
      # each, forming an in-memory array of DB-ready records.
      offset = 0
      total = nil
      records = []
      loop {
        log("refresh_incremental.page", collection: collection, since: since.iso8601, offset: offset, total: total || "?")
        response = api.request(
          method: :get,
          path: "/#{collection}",
          query: {
            total: true,
            since: since,
            until: through,
            offset: offset,
            limit: PAGINATION_LIMIT
          }.merge(query_params),
          expects: [200]
        )
        data = JSON.parse(response.body)
        total = data["total"]
        offset += PAGINATION_LIMIT
        items = data[collection.to_s]
        records.concat(items.map { |i| yield(i) })
        break if !data["more"]
      }

      # Atomically update the DB by inserting all novel records.
      if !records.empty?
        log("refresh_incremental.update", total: records.length)
        db.transaction do
          table = db[collection]
          records.each do |record|
            if table.where(id: record[:id]).empty?
              table.insert(record)
            end
          end
        end
      end

      since = through
    end
  end

  # Refresh data for all tracked tables.
  def refresh
    log("refresh.start")

    services_to_db(get_bulk(:services))
    escalation_policies_to_db(get_bulk(:escalation_policies))
    schedules_to_db(get_bulk(:schedules))
    users_to_db(get_bulk(:users))

    refresh_incremental(:incidents) { |i| convert_incident(i) }
    refresh_incremental(:log_entries, "include[]" => "incident") { |le| convert_log_entry(le) }

    log("refresh.finish")
  end
end

PG2PD.new.refresh
--------------------------------------------------------------------------------
/examples/escalation-rate-per-service.sql:
--------------------------------------------------------------------------------
-- Compute escalation rate per service over the last 28 days.
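--
-- escalation_rate is the average number of escalations per incident.
-- Note that sum() over the bigint escalation counts yields numeric,
-- so the division in the final select is not integer division.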

with incident_counts as (
  select
    incidents.id as incident_id,
    incidents.service_id,
    (select
      count(*)
    from
      log_entries
    where
      log_entries.incident_id = incidents.id and
      log_entries.type = 'escalate'
    ) as escalations
  from
    incidents
  where
    incidents.created_at > now() - '28 days'::interval
  group by
    incident_id,
    service_id
),

counts as (
  select
    incident_counts.service_id,
    count(incident_counts.incident_id) as incidents,
    sum(incident_counts.escalations) as escalations
  from
    incident_counts
  group by
    incident_counts.service_id
)

select
  services.name as service,
  round(counts.escalations / counts.incidents, 1) as escalation_rate,
  counts.incidents as incidents,
  counts.escalations as escalations
from
  services,
  counts
where
  services.id = counts.service_id
order by
  escalation_rate desc
;
--------------------------------------------------------------------------------
/examples/incidents-per-service.sql:
--------------------------------------------------------------------------------
-- Count incidents per service over the last 28 days.

select
  services.name,
  count(incidents.id)
from
  incidents,
  services
where
  incidents.created_at > now() - '28 days'::interval and
  incidents.service_id = services.id
group by
  services.name
order by
  count(incidents.id) desc
;
--------------------------------------------------------------------------------
/examples/individual-detail.sql:
--------------------------------------------------------------------------------
-- Show details about an individual's notifications over the past week.
-- Note that the user's email is in the query.

select
  incidents.html_url as incident_url,
  log_entries.created_at at time zone 'America/Los_Angeles' as notification_time,
  log_entries.notification_type as notification_type,
  incidents.trigger_summary_subject,
  services.name as service_name
from
  users,
  log_entries,
  incidents,
  services
where
  users.email = 'mark@stripe.com' and
  log_entries.user_id = users.id and
  log_entries.type = 'notify' and
  log_entries.created_at > now() - '1 week'::interval and
  incidents.id = log_entries.incident_id and
  incidents.service_id = services.id
order by
  log_entries.created_at desc
;
--------------------------------------------------------------------------------
/examples/interrupted-hours-per-week.sql:
--------------------------------------------------------------------------------
-- Show the number of interrupted hours per escalation policy, per week.
-- In the result set, weeks are rows and there's one column per
-- escalation policy. Note that escalation policies are whitelisted in
-- the query.
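--
-- crosstab() is provided by the tablefunc extension, which schema.sql
-- creates. The column definitions in the ct(...) clause at the bottom
-- must match the escalation policy names whitelisted in the second
-- query below, in the same order.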

select * from crosstab(

-- Data: {week, escalation policy name, interrupted hours count}.
-- Interrupted hours are defined as {clock hour, person} tuples for
-- which the person received >= 1 notification during the clock hour.
-- Counts are per escalation policy, so if 1 person receives pages for
-- 2 EPs during the same hour, that will count as 2 interrupted hours.
$SQL$
with notifications as (
  select
    log_entries.created_at,
    log_entries.user_id,
    log_entries.incident_id
  from
    log_entries
  where
    type = 'notify'
),

interruptions as (
  select
    date_trunc('hour', notifications.created_at) as hour,
    incidents.escalation_policy_id as escalation_policy_id,
    count(distinct notifications.user_id) as interrupted_users
  from
    notifications,
    incidents
  where
    notifications.incident_id = incidents.id
  group by
    hour,
    escalation_policy_id
)

select
  to_char(date_trunc('week', interruptions.hour), 'YYYY-MM-DD') as week,
  escalation_policies.name as escalation_policy_name,
  sum(interruptions.interrupted_users) as interrupted_hours
from
  interruptions,
  escalation_policies
where
  interruptions.escalation_policy_id = escalation_policies.id
group by
  week,
  escalation_policy_name
order by
  week desc
$SQL$,

-- Escalation policy names.
$SQL$
select unnest(array[
  'Systems',
  'Databases',
  'API'
])
$SQL$

-- Column definitions: week + escalation policy names.
) as ct(
  week text,
  systems int,
  databases int,
  api int
)
;
--------------------------------------------------------------------------------
/examples/off-hours-incidents-per-service.sql:
--------------------------------------------------------------------------------
-- Compute the number of incidents that occur off-hours (Pacific time)
-- per service over the last 28 days.

with timezoned_incidents as (
  select
    incidents.id,
    incidents.service_id,
    incidents.created_at at time zone 'America/Los_Angeles' as local_created_at
  from
    incidents
  where
    incidents.created_at > now() - '28 days'::interval
)

select
  services.name as service,
  count(timezoned_incidents.id) as incidents
from
  timezoned_incidents,
  services
where
  timezoned_incidents.service_id = services.id and
  -- On-hours = Monday through Friday, 9:00a through 6:00p.
  not (extract(dow from local_created_at) >= 1 and
       extract(dow from local_created_at) <= 5 and
       extract(hour from local_created_at) >= 9 and
       extract(hour from local_created_at) <= 17)
group by
  service
order by
  incidents desc
;
--------------------------------------------------------------------------------
/examples/team-detail.sql:
--------------------------------------------------------------------------------
-- Show details about each incident that either originated from one of
-- the team's services or alerted a member of the team (potentially
-- coming from other services). Note that team services and members are
-- enumerated in the query.
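--
-- The report builds the team's incident set from two sources (incidents
-- that notified a team member, and incidents on the team's services),
-- unions them, and then left-joins the detail columns so that incidents
-- without a resolving log entry still appear.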

with team_notified_incident_ids as (
  select
    distinct log_entries.incident_id as incident_id
  from
    incidents,
    log_entries,
    users
  where
    incidents.created_at > now() - '7 days'::interval and
    incidents.id = log_entries.incident_id and
    log_entries.type = 'notify' and
    log_entries.user_id = users.id and
    substring(users.email from '(.+)@stripe.com') in (
      'amy',
      'fred',
      'neil',
      'susan'
    )
),

team_originating_incident_ids as (
  select
    distinct incidents.id as incident_id
  from
    incidents,
    services
  where
    incidents.created_at > now() - '7 days'::interval and
    incidents.service_id = services.id and
    services.name in (
      'Team API',
      'Team Pingdom',
      'Team 911'
    )
),

team_incident_ids as (
  select * from team_notified_incident_ids union
  select * from team_originating_incident_ids
),

escalation_counts as (
  select
    team_incident_ids.incident_id,
    (select count(*)
     from log_entries
     where log_entries.incident_id = team_incident_ids.incident_id and
           log_entries.type = 'escalate'
    ) as num_escalations
  from
    team_incident_ids
)

select
  substring(incidents.html_url from 9) as url,
  substring(coalesce(incidents.trigger_summary_subject, incidents.trigger_summary_description, '') for 100) as trigger_summary,
  incidents.created_at at time zone 'America/Los_Angeles' as created_at,
  substring(users.email from '(.+)@stripe.com') as resolved_by,
  services.name as service,
  escalation_policies.name as escalation_policy,
  escalation_counts.num_escalations as escalations
from
  incidents
  left outer join
    log_entries on incidents.id = log_entries.incident_id and
                   log_entries.type = 'resolve'
  left outer join
    users on log_entries.agent_id = users.id
  left outer join
    services on incidents.service_id = services.id
  left outer join
    escalation_policies on incidents.escalation_policy_id = escalation_policies.id
  left outer join
    escalation_counts on incidents.id = escalation_counts.incident_id
where
  incidents.id in (select * from team_incident_ids)
order by
  incidents.created_at asc
;
--------------------------------------------------------------------------------
/pd2pg.gemspec:
--------------------------------------------------------------------------------
# -*- encoding: utf-8 -*-

Gem::Specification.new do |gem|
  gem.name          = 'pd2pg'
  gem.version       = '1.0.0'
  gem.authors       = ['Stripe']
  gem.email         = ['oss@stripe.com']
  gem.description   = "Shovel data from PagerDuty's API to a Postgres database"
  gem.summary       = 'PagerDuty to Postgres shovel'
  gem.homepage      = 'https://github.com/stripe/pd2pg'
  gem.files         = `git ls-files`.split($/)
  gem.executables   = ['pd2pg']
  gem.test_files    = []

  gem.require_paths = ['lib']

  gem.add_dependency "excon", ">= 0.71.0"
  gem.add_dependency 'pg', '~> 0.18'
  gem.add_dependency 'sequel', '~> 4'
end
--------------------------------------------------------------------------------
/schema.sql:
--------------------------------------------------------------------------------
create table incidents (
  id varchar primary key,
  incident_number int not null,
  created_at timestamptz not null,
  html_url varchar not null,
  incident_key varchar,
  service_id varchar,
  escalation_policy_id varchar,
  trigger_summary_subject varchar,
  trigger_summary_description varchar,
  trigger_type varchar
);

create table log_entries (
  id varchar primary key,
  type varchar not null,
  created_at timestamptz not null,
  incident_id varchar not null,
  agent_type varchar,
  agent_id varchar,
  channel_type varchar,
  user_id varchar,
  notification_type varchar,
  assigned_user_id varchar
);

create table services (
  id varchar primary key,
  name varchar not null,
  status varchar not null,
  type varchar not null
);

create table escalation_policies (
  id varchar primary key,
  name varchar not null,
  num_loops int not null
);

create table escalation_rules (
  id varchar primary key,
  escalation_policy_id varchar not null,
  escalation_delay_in_minutes int,
  level_index int
);

create table escalation_rule_users (
  id varchar primary key,
  escalation_rule_id varchar not null,
  user_id varchar
);

create table escalation_rule_schedules (
  id varchar primary key,
  escalation_rule_id varchar not null,
  schedule_id varchar
);

create table schedules (
  id varchar primary key,
  name varchar not null
);

create table users (
  id varchar primary key,
  name varchar not null,
  email varchar not null
);

create table user_schedule (
  id varchar primary key,
  user_id varchar,
  schedule_id varchar
);

-- Extension tablefunc enables crosstabs.
create extension tablefunc;
--------------------------------------------------------------------------------