├── .gitignore ├── LICENSE ├── README.md ├── bin ├── daemon.js └── index.js ├── config └── daemon.js.example ├── daemon-README.md ├── lib └── index.js ├── nagios-check_couch_postgres_count ├── package-lock.json └── package.json /.gitignore: -------------------------------------------------------------------------------- 1 | /node_modules/* 2 | /config/daemon.js 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, sysadminmike 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | couch-to-postgres / pgcouch / couchpg / couchgres / postcouch 2 | ================= 3 | 4 | Node libary to stream CouchDB changes into PostgreSQL with a simple client example. Based on https://github.com/orchestrate-io/orchestrate-couchdb. 5 | 6 | By adding a few some extra bits allows not only for SELECT queries on the data but also UPDATE/INSERT/DELETE on your couchdb docs within Postgres. It is also possible to use your couch views as tables. 7 | 8 | Basically it allows postgres to use couchdb as its datastore - sort of like a Foreign Data Wrapper https://wiki.postgresql.org/wiki/Foreign_data_wrappers eg couchdb_fdw - but has a near realtime copy of records in postgres. 
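Conceptually, the node process follows the CouchDB _changes feed and mirrors each document into a plain (id, doc jsonb) table. Roughly, it performs the per-change write sketched below - the table name and values are illustrative (taken from the example that follows), and the real code in lib/index.js also skips documents whose _rev is unchanged:

    -- sketch of the per-change write the follower performs (illustrative values)
    INSERT INTO example (id, doc)
    VALUES ('1234567', '{"_id": "1234567", "myvar": "foo"}'::jsonb);

    -- on a later change to the same document
    UPDATE example
    SET doc = '{"_id": "1234567", "myvar": "bar"}'::jsonb
    WHERE id = '1234567';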
9 | 10 | For example: 11 | 12 | Add a doc to a couch 13 | 14 | curl -X PUT http://192.168.3.21:5984/example/1234567 -d '{"myvar":"foo"}' 15 | {"ok":true,"id":"1234567","rev":"1-d3747a58baa817834a21ceeaf3084c41"} 16 | 17 | 18 | See it in postgres: 19 | 20 | postgresdb=> SELECT id, doc FROM example WHERE id='1234567'; 21 | 22 | id | doc 23 | ---------+---------------------------------------------------------------------------------- 24 | 1234567 | {"_id": "1234567", "_rev": "1-d3747a58baa817834a21ceeaf3084c41", "myvar": "foo"} 25 | (1 row) 26 | 27 | 28 | 29 | Update a doc using postgres: 30 | 31 | postgresdb=> UPDATE example 32 | postgresdb-> SET doc=json_object_set_key(doc::json, 'myvar'::text, 'bar'::text)::jsonb, from_pg=true 33 | postgresdb-> WHERE id='1234567'; 34 | DEBUG: pgsql-http: queried http://192.168.3.21:5984/example/1234567 35 | CONTEXT: SQL statement "SELECT headers FROM http_post('http://192.168.3.21:5984/' || TG_TABLE_NAME || '/' || NEW.id::text, '', NEW.doc::text, 'application/json'::text)" 36 | PL/pgSQL function couchdb_put() line 9 at SQL statement 37 | UPDATE 0 38 | 39 | The couchdb_put function needs some more work. 40 | 41 | See it in couch: 42 | 43 | curl -X GET http://192.168.3.21:5984/example/1234567 44 | {"_id":"1234567","_rev":"2-b9f4c54fc36bdeb78c31590920c9751b","myvar":"bar"} 45 | 46 | And in postgres: 47 | 48 | postgresdb=> SELECT id, doc FROM example WHERE id='1234567'; 49 | id | doc 50 | ---------+---------------------------------------------------------------------------------- 51 | 1234567 | {"_id": "1234567", "_rev": "2-b9f4c54fc36bdeb78c31590920c9751b", "myvar": "bar"} 52 | (1 row) 53 | 54 | 55 | 56 | Add a doc using postgres 57 | 58 | postgresdb=> INSERT INTO example (id, doc, from_pg) VALUES ('7654321', json_object('{_id,myvar}','{7654321, 100}')::jsonb, true); 59 | DEBUG: pgsql-http: queried http://192.168.3.21:5984/example/7654321 60 | CONTEXT: SQL statement "SELECT headers FROM http_post('http://192.168.3.21:5984/' || TG_TABLE_NAME || '/' || NEW.id::text, '', NEW.doc::text, 'application/json'::text)" 61 | PL/pgSQL function couchdb_put() line 9 at SQL statement 62 | INSERT 0 0 63 | 64 | 65 | See it in couch 66 | 67 | curl -X GET http://192.168.3.21:5984/example/7654321 68 | {"_id":"7654321","_rev":"1-08343cb32bb0903348c0903e574cfbd0","myvar":"100"} 69 | 70 | 71 | Update doc created postgres with couch 72 | 73 | curl -X PUT http://192.168.3.21:5984/example/7654321 -d '{"_id":"7654321","_rev":"1-08343cb32bb0903348c0903e574cfbd0","myvar":"50"}' 74 | {"ok":true,"id":"7654321","rev":"2-5057c4942c6b92f8a9e2c3e5a75fd0b9" 75 | 76 | 77 | See it in postgres 78 | 79 | SELECT id, doc FROM example WHERE id='1234567'; 80 | id | doc 81 | ---------+---------------------------------------------------------------------------------- 82 | 1234567 | {"_id": "1234567", "_rev": "2-b9f4c54fc36bdeb78c31590920c9751b", "myvar": "bar"} 83 | (1 row) 84 | 85 | 86 | Add some more docs 87 | 88 | 89 | INSERT INTO example (id, doc, from_pg) VALUES ('test1', json_object('{_id,myvar}','{test1, 100}')::jsonb, true); 90 | INSERT INTO example (id, doc, from_pg) VALUES ('test2', json_object('{_id,myvar}','{test2, 50}')::jsonb, true); 91 | 92 | or 93 | 94 | curl -X PUT http://192.168.3.21:5984/example/test3 -d '{"_id":"test3", "myvar":"100"}' 95 | curl -X PUT http://192.168.3.21:5984/example/test4 -d '{"_id":"test4", "myvar":"50"}' 96 | curl -X PUT http://192.168.3.21:5984/example/test5 -d '{"_id":"test5", "myvar":"70"}' 97 | curl -X PUT http://192.168.3.21:5984/example/test6 -d 
'{"_id":"test6", "myvar":"20"}' 98 | curl -X PUT http://192.168.3.21:5984/example/test7 -d '{"_id":"test7", "myvar":"10"}' 99 | 100 | Do a query on the docs 101 | 102 | SELECT id, doc->'myvar' AS myvar FROM example 103 | WHERE id LIKE 'test%' AND CAST(doc->>'myvar' AS numeric) > 50 104 | ORDER BY myvar 105 | 106 | id | myvar 107 | -------+------- 108 | test3 | "100" 109 | test1 | "100" 110 | test5 | "70" 111 | (3 rows) 112 | 113 | 114 | Update some of the docs 115 | 116 | UPDATE example 117 | SET doc=json_object_set_key( 118 | doc::json, 'myvar'::text, (CAST(doc->>'myvar'::text AS numeric) + 50)::text 119 | )::jsonb, 120 | from_pg=true 121 | WHERE id LIKE 'test%' AND CAST(doc->>'myvar' AS numeric) < 60 122 | 123 | Peform same query 124 | 125 | SELECT id, doc->'myvar' AS myvar FROM example 126 | WHERE id LIKE 'test%' AND CAST(doc->>'myvar' AS numeric) > 50 127 | ORDER BY myvar 128 | 129 | id | myvar 130 | -------+------- 131 | test4 | "100" 132 | test2 | "100" 133 | test3 | "100" 134 | test1 | "100" 135 | test7 | "60" 136 | test5 | "70" 137 | test6 | "70" 138 | (7 rows) 139 | 140 | Initially I didnt spot the above order being wrong so you need to be careful. 141 | 142 | SELECT id, CAST(doc->>'myvar' AS numeric) as myvar FROM example 143 | WHERE id LIKE 'test%' AND CAST(doc->>'myvar' AS numeric) > 50 144 | ORDER BY myvar, doc->>'_id' 145 | 146 | id | myvar 147 | -------+------- 148 | test7 | "60" 149 | test5 | "70" 150 | test6 | "70" 151 | test1 | "100" 152 | test2 | "100" 153 | test3 | "100" 154 | test4 | "100" 155 | (7 rows) 156 | 157 | Order is now correct. 158 | 159 | 160 | And finally in couch 161 | 162 | curl -s -X POST '192.168.3.21:5984/example/_temp_view?include_docs=false' -H 'Content-Type: application/json' \ 163 | -d '{"map":"function(doc) { emit(doc._id, doc.myvar) };"}' 164 | {"total_rows":7,"offset":0,"rows":[ 165 | {"id":"test1","key":"test1","value":"100"}, 166 | {"id":"test2","key":"test2","value":"100"}, 167 | {"id":"test3","key":"test3","value":"100"}, 168 | {"id":"test4","key":"test4","value":"100"}, 169 | {"id":"test5","key":"test5","value":"70"}, 170 | {"id":"test6","key":"test6","value":"70"}, 171 | {"id":"test7","key":"test7","value":"60"} 172 | ]} 173 | 174 | It is also possible to use a couchdb view as a table: 175 | 176 | The couch design doc: 177 | 178 | { 179 | "_id": "_design/mw_views", 180 | "language": "javascript", 181 | "views": { 182 | "by_feedName": { 183 | "map": "function(doc) { emit(doc.feedName,null); }", 184 | "reduce": "_count" 185 | }, 186 | "by_tags": { 187 | "map": "function(doc) { for(var i in doc.tags) { emit (doc.tags[i],null); } }", 188 | "reduce": "_count" 189 | } 190 | } 191 | } 192 | 193 | 194 | WITH by_feedname_reduced AS ( 195 | SELECT * FROM json_to_recordset( 196 | ( 197 | SELECT (content::json->>'rows')::json 198 | FROM http_get('http://192.168.3.23:5984/articles/_design/mw_views/_view/by_feedName?group=true')) 199 | ) AS x (key text, value text) 200 | ) 201 | 202 | SELECT * FROM by_feedname_reduced WHERE value::numeric > 6000 ORDER BY key 203 | 204 | This takes under a second to run but the initial build of the view takes about 20 mins for a fresh copy of the couchdb. 205 | 206 | The equivilent query using the the data in postgres 207 | 208 | WITH tbl AS ( 209 | SELECT DISTINCT doc->>'feedName' as key, COUNT(doc->>'feedName') AS value 210 | FROM articles 211 | GROUP BY doc->>'feedName' 212 | ) 213 | SELECT key, value FROM tbl WHERE value > 6000 ORDER BY key; 214 | 215 | This takes over 4 seconds. 
216 |
217 |
218 |
219 |
220 | Testing with my articles database from birdreader - https://github.com/glynnbird/birdreader
221 |
222 | curl -X GET http://localhost:5984/articles
223 | {"db_name":"articles","doc_count":63759,"doc_del_count":2,"update_seq":92467,"purge_seq":0,"compact_running":false,"disk_size":151752824,"data_size":121586165,"instance_start_time":"1418686121041424","disk_format_version":6,"committed_update_seq":92467}
224 |
225 |
226 | SELECT DISTINCT jsonb_object_keys(doc) AS myfields
227 | FROM articles ORDER BY myfields
228 |
229 | This queries all of the documents and retrieves the fields used in the couch documents.
230 |
231 | On another couch database with a 'type' field for different doc types stored in the same database - about 70k docs:
232 |
233 | SELECT DISTINCT doc->>'type' as doctype, count(doc->>'type')
234 | FROM mytable GROUP BY doctype ORDER BY doctype
235 |
236 | Takes under a second.
237 |
238 | SELECT DISTINCT doc->>'type' as doctype, jsonb_object_keys(doc) AS myfields
239 | FROM mytable
240 | ORDER BY doctype, myfields;
241 |
242 | With no indexes the above query takes just over 10 secs. I have made no indexes or adjustments to the default FreeBSD postgresql94-server-9.4.r1 port.
243 |
244 |
245 | ----------
246 |
247 | Example setup and postgres configuration
248 |
249 | git clone git@github.com:sysadminmike/couch-to-postgres.git
250 |
251 |
252 | Get needed modules:
253 |
254 | npm install
255 |
256 |
257 | Edit ./bin/index.js to suit your settings:
258 |
259 | var settings =
260 | {
261 | couchdb: {
262 | url: 'http://192.168.3.21:5984',
263 | pgtable: 'example',
264 | database: 'example'
265 | }
266 | };
267 |
268 | pgclient = new pg.Client("postgres://mike@localhost/pgdatabase");
269 |
270 |
271 | Before starting it up, create the since_checkpoints table:
272 |
273 | CREATE TABLE since_checkpoints
274 | (
275 | pgtable text NOT NULL,
276 | since numeric DEFAULT 0,
277 | enabled boolean DEFAULT false, --not used in the simple client example
278 | CONSTRAINT since_checkpoint_pkey PRIMARY KEY (pgtable)
279 | )
280 |
281 | This table stores the checkpoint for each database being synced - something akin to the couchdb _replicator database.
282 |
283 | Create the table to store the couch docs:
284 |
285 | CREATE TABLE example
286 | (
287 | id text NOT NULL,
288 | doc jsonb,
289 | CONSTRAINT example_pkey PRIMARY KEY (id)
290 | )
291 |
292 | Start watching changes:
293 |
294 | ./bin/index.js
295 |
296 | It will add a record to the since_checkpoints table and begin syncing.
297 |
298 | At this point you can perform SELECT queries on the docs within postgres as in the example above. This should be fine to use against a production couchdb, as it makes no changes to it and performs the same task as the Elasticsearch river plugin. With a bit of copy/pasting it is possible to use SQL to create simple scripts or one-liners to run in a shell with curl.
299 |
300 | Also take a look at /bin/daemon.js and https://github.com/sysadminmike/couch-to-postgres/blob/master/daemon-README.md
301 |
302 | -------
303 |
304 | To handle UPDATE/INSERT/DELETE, more configuration is required. Note this is still experimental, so I wouldn't point it at any production data.
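Before adding the write path, it can be worth confirming the read-only sync has caught up (the repo also ships a nagios-check_couch_postgres_count script, presumably for this kind of check). A purely illustrative way to eyeball it is to compare the postgres row count with couch's doc_count:

    -- illustrative only: this count should match doc_count from
    -- curl http://192.168.3.21:5984/example once the feed is caught up
    SELECT count(*) AS pg_doc_count FROM example;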
305 | 306 | First install the postgres extension pgsql-http at https://github.com/pramsey/pgsql-http 307 | 308 | See note about pgsql-http module install if you not sure how to install a postgres extension - note this has just been updated to handle PUT and DELETE requests - i have not yet had a chane to test anything i have done on this page with the new version but will try to shortly but think any reference to http_post need updating as the new version specifies: 309 | 310 | http_post(uri VARCHAR, content VARCHAR, content_type VARCHAR) 311 | 312 | But on this page I am using the old one: 313 | 314 | http_post(url VARCHAR, params VARCHAR, data VARCHAR, contenttype VARCHAR DEFAULT NULL) 315 | 316 | So please bear this in mind if setting this up. 317 | 318 | Then add it in the database you want to use: 319 | 320 | CREATE EXTENSION http 321 | 322 | 323 | If you havent already done it: 324 | 325 | CREATE TABLE since_checkpoints 326 | ( 327 | pgtable text NOT NULL, 328 | since numeric DEFAULT 0, 329 | enabled boolean DEFAULT false, 330 | CONSTRAINT since_checkpoint_pkey PRIMARY KEY (pgtable) 331 | ); 332 | 333 | 334 | Add function to put data into couchdb: 335 | 336 | CREATE OR REPLACE FUNCTION couchdb_put() RETURNS trigger AS $BODY$ 337 | DECLARE 338 | RES RECORD; 339 | BEGIN 340 | IF (NEW.from_pg) IS NULL THEN 341 | RETURN NEW; 342 | ELSE 343 | 344 | SELECT status FROM http_post('http://192.168.3.21:5984/' || TG_TABLE_NAME || '/' || NEW.id::text, '', NEW.doc::text, 'application/json'::text) INTO RES; 345 | 346 | --Need to check RES for response code 347 | --RAISE EXCEPTION 'Result: %', RES; 348 | RETURN null; 349 | END IF; 350 | END; 351 | $BODY$ 352 | LANGUAGE plpgsql VOLATILE 353 | 354 | Add function to modify fields inside the PostgreSQL JSON datatype - from: http://stackoverflow.com/questions/18209625/how-do-i-modify-fields-inside-the-new-postgresql-json-datatype 355 | 356 | 357 | CREATE OR REPLACE FUNCTION json_object_set_key(json json, key_to_set text, value_to_set anyelement) 358 | RETURNS json AS 359 | $BODY$ 360 | SELECT COALESCE( 361 | (SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}') 362 | FROM (SELECT * 363 | FROM json_each("json") 364 | WHERE "key" <> "key_to_set" 365 | UNION ALL 366 | SELECT "key_to_set", to_json("value_to_set")) AS "fields"), 367 | '{}' 368 | )::json 369 | $BODY$ 370 | LANGUAGE sql IMMUTABLE STRICT; 371 | 372 | 373 | 374 | Create table to hold the docs 375 | 376 | CREATE TABLE example 377 | ( 378 | id text NOT NULL, 379 | doc jsonb, 380 | from_pg boolean, -- for trigger nothing stored here 381 | CONSTRAINT example_pkey PRIMARY KEY (id) 382 | ); 383 | 384 | 385 | Create trigger to stop data being inserted into the table from sql and send off to couch instead 386 | 387 | CREATE TRIGGER add_doc_to_couch 388 | BEFORE INSERT OR UPDATE 389 | ON example FOR EACH ROW EXECUTE PROCEDURE couchdb_put(); 390 | 391 | 392 | Note: All queries in postgres must have "from_pg=true" for inserts and updates or the postgres will send the data to the table and not send it to couch. 393 | 394 | I plan to reverse this logic and make the libary include this so it will be possible to issue inserts/updates and exclude this field. 395 | 396 | You can now start the node client and give it a test. 397 | 398 | ----- 399 | 400 | A few variable to tune in ./lib/index.js need to move to config options 401 | 402 | In checkpoint_changes function: 403 | 404 | ckwait = 3 * 1000; 405 | 406 | This is how often the stream is checkpointed when the stream is active. 
I would adjust this depending on how busy your couchdb is. When the stream is idle this increases to 10 secs.
407 |
408 | In the startFollowing function there is:
409 | // The inactivity timer is for time between *changes*, or time between the
410 | // initial connection and the first change. Therefore it goes here.
411 | stream.inactivity_ms = 30000;
412 |
413 | Maybe use NOTIFY and have the node client LISTEN for a message when postgres calls couchdb_put() for the first time (can you do a timer in postgres? or node will get notified about every update and only needs a wake-up after idle time).
414 |
415 | -----
416 |
417 |
418 | Performance-wise, compared to the php dumping script:
419 |
420 | On a test with a couchdb of about 150MB with 65k docs, the node library completed working through _changes in about 17 minutes to add all the docs to an empty table, and then keeps it in sync.
421 |
422 | The couch-to-postgres-php-dumper script - https://github.com/sysadminmike/couch-to-postgres-php-dump - takes about 28 minutes for the initial sync and 11 secs for a resync.
423 |
424 | Replicating the same db from one jail running couch to another couch jail on the same machine, as a baseline, takes just over 8 minutes:
425 |
426 | {"session_id":"661411f2137c64efc940f55b802dc35b","start_time":"Tue, 16 Dec 2014 17:00:05 GMT","end_time":"Tue, 16 Dec 2014 17:08:10 GMT","start_last_seq":0,"end_last_seq":92862,"recorded_seq":92862,"missing_checked":63840,"missing_found":63840,"docs_read":63840,"docs_written":63840,"doc_write_failures":0}
427 |
428 | Looking at top, I think postgres is waiting on the disk most of the time rather than being CPU bound - the single php process calling curl for each doc was hitting the CPU hard and couldn't be used as a solution for huge databases, nor could it deal with more than one db at once.
429 |
430 | On further testing with some dodgy postgres conf settings:
431 |
432 | fsync = off
433 | synchronous_commit = off
434 |
435 | As postgres is not the primary datastore, it's OK if the data dies, considering a full rebuild now takes under 2 minutes:
436 |
437 | mike:~/postgres-couch/couch-to-postgres-test % time ./bin/index.js
438 | articlespg: {"db_name":"articlespg","doc_count":63838,"doc_del_count":2,"update_seq":63840,"purge_seq":0,"compact_running":false,"disk_size":242487416,"data_size":205414817,"instance_start_time":"1418749205916149","disk_format_version":6,"committed_update_seq":63840}
439 | Connected to postgres
440 | articlespg: initial since=0
441 | articlespg: Starting checkpointer
442 | articlespg: Checkpoint set to 7180 next check in 3 seconds
443 | articlespg: Checkpoint set to 9344 next check in 3 seconds
444 | articlespg: Checkpoint set to 11536 next check in 3 seconds
445 | ...
446 | articlespg: Checkpoint set to 60920 next check in 3 seconds
447 | articlespg: Checkpoint set to 63636 next check in 3 seconds
448 | articlespg: Checkpoint set to 63840 next check in 3 seconds
449 | articlespg: Checkpoint 63840 is current next check in: 10 seconds
450 | ^C45.919u 3.226s 1:42.10 48.1% 10864+321k 158+0io 0pf+0w
451 | mike:~/postgres-couch/couch-to-postgres-test %
452 |
453 | So the initial sync of the same test db is now down to well under 2 minutes - about 4 times faster than a native couch-to-couch sync. I think this is faster than the Elasticsearch river doing a similar task.
454 | 455 | Snippet from top while it was syncing: 456 | 457 | PID USERNAME THR PRI NICE SIZE RES STATE C TIME WCPU COMMAND 458 | 57635 mike 6 45 0 621M 66064K uwait 1 0:25 50.78% node 459 | 57636 70 1 36 0 186M 97816K sbwait 1 0:11 22.75% postgres 460 | 44831 919 11 24 0 181M 30048K uwait 0 67:28 20.51% beam.smp 461 | 23891 919 11 20 0 232M 69168K uwait 0 26:22 0.39% beam.smp 462 | 57624 70 1 20 0 180M 17840K select 0 0:00 0.29% postgres 463 | 57622 70 1 21 0 180M 65556K select 1 0:00 0.20% postgres 464 | 465 | 466 | ----- 467 | 468 | Possible ways to deploy - master-master postgres setup using couchdbs primary data store and setting up replication between all locations using Postgres and Couch as a pair. 469 | 470 | Location 1 Location 2 471 | Postgres == CouchDB ---------- CouchDB == Postgres 472 | \ / 473 | \ / 474 | \ / 475 | \ / 476 | \/ 477 | Location 3 478 | CouchDB 479 | || 480 | Postgres 481 | 482 | Where === is the node client keeping the paired postgres up to date 483 | And ----- is couchdb performing replication 484 | 485 | ----- 486 | 487 | IDEAS/TODOS - Comments most welcome. 488 | 489 | How to do bulk updates: 490 | 491 | WITH new_docs AS ( 492 | SELECT json_object_set_key(doc::json, 'test'::text, 'Couch & Postgres are scool'::text)::jsonb AS docs 493 | FROM articlespg 494 | ), 495 | agg_docs AS ( 496 | SELECT json_agg(docs) AS aggjson FROM new_docs 497 | ) 498 | 499 | SELECT headers FROM 500 | http_post('http://192.168.3.21:5984/articlespg/_bulk_docs', '', 501 | '{"all_or_nothing":true, "docs":' || (SELECT * FROM agg_docs) || '}', 502 | 'application/json'::text) ; 503 | 504 | I tried on the articles test db i am using and it was very fast for an update to < 100 rows 505 | I then tried to update all docs and crashed couch 506 | 507 | DEBUG: pgsql-http: queried http://192.168.3.21:5984/articlespg/_bulk_docs 508 | ERROR: Failed to connect to 192.168.3.21 port 5984: Connection refused 509 | couchplay=> 510 | 511 | However if we split up the request in to smaller chunks: 512 | 513 | WITH newdocs AS ( -- Make chage to json here 514 | SELECT id, json_object_set_key(doc::json, 'test'::text, 'Couch & Postgres are scool'::text)::jsonb AS docs 515 | FROM articlespg 516 | ), 517 | chunked AS ( -- get in chunks 518 | SELECT docs, ((ROW_NUMBER() OVER (ORDER BY id) - 1) / 50) +1 AS chunk_no 519 | FROM newdocs 520 | ), 521 | chunked_newdocs AS ( -- Bulk up bulk_docs chunks to send 522 | SELECT json_agg(docs) AS bulk_docs, chunk_no FROM chunked GROUP BY chunk_no ORDER BY chunk_no 523 | ) 524 | 525 | SELECT chunk_no, status FROM chunked_newdocs, 526 | http_post('http://192.168.3.21:5984/articlespg/_bulk_docs', '', 527 | '{"all_or_nothing":true, "docs":' || (bulk_docs) || '}', 528 | 'application/json'::text); 529 | 530 | Chunk size - in this case 50 - i think safe to go to about 500 or 1000 depending on doc size - I tried 1000 to begin with but http_post timed out - and 500 seems to be fine. 531 | 532 | Watching the node daemon while running chunked bulk updates i can see the changes streaming back to postgres almost as soon as the start so i think better using an UPDATE as postgres doesnt lock the table while this is happening ***Note need to retest this. 533 | 534 | However I think better to change all PUTS to bulks POSTS - need a function like: 535 | 536 | post_docs(docs,chunk_size) - returning recordset of status codes? or just true/false? 537 | 538 | how to deal with the case where there are 5 chunks and the first 2 sucseed but the 3rd fails? 
539 | is it possible to rollback a transaction in postgres and give the function oldocs and newdocs 540 | then a post_docs chunk fails it can rollback the chunks which have succeeded? 541 | 542 | to be used after like: 543 | 544 | SELECT post_docs(json_object_set_key(doc::json, 'test'::text, 'Couch & Postgres are scool'::text)::jsonb,100) 545 | AS results 546 | FROM articlespg 547 | 548 | 549 | 550 | This also makes it very simple to make new databases - just add a new db in couch and change the url to point to it: 551 | 552 | chunked AS ( 553 | SELECT docs, ((ROW_NUMBER() OVER (ORDER BY id) - 1) / 500) +1 AS chunk_no 554 | FROM articlespg 555 | ), 556 | chunked_newdocs AS ( 557 | SELECT json_agg(docs) AS bulk_docs, chunk_no FROM chunked GROUP BY chunk_no ORDER BY chunk_no 558 | ) 559 | SELECT chunk_no, status FROM chunked_newdocs, 560 | http_post('http://192.168.3.21:5984/NEW_articlespg_COPY/_bulk_docs', '', 561 | '{"all_or_nothing":true, "docs":' || (bulk_docs) || '}', 'application/json'::text); 562 | 563 | I think maybe faster than a replication. 564 | 565 | 566 | ------ 567 | 568 | 569 | Note: On pgsql-http module install: 570 | 571 | https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules 572 | 573 | For FreeBSD you need to have curl and gettext-tools installed. 574 | 575 | # gmake PG_CONFIG=/usr/local/bin/pg_config 576 | cc -O2 -pipe -fstack-protector -fno-strict-aliasing -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv -fPIC -DPIC -I. -I./ -I/usr/local/include/postgresql/server -I/usr/local/include/postgresql/internal -I/usr/local/include/libxml2 -I/usr/include -I/usr/local/include -I/usr/local/include -c -o http.o http.c 577 | http.c:89:1: warning: unused function 'header_value' [-Wunused-function] 578 | header_value(const char* header_str, const char* header_name) 579 | ^ 580 | 1 warning generated. 581 | cc -O2 -pipe -fstack-protector -fno-strict-aliasing -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv -fPIC -DPIC -I. 
-I./ -I/usr/local/include/postgresql/server -I/usr/local/include/postgresql/internal -I/usr/local/include/libxml2 -I/usr/include -I/usr/local/include -I/usr/local/include -c -o stringbuffer.o stringbuffer.c 582 | cc -O2 -pipe -fstack-protector -fno-strict-aliasing -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute -Wformat-security -fno-strict-aliasing -fwrapv -fPIC -DPIC -shared -o http.so http.o stringbuffer.o -L/usr/local/lib -L/usr/local/lib -pthread -Wl,-rpath,/usr/lib:/usr/local/lib -fstack-protector -L/usr/local/lib -L/usr/lib -L/usr/local/lib -Wl,--as-needed -Wl,-R'/usr/local/lib' -L/usr/local/lib -lcurl 583 | 584 | 585 | 586 | # gmake PG_CONFIG=/usr/local/bin/pg_config install 587 | /bin/mkdir -p '/usr/local/lib/postgresql' 588 | /bin/mkdir -p '/usr/local/share/postgresql/extension' 589 | /bin/mkdir -p '/usr/local/share/postgresql/extension' 590 | /usr/bin/install -c -o root -g wheel -m 755 http.so '/usr/local/lib/postgresql/http.so' 591 | /usr/bin/install -c -o root -g wheel -m 644 http.control '/usr/local/share/postgresql/extension/' 592 | /usr/bin/install -c -o root -g wheel -m 644 http--1.0.sql '/usr/local/share/postgresql/extension/' 593 | 594 | 595 | 596 | ------------------- 597 | 598 | Futher thoughts and ideas/questions or want to help? https://github.com/sysadminmike/couch-to-postgres/issues 599 | 600 | More testing: 601 | 602 | Update to all records in test articles db - 603 | 604 | SELECT id , doc->>'read' FROM articlespg WHERE doc->>'read'='false' 605 | 606 | couchplay=> SELECT id , doc->>'read' FROM articlespg WHERE doc->>'read'='false' 607 | couchplay-> 608 | couchplay-> ; 609 | id | ?column? 610 | ------------------+---------- 611 | _design/mw_views | false 612 | (1 row) 613 | 614 | 615 | Returns just the design doc. 616 | 617 | 618 | On running: 619 | 620 | UPDATE articlespg 621 | SET doc = json_object_set_key(doc::json, 'read'::text, true)::jsonb, from_pg=true ; 622 | 623 | Something interesting happens with the feed and postgres - I think postgres locks the table while the update takes place as the feeder carries on querying couch but does not update postgres until the update is complete. 624 | 625 | While the query is runing you can see the commit sequence in couch updating: 626 | 627 | articlespg: {"db_name":"articlespg","doc_count":63838,"doc_del_count":2,"update_seq":233296,"purge_seq":0,"compact_running":false,"disk_size":2145373958,"data_size":214959726,"instance_start_time":"1418762851354294","disk_format_version":6,"committed_update_seq":233224} 628 | articlespg: Checkpoint 192414 is current next check in: 10 seconds 629 | PG_WATCHDOG: OK 630 | 631 | articlespg: {"db_name":"articlespg","doc_count":63838,"doc_del_count":2,"update_seq":242301,"purge_seq":0,"compact_running":false,"disk_size":2234531964,"data_size":215440194,"instance_start_time":"1418762851354294","disk_format_version":6,"committed_update_seq":242301} 632 | articlespg: Checkpoint 192414 is current next check in: 10 seconds 633 | PG_WATCHDOG: OK 634 | 635 | As soon as I get a return for the query the feed goes mad so think postgres has locked the table while the update runs. *** I need to restest this as i may have been doing this test after i introduced a bug stalling the feed on updates. 636 | 637 | 638 | The UPDATE takes 475 seconds to return 639 | The river then takes about 3 minutes to catch up after the return 640 | So about 10 minutes to do an update on all 60k records. 
641 |
642 | I need to look at the bulk updates, as I now think it is possible to do an all-or-nothing update and possibly do it in a transaction - I think if 2 updates to couch were issued and the second failed, the first would still have taken place as far as couch is concerned.
643 | At the moment, if a single PUT were to fail, postgres assumes no data has been updated, but all of the docs up to that point would already have been updated - in a bulk request this would not be a problem, I think. Note that so far not one insert or update has failed, but I haven't killed couch half way through.
644 |
645 |
646 |
647 |
648 | This did give me an idea for another use: populate a new couchdb from a subset of the couchdb tables in postgres, by simply updating the couchdb_put() function to temporarily submit updates to a different IP or db, eg:
649 |
650 | --SELECT headers FROM http_post('http://192.168.3.21:5984/' || TG_TABLE_NAME || '/' || NEW.id::text, '', NEW.doc::text, 'application/json'::text) INTO RES;
651 | SELECT headers FROM http_post('http://192.168.3.21:5984/articlespg-subset' || '/' || NEW.id::text, '', NEW.doc::text, 'application/json'::text) INTO RES;
652 |
653 | Then re-run the update, but with a WHERE:
654 |
655 | UPDATE articlespg
656 | SET doc = json_object_set_key(doc::json, 'read'::text, true)::jsonb, from_pg=true
657 | WHERE doc ->>'feedName' ='::Planet PostgreSQL::';
658 |
659 | About 10 secs later there is a populated couchdb with just the 761 docs matching the WHERE:
660 |
661 | {"db_name":"articlespg-subset","doc_count":761,"doc_del_count":0,"update_seq":761,"purge_seq":0,"compact_running":false,"disk_size":6107249,"data_size":3380130,"instance_start_time":"1418770153501066","disk_format_version":6,"committed_update_seq":761}
662 |
663 | A lot simpler than creating a design doc for a one-off filtered replication.
664 | There is no reason why you couldn't do a union on two couch db tables in postgres and merge them into a new couchdb, provided there are no id issues (see the sketch below).
665 |
666 | I have also done a quick test with Excel and MS Query & Access, and a passthrough SQL query, both via ODBC to postgres - I can see the couch data in both - this makes ad hoc reports so simple.
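A minimal sketch of the union idea above, reusing the chunked _bulk_docs pattern from the IDEAS section. The second source table (articlespg_other) and the target db name (merged_db) are hypothetical, and duplicate _ids would need resolving first:

    WITH merged AS (
        SELECT id, doc FROM articlespg
        UNION ALL
        SELECT id, doc FROM articlespg_other  -- hypothetical second couch table
    ),
    chunked AS (  -- same chunking trick as above, 500 docs per _bulk_docs request
        SELECT doc, ((ROW_NUMBER() OVER (ORDER BY id) - 1) / 500) + 1 AS chunk_no
        FROM merged
    ),
    chunked_newdocs AS (
        SELECT json_agg(doc) AS bulk_docs, chunk_no FROM chunked GROUP BY chunk_no ORDER BY chunk_no
    )
    SELECT chunk_no, status FROM chunked_newdocs,
        http_post('http://192.168.3.21:5984/merged_db/_bulk_docs', '',
            '{"all_or_nothing":true, "docs":' || (bulk_docs) || '}',
            'application/json'::text);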
667 | 668 | Note on name - I think I like postcouch best - and i think most of the work will be done by http_post function POSTing (from postgres) to couchdb 669 | 670 | (I think a good idea to also also give option to do PUTS) 671 | -------------------------------------------------------------------------------- /bin/daemon.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var Q = require('kew'); 4 | var pg = require('pg'); 5 | var PostgresCouchDB = require('../lib'); 6 | var DaemonSettings = require('../config/daemon.js'); 7 | 8 | var daemon_settings = new DaemonSettings(); 9 | 10 | var PostgresCouchContainer = []; 11 | 12 | 13 | var control_port = 8888; 14 | 15 | //how often do we check pgtables for new records 16 | var findfeeds_interval = (60 * 1000) * 1; //15 mins - can force a find at /_finder 17 | 18 | //how often should watchdog run 19 | var feedswatchdog_interval = 50 * 1000; //50 secs - can force watchdog at: /_watchdog 20 | var pgwatchdog_interval = 10 * 1000; //10 secs - can force watchdog at: /_watchdog 21 | 22 | var find_once = false; //stop finder calling itself if invoked via http 23 | var watch_once = false; //stop watchdog calling itself if invoked via http 24 | var postgres_dead = true; 25 | 26 | /* 27 | note: if the db in couch is not found then the feed alive is set to false 28 | the watchdog will reap this feed but it will get re-added the next 29 | time findFeeds executes. 30 | */ 31 | 32 | var pgclient = ''; 33 | 34 | function pgClientUrl() { 35 | var url_prefix = "postgres://" + daemon_settings["postgres"]["username"]; 36 | 37 | var pg_pass = daemon_settings["postgres"]["password"]; 38 | var url_optional_pass = pg_pass ? (":" + pg_pass) : ""; 39 | 40 | var url_postfix = "@" + daemon_settings["postgres"]["host"] + 41 | "/" + daemon_settings["postgres"]["database"]; 42 | 43 | return (url_prefix + url_optional_pass + url_postfix); 44 | }; 45 | 46 | function connectPostgres(do_one_find){ 47 | if(postgres_dead == true){ 48 | pg_client_url = pgClientUrl(); 49 | pgclient = new pg.Client(pg_client_url); 50 | pgclient.connect(function(err) { 51 | if (err) { 52 | if(err.code == 'ECONNREFUSED'){ //try to catch here but i dont think works 53 | console.error('ERROR: Connection to postgres refused', err); 54 | }else{ 55 | console.error('ERROR: Could not connect to postgres', err); 56 | } 57 | postgres_dead = true; 58 | // process.exit(); 59 | } else { 60 | console.log('Connected to postgres'); 61 | postgres_dead = false; //this should be the only place we set this to false 62 | if(do_one_find==true){ 63 | setTimeout(function() { 64 | //console.log(''); 65 | findFeeds(true) 66 | }, 3000); 67 | } 68 | } 69 | }); 70 | }else{ 71 | console.error('Postgres client reconnect called when postgres_alive=true', err); 72 | } 73 | } 74 | 75 | process.on('uncaughtException', function (err) { 76 | if (err.code == 'ECONNREFUSED'){ //not sure where this one come from postgres or couch - i think socket lib err so is the same for pg & couch 77 | console.error('ERROR: ECONNREFUSED - Node NOT Exiting...'); 78 | // postgres_dead = true; // we dont know this for sure - may have been set somewhere else 79 | feedsWatchdog(true); //should kill all feeds 80 | 81 | }else if ( (err.code=='ECONNRESET') | 82 | (err.code=='ECONNABORTED') | 83 | (err.file=='postgres.c' & err.severity=='FATAL') 84 | 85 | ){ 86 | console.error("Postgresl connection died - Node NOT Exiting...",err); 87 | postgres_dead = true; 88 | feedsWatchdog(true); //should 
kill all feeds 89 | }else{ 90 | console.error('UNKNOWN ERR - exiting',err); 91 | //perhaps make a shutdown function 92 | process.exit(); 93 | } 94 | }); 95 | 96 | function findFeeds(find_once) { 97 | if(postgres_dead == false){ 98 | if(find_once == true){ 99 | console.log('FINDER: One off find started.'); 100 | }else{ 101 | console.log('FINDER: Started'); 102 | } 103 | var sql = "SELECT pgtable, since FROM since_checkpoints WHERE enabled=True ORDER BY pgtable"; 104 | pgclient.query(sql, function(err, result) { 105 | if (err) { 106 | console.error("FINDER: Could not get pgtables and checkpoints with: " + sql, err); 107 | process.exit(); 108 | } else { 109 | var pgtable; 110 | var couchdb; 111 | 112 | console.log("FINDER: " + result.rows.length + ' dbs to check found'); 113 | for (var i = 0; i < result.rows.length; i++) { 114 | couchdb = result.rows[i].pgtable; 115 | pgtable = couchdb.replace(/\-/g,"_"); 116 | 117 | if (PostgresCouchContainer[pgtable] === undefined) { 118 | pgtableCheck(pgtable); 119 | 120 | PostgresCouchContainer[pgtable] = new PostgresCouchDB(pgclient, { 121 | couchdb: { 122 | url: daemon_settings["couchdb"]["url"], 123 | pgtable: pgtable, 124 | since: result.rows[i].since, 125 | database: couchdb 126 | } 127 | }); 128 | 129 | PostgresCouchContainer[pgtable].start(); 130 | console.log('FINDER: Found ' + pgtable + ' processing changes since: ' + result.rows[i].since); 131 | 132 | PostgresCouchContainer[pgtable].events.on('connect', console.log); 133 | 134 | PostgresCouchContainer[pgtable].events.on('checkpoint', console.log); //Comment out if too much info 135 | PostgresCouchContainer[pgtable].events.on('checkpoint.error', function(msg, err) { 136 | console.error(msg, err); 137 | process.exit(1); 138 | }); 139 | 140 | //PostgresCouchContainer[pgtable].events.on('change', console.log); 141 | PostgresCouchContainer[pgtable].events.on('change.error', function(tbl, change, err) { 142 | console.error(tbl, err.body, err); 143 | }); 144 | 145 | PostgresCouchContainer[pgtable].events.on('error', function(msg, err) { console.error(msg, err); }); 146 | 147 | //PostgresCouchContainer[pgtable].events.on('drain', console.log); 148 | 149 | PostgresCouchContainer[pgtable].events.on('stop', function(key) { 150 | console.log(key + ': stopped'); 151 | }); 152 | } //undefined check 153 | } //for loop 154 | } 155 | }); //pgclient 156 | } else { 157 | console.log('FINDER: postgres is dead'); 158 | 159 | } 160 | if(find_once == false){ 161 | setTimeout(function() { 162 | findFeeds(false); 163 | }, findfeeds_interval); 164 | } 165 | } 166 | 167 | 168 | function reaperCheck(tbl) { 169 | 170 | if(postgres_dead == true){ 171 | PostgresCouchContainer[tbl].stop(); 172 | console.error('WATCHDOG: ' + tbl + ' postgres dead feed stopped'); 173 | }else{ 174 | var sql = "SELECT pgtable FROM since_checkpoints WHERE pgtable='" + tbl + "' AND enabled=True"; 175 | pgclient.query(sql, function(err, result) { 176 | if (err) { 177 | console.error("WATCHDOG: Could not get pgtables with: " + sql, err); 178 | process.exit(); 179 | } else { 180 | if (result.rows.length == 0) { 181 | PostgresCouchContainer[tbl].stop(); 182 | console.error('WATCHDOG: ' + tbl + ' feed stopped'); 183 | }else{ 184 | console.error('WATCHDOG: ' + tbl + ' ok'); 185 | } 186 | } 187 | }); 188 | } 189 | } 190 | 191 | function feedsWatchdog(watch_once) { 192 | var pgtbl; 193 | if(watch_once == true){ 194 | console.log('WATCHDOG: One off watch started.'); 195 | }else{ 196 | console.log('WATCHDOG: Started'); 197 | } 198 | for (var pgtbl in 
PostgresCouchContainer) { 199 | console.log('WATCHDOG: Checking ' + pgtbl); 200 | if (typeof PostgresCouchContainer[pgtbl] == "undefined") { 201 | delete PostgresCouchContainer[pgtbl]; 202 | console.log('WATCHDOG: Cleared reaped undefined ' + pgtbl); 203 | } else if (PostgresCouchContainer[pgtbl].alive() == false) { 204 | delete PostgresCouchContainer[pgtbl]; 205 | console.log('WATCHDOG: Reaped dead ' + pgtbl); 206 | } else { 207 | reaperCheck(pgtbl) 208 | } 209 | } 210 | if(watch_once == false){ 211 | setTimeout(function() { 212 | feedsWatchdog(false); 213 | }, feedswatchdog_interval); 214 | } 215 | } 216 | 217 | function pgWatchdog(){ 218 | if(postgres_dead == true){ 219 | //feedsWatchdog(true); ?? 220 | console.log('PG_WATCHDOG: reconnecting'); 221 | connectPostgres(); 222 | }else{ 223 | //test postgres connection 224 | console.log('PG_WATCHDOG: OK'); //comment out if too much info 225 | } 226 | setTimeout(function() { 227 | pgWatchdog(); 228 | }, 15000); 229 | 230 | } 231 | 232 | 233 | function pgtableCheck(pgtbl) { 234 | 235 | var sql = "SELECT EXISTS (SELECT 1 FROM pg_catalog.pg_class c JOIN "; 236 | sql += "pg_catalog.pg_namespace n ON n.oid = c.relnamespace "; 237 | sql += "WHERE n.nspname = 'public' AND c.relkind = 'r' "; 238 | sql += "AND c.relname = '" + pgtbl + "') AS mytest"; 239 | pgclient.query(sql, function(err, result) { 240 | if (err) { 241 | console.error(sql, err); 242 | process.exit(); 243 | } else { 244 | if (result.rows[0].mytest.toString() == 'false') { 245 | sql = "CREATE TABLE " + pgtbl + " "; 246 | sql += "(id text, doc jsonb, CONSTRAINT "; 247 | sql += pgtbl + "_pkey PRIMARY KEY (id) ) "; 248 | 249 | //also need to set since_checkpoints.since to 0 250 | 251 | pgclient.query(sql, function(err, result) { 252 | if (err) { 253 | console.error(sql, err); 254 | process.exit(); 255 | } else { 256 | console.log(pgtbl + ': pgtable created'); 257 | } 258 | }); 259 | } 260 | } 261 | }); 262 | } 263 | 264 | var http = require("http"); 265 | 266 | 267 | function onRequest(request, response) { 268 | response.writeHead(200, { 269 | "Content-Type": "text/plain" 270 | }); 271 | 272 | console.log(request.url); 273 | switch (request.url) { 274 | case '/_finder': 275 | response.write("Starting FINDER\n"); 276 | findFeeds(true); 277 | break; 278 | case '/_watchdog': 279 | response.write("Starting WATCHDOG\n"); 280 | feedsWatchdog(true); 281 | break; 282 | case '/_status': 283 | var status = []; 284 | for (var pgtbl in PostgresCouchContainer) { 285 | if (typeof PostgresCouchContainer[pgtbl] == "undefined") { //is possble that watchdog has cleared dead object 286 | status[pgtbl] = { alive: false, checkpoint: false }; 287 | } else { 288 | status.push ( { feed: pgtbl, status: { alive: PostgresCouchContainer[pgtbl].alive(), 289 | status: PostgresCouchContainer[pgtbl].status(), 290 | since: PostgresCouchContainer[pgtbl].since().toString(), 291 | since_checkpoint: PostgresCouchContainer[pgtbl].since_checkpoint().toString() 292 | //% complete 293 | //time running 294 | //pg rec cound, pg table size, 295 | //couch rec count, couch update seq, couch size? 
296 | } 297 | } ); 298 | } 299 | } 300 | response.write(JSON.stringify(status)); 301 | 302 | break; 303 | default: 304 | response.write("OK\n"); 305 | } 306 | response.end(); 307 | } 308 | 309 | http.createServer(onRequest).listen(control_port); 310 | console.log('Listening on port ' + control_port); 311 | 312 | connectPostgres(true); //connect and run feedFinder once 313 | 314 | setTimeout(function() { 315 | pgWatchdog(); 316 | }, pgwatchdog_interval); 317 | 318 | setTimeout(function() { 319 | findFeeds(false); //with timeout reinvoke itself 320 | }, findfeeds_interval); 321 | 322 | setTimeout(function() { 323 | feedsWatchdog(false); //with timout to reinvoke itself 324 | }, feedswatchdog_interval); 325 | 326 | 327 | 328 | 329 | -------------------------------------------------------------------------------- /bin/index.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var pg = require('pg'); 4 | var PostgresCouchDB = require('../lib'); 5 | 6 | 7 | 8 | //Note there is an error in the simple example which i have not tracked down/fixed 9 | //yet it will not restart the stream from where it left off if the feeder is stopped 10 | // 11 | //I am working on the daemon.js in same direcory as this which restarts happily. 12 | // 13 | 14 | var settings = 15 | { 16 | couchdb: { 17 | url: 'http://192.168.3.21:5984', 18 | pgtable: 'example', 19 | database: 'example' 20 | } 21 | }; 22 | 23 | pgclient = new pg.Client("postgres://mike@localhost/pgdatabase"); 24 | 25 | 26 | pgclient.connect(function(err) { 27 | if (err) { 28 | if(err.code == 'ECONNREFUSED'){ //try to catch here but i dont think works 29 | console.error('ERROR: Connection to postgres refused', err); 30 | }else{ 31 | console.error('ERROR: Could not connect to postgres', err); 32 | } 33 | process.exit(); 34 | } else { 35 | console.log('Connected to postgres'); 36 | } 37 | }) ; 38 | 39 | 40 | initial_since = get_initial_since(settings.couchdb.pgtable); 41 | 42 | createImporter(); 43 | 44 | 45 | function createImporter(){ 46 | settings.since = initial_since; 47 | var importer = new PostgresCouchDB(pgclient, settings ); 48 | 49 | importer.start(); 50 | 51 | //enable what event you want to watch 52 | importer.events.on('connect', console.log); 53 | importer.events.on('checkpoint', console.log); 54 | importer.events.on('checkpoint.error', function(msg, err) { 55 | console.error(msg, err); 56 | process.exit(1); 57 | }); 58 | 59 | //importer.events.on('change', console.log); //very noisy 60 | importer.events.on('change.error', function(feed, change, err) { 61 | console.error(feed, err.body, err); 62 | }); 63 | 64 | importer.events.on('error', function(msg, err) { console.error(msg, err); }); 65 | 66 | //importer.events.on('drain', console.log); 67 | 68 | importer.events.on('stop', function(key) { 69 | console.log(key + ': stopped'); 70 | }); 71 | 72 | } 73 | 74 | 75 | 76 | function get_initial_since(feedname) { 77 | var sql = ''; 78 | sql = "SELECT since FROM since_checkpoints WHERE pgtable='" + feedname + "' AND enabled=True"; 79 | pgclient.query(sql, function(err, result) { 80 | if (err) { 81 | console.error(feedname + ": Could not get pgtables and checkpoints with: " + sql, err); 82 | process.exit(); 83 | } else { 84 | if (result.rows.length > 0) { 85 | console.log(feedname + ': initial since=' + result.rows[0].since); 86 | initial_since = result.rows[0].since; 87 | } else { 88 | sql = "INSERT INTO since_checkpoints "; 89 | sql += "(pgtable, since, enabled) VALUES "; 90 | sql += 
"('" + feedname + "', 0, True)"; 91 | pgclient.query(sql, function(err, result) { 92 | if (err) { 93 | console.error(feedname + ': Unable to insert row "' + feedname + '"into table', sql, err); 94 | process.exit(); 95 | } else { 96 | console.log(feedname + ': Added to since_checkpoint table'); 97 | initial_since = 0; 98 | } 99 | }); 100 | } 101 | } 102 | }); 103 | 104 | } 105 | 106 | -------------------------------------------------------------------------------- /config/daemon.js.example: -------------------------------------------------------------------------------- 1 | function DaemonSettings() { 2 | var credentials = { 3 | postgres: { 4 | database: "couchplay", 5 | username: "mike", 6 | password: "mike", 7 | host: "localhost" 8 | }, 9 | couchdb: { 10 | url: 'http://127.0.0.1:5984' 11 | } 12 | }; 13 | 14 | return credentials; 15 | }; 16 | 17 | module.exports = DaemonSettings; 18 | 19 | -------------------------------------------------------------------------------- /daemon-README.md: -------------------------------------------------------------------------------- 1 | Setup Postgresql database and CouchDb settings in 2 | config/daemon.js 3 | 4 | You can use example file: 5 | ``` 6 | cp config/daemon.js.example config/daemon.js 7 | ``` 8 | 9 | * password can be optional. For example: 10 | ``` 11 | function DaemonSettings() { 12 | var credentials = { 13 | postgres: { 14 | database: "couchplay", 15 | username: "mike", 16 | host: "localhost" 17 | }, 18 | couchdb: { 19 | url: 'http://127.0.0.1:5984' 20 | } 21 | }; 22 | 23 | return credentials; 24 | }; 25 | 26 | module.exports = DaemonSettings; 27 | ``` 28 | 29 | You will need 3 terminal open for: DAEMON, PGSQL, CURL 30 | 31 | Start up daemon 32 | 33 | % ./bin/daemon.js 34 | DAEMON terminal: Listening on port 8888 35 | DAEMON terminal: Connected to postgres 36 | DAEMON terminal: FINDER: One off find started. 37 | DAEMON terminal: FINDER: 0 dbs to check found 38 | DAEMON terminal: PG_WATCHDOG: OK 39 | 40 | 41 | See what happening (TODO: change to /_feeds_status - add /_status about the daemon itself) 42 | 43 | CURL terminal: $ curl 127.0.0.1:8888/_status 44 | CURL terminal: [] 45 | 46 | 47 | Add new db to follow 48 | 49 | PGSQL terminal: INSERT INTO since_checkpoints (pgtable, since, enabled) VALUES ('articlespg',0,true); 50 | 51 | 52 | Wake up the finder - note: this will run periodically so you can just wait a few mins at this point 53 | 54 | CURL terminal: $ curl 127.0.0.1:8888/_finder 55 | 56 | 57 | The daemon should now see the new feed to follow 58 | 59 | DAEMON terminal: /_finder 60 | DAEMON terminal: FINDER: One off find started. 
61 | DAEMON terminal: FINDER: 1 dbs to check found 62 | DAEMON terminal: FINDER: Found articlespg processing changes since: 0 63 | DAEMON terminal: articlespg: {"db_name":"articlespg","doc_count":63338,"doc_del_count":0,"update_seq":63338,"purge_seq":0,"compact_running":false,"disk_size":206778481,"data_size":206048077,"instance_start_time":"1418803415250170","disk_format_version":6,"committed_update_seq":63338} 64 | DAEMON terminal: articlespg: Starting checkpointer 65 | DAEMON terminal: articlespg: Checkpoint 938 is current next check in: 10 seconds 66 | DAEMON terminal: PG_WATCHDOG: OK 67 | DAEMON terminal: WATCHDOG: Started 68 | DAEMON terminal: WATCHDOG: Checking articlespg 69 | DAEMON terminal: PG_WATCHDOG: OK 70 | DAEMON terminal: FINDER: Started 71 | DAEMON terminal: WATCHDOG: articlespg ok 72 | DAEMON terminal: articlespg: Checkpoint set to 4510 next check in 3 seconds 73 | DAEMON terminal: FINDER: 1 dbs to check found 74 | DAEMON terminal: articlespg: Checkpoint set to 8194 next check in 3 seconds 75 | DAEMON terminal: articlespg: Checkpoint set to 12084 next check in 3 seconds 76 | DAEMON terminal: articlespg: Checkpoint set to 16020 next check in 3 seconds 77 | DAEMON terminal: PG_WATCHDOG: OK 78 | DAEMON terminal: /_status 79 | DAEMON terminal: articlespg: Checkpoint set to 19680 next check in 3 seconds 80 | DAEMON terminal: articlespg: Checkpoint set to 23608 next check in 3 seconds 81 | DAEMON terminal: articlespg: Checkpoint set to 27310 next check in 3 seconds 82 | DAEMON terminal: PG_WATCHDOG: OK 83 | DAEMON terminal: articlespg: Checkpoint set to 31152 next check in 3 seconds 84 | DAEMON terminal: articlespg: Checkpoint set to 34923 next check in 3 seconds 85 | DAEMON terminal: articlespg: Checkpoint set to 38783 next check in 3 seconds 86 | DAEMON terminal: PG_WATCHDOG: OK 87 | DAEMON terminal: articlespg: Checkpoint set to 42680 next check in 3 seconds 88 | DAEMON terminal: /_status 89 | DAEMON terminal: articlespg: Checkpoint set to 46601 next check in 3 seconds 90 | DAEMON terminal: articlespg: Checkpoint set to 50549 next check in 3 seconds 91 | DAEMON terminal: articlespg: Checkpoint set to 54489 next check in 3 seconds 92 | DAEMON terminal: PG_WATCHDOG: OK 93 | DAEMON terminal: articlespg: Checkpoint set to 58512 next check in 3 seconds 94 | DAEMON terminal: articlespg: Checkpoint set to 62484 next check in 3 seconds 95 | DAEMON terminal: articlespg: Checkpoint set to 63338 next check in 3 seconds 96 | DAEMON terminal: WATCHDOG: Started 97 | DAEMON terminal: WATCHDOG: Checking articlespg 98 | DAEMON terminal: PG_WATCHDOG: OK 99 | DAEMON terminal: WATCHDOG: articlespg ok 100 | DAEMON terminal: articlespg: Checkpoint 63338 is current next check in: 10 seconds 101 | 102 | 103 | 104 | While the daemon was proccessing through the change log in the CURL terminal: 105 | 106 | CURL terminal: $ curl 127.0.0.1:8888/_status 107 | CURL terminal: [{"feed":"articlespg","status":{"alive":true,"status":"Following","since":"17832","since_checkpoint":"16019"}}] 108 | CURL terminal: $ curl 127.0.0.1:8888/_status 109 | CURL terminal: [{"feed":"articlespg","status":{"alive":true,"status":"Following","since":"43822","since_checkpoint":"42679"}}]$ 110 | CURL terminal: $ curl 127.0.0.1:8888/_status 111 | CURL terminal: [{"feed":"articlespg","status":{"alive":true,"status":"Following","since":"63338","since_checkpoint":"63338"}}]$ 112 | CURL terminal: $ curl 127.0.0.1:8888/_status 113 | CURL terminal: 
[{"feed":"articlespg","status":{"alive":true,"status":"Following","since":"63338","since_checkpoint":"63338"}}] 114 | 115 | 116 | If we dont want this feed anymore we can disable or delete it from the since_checkpoints table 117 | 118 | PGSQL terminal: UPDATE since_checkpoints SET enabled=false WHERE pgtable='articlespg'; 119 | 120 | 121 | We can either wait for the watchdog or force it to run 122 | 123 | CURL terminal: $ curl 127.0.0.1:8888/_watchdog 124 | CURL terminal: Starting WATCHDOG 125 | 126 | 127 | 128 | DAEMON terminal: /_watchdog 129 | DAEMON terminal: WATCHDOG: One off watch started. 130 | DAEMON terminal: WATCHDOG: Checking articlespg 131 | DAEMON terminal: articlespg: stopping stream 132 | DAEMON terminal: articlespg: stopped 133 | DAEMON terminal: WATCHDOG: articlespg feed stopped 134 | 135 | 136 | Lets check what the status is 137 | 138 | CURL terminal: $ curl 127.0.0.1:8888/_status 139 | CURL terminal: [{"feed":"articlespg","status":{"alive":false,"status":"stopped","since":"63338","since_checkpoint":"63338"}}]$ 140 | 141 | 142 | Once a stream has been stopped by the watchdog it will then be reaped the next time the watchdog runs again we can force it to run or wait 143 | 144 | CURL terminal: $ curl 127.0.0.1:8888/_watchdog 145 | CURL terminal: Starting WATCHDOG 146 | 147 | Second pass its now gone 148 | 149 | DAEMON terminal: /_watchdog 150 | DAEMON terminal: WATCHDOG: One off watch started. 151 | DAEMON terminal: WATCHDOG: Checking articlespg 152 | DAEMON terminal: WATCHDOG: Reaped dead articlespg 153 | 154 | To be sure: 155 | 156 | CURL terminal: $ curl 127.0.0.1:8888/_status 157 | CURL terminal: [] 158 | 159 | 160 | We can now enable it again 161 | 162 | PGSQL terminal: UPDATE since_checkpoints SET enabled=true WHERE pgtable='articlespg'; 163 | 164 | 165 | 166 | And start the finder 167 | 168 | CURL terminal: curl 127.0.0.1:8888/_finder 169 | CURL terminal: Starting FINDER 170 | 171 | 172 | It will now start the feed again 173 | 174 | DAEMON terminal: /_finder 175 | DAEMON terminal: FINDER: One off find started. 176 | DAEMON terminal: FINDER: 1 dbs to check found 177 | DAEMON terminal: FINDER: Found articlespg processing changes since: 63338 178 | DAEMON terminal: articlespg: {"db_name":"articlespg","doc_count":63338,"doc_del_count":0,"update_seq":63338,"purge_seq":0,"compact_running":false,"disk_size":206778481,"data_size":206048077,"instance_start_time":"1418803415250170","disk_format_version":6,"committed_update_seq":63338} 179 | DAEMON terminal: articlespg: Starting checkpointer 180 | DAEMON terminal: articlespg: Checkpoint 63338 is current next check in: 10 seconds 181 | 182 | CURL terminal: $ curl 127.0.0.1:8888/_status 183 | CURL terminal: [{"feed":"articlespg","status":{"alive":true,"status":"Following","since":"63338","since_checkpoint":"63338"}}] 184 | 185 | 186 | ----------------------------------- 187 | 188 | What if postgres dies (unlikley) or something else upsets it. 189 | 190 | Stop postgres 191 | 192 | 193 | PG_WATCHDOG: OK 194 | articlespg: Checkpoint 63338 is current next check in: 10 seconds 195 | PG_WATCHDOG: OK 196 | articlespg: Checkpoint 63338 is current next check in: 10 seconds 197 | PG_WATCHDOG: OK 198 | articlespg: {"db_name":"articlespg","doc_count":63338,"doc_del_count":0,"update_seq":63338,"purge_seq":0,"compact_running":false,"disk_size":206778481,"data_size":206048077,"instance_start_time":"1418803415250170","disk_format_version":6,"committed_update_seq":63338} 199 | Postgresl connection died - Node NOT Exiting... 
{ [error: terminating connection due to administrator command] 200 | name: 'error', 201 | length: 109, 202 | severity: 'FATAL', 203 | code: '57P01', 204 | detail: undefined, 205 | hint: undefined, 206 | position: undefined, 207 | internalPosition: undefined, 208 | internalQuery: undefined, 209 | where: undefined, 210 | file: 'postgres.c', 211 | line: '2873', 212 | routine: 'ProcessInterrupts' } 213 | WATCHDOG: One off watch started. 214 | WATCHDOG: Checking articlespg 215 | articlespg: stopping stream 216 | articlespg: stopped 217 | WATCHDOG: articlespg postgres dead feed stopped 218 | PG_WATCHDOG: reconnecting 219 | ERROR: Connection to postgres refused { [Error: connect ECONNREFUSED] 220 | code: 'ECONNREFUSED', 221 | errno: 'ECONNREFUSED', 222 | syscall: 'connect' } 223 | WATCHDOG: Started 224 | WATCHDOG: Checking articlespg 225 | WATCHDOG: Reaped dead articlespg 226 | PG_WATCHDOG: reconnecting 227 | ERROR: Connection to postgres refused { [Error: connect ECONNREFUSED] 228 | code: 'ECONNREFUSED', 229 | errno: 'ECONNREFUSED', 230 | syscall: 'connect' } 231 | FINDER: postgres is dead 232 | 233 | 234 | The daemon will terminate all feeds and try to reconnect to postgres, once reconnected it will bring the feeds back up. 235 | 236 | 237 | PG_WATCHDOG: reconnecting 238 | Connected to postgres 239 | PG_WATCHDOG: OK 240 | PG_WATCHDOG: OK 241 | PG_WATCHDOG: OK 242 | WATCHDOG: Started 243 | FINDER: Started 244 | FINDER: 1 dbs to check found 245 | FINDER: Found articlespg processing changes since: 63338 246 | articlespg: {"db_name":"articlespg","doc_count":63338,"doc_del_count":0,"update_seq":63338,"purge_seq":0,"compact_running":false,"disk_size":206778481,"data_size":206048077,"instance_start_time":"1418803415250170","disk_format_version":6,"committed_update_seq":63338} 247 | articlespg: Starting checkpointer 248 | articlespg: Checkpoint 63338 is current next check in: 10 seconds 249 | articlespg: Checkpoint 63338 is current next check in: 10 seconds 250 | 251 | 252 | ------------------ 253 | 254 | What happens if couchdb dies? 255 | 256 | PG_WATCHDOG: OK 257 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 258 | WATCHDOG: Started 259 | WATCHDOG: Checking articlespg 260 | WATCHDOG: articlespg ok 261 | PG_WATCHDOG: OK 262 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 263 | articlespg: Error connection refused. Sleeping for: 37 seconds { [Error: connect ECONNREFUSED] 264 | code: 'ECONNREFUSED', 265 | errno: 'ECONNREFUSED', 266 | syscall: 'connect' } 267 | FINDER: Started 268 | FINDER: 1 dbs to check found 269 | PG_WATCHDOG: OK 270 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 271 | PG_WATCHDOG: OK 272 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 273 | PG_WATCHDOG: OK 274 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 275 | PG_WATCHDOG: OK 276 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 277 | articlespg: Error connection refused. 
Sleeping for: 46 seconds { [Error: connect ECONNREFUSED] 278 | code: 'ECONNREFUSED', 279 | errno: 'ECONNREFUSED', 280 | syscall: 'connect' } 281 | WATCHDOG: Started 282 | WATCHDOG: Checking articlespg 283 | WATCHDOG: articlespg ok 284 | PG_WATCHDOG: OK 285 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 286 | PG_WATCHDOG: OK 287 | articlespg: Checkpoint 63337 is current next check in: 10 seconds 288 | FINDER: Started 289 | FINDER: 1 dbs to check found 290 | PG_WATCHDOG: OK 291 | 292 | 293 | In the curl console when couch was dead: 294 | 295 | $ curl 127.0.0.1:8888/_status 296 | [{"feed":"articlespg","status":{"alive":true,"status":"Error: Not connected to couch server trying to reconnect.","since":"63337","since_checkpoint":"63337"}}] 297 | 298 | And when couch came back: 299 | 300 | $ curl 127.0.0.1:8888/_status 301 | [{"feed":"articlespg","status":{"alive":true,"status":"Following","since":"63338","since_checkpoint":"63337"}}]$ 302 | 303 | 304 | 305 | 306 | -------------------------------------------------------------------------------- /lib/index.js: -------------------------------------------------------------------------------- 1 | var follow = require('follow'); 2 | var EventEmitter = require('events').EventEmitter; 3 | var async = require('async'); 4 | var Q = require('kew'); 5 | var pgescape = require('pg-escape'); 6 | 7 | 8 | function PostgresCouchDB(pgclient, options) { 9 | 10 | var opts = set_options(options); 11 | var events = new EventEmitter(); 12 | 13 | var changecount = opts.couchdb.since; 14 | if(changecount < 0) changecount = 0; 15 | var previous_since = changecount; 16 | 17 | //console.error('start changecount', opts.couchdb.since, changecount, previous_since); 18 | 19 | var alive = true; 20 | var status = ''; 21 | 22 | function set_options(opts) { 23 | opts = opts || {}; 24 | opts.couchdb = opts.couchdb || {}; 25 | return { 26 | couchdb: { 27 | url: opts.couchdb.url || process.env.COUCHDB_URL || 'http://localhost:5984', 28 | username: opts.couchdb.username || process.env.COUCHDB_USERNAME, 29 | password: opts.couchdb.password || process.env.COUCHDB_PASSWORD, 30 | database: opts.couchdb.database || process.env.COUCHDB_DATABASE, 31 | since: opts.couchdb.since || 0, 32 | pgtable: (opts.couchdb.pgtable || process.env.PGTABLE).replace(/\-/g,"_") 33 | }, 34 | map: opts.map || null 35 | }; 36 | } 37 | var pgtable = opts.couchdb.pgtable; 38 | 39 | 40 | var queue = async.queue(process_change, 1); 41 | queue.drain = function() { 42 | events.emit('drain', opts.couchdb.pgtable + ': drain'); 43 | }; 44 | 45 | var db_url = [opts.couchdb.url, opts.couchdb.database].join('/'); 46 | var auth; 47 | if (opts.couchdb.username && opts.couchdb.password) { 48 | auth = 'Basic ' + new Buffer([opts.couchdb.username, opts.couchdb.password].join(':')).toString('base64'); 49 | } 50 | var stream = new follow.Feed({ 51 | db: [opts.couchdb.url, opts.couchdb.database].join('/'), 52 | include_docs: true 53 | }); 54 | 55 | 56 | setTimeout(function() { 57 | events.emit('checkpoint', opts.couchdb.pgtable + ': Starting checkpointer'); 58 | checkpoint_changes(changecount) 59 | }, 1000); 60 | 61 | 62 | function checkpoint_changes(last_changecount) { 63 | var ckwait = 20 * 1000; 64 | var mychangecount = changecount; 65 | if(alive){ 66 | 67 | //console.error('in checkpoint_changes', last_changecount, changecount); 68 | 69 | if(last_changecount < mychangecount){ 70 | //do insert into stats table here? 
71 | //maybe something like (changecount - last_changecount), NOW() 72 | //then we only collect stats when there is a change and can assume 0 changes otherwise 73 | //can then work out changes/sec etc 74 | //at the moment not possible to do stats on what type of change 75 | // need to add to update and destroy then don't collect (changecount - last_changecount) as can be calculated 76 | pgclient.query("UPDATE since_checkpoints SET since=" + mychangecount + " WHERE pgtable='" + pgtable + "'", function(err, result) { 77 | if (err) { 78 | //catch postgres disconnects and other errors? 79 | events.emit('checkpoint.error', pgtable + ": UNABLE TO SET SINCE CHECKPOINT to " + mychangecount, err); 80 | } else { 81 | events.emit('checkpoint', pgtable + ': Checkpoint set to ' + mychangecount + ' next check in ' + (ckwait / 1000) + " seconds"); 82 | } 83 | }); 84 | }else{ 85 | // ckwait = Math.floor(Math.random() * ((60000*2) - 30000) + 30000) 86 | ckwait = 120 * 1000; //increase wait as may be idle for a while - adjust to suit needs 87 | events.emit('checkpoint', pgtable + ": Checkpoint " + mychangecount + ' is current next check in: ' + Math.floor(ckwait / 1000) + ' seconds'); 88 | } 89 | previous_since = mychangecount; //for /_status 90 | setTimeout(function() { 91 | checkpoint_changes(previous_since) 92 | }, ckwait); 93 | } 94 | } 95 | 96 | function update(pgtable, key, doc) { 97 | var deferred = Q.defer(); 98 | var sql = ''; 99 | sql = "SELECT doc->>'_rev' as rev FROM " + pgtable + " WHERE id='" + key + "'"; 100 | pgclient.query(sql, function(err, result) { 101 | if (err) { 102 | //console.error(pgtable, err); 103 | deferred.reject(err); 104 | } else { 105 | //i think it may be better to just check full revs and update if different. 106 | if (result.rows.length > 0) { 107 | var doc_rev_num = doc._rev.split('-')[0]; 108 | var pg_rev_num = result.rows[0].rev.split('-')[0]; 109 | 110 | if (doc._rev != result.rows[0].rev) { 111 | // console.log(pgtable + ": update " + doc._id + " pg_rev: " + pg_rev_num + " < doc_rev: " + doc_rev_num); 112 | 113 | var json_doc = pgescape.literal(JSON.stringify(doc)); 114 | sql = "UPDATE " + pgtable + " SET doc=" + json_doc + " WHERE id='" + key + "'"; 115 | pgclient.query(sql, function(err, result) { 116 | if (err) { 117 | console.error(pgtable + ": " + sql, err); 118 | deferred.reject(err); 119 | } else { 120 | // console.log(pgtable + ": " + key + " updated _rev: " + doc._rev); 121 | deferred.resolve(doc._id + ' ' + doc._rev); 122 | } 123 | }); 124 | 125 | }else{ 126 | //console.log(pgtable + ": NOOP " + doc._id + " pg_rev: " + pg_rev_num + " = doc_rev: " + doc_rev_num); 127 | deferred.resolve(doc._id + ' ' + doc._rev); 128 | } 129 | } else { 130 | //new doc need to add 131 | if (doc.type != "Harms::AttachmentAccessingLogXXXX") { 132 | var json_doc = pgescape.literal(JSON.stringify(doc)); 133 | sql = "INSERT INTO " + pgtable + " (id, doc) VALUES ('" + key + "', " + json_doc + ")"; 134 | pgclient.query(sql, function(err, result) { 135 | if (err) { 136 | //console.error(pgtable + ": " + ' ' + sql, err); 137 | deferred.reject(err); 138 | } else { 139 | // console.log(pgtable + ": " + doc._id + " added"); 140 | deferred.resolve(doc._id); 141 | } 142 | }); 143 | } else { 144 | //console.log(pgtable + ": " + doc._id + " is " + doc.type + " ignoring"); 145 | deferred.resolve('ignoring'); 146 | } 147 | } 148 | } 149 | }); 150 | return deferred.promise; 151 | } 152 | 153 | 154 | function destroy(pgtable, key) { 155 | var deferred = Q.defer(); 156 | //should we first check if there is a record to 
delete - does this matter? 157 | 158 | pgclient.query("SELECT id FROM " + pgtable + " WHERE id='" + key + "'", function(err, result) { 159 | if (err) { 160 | //console.error(pgtable, err); 161 | deferred.reject(err); 162 | } else { 163 | if (result.rows.length > 0) { 164 | var sql = "DELETE FROM " + pgtable + " WHERE id='" + key + "'"; 165 | pgclient.query(sql, function(err, result) { 166 | if (err) { 167 | console.error(pgtable + ": " + sql, err); 168 | deferred.reject(err); 169 | } else { 170 | console.log(pgtable + ": " + key + " deleted"); 171 | deferred.resolve(result.rowCount); 172 | } 173 | }); 174 | } else { 175 | //console.log(pgtable + ": " + key + " does not exist nothing to delete"); 176 | deferred.resolve('nothing to delete'); 177 | } 178 | } 179 | }); 180 | return deferred.promise; 181 | } 182 | 183 | function process_change(change, done) { 184 | var promise; 185 | var deletion = !!change.deleted; 186 | var doc = change.doc; 187 | 188 | if (opts.map) { 189 | doc = opts.map(doc); 190 | } 191 | 192 | if (deletion) { 193 | promise = destroy(opts.couchdb.pgtable, change.id); 194 | } else { 195 | promise = update(opts.couchdb.pgtable, change.id, doc); 196 | } 197 | promise 198 | .then(function(res, body) { 199 | events.emit('change', change); 200 | events.emit('change.success', change); 201 | done(); 202 | }) 203 | .fail(function(err) { 204 | 205 | if (err.code == "EPIPE") { //pg gone away - 206 | console.error('Error EPIPE:' + opts.couchdb.pgtable + ' in change'); 207 | status = 'Error: EPIPE'; 208 | //events.emit('error', opts.couchdb.pgtable + ': EPIPE', err); 209 | stopFollowing(); 210 | } else if (err.code == '42P01'){ 211 | console.error('Error 42P01:' + opts.couchdb.pgtable + ' in change'); 212 | status = 'Error: table not found in postgres database'; 213 | stopFollowing(); 214 | // } else if (err.code == '57P01'){ //not able to catch here 215 | // console.error('Error 57P01:' + opts.couchdb.pgtable + ' in change'); 216 | // status = 'Error: 57P01'; 217 | // stopFollowing(); 218 | } else if (err.code == "ECONNRESET") { //pg gone away - can't catch here for some reason - try in changes.error 219 | status = 'Error: ECONNRESET'; 220 | stopFollowing(); 221 | console.error('Error ECONNRESET:' + opts.couchdb.pgtable + ' in change'); 222 | 223 | } else if (err.code == 'ECONNREFUSED') { //couchdb error 224 | status = 'Error: Not connected to couch server trying to reconnect.'; 225 | var wait = Math.floor(Math.random() * (60000 - 10000) + 10000); //mix up wait time as there could be many 226 | console.error('Error ECONNREFUSED:' + opts.couchdb.pgtable + ' in change'); 227 | setTimeout(function() { 228 | stream.restart(); 229 | }, wait); 230 | } 231 | events.emit('change', change, err); 232 | events.emit('change.error', opts.couchdb.pgtable, change, err); 233 | done(err); 234 | }); 235 | 236 | changecount++; 237 | //if (changecount % 500 == 0) { 238 | // checkpoint_changes(opts.couchdb.pgtable, changecount); 239 | //} 240 | } 241 | 242 | 243 | function startFollowing() { 244 | 245 | if (auth) stream.headers.Authorization = auth; 246 | 247 | stream.since = previous_since; 248 | stream.inactivity_ms = 30000; 249 | // stream.heartbeat = 10000; 250 | 251 | stream.on('confirm', function(db_info) { 252 | //console.log(JSON.stringify(db_info)); 253 | events.emit('connect', opts.couchdb.pgtable + ': ' + JSON.stringify(db_info)); 254 | status = "Following"; 255 | }); 256 | stream.on('change', function(change) { 257 | events.emit('change', opts.couchdb.pgtable + ': ' + change); 258 | 
events.emit('change.start', opts.couchdb.pgtable + ': ' + change); 259 | // pause the stream 260 | stream.pause(); 261 | queue.push(change, function() { 262 | // unpause the stream 263 | stream.resume(); 264 | }); 265 | }); 266 | stream.on('error', function(err) { 267 | if (err.code == 'ECONNREFUSED') { //couchdb error 268 | status = 'Error: Not connected to couch server trying to reconnect.'; 269 | var wait = Math.floor(Math.random() * (60000 - 10000) + 10000); //mix up wait time as there could be many 270 | events.emit('error', opts.couchdb.pgtable + ': Error connection refused. Sleeping for: ' + Math.floor(wait / 1000) + ' seconds', err); 271 | setTimeout(function() { 272 | stream.restart(); 273 | }, wait); 274 | } else if (err.toString().indexOf("no_db_file") > 0) { //couchdb error 275 | status = 'Error: db not found on couch server'; 276 | events.emit('error', opts.couchdb.pgtable + ': couchdb not found', err); 277 | stopFollowing(); 278 | } else { 279 | status = 'unknown'; 280 | events.emit('error', opts.couchdb.pgtable + ': stream.on error #' + err + '#', err); 281 | } 282 | }); 283 | 284 | stream.follow(); 285 | // events.stop = stream.stop.bind(stream); 286 | // return events; 287 | 288 | //TODO: set started_on -like couch rep status page 289 | // also last update? 290 | } 291 | 292 | 293 | function stopFollowing() { 294 | console.log(opts.couchdb.pgtable + ': stopping stream'); 295 | stream.stop(); 296 | stream.removeAllListeners(); 297 | events.emit('stop', opts.couchdb.pgtable); 298 | events.removeAllListeners(); 299 | status = 'stopped'; 300 | alive = false; 301 | } 302 | 303 | 304 | 305 | 306 | 307 | function is_alive() { 308 | return alive; 309 | } 310 | function get_status() { 311 | return status; 312 | } 313 | function get_since () { 314 | return changecount; 315 | } 316 | function get_since_checkpoint () { 317 | return previous_since; 318 | } 319 | 320 | 321 | return { 322 | events: events, 323 | alive: function alive() { 324 | return is_alive() 325 | }, 326 | status: function () { 327 | return get_status() 328 | }, 329 | since: function () { 330 | return get_since() 331 | }, 332 | since_checkpoint: function () { 333 | return get_since_checkpoint() 334 | }, 335 | start: function start() { 336 | return startFollowing() 337 | }, 338 | stop: function stop() { 339 | return stopFollowing() 340 | } 341 | }; 342 | // return events; 343 | 344 | } 345 | // 346 | //PostgresCouchDB.prototype.is_alive = function (){ 347 | //} 348 | 349 | module.exports = PostgresCouchDB; 350 | 351 | //module.exports = { 'PostgresCouchDB': PostgresCouchDB }; 352 | -------------------------------------------------------------------------------- /nagios-check_couch_postgres_count: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/bash 2 | 3 | # 4 | # nagios check script to make sure couchdb & postgres table 5 | # doc counts match and make sure couch-to-postgres is working correctly 6 | # 7 | # note this needs the json cli - npm install -g json 8 | # 9 | # check_couch_postgres_count.sh 192.168.0.12 192.168.0.13 10 | 11 | couch_host=$1 12 | postgres_host=$2 13 | 14 | difference_threashold=10 15 | 16 | dbs=`curl -s http://$couch_host:5984/_all_dbs | json -ga | grep -v -E '_replicator|_users|articles|feeds|fluent' | sort` 17 | 18 | err="" 19 | msg="$dbs" 20 | exitcode=0 21 | 22 | for db in $dbs; do 23 | couch_count=`curl -s http://$couch_host:5984/$db | json doc_count` 24 | #if [ -z "$couch_count" ]; then continue; fi #no db 25 | 26 | postgres_count=`psql -h $postgres_host -d mydb 
-Upgsql -wqA -c \ 27 | "SELECT count(id) FROM $db" | tail -2 | head -1` 28 | 29 | if [ "$couch_count" -ne "$postgres_count" ]; then 30 | if [ "$couch_count" -gt "$postgres_count" ]; then 31 | difference=$(($couch_count - $postgres_count)) 32 | else 33 | difference=$(($postgres_count - $couch_count)) 34 | fi 35 | 36 | if [[ $difference -gt $difference_threashold ]]; then 37 | err+="ERROR - $db count difference $couch_host: $couch_count != $postgres_host: $postgres_count - difference: $difference\n" 38 | exitcode=2 39 | else 40 | err+="WARNING - $db count difference $couch_host: $couch_count != $postgres_host: $postgres_count - difference: $difference\n" 41 | if [[ $exitcode -lt 1 ]]; then 42 | exitcode=1 43 | fi 44 | fi 45 | fi 46 | #echo "$db $couch_count $postgres_count $difference" 47 | done 48 | 49 | if [[ $exitcode -gt 0 ]]; then 50 | echo -e -n $err $msg 51 | exit $exitcode 52 | else 53 | echo -e "OK - $msg - doc count match" 54 | exit $exitcode 55 | fi 56 | -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "couch-to-postgres", 3 | "version": "0.0.5", 4 | "lockfileVersion": 1, 5 | "requires": true, 6 | "dependencies": { 7 | "ajv": { 8 | "version": "5.2.3", 9 | "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.2.3.tgz", 10 | "integrity": "sha1-wG9Zh3jETGsWGrr+NGa4GtGBTtI=", 11 | "requires": { 12 | "co": "4.6.0", 13 | "fast-deep-equal": "1.0.0", 14 | "json-schema-traverse": "0.3.1", 15 | "json-stable-stringify": "1.0.1" 16 | } 17 | }, 18 | "asn1": { 19 | "version": "0.2.3", 20 | "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.3.tgz", 21 | "integrity": "sha1-2sh4dxPJlmhJ/IGAd36+nB3fO4Y=" 22 | }, 23 | "assert-plus": { 24 | "version": "1.0.0", 25 | "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", 26 | "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" 27 | }, 28 | "async": { 29 | "version": "0.9.0", 30 | "resolved": "https://registry.npmjs.org/async/-/async-0.9.0.tgz", 31 | "integrity": "sha1-rDYTsdqb7RtHUQu0ZRuJMeRxRsc=" 32 | }, 33 | "asynckit": { 34 | "version": "0.4.0", 35 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 36 | "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" 37 | }, 38 | "aws-sign2": { 39 | "version": "0.7.0", 40 | "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", 41 | "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" 42 | }, 43 | "aws4": { 44 | "version": "1.6.0", 45 | "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.6.0.tgz", 46 | "integrity": "sha1-g+9cqGCysy5KDe7e6MdxudtXRx4=" 47 | }, 48 | "bcrypt-pbkdf": { 49 | "version": "1.0.1", 50 | "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz", 51 | "integrity": "sha1-Y7xdy2EzG5K8Bf1SiVPDNGKgb40=", 52 | "optional": true, 53 | "requires": { 54 | "tweetnacl": "0.14.5" 55 | } 56 | }, 57 | "boom": { 58 | "version": "4.3.1", 59 | "resolved": "https://registry.npmjs.org/boom/-/boom-4.3.1.tgz", 60 | "integrity": "sha1-T4owBctKfjiJ90kDD9JbluAdLjE=", 61 | "requires": { 62 | "hoek": "4.2.0" 63 | } 64 | }, 65 | "browser-request": { 66 | "version": "0.3.3", 67 | "resolved": "https://registry.npmjs.org/browser-request/-/browser-request-0.3.3.tgz", 68 | "integrity": "sha1-ns5bWsqJopkyJC4Yv5M975h2zBc=" 69 | }, 70 | "buffer-writer": { 71 | "version": "1.0.1", 72 | "resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-1.0.1.tgz", 73 | "integrity": 
"sha1-Iqk2kB4wKa/NdUfrRIfOtpejvwg=" 74 | }, 75 | "caseless": { 76 | "version": "0.12.0", 77 | "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", 78 | "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" 79 | }, 80 | "co": { 81 | "version": "4.6.0", 82 | "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", 83 | "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=" 84 | }, 85 | "combined-stream": { 86 | "version": "1.0.5", 87 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.5.tgz", 88 | "integrity": "sha1-k4NwpXtKUd6ix3wV1cX9+JUWQAk=", 89 | "requires": { 90 | "delayed-stream": "1.0.0" 91 | } 92 | }, 93 | "core-util-is": { 94 | "version": "1.0.2", 95 | "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", 96 | "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" 97 | }, 98 | "cryptiles": { 99 | "version": "3.1.2", 100 | "resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-3.1.2.tgz", 101 | "integrity": "sha1-qJ+7Ig9c4l7FboxKqKT9e1sNKf4=", 102 | "requires": { 103 | "boom": "5.2.0" 104 | }, 105 | "dependencies": { 106 | "boom": { 107 | "version": "5.2.0", 108 | "resolved": "https://registry.npmjs.org/boom/-/boom-5.2.0.tgz", 109 | "integrity": "sha512-Z5BTk6ZRe4tXXQlkqftmsAUANpXmuwlsF5Oov8ThoMbQRzdGTA1ngYRW160GexgOgjsFOKJz0LYhoNi+2AMBUw==", 110 | "requires": { 111 | "hoek": "4.2.0" 112 | } 113 | } 114 | } 115 | }, 116 | "dashdash": { 117 | "version": "1.14.1", 118 | "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", 119 | "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", 120 | "requires": { 121 | "assert-plus": "1.0.0" 122 | } 123 | }, 124 | "debug": { 125 | "version": "0.7.4", 126 | "resolved": "https://registry.npmjs.org/debug/-/debug-0.7.4.tgz", 127 | "integrity": "sha1-BuHqgILCyxTjmAbiLi9vdX+Srzk=" 128 | }, 129 | "delayed-stream": { 130 | "version": "1.0.0", 131 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 132 | "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" 133 | }, 134 | "ecc-jsbn": { 135 | "version": "0.1.1", 136 | "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz", 137 | "integrity": "sha1-D8c6ntXw1Tw4GTOYUj735UN3dQU=", 138 | "optional": true, 139 | "requires": { 140 | "jsbn": "0.1.1" 141 | } 142 | }, 143 | "events": { 144 | "version": "1.0.2", 145 | "resolved": "https://registry.npmjs.org/events/-/events-1.0.2.tgz", 146 | "integrity": "sha1-dYSdz+k9EPsFfDAFWv29UdBqjiQ=" 147 | }, 148 | "extend": { 149 | "version": "3.0.1", 150 | "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.1.tgz", 151 | "integrity": "sha1-p1Xqe8Gt/MWjHOfnYtuq3F5jZEQ=" 152 | }, 153 | "extsprintf": { 154 | "version": "1.3.0", 155 | "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", 156 | "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" 157 | }, 158 | "fast-deep-equal": { 159 | "version": "1.0.0", 160 | "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.0.0.tgz", 161 | "integrity": "sha1-liVqO8l1WV6zbYLpkp0GDYk0Of8=" 162 | }, 163 | "follow": { 164 | "version": "0.11.4", 165 | "resolved": "https://registry.npmjs.org/follow/-/follow-0.11.4.tgz", 166 | "integrity": "sha1-h+mlDUQ/kh0FcE66xBKhSrnZIy8=", 167 | "requires": { 168 | "browser-request": "0.3.3", 169 | "debug": "0.7.4", 170 | "request": "2.83.0" 171 | } 172 | }, 173 | "forever-agent": { 174 | "version": "0.6.1", 175 | "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", 176 | "integrity": 
"sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" 177 | }, 178 | "form-data": { 179 | "version": "2.3.1", 180 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.1.tgz", 181 | "integrity": "sha1-b7lPvXGIUwbXPRXMSX/kzE7NRL8=", 182 | "requires": { 183 | "asynckit": "0.4.0", 184 | "combined-stream": "1.0.5", 185 | "mime-types": "2.1.17" 186 | } 187 | }, 188 | "generic-pool": { 189 | "version": "2.4.2", 190 | "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-2.4.2.tgz", 191 | "integrity": "sha1-iGvFvwvrfblugby7oHiBjeWmJoM=" 192 | }, 193 | "getpass": { 194 | "version": "0.1.7", 195 | "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", 196 | "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", 197 | "requires": { 198 | "assert-plus": "1.0.0" 199 | } 200 | }, 201 | "har-schema": { 202 | "version": "2.0.0", 203 | "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", 204 | "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" 205 | }, 206 | "har-validator": { 207 | "version": "5.0.3", 208 | "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.0.3.tgz", 209 | "integrity": "sha1-ukAsJmGU8VlW7xXg/PJCmT9qff0=", 210 | "requires": { 211 | "ajv": "5.2.3", 212 | "har-schema": "2.0.0" 213 | } 214 | }, 215 | "hawk": { 216 | "version": "6.0.2", 217 | "resolved": "https://registry.npmjs.org/hawk/-/hawk-6.0.2.tgz", 218 | "integrity": "sha512-miowhl2+U7Qle4vdLqDdPt9m09K6yZhkLDTWGoUiUzrQCn+mHHSmfJgAyGaLRZbPmTqfFFjRV1QWCW0VWUJBbQ==", 219 | "requires": { 220 | "boom": "4.3.1", 221 | "cryptiles": "3.1.2", 222 | "hoek": "4.2.0", 223 | "sntp": "2.0.2" 224 | } 225 | }, 226 | "hoek": { 227 | "version": "4.2.0", 228 | "resolved": "https://registry.npmjs.org/hoek/-/hoek-4.2.0.tgz", 229 | "integrity": "sha512-v0XCLxICi9nPfYrS9RL8HbYnXi9obYAeLbSP00BmnZwCK9+Ih9WOjoZ8YoHCoav2csqn4FOz4Orldsy2dmDwmQ==" 230 | }, 231 | "http-signature": { 232 | "version": "1.2.0", 233 | "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", 234 | "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", 235 | "requires": { 236 | "assert-plus": "1.0.0", 237 | "jsprim": "1.4.1", 238 | "sshpk": "1.13.1" 239 | } 240 | }, 241 | "is-typedarray": { 242 | "version": "1.0.0", 243 | "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", 244 | "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" 245 | }, 246 | "isstream": { 247 | "version": "0.1.2", 248 | "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", 249 | "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" 250 | }, 251 | "js-string-escape": { 252 | "version": "1.0.1", 253 | "resolved": "https://registry.npmjs.org/js-string-escape/-/js-string-escape-1.0.1.tgz", 254 | "integrity": "sha1-4mJbrbwNZ8dTPp7cEGjFh65BN+8=" 255 | }, 256 | "jsbn": { 257 | "version": "0.1.1", 258 | "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", 259 | "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", 260 | "optional": true 261 | }, 262 | "json-schema": { 263 | "version": "0.2.3", 264 | "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", 265 | "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" 266 | }, 267 | "json-schema-traverse": { 268 | "version": "0.3.1", 269 | "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz", 270 | "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=" 271 | }, 272 | "json-stable-stringify": { 273 | "version": "1.0.1", 274 | "resolved": 
"https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz", 275 | "integrity": "sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8=", 276 | "requires": { 277 | "jsonify": "0.0.0" 278 | } 279 | }, 280 | "json-stringify-safe": { 281 | "version": "5.0.1", 282 | "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", 283 | "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" 284 | }, 285 | "jsonify": { 286 | "version": "0.0.0", 287 | "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", 288 | "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=" 289 | }, 290 | "jsprim": { 291 | "version": "1.4.1", 292 | "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", 293 | "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", 294 | "requires": { 295 | "assert-plus": "1.0.0", 296 | "extsprintf": "1.3.0", 297 | "json-schema": "0.2.3", 298 | "verror": "1.10.0" 299 | } 300 | }, 301 | "kew": { 302 | "version": "0.5.0-alpha.1", 303 | "resolved": "https://registry.npmjs.org/kew/-/kew-0.5.0-alpha.1.tgz", 304 | "integrity": "sha1-HQWnBqsuML97uDq5XcLaJZSx/vk=" 305 | }, 306 | "mime-db": { 307 | "version": "1.30.0", 308 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.30.0.tgz", 309 | "integrity": "sha1-dMZD2i3Z1qRTmZY0ZbJtXKfXHwE=" 310 | }, 311 | "mime-types": { 312 | "version": "2.1.17", 313 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.17.tgz", 314 | "integrity": "sha1-Cdejk/A+mVp5+K+Fe3Cp4KsWVXo=", 315 | "requires": { 316 | "mime-db": "1.30.0" 317 | } 318 | }, 319 | "oauth-sign": { 320 | "version": "0.8.2", 321 | "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.8.2.tgz", 322 | "integrity": "sha1-Rqarfwrq2N6unsBWV4C31O/rnUM=" 323 | }, 324 | "packet-reader": { 325 | "version": "0.2.0", 326 | "resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-0.2.0.tgz", 327 | "integrity": "sha1-gZ300BC4LV6lZx+KGjrPA5vNdwA=" 328 | }, 329 | "performance-now": { 330 | "version": "2.1.0", 331 | "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", 332 | "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" 333 | }, 334 | "pg": { 335 | "version": "4.5.7", 336 | "resolved": "https://registry.npmjs.org/pg/-/pg-4.5.7.tgz", 337 | "integrity": "sha1-Ra4WsjcGpjRaAyed7MaveVwW0ps=", 338 | "requires": { 339 | "buffer-writer": "1.0.1", 340 | "generic-pool": "2.4.2", 341 | "js-string-escape": "1.0.1", 342 | "packet-reader": "0.2.0", 343 | "pg-connection-string": "0.1.3", 344 | "pg-types": "1.12.1", 345 | "pgpass": "0.0.3", 346 | "semver": "4.3.6" 347 | } 348 | }, 349 | "pg-connection-string": { 350 | "version": "0.1.3", 351 | "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-0.1.3.tgz", 352 | "integrity": "sha1-2hhHsglA5C7hSSvq9l1J2RskXfc=" 353 | }, 354 | "pg-escape": { 355 | "version": "0.0.3", 356 | "resolved": "https://registry.npmjs.org/pg-escape/-/pg-escape-0.0.3.tgz", 357 | "integrity": "sha1-zHomDWn+onY3o8lqEmWYB7a0/nY=" 358 | }, 359 | "pg-types": { 360 | "version": "1.12.1", 361 | "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-1.12.1.tgz", 362 | "integrity": "sha1-1kCH45A7WP+q0nnnWVxSIIoUw9I=", 363 | "requires": { 364 | "postgres-array": "1.0.2", 365 | "postgres-bytea": "1.0.0", 366 | "postgres-date": "1.0.3", 367 | "postgres-interval": "1.1.1" 368 | } 369 | }, 370 | "pgpass": { 371 | "version": "0.0.3", 372 | "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-0.0.3.tgz", 373 | "integrity": "sha1-EuZ+NDsxicLzEgbrycwL7//PkUA=", 
374 | "requires": { 375 | "split": "0.3.3" 376 | } 377 | }, 378 | "postgres-array": { 379 | "version": "1.0.2", 380 | "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-1.0.2.tgz", 381 | "integrity": "sha1-jgsy6wO/d6XAp4UeBEHBaaJWojg=" 382 | }, 383 | "postgres-bytea": { 384 | "version": "1.0.0", 385 | "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz", 386 | "integrity": "sha1-AntTPAqokOJtFy1Hz5zOzFIazTU=" 387 | }, 388 | "postgres-date": { 389 | "version": "1.0.3", 390 | "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.3.tgz", 391 | "integrity": "sha1-4tiXAu/bJY/52c7g/pG9BpdSV6g=" 392 | }, 393 | "postgres-interval": { 394 | "version": "1.1.1", 395 | "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.1.1.tgz", 396 | "integrity": "sha512-OkuCi9t/3CZmeQreutGgx/OVNv9MKHGIT5jH8KldQ4NLYXkvmT9nDVxEuCENlNwhlGPE374oA/xMqn05G49pHA==", 397 | "requires": { 398 | "xtend": "4.0.1" 399 | } 400 | }, 401 | "punycode": { 402 | "version": "1.4.1", 403 | "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", 404 | "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" 405 | }, 406 | "qs": { 407 | "version": "6.5.1", 408 | "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", 409 | "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==" 410 | }, 411 | "request": { 412 | "version": "2.83.0", 413 | "resolved": "https://registry.npmjs.org/request/-/request-2.83.0.tgz", 414 | "integrity": "sha512-lR3gD69osqm6EYLk9wB/G1W/laGWjzH90t1vEa2xuxHD5KUrSzp9pUSfTm+YC5Nxt2T8nMPEvKlhbQayU7bgFw==", 415 | "requires": { 416 | "aws-sign2": "0.7.0", 417 | "aws4": "1.6.0", 418 | "caseless": "0.12.0", 419 | "combined-stream": "1.0.5", 420 | "extend": "3.0.1", 421 | "forever-agent": "0.6.1", 422 | "form-data": "2.3.1", 423 | "har-validator": "5.0.3", 424 | "hawk": "6.0.2", 425 | "http-signature": "1.2.0", 426 | "is-typedarray": "1.0.0", 427 | "isstream": "0.1.2", 428 | "json-stringify-safe": "5.0.1", 429 | "mime-types": "2.1.17", 430 | "oauth-sign": "0.8.2", 431 | "performance-now": "2.1.0", 432 | "qs": "6.5.1", 433 | "safe-buffer": "5.1.1", 434 | "stringstream": "0.0.5", 435 | "tough-cookie": "2.3.3", 436 | "tunnel-agent": "0.6.0", 437 | "uuid": "3.1.0" 438 | } 439 | }, 440 | "safe-buffer": { 441 | "version": "5.1.1", 442 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", 443 | "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" 444 | }, 445 | "semver": { 446 | "version": "4.3.6", 447 | "resolved": "https://registry.npmjs.org/semver/-/semver-4.3.6.tgz", 448 | "integrity": "sha1-MAvG4OhjdPe6YQaLWx7NV/xlMto=" 449 | }, 450 | "sntp": { 451 | "version": "2.0.2", 452 | "resolved": "https://registry.npmjs.org/sntp/-/sntp-2.0.2.tgz", 453 | "integrity": "sha1-UGQRDwr4X3z9t9a2ekACjOUrSys=", 454 | "requires": { 455 | "hoek": "4.2.0" 456 | } 457 | }, 458 | "split": { 459 | "version": "0.3.3", 460 | "resolved": "https://registry.npmjs.org/split/-/split-0.3.3.tgz", 461 | "integrity": "sha1-zQ7qXmOiEd//frDwkcQTPi0N0o8=", 462 | "requires": { 463 | "through": "2.3.8" 464 | } 465 | }, 466 | "sshpk": { 467 | "version": "1.13.1", 468 | "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.13.1.tgz", 469 | "integrity": "sha1-US322mKHFEMW3EwY/hzx2UBzm+M=", 470 | "requires": { 471 | "asn1": "0.2.3", 472 | "assert-plus": "1.0.0", 473 | "bcrypt-pbkdf": "1.0.1", 474 | "dashdash": "1.14.1", 475 
| "ecc-jsbn": "0.1.1", 476 | "getpass": "0.1.7", 477 | "jsbn": "0.1.1", 478 | "tweetnacl": "0.14.5" 479 | } 480 | }, 481 | "stringstream": { 482 | "version": "0.0.5", 483 | "resolved": "https://registry.npmjs.org/stringstream/-/stringstream-0.0.5.tgz", 484 | "integrity": "sha1-TkhM1N5aC7vuGORjB3EKioFiGHg=" 485 | }, 486 | "through": { 487 | "version": "2.3.8", 488 | "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", 489 | "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" 490 | }, 491 | "tough-cookie": { 492 | "version": "2.3.3", 493 | "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.3.3.tgz", 494 | "integrity": "sha1-C2GKVWW23qkL80JdBNVe3EdadWE=", 495 | "requires": { 496 | "punycode": "1.4.1" 497 | } 498 | }, 499 | "tunnel-agent": { 500 | "version": "0.6.0", 501 | "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", 502 | "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", 503 | "requires": { 504 | "safe-buffer": "5.1.1" 505 | } 506 | }, 507 | "tweetnacl": { 508 | "version": "0.14.5", 509 | "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", 510 | "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", 511 | "optional": true 512 | }, 513 | "uuid": { 514 | "version": "3.1.0", 515 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.1.0.tgz", 516 | "integrity": "sha512-DIWtzUkw04M4k3bf1IcpS2tngXEL26YUD2M0tMDUpnUrz2hgzUBlD55a4FjdLGPvfHxS6uluGWvaVEqgBcVa+g==" 517 | }, 518 | "verror": { 519 | "version": "1.10.0", 520 | "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", 521 | "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", 522 | "requires": { 523 | "assert-plus": "1.0.0", 524 | "core-util-is": "1.0.2", 525 | "extsprintf": "1.3.0" 526 | } 527 | }, 528 | "xtend": { 529 | "version": "4.0.1", 530 | "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz", 531 | "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=" 532 | } 533 | } 534 | } 535 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "couch-to-postgres", 3 | "version": "0.0.5", 4 | "description": "Node libary to watch CouchDB _changes and update PostgreSQL", 5 | "main": "index.js", 6 | "bin": { 7 | "couch-to-postgres": "bin/daemon.js" 8 | }, 9 | "scripts": { 10 | "test": "echo \"Error: no test specified\" && exit 1" 11 | }, 12 | "keywords": [ 13 | "postgresql", 14 | "postgres", 15 | "couchdb", 16 | "couch", 17 | "feed", 18 | "changes", 19 | "transfer" 20 | ], 21 | "author": "Mike Wolman ", 22 | "license": "BSD", 23 | "homepage": "https://github.com/sysadminmike/couch-to-postgres", 24 | "dependencies": { 25 | "kew": "0.5.0-alpha.1", 26 | "pg": "4.5.7", 27 | "pg-escape": "0.0.3", 28 | "async": "0.9.0", 29 | "follow": "0.11.4", 30 | "events": "1.0.2" 31 | } 32 | } 33 | --------------------------------------------------------------------------------