├── .gitignore ├── .gitmodules ├── AUTHORS ├── COPYRIGHT ├── INSTALL ├── Makefile ├── NEWS ├── README ├── autogen.sh ├── config.mak.in ├── configure.ac ├── debian ├── README.Debian ├── README.source ├── changelog ├── compat ├── control ├── control.in ├── copyright ├── docs ├── pgversions ├── postgresql-8.3-pgq3.dirs ├── postgresql-8.3-pgq3.docs ├── postgresql-8.3-pgq3.install ├── postgresql-8.4-pgq3.dirs ├── postgresql-8.4-pgq3.docs ├── postgresql-8.4-pgq3.install ├── postgresql-9.0-pgq3.dirs ├── postgresql-9.0-pgq3.docs ├── postgresql-9.0-pgq3.install ├── postgresql-9.1-pgq3.dirs ├── postgresql-9.1-pgq3.docs ├── postgresql-9.1-pgq3.install ├── postgresql-9.2-pgq3.dirs ├── postgresql-9.2-pgq3.docs ├── postgresql-9.2-pgq3.install ├── postgresql-9.3-pgq3.dirs ├── postgresql-9.3-pgq3.docs ├── postgresql-9.3-pgq3.install ├── postgresql-9.4-pgq3.dirs ├── postgresql-9.4-pgq3.docs ├── postgresql-9.4-pgq3.install ├── python-pgq3.docs ├── python-pgq3.install ├── python-skytools3.docs ├── python-skytools3.install ├── rules ├── skytools.ini ├── skytools3-ticker.dirs ├── skytools3-ticker.docs ├── skytools3-ticker.install ├── skytools3-ticker.manpages ├── skytools3-walmgr.docs ├── skytools3-walmgr.install ├── skytools3-walmgr.manpages ├── skytools3-walmgr.postinst ├── skytools3-walmgr.prerm ├── skytools3.dirs ├── skytools3.docs ├── skytools3.init.d ├── skytools3.install ├── skytools3.manpages ├── skytools3.postinst ├── skytools3.prerm └── source │ └── format ├── doc ├── Makefile ├── TODO.txt ├── common.config.txt ├── common.switches.txt ├── devnotes.txt ├── faq.txt ├── howto │ ├── londiste3_cascaded_rep_howto.txt │ ├── londiste3_merge_howto.txt │ ├── londiste3_partitioning_howto.txt │ ├── londiste3_simple_rep_howto.txt │ └── setup_walmgr_replication.txt ├── index.txt ├── londiste3.txt ├── pgq-nodupes.txt ├── pgq-sql.txt ├── pgqd.txt ├── qadmin.txt ├── queue_mover3.txt ├── queue_splitter3.txt ├── scriptmgr.txt ├── set.notes.txt ├── simple_consumer3.txt ├── simple_local_consumer3.txt ├── skytools3.txt ├── skytools_upgrade.txt ├── sql-grants.txt └── walmgr3.txt ├── misc ├── Cindent ├── checkver.sh ├── docheck.sh ├── extra.css ├── fixman.py ├── getattrs.py ├── kwcheck.py ├── lint.rc ├── pychecker.rc ├── pychecker.strict.rc └── run.lint.sh ├── old ├── bulk_loader.py ├── bulk_loader.txt ├── cube_dispatcher.py ├── cube_dispatcher.txt ├── simple_serial_consumer.py ├── table_dispatcher.py └── table_dispatcher.txt ├── python ├── conf │ ├── skylog.ini │ ├── wal-master.ini │ └── wal-slave.ini ├── londiste.py ├── londiste │ ├── __init__.py │ ├── compare.py │ ├── exec_attrs.py │ ├── handler.py │ ├── handlers │ │ ├── __init__.py │ │ ├── applyfn.py │ │ ├── bulk.py │ │ ├── dispatch.py │ │ ├── multimaster.py │ │ ├── qtable.py │ │ ├── shard.py │ │ └── vtable.py │ ├── playback.py │ ├── repair.py │ ├── setup.py │ ├── syncer.py │ ├── table_copy.py │ └── util.py ├── modules │ ├── cquoting.c │ └── hashtext.c ├── pgq │ ├── __init__.py │ ├── baseconsumer.py │ ├── cascade │ │ ├── __init__.py │ │ ├── admin.py │ │ ├── consumer.py │ │ ├── nodeinfo.py │ │ └── worker.py │ ├── consumer.py │ ├── coopconsumer.py │ ├── event.py │ ├── localconsumer.py │ ├── producer.py │ ├── remoteconsumer.py │ └── status.py ├── pkgloader.py ├── qadmin.py ├── setadm.py ├── skytools │ ├── __init__.py │ ├── _pyquoting.py │ ├── adminscript.py │ ├── apipkg.py │ ├── checker.py │ ├── config.py │ ├── dbservice.py │ ├── dbstruct.py │ ├── fileutil.py │ ├── gzlog.py │ ├── hashtext.py │ ├── installer_config.py.in │ ├── natsort.py │ ├── parsing.py │ ├── 
plpy_applyrow.py │ ├── psycopgwrapper.py │ ├── querybuilder.py │ ├── quoting.py │ ├── scripting.py │ ├── skylog.py │ ├── sockutil.py │ ├── sqltools.py │ ├── timeutil.py │ ├── tnetstrings.py │ └── utf8.py └── walmgr.py ├── scripts ├── catsql.py ├── data_maintainer.py ├── find_sql_functions.py ├── grantfu.py ├── queue_loader.py ├── queue_mover.py ├── queue_splitter.py ├── scriptmgr.py ├── simple_consumer.py ├── simple_local_consumer.py └── skytools_upgrade.py ├── setup_pkgloader.py ├── setup_skytools.py ├── source.cfg ├── sql ├── Makefile ├── common-pgxs.mk ├── conflicthandler │ ├── Makefile │ ├── README │ ├── expected │ │ └── test_merge.out │ ├── merge_on_time.sql │ └── sql │ │ └── test_merge.sql ├── dispatch │ ├── Makefile │ ├── create_partition.sql │ ├── expected │ │ └── test_create_part.out │ └── sql │ │ └── test_create_part.sql ├── logtriga │ ├── Makefile │ ├── README.logtriga │ ├── expected │ │ └── logtriga.out │ ├── logtriga.c │ ├── logtriga.sql.in │ ├── sql │ │ └── logtriga.sql │ ├── textbuf.c │ └── textbuf.h ├── londiste │ ├── Makefile │ ├── docs │ │ ├── Languages.txt │ │ ├── Menu.txt │ │ └── Topics.txt │ ├── expected │ │ ├── init_ext.out │ │ ├── init_ext_1.out │ │ ├── init_noext.out │ │ ├── init_noext_1.out │ │ ├── londiste_create_part.out │ │ ├── londiste_execute.out │ │ ├── londiste_fkeys.out │ │ ├── londiste_install.out │ │ ├── londiste_leaf.out │ │ ├── londiste_leaf_1.out │ │ ├── londiste_merge.out │ │ ├── londiste_provider.out │ │ ├── londiste_provider_1.out │ │ ├── londiste_seqs.out │ │ └── londiste_subscriber.out │ ├── functions │ │ ├── londiste.create_partition.sql │ │ ├── londiste.create_trigger.sql │ │ ├── londiste.drop_obsolete_partitions.sql │ │ ├── londiste.drop_table_triggers.sql │ │ ├── londiste.execute_finish.sql │ │ ├── londiste.execute_start.sql │ │ ├── londiste.find_column_types.sql │ │ ├── londiste.find_table_fkeys.sql │ │ ├── londiste.find_table_oid.sql │ │ ├── londiste.get_seq_list.sql │ │ ├── londiste.get_table_list.sql │ │ ├── londiste.global_add_table.sql │ │ ├── londiste.global_remove_seq.sql │ │ ├── londiste.global_remove_table.sql │ │ ├── londiste.global_update_seq.sql │ │ ├── londiste.handle_fkeys.sql │ │ ├── londiste.is_obsolete_partition.sql │ │ ├── londiste.is_replica_func.sql │ │ ├── londiste.list_obsolete_partitions.sql │ │ ├── londiste.local_add_seq.sql │ │ ├── londiste.local_add_table.sql │ │ ├── londiste.local_change_handler.sql │ │ ├── londiste.local_remove_seq.sql │ │ ├── londiste.local_remove_table.sql │ │ ├── londiste.local_set_skip_truncate.sql │ │ ├── londiste.local_set_table_attrs.sql │ │ ├── londiste.local_set_table_state.sql │ │ ├── londiste.local_set_table_struct.sql │ │ ├── londiste.local_show_missing.sql │ │ ├── londiste.make_fqname.sql │ │ ├── londiste.periodic_maintenance.sql │ │ ├── londiste.quote_fqname.sql │ │ ├── londiste.root_check_seqs.sql │ │ ├── londiste.root_notify_change.sql │ │ ├── londiste.split_fqname.sql │ │ ├── londiste.table_info_trigger.sql │ │ ├── londiste.upgrade_schema.sql │ │ └── londiste.version.sql │ ├── londiste.control │ ├── sql │ │ ├── init_ext.sql │ │ ├── init_noext.sql │ │ ├── londiste_create_part.sql │ │ ├── londiste_execute.sql │ │ ├── londiste_fkeys.sql │ │ ├── londiste_install.sql │ │ ├── londiste_leaf.sql │ │ ├── londiste_merge.sql │ │ ├── londiste_provider.sql │ │ ├── londiste_seqs.sql │ │ └── londiste_subscriber.sql │ └── structure │ │ ├── ext_postproc.sql │ │ ├── ext_unpackaged.sql │ │ ├── functions.sql │ │ ├── grants.ini │ │ ├── grants.sql │ │ ├── install.sql │ │ ├── tables.sql │ │ ├── 
triggers.sql │ │ └── upgrade.sql ├── pgq │ ├── Makefile │ ├── README.pgq │ ├── docs │ │ ├── Languages.txt │ │ ├── Menu.txt │ │ └── Topics.txt │ ├── expected │ │ ├── clean.out │ │ ├── logutriga.out │ │ ├── pgq_core.out │ │ ├── pgq_init_ext.out │ │ ├── pgq_init_noext.out │ │ ├── pgq_init_upgrade.out │ │ ├── pgq_init_upgrade_1.out │ │ ├── pgq_perms.out │ │ ├── sqltriga.out │ │ └── trunctrg.out │ ├── functions │ │ ├── pgq.batch_event_sql.sql │ │ ├── pgq.batch_event_tables.sql │ │ ├── pgq.batch_retry.sql │ │ ├── pgq.create_queue.sql │ │ ├── pgq.current_event_table.sql │ │ ├── pgq.drop_queue.sql │ │ ├── pgq.event_retry.sql │ │ ├── pgq.event_retry_raw.sql │ │ ├── pgq.find_tick_helper.sql │ │ ├── pgq.finish_batch.sql │ │ ├── pgq.force_tick.sql │ │ ├── pgq.get_batch_cursor.sql │ │ ├── pgq.get_batch_events.sql │ │ ├── pgq.get_batch_info.sql │ │ ├── pgq.get_consumer_info.sql │ │ ├── pgq.get_queue_info.sql │ │ ├── pgq.grant_perms.sql │ │ ├── pgq.insert_event.sql │ │ ├── pgq.maint_operations.sql │ │ ├── pgq.maint_retry_events.sql │ │ ├── pgq.maint_rotate_tables.sql │ │ ├── pgq.maint_tables_to_vacuum.sql │ │ ├── pgq.next_batch.sql │ │ ├── pgq.quote_fqname.sql │ │ ├── pgq.register_consumer.sql │ │ ├── pgq.seq_funcs.sql │ │ ├── pgq.set_queue_config.sql │ │ ├── pgq.ticker.sql │ │ ├── pgq.tune_storage.sql │ │ ├── pgq.unregister_consumer.sql │ │ ├── pgq.upgrade_schema.sql │ │ └── pgq.version.sql │ ├── lowlevel │ │ ├── Makefile │ │ ├── insert_event.c │ │ └── pgq_lowlevel.sql │ ├── old │ │ ├── pgq.insert_event_raw.sql │ │ ├── pgq.logutriga.sql │ │ └── pgq.sqltriga.sql │ ├── pgq.control │ ├── sql │ │ ├── clean.sql │ │ ├── logutriga.sql │ │ ├── pgq_core.sql │ │ ├── pgq_init_ext.sql │ │ ├── pgq_init_noext.sql │ │ ├── pgq_init_upgrade.sql │ │ ├── pgq_perms.sql │ │ ├── sqltriga.sql │ │ └── trunctrg.sql │ ├── structure │ │ ├── ext_postproc.sql │ │ ├── ext_unpackaged.sql │ │ ├── func_internal.sql │ │ ├── func_public.sql │ │ ├── grants.ini │ │ ├── grants.sql │ │ ├── install.sql │ │ ├── tables.sql │ │ ├── triggers.sql │ │ ├── uninstall_pgq.sql │ │ └── upgrade.sql │ └── triggers │ │ ├── Makefile │ │ ├── common.c │ │ ├── common.h │ │ ├── logtriga.c │ │ ├── logutriga.c │ │ ├── makesql.c │ │ ├── parsesql.c │ │ ├── parsesql.h │ │ ├── pgq_triggers.sql │ │ ├── qbuilder.c │ │ ├── qbuilder.h │ │ ├── sqltriga.c │ │ ├── stringutil.c │ │ └── stringutil.h ├── pgq_coop │ ├── Makefile │ ├── docs │ │ ├── Languages.txt │ │ ├── Menu.txt │ │ └── Topics.txt │ ├── expected │ │ ├── pgq_coop_init_ext.out │ │ ├── pgq_coop_init_noext.out │ │ └── pgq_coop_test.out │ ├── functions │ │ ├── pgq_coop.finish_batch.sql │ │ ├── pgq_coop.next_batch.sql │ │ ├── pgq_coop.register_subconsumer.sql │ │ ├── pgq_coop.unregister_subconsumer.sql │ │ └── pgq_coop.version.sql │ ├── pgq_coop.control │ ├── sql │ │ ├── pgq_coop_init_ext.sql │ │ ├── pgq_coop_init_noext.sql │ │ └── pgq_coop_test.sql │ └── structure │ │ ├── ext_postproc.sql │ │ ├── ext_unpackaged.sql │ │ ├── functions.sql │ │ ├── grants.ini │ │ ├── grants.sql │ │ ├── install.sql │ │ ├── schema.sql │ │ └── upgrade.sql ├── pgq_ext │ ├── Makefile │ ├── README.pgq_ext │ ├── docs │ │ ├── Languages.txt │ │ ├── Menu.txt │ │ └── Topics.txt │ ├── expected │ │ ├── init_ext.out │ │ ├── init_noext.out │ │ ├── test_pgq_ext.out │ │ └── test_upgrade.out │ ├── functions │ │ ├── pgq_ext.get_last_tick.sql │ │ ├── pgq_ext.is_batch_done.sql │ │ ├── pgq_ext.is_event_done.sql │ │ ├── pgq_ext.set_batch_done.sql │ │ ├── pgq_ext.set_event_done.sql │ │ ├── pgq_ext.set_last_tick.sql │ │ ├── pgq_ext.upgrade_schema.sql │ │ └── 
pgq_ext.version.sql │ ├── pgq_ext.control │ ├── sql │ │ ├── init_ext.sql │ │ ├── init_noext.sql │ │ ├── old_ext.sql │ │ ├── test_pgq_ext.sql │ │ └── test_upgrade.sql │ └── structure │ │ ├── ext_postproc.sql │ │ ├── ext_unpackaged.sql │ │ ├── grants.ini │ │ ├── grants.sql │ │ ├── install.sql │ │ ├── tables.sql │ │ └── upgrade.sql ├── pgq_node │ ├── Makefile │ ├── docs │ │ ├── Languages.txt │ │ ├── Menu.txt │ │ └── Topics.txt │ ├── expected │ │ ├── pgq_node_init_ext.out │ │ ├── pgq_node_init_noext.out │ │ └── pgq_node_test.out │ ├── functions │ │ ├── pgq_node.change_consumer_provider.sql │ │ ├── pgq_node.create_node.sql │ │ ├── pgq_node.demote_root.sql │ │ ├── pgq_node.drop_node.sql │ │ ├── pgq_node.get_consumer_info.sql │ │ ├── pgq_node.get_consumer_state.sql │ │ ├── pgq_node.get_node_info.sql │ │ ├── pgq_node.get_queue_locations.sql │ │ ├── pgq_node.get_subscriber_info.sql │ │ ├── pgq_node.get_worker_state.sql │ │ ├── pgq_node.is_leaf_node.sql │ │ ├── pgq_node.is_root_node.sql │ │ ├── pgq_node.maint_watermark.sql │ │ ├── pgq_node.promote_branch.sql │ │ ├── pgq_node.register_consumer.sql │ │ ├── pgq_node.register_location.sql │ │ ├── pgq_node.register_subscriber.sql │ │ ├── pgq_node.set_consumer_completed.sql │ │ ├── pgq_node.set_consumer_error.sql │ │ ├── pgq_node.set_consumer_paused.sql │ │ ├── pgq_node.set_consumer_uptodate.sql │ │ ├── pgq_node.set_global_watermark.sql │ │ ├── pgq_node.set_node_attrs.sql │ │ ├── pgq_node.set_partition_watermark.sql │ │ ├── pgq_node.set_subscriber_watermark.sql │ │ ├── pgq_node.unregister_consumer.sql │ │ ├── pgq_node.unregister_location.sql │ │ ├── pgq_node.unregister_subscriber.sql │ │ ├── pgq_node.upgrade_schema.sql │ │ └── pgq_node.version.sql │ ├── pgq_node.control │ ├── sql │ │ ├── pgq_node_init_ext.sql │ │ ├── pgq_node_init_noext.sql │ │ └── pgq_node_test.sql │ └── structure │ │ ├── ext_postproc.sql │ │ ├── ext_unpackaged.sql │ │ ├── functions.sql │ │ ├── grants.ini │ │ ├── grants.sql │ │ ├── install.sql │ │ ├── tables.sql │ │ └── upgrade.sql ├── ticker │ ├── Makefile │ ├── maint.c │ ├── pgqd.c │ ├── pgqd.h │ ├── pgqd.ini │ ├── retry.c │ └── ticker.c ├── txid │ ├── Makefile │ ├── README.txid │ ├── epoch.c │ ├── expected │ │ └── txid.out │ ├── sql │ │ └── txid.sql │ ├── txid.c │ ├── txid.h │ ├── txid.internal.sql │ ├── txid.schema.sql │ ├── txid.std.sql │ └── uninstall_txid.sql └── ztestall.sh ├── tests ├── cascade │ ├── conf │ │ ├── nop_consumer.ini │ │ ├── setadm.ini │ │ ├── ticker_branch.ini │ │ ├── ticker_db1.ini │ │ ├── ticker_db2.ini │ │ ├── ticker_db3.ini │ │ ├── worker_db1.ini │ │ ├── worker_db2.ini │ │ └── worker_db3.ini │ ├── init.sh │ ├── plainconsumer.py │ ├── plainworker.py │ ├── regen.sh │ ├── status.sh │ ├── zcheck.sh │ ├── zstop.sh │ └── ztest.sh ├── env.sh ├── handler │ ├── init.sh │ └── regen.sh ├── localconsumer │ ├── init.sh │ ├── regen.sh │ └── testconsumer.py ├── londiste │ ├── ddl.sql │ ├── init.sh │ ├── loadgen.py │ ├── regen.sh │ ├── test-compare.sh │ ├── test-fkey.sh │ ├── test-resurrect.sh │ └── test-weird-merge.sh ├── merge │ ├── addcol-data2.sql │ ├── init.sh │ ├── overview.sh │ └── regen.sh ├── merge_all │ ├── addcol-data2.sql │ ├── init.sh │ ├── overview.sh │ └── regen.sh ├── merge_qnode │ ├── addcol-data2.sql │ ├── init.sh │ ├── overview.sh │ └── regen.sh ├── multimaster │ ├── init.sh │ └── regen.sh ├── newloader │ ├── conf │ │ ├── loader_dst.ini │ │ ├── loader_src.ini │ │ ├── setadm_loaderq.ini │ │ └── ticker_loadersrc.ini │ ├── init.sh │ ├── regen.sh │ ├── send.data.sql │ ├── tables.sql │ ├── triggers.sql │ ├── 
zcheck.sh │ └── zstop.sh ├── noqueue_merge │ ├── init.sh │ ├── overview.sh │ └── regen.sh ├── part │ ├── init.sh │ └── regen.sh ├── qtable │ ├── init.sh │ └── regen.sh ├── queue_loader │ ├── conf │ │ ├── loader_dst.ini │ │ ├── loader_src.ini │ │ ├── setadm_loaderq.ini │ │ └── ticker_loadersrc.ini │ ├── init.sh │ ├── regen.sh │ ├── send.data.sql │ ├── tables.sql │ ├── triggers.sql │ ├── zcheck.sh │ └── zstop.sh ├── quoting │ └── regtest.py ├── rename │ └── regen.sh ├── scripts │ ├── conf │ │ ├── cube.ini │ │ ├── mover.ini │ │ ├── table.ini │ │ └── ticker.ini │ ├── data.sql │ ├── env.sh │ ├── gendb.sh │ ├── install.sql │ ├── run-tests.sh │ └── stop.sh ├── setadm │ ├── conf │ │ ├── admin.ini │ │ ├── ticker_zbranch.ini │ │ ├── ticker_zleaf.ini │ │ ├── ticker_zroot.ini │ │ ├── zbranch.ini │ │ ├── zleaf.ini │ │ └── zroot.ini │ ├── gendb.sh │ ├── stop.sh │ └── testconsumer.py ├── simplecons │ ├── init.sh │ ├── regen.sh │ └── schema.sql ├── skylog │ ├── logtest.py │ ├── runtest.sh │ ├── skylog.ini │ └── test.ini ├── testlib.sh ├── walmgr │ ├── conf.master │ │ ├── pg_hba.conf │ │ ├── pg_ident.conf │ │ └── postgresql.conf │ ├── conf.slave │ │ ├── pg_hba.conf │ │ ├── pg_ident.conf │ │ └── postgresql.conf │ └── run-test.sh ├── zcheck.sh └── zstop.sh └── upgrade ├── Makefile ├── final ├── londiste.2.1.12.sql ├── londiste.upgrade_2.1_to_3.1.sql ├── pgq.upgrade_2.1_to_3.0.sql ├── pgq_core_2.1.13.sql ├── v2.1.5_londiste.sql ├── v2.1.5_pgq_core.sql ├── v2.1.5_pgq_ext.sql ├── v2.1.6_londiste.sql └── v2.1.6_pgq_ext.sql └── src ├── londiste.2to3.sql ├── v2.1.5_londiste.sql ├── v2.1.5_pgq_core.sql ├── v2.1.5_pgq_ext.sql ├── v2.1.6_londiste.sql ├── v2.1.6_pgq_ext.sql └── v3.0_pgq_core.sql /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.pyc 3 | *.pyo 4 | *.[oas] 5 | *.so 6 | *.so.* 7 | *.diff 8 | *.log 9 | *.swp 10 | *.html 11 | *.xml 12 | .deps 13 | .DS_Store 14 | .idea 15 | config.mak 16 | config.log 17 | config.status 18 | config.guess 19 | config.sub 20 | configure 21 | install-sh 22 | autom4te.cache 23 | aclocal.m4 24 | source.list 25 | regression.diffs 26 | regression.out 27 | results 28 | pid 29 | log 30 | dist 31 | tags 32 | zconf* 33 | .gdb* 34 | 35 | debian/*-stamp 36 | debian/*tmp* 37 | debian/packages 38 | debian/files 39 | debian/*substvars 40 | debian/*debhelper 41 | debian/patches 42 | debian/python-pgq3/* 43 | debian/python-skytools3/* 44 | debian/postgresql-*-pgq3/* 45 | debian/skytools3*/* 46 | debian/skytools-pgq*/* 47 | debian/skytools-londiste*/* 48 | debian/control-pgstamp 49 | 50 | python/skytools/installer_config.py 51 | sql/txid/txid.sql 52 | sql/pgq_node/pgq_node.sql 53 | sql/pgq_node/pgq_node.upgrade.sql 54 | sql/londiste/londiste.sql 55 | sql/londiste/londiste.upgrade.sql 56 | sql/pgq/pgq.sql 57 | sql/pgq/pgq.upgrade.sql 58 | sql/pgq_coop/pgq_coop.sql 59 | sql/pgq_coop/pgq_coop.upgrade.sql 60 | sql/pgq_ext/pgq_ext.sql 61 | sql/pgq_ext/pgq_ext.upgrade.sql 62 | sql/ticker/pgqd 63 | sql/txid/txid.sql.in 64 | tests/londiste/conf 65 | tests/merge/conf 66 | 67 | sql/*/*--*--*.sql 68 | sql/*/*--*.sql 69 | sql/*/test.dump 70 | sql/*/structure/newgrants*.sql 71 | sql/*/structure/oldgrants*.sql 72 | 73 | tmp_files.lst 74 | sql/ticker/pgqd.ini.h 75 | 76 | build 77 | build.sk3 78 | doc/londiste.5 79 | doc/man 80 | reconfigure.sh 81 | *.orig 82 | *.rej 83 | 84 | .pc 85 | -------------------------------------------------------------------------------- /.gitmodules: 
-------------------------------------------------------------------------------- 1 | [submodule "lib"] 2 | path = lib 3 | url = git://github.com/markokr/libusual.git 4 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | 2 | Maintainer 3 | ---------- 4 | Marko Kreen 5 | 6 | Contributors 7 | ------------ 8 | Aleksei Plotnikov 9 | André Malo 10 | Andrew Dunstan 11 | Artyom Nosov 12 | Asko Oja 13 | Asko Tiidumaa 14 | Cédric Villemain 15 | Charles Duffy 16 | Devrim Gündüz 17 | Dimitri Fontaine 18 | Dmitriy V'jukov 19 | Doug Gorley 20 | Eero Oja 21 | Egon Valdmees 22 | Emiel van de Laar 23 | Erik Jones 24 | Glenn Davy 25 | Götz Lange 26 | Hannu Krosing 27 | Hans-Juergen Schoenig 28 | Jason Buberel 29 | Juta Vaks 30 | Kaarel Kitsemets 31 | Kristo Kaiv 32 | Luc Van Hoeylandt 33 | Lukáš Lalinský 34 | Marcin Stępnicki 35 | Mark Kirkwood 36 | Martin Otto 37 | Martin Pihlak 38 | Nico Mandery 39 | Petr Jelinek 40 | Pierre-Emmanuel André 41 | Priit Kustala 42 | Sasha Aliashkevich 43 | Sébastien Lardière 44 | Sergey Burladyan 45 | Sergey Konoplev 46 | Shoaib Mir 47 | Steve Singer 48 | Tarvi Pillessaar 49 | Tony Arkles 50 | Zoltán Böszörményi 51 | 52 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | SkyTools - tool collection for PostgreSQL 2 | 3 | Copyright (c) 2007 Marko Kreen, Skype Technologies OÜ 4 | 5 | Permission to use, copy, modify, and/or distribute this software for any 6 | purpose with or without fee is hereby granted, provided that the above 7 | copyright notice and this permission notice appear in all copies. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 | 17 | -------------------------------------------------------------------------------- /autogen.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | 3 | ./lib/mk/std-autogen.sh lib 4 | 5 | -------------------------------------------------------------------------------- /config.mak.in: -------------------------------------------------------------------------------- 1 | 2 | PACKAGE_NAME = @PACKAGE_NAME@ 3 | PACKAGE_TARNAME = @PACKAGE_TARNAME@ 4 | PACKAGE_VERSION = @PACKAGE_VERSION@ 5 | PACKAGE_STRING = @PACKAGE_STRING@ 6 | 7 | SUFFIX = @SUFFIX@ 8 | 9 | prefix = @prefix@ 10 | datarootdir = @datarootdir@ 11 | exec_prefix = @exec_prefix@ 12 | datadir = @datadir@ 13 | docdir = @docdir@$(SUFFIX) 14 | mandir = @mandir@ 15 | bindir = @bindir@ 16 | 17 | PYTHON = @PYTHON@ 18 | PG_CONFIG = @PG_CONFIG@ 19 | 20 | # additional CPPFLAGS to pgxs modules 21 | PG_CPPFLAGS = $(filter -DHAVE%, @DEFS@) 22 | 23 | DESTDIR = 24 | 25 | ASCIIDOC = @ASCIIDOC@ 26 | XMLTO = @XMLTO@ 27 | 28 | SED = @SED@ 29 | GREP = @GREP@ 30 | EGREP = @EGREP@ 31 | MKDIR_P = @MKDIR_P@ 32 | LN_S = @LN_S@ 33 | 34 | CC = @CC@ 35 | CPPFLAGS = @CPPFLAGS@ 36 | CFLAGS = @CFLAGS@ @WFLAGS@ 37 | LDFLAGS = @LDFLAGS@ 38 | LIBS = @LIBS@ 39 | 40 | SHELL = @SHELL@ 41 | INSTALL = @INSTALL@ 42 | INSTALL_PROGRAM = @INSTALL_PROGRAM@ 43 | INSTALL_SCRIPT = @INSTALL_SCRIPT@ 44 | INSTALL_DATA = @INSTALL_DATA@ 45 | BININSTALL = $(INSTALL_SCRIPT) 46 | 47 | SKYLOG = @SKYLOG@ 48 | SK3_SUBDIR = @SK3_SUBDIR@ 49 | 50 | -------------------------------------------------------------------------------- /debian/README.Debian: -------------------------------------------------------------------------------- 1 | skytools-3.0 for Debian 2 | ----------------------- 3 | 4 | The skytools package for 3.0 has been reworked and split into a number of 5 | packages: 6 | 7 | skytools3 Skytool's replication and queuing 8 | python-pgq3 Skytool's PGQ python library 9 | python-skytools3 python scripts framework for skytools 10 | skytools-ticker3 PGQ ticker daemon service 11 | skytools-walmgr3 high-availability archive and restore commands 12 | postgresql-8.4-pgq3 PGQ server-side code (C module for PostgreSQL) 13 | postgresql-9.0-pgq3 PGQ server-side code (C module for PostgreSQL) 14 | 15 | You can install your script in /etc/skytools/*.ini and the skytools package 16 | will try to start them automatically, using scriptmgr. Of course you still 17 | need to install pgq for ticker services and londiste for replication. 18 | 19 | -- Dimitri Fontaine , Wed, 6 Apr 2011 17:07:35 +0200 20 | -------------------------------------------------------------------------------- /debian/README.source: -------------------------------------------------------------------------------- 1 | skytools-3.0 for Debian 2 | ----------------------- 3 | 4 | This package is maintained in git and uses a submodule. To get a fresh 5 | checkout and build the packages, follow those steps: 6 | 7 | ## fetch git tree, from dimitri who maintains the debian package 8 | ## real upstream is at git://github.com/markokr/skytools-dev.git 9 | $ git clone http://github.com/dimitri/skytools.git 10 | 11 | ## fetch libusual submodule 12 | $ git submodule update --init 13 | 14 | ## now build 15 | $ debuild ... 
16 | -------------------------------------------------------------------------------- /debian/compat: -------------------------------------------------------------------------------- 1 | 7 2 | -------------------------------------------------------------------------------- /debian/docs: -------------------------------------------------------------------------------- 1 | NEWS 2 | README 3 | -------------------------------------------------------------------------------- /debian/pgversions: -------------------------------------------------------------------------------- 1 | 8.3 2 | 8.4 3 | 9.0 4 | 9.1 5 | 9.2 6 | 9.3 7 | 9.4 8 | -------------------------------------------------------------------------------- /debian/postgresql-8.3-pgq3.dirs: -------------------------------------------------------------------------------- 1 | usr/share/doc/postgresql-8.3 2 | -------------------------------------------------------------------------------- /debian/postgresql-8.3-pgq3.docs: -------------------------------------------------------------------------------- 1 | sql/pgq/README.pgq 2 | sql/pgq_ext/README.pgq_ext 3 | -------------------------------------------------------------------------------- /debian/postgresql-8.3-pgq3.install: -------------------------------------------------------------------------------- 1 | usr/lib/postgresql/8.3/lib/pgq_triggers.so 2 | usr/lib/postgresql/8.3/lib/pgq_lowlevel.so 3 | usr/share/postgresql/8.3/contrib 4 | -------------------------------------------------------------------------------- /debian/postgresql-8.4-pgq3.dirs: -------------------------------------------------------------------------------- 1 | usr/share/doc/postgresql-8.4 2 | -------------------------------------------------------------------------------- /debian/postgresql-8.4-pgq3.docs: -------------------------------------------------------------------------------- 1 | sql/pgq/README.pgq 2 | sql/pgq_ext/README.pgq_ext 3 | -------------------------------------------------------------------------------- /debian/postgresql-8.4-pgq3.install: -------------------------------------------------------------------------------- 1 | usr/lib/postgresql/8.4/lib/pgq_triggers.so 2 | usr/lib/postgresql/8.4/lib/pgq_lowlevel.so 3 | usr/share/postgresql/8.4/contrib 4 | -------------------------------------------------------------------------------- /debian/postgresql-9.0-pgq3.dirs: -------------------------------------------------------------------------------- 1 | usr/share/doc/postgresql-9.0 2 | -------------------------------------------------------------------------------- /debian/postgresql-9.0-pgq3.docs: -------------------------------------------------------------------------------- 1 | sql/pgq/README.pgq 2 | sql/pgq_ext/README.pgq_ext 3 | -------------------------------------------------------------------------------- /debian/postgresql-9.0-pgq3.install: -------------------------------------------------------------------------------- 1 | usr/lib/postgresql/9.0/lib/pgq_triggers.so 2 | usr/lib/postgresql/9.0/lib/pgq_lowlevel.so 3 | usr/share/postgresql/9.0/contrib 4 | -------------------------------------------------------------------------------- /debian/postgresql-9.1-pgq3.dirs: -------------------------------------------------------------------------------- 1 | usr/share/doc/postgresql-9.1 2 | -------------------------------------------------------------------------------- /debian/postgresql-9.1-pgq3.docs: -------------------------------------------------------------------------------- 1 | sql/pgq/README.pgq 2 | 
sql/pgq_ext/README.pgq_ext 3 | -------------------------------------------------------------------------------- /debian/postgresql-9.1-pgq3.install: -------------------------------------------------------------------------------- 1 | usr/lib/postgresql/9.1/lib/pgq_triggers.so 2 | usr/lib/postgresql/9.1/lib/pgq_lowlevel.so 3 | usr/share/postgresql/9.1/contrib 4 | usr/share/postgresql/9.1/extension 5 | -------------------------------------------------------------------------------- /debian/postgresql-9.2-pgq3.dirs: -------------------------------------------------------------------------------- 1 | usr/share/doc/postgresql-9.2 2 | -------------------------------------------------------------------------------- /debian/postgresql-9.2-pgq3.docs: -------------------------------------------------------------------------------- 1 | sql/pgq/README.pgq 2 | sql/pgq_ext/README.pgq_ext 3 | -------------------------------------------------------------------------------- /debian/postgresql-9.2-pgq3.install: -------------------------------------------------------------------------------- 1 | usr/lib/postgresql/9.2/lib/pgq_triggers.so 2 | usr/lib/postgresql/9.2/lib/pgq_lowlevel.so 3 | usr/share/postgresql/9.2/contrib 4 | usr/share/postgresql/9.2/extension 5 | -------------------------------------------------------------------------------- /debian/postgresql-9.3-pgq3.dirs: -------------------------------------------------------------------------------- 1 | usr/share/doc/postgresql-9.3 2 | -------------------------------------------------------------------------------- /debian/postgresql-9.3-pgq3.docs: -------------------------------------------------------------------------------- 1 | sql/pgq/README.pgq 2 | sql/pgq_ext/README.pgq_ext 3 | -------------------------------------------------------------------------------- /debian/postgresql-9.3-pgq3.install: -------------------------------------------------------------------------------- 1 | usr/lib/postgresql/9.3/lib/pgq_triggers.so 2 | usr/lib/postgresql/9.3/lib/pgq_lowlevel.so 3 | usr/share/postgresql/9.3/contrib 4 | usr/share/postgresql/9.3/extension 5 | -------------------------------------------------------------------------------- /debian/postgresql-9.4-pgq3.dirs: -------------------------------------------------------------------------------- 1 | usr/share/doc/postgresql-9.4 2 | -------------------------------------------------------------------------------- /debian/postgresql-9.4-pgq3.docs: -------------------------------------------------------------------------------- 1 | sql/pgq/README.pgq 2 | sql/pgq_ext/README.pgq_ext 3 | -------------------------------------------------------------------------------- /debian/postgresql-9.4-pgq3.install: -------------------------------------------------------------------------------- 1 | usr/lib/postgresql/9.4/lib/pgq_triggers.so 2 | usr/lib/postgresql/9.4/lib/pgq_lowlevel.so 3 | usr/share/postgresql/9.4/contrib 4 | usr/share/postgresql/9.4/extension 5 | -------------------------------------------------------------------------------- /debian/python-pgq3.docs: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/doc/skytools3/pgq-sql.html 2 | debian/tmp/usr/share/doc/skytools3/pgq-nodupes.html 3 | debian/tmp/usr/share/doc/skytools3/set.notes.html 4 | -------------------------------------------------------------------------------- /debian/python-pgq3.install: -------------------------------------------------------------------------------- 1 | 
usr/lib/python*/site-packages/skytools-3.0/pgq 2 | -------------------------------------------------------------------------------- /debian/python-skytools3.docs: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/doc/skytools3/README.html 2 | debian/tmp/usr/share/doc/skytools3/TODO.html 3 | debian/tmp/usr/share/doc/skytools3/devnotes.html 4 | debian/tmp/usr/share/doc/skytools3/faq.html 5 | -------------------------------------------------------------------------------- /debian/python-skytools3.install: -------------------------------------------------------------------------------- 1 | usr/lib/python*/site-packages/pkgloader.py 2 | usr/lib/python*/site-packages/skytools-3.0/skytools 3 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | # -*- makefile -*- 3 | 4 | # Uncomment this to turn on verbose mode. 5 | #export DH_VERBOSE=1 6 | 7 | SRCDIR = $(CURDIR) 8 | PKGVERS = $(shell dpkg-parsechangelog | awk -F '[:-]' '/^Version:/ { print substr($$2, 2) }') 9 | ORIG_EXCLUDE=--exclude-vcs --exclude=debian 10 | 11 | PG_BUILDEXT = pg_buildext 12 | 13 | include /usr/share/postgresql-common/pgxs_debian_control.mk 14 | 15 | config.mak: 16 | #./autogen.sh 17 | ./configure --prefix=/usr --with-asciidoc --with-sk3-subdir 18 | 19 | override_dh_auto_configure: config.mak 20 | 21 | override_dh_auto_clean: config.mak 22 | $(MAKE) -C doc realclean 23 | dh_auto_clean -- distclean 24 | for version in `cat $(CURDIR)/debian/pgversions`; do \ 25 | rm -rf "debian/postgresql-$${version}-pgq3"; \ 26 | done 27 | 28 | # build sql modules for several postgres versions 29 | override_dh_auto_install: config.mak 30 | mkdir -p $(CURDIR)/debian/tmp 31 | dh_auto_install 32 | $(MAKE) -C doc htmlinstall DESTDIR=$(CURDIR)/debian/tmp 33 | # now care about any previous supported versions 34 | for version in $$($(PG_BUILDEXT) supported-versions $(CURDIR)); do \ 35 | echo "### Building for PostgreSQL $$version" && \ 36 | make -C sql clean install \ 37 | PG_CONFIG=/usr/lib/postgresql/$$version/bin/pg_config \ 38 | DESTDIR=$(CURDIR)/debian/tmp \ 39 | || exit 1 ; \ 40 | done 41 | 42 | orig: config.mak 43 | rm -rf dist 44 | make tgz 45 | mv dist/*.tar.gz ../skytools3_$(PKGVERS).orig.tar.gz 46 | 47 | %: 48 | dh $@ 49 | 50 | -------------------------------------------------------------------------------- /debian/skytools.ini: -------------------------------------------------------------------------------- 1 | # configure your skytools services here 2 | # man scriptmgr for details. 
3 | 4 | [scriptmgr] 5 | job_name = skytools3 6 | logfile = /var/log/skytools/%(job_name)s.log 7 | pidfile = /var/run/skytools/%(job_name)s.pid 8 | 9 | config_list = /etc/skytools/*.ini 10 | 11 | [DEFAULT] 12 | cwd = / 13 | 14 | [londiste3] 15 | script = /usr/bin/londiste3 16 | args = worker 17 | 18 | [pgqd] 19 | script = /usr/bin/pgqd 20 | 21 | -------------------------------------------------------------------------------- /debian/skytools3-ticker.dirs: -------------------------------------------------------------------------------- 1 | usr/bin 2 | -------------------------------------------------------------------------------- /debian/skytools3-ticker.docs: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/doc/skytools3/conf/pgqd.ini.templ 2 | -------------------------------------------------------------------------------- /debian/skytools3-ticker.install: -------------------------------------------------------------------------------- 1 | usr/bin/pgqd usr/bin 2 | -------------------------------------------------------------------------------- /debian/skytools3-ticker.manpages: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/man/man1/pgqd.1 2 | -------------------------------------------------------------------------------- /debian/skytools3-walmgr.docs: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/doc/skytools3/walmgr3.html 2 | debian/tmp/usr/share/doc/skytools3/conf/wal-master.ini 3 | debian/tmp/usr/share/doc/skytools3/conf/wal-slave.ini 4 | -------------------------------------------------------------------------------- /debian/skytools3-walmgr.install: -------------------------------------------------------------------------------- 1 | usr/bin/walmgr3 2 | -------------------------------------------------------------------------------- /debian/skytools3-walmgr.manpages: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/man/man1/walmgr3.1 2 | -------------------------------------------------------------------------------- /debian/skytools3-walmgr.postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # postinst script for #PACKAGE# 3 | # 4 | # see: dh_installdeb(1) 5 | 6 | set -e 7 | 8 | # summary of how this script can be called: 9 | # * `configure' 10 | # * `abort-upgrade' 11 | # * `abort-remove' `in-favour' 12 | # 13 | # * `abort-remove' 14 | # * `abort-deconfigure' `in-favour' 15 | # `removing' 16 | # 17 | # for details, see http://www.debian.org/doc/debian-policy/ or 18 | # the debian-policy package 19 | 20 | 21 | case "$1" in 22 | configure) 23 | # scripts alternatives 24 | bin=/usr/bin 25 | man=/usr/share/man/man1 26 | for f in walmgr 27 | do 28 | update-alternatives --install $bin/${f} $f $bin/${f}3 3 \ 29 | --slave $man/${f}.1.gz $f.1 $man/${f}3.1.gz || exit 1 30 | done 31 | ;; 32 | esac 33 | 34 | # dh_installdeb will replace this with shell code automatically 35 | # generated by other debhelper scripts. 
36 | 37 | #DEBHELPER# 38 | 39 | exit 0 40 | -------------------------------------------------------------------------------- /debian/skytools3-walmgr.prerm: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | case $1 in 4 | configure) 5 | for f in walmgr; do 6 | update-alternatives --remove $f /usr/bin/${f}3 || exit 1 ; 7 | done;; 8 | esac 9 | 10 | #DEBHELPER# 11 | -------------------------------------------------------------------------------- /debian/skytools3.dirs: -------------------------------------------------------------------------------- 1 | usr/bin 2 | etc/skytools 3 | usr/share/skytools3 4 | -------------------------------------------------------------------------------- /debian/skytools3.docs: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/doc/skytools3/scriptmgr.html 2 | debian/tmp/usr/share/doc/skytools3/skytools_upgrade.html 3 | debian/tmp/usr/share/doc/skytools3/qadmin.html 4 | debian/tmp/usr/share/doc/skytools3/skytools3.html 5 | debian/tmp/usr/share/doc/skytools3/queue_splitter3.html 6 | debian/tmp/usr/share/doc/skytools3/queue_mover3.html 7 | debian/tmp/usr/share/doc/skytools3/londiste3.html 8 | debian/tmp/usr/share/doc/skytools3/simple_consumer3.html 9 | debian/tmp/usr/share/doc/skytools3/simple_local_consumer3.html 10 | -------------------------------------------------------------------------------- /debian/skytools3.install: -------------------------------------------------------------------------------- 1 | usr/bin/qadmin 2 | usr/bin/londiste3 3 | usr/bin/scriptmgr3 4 | usr/bin/queue_mover3 5 | usr/bin/queue_splitter3 6 | usr/bin/simple_consumer3 7 | usr/bin/simple_local_consumer3 8 | usr/bin/data_maintainer3 9 | debian/skytools.ini /etc 10 | usr/lib/python*/site-packages/skytools-3.0/londiste 11 | usr/share/skytools3 12 | -------------------------------------------------------------------------------- /debian/skytools3.manpages: -------------------------------------------------------------------------------- 1 | debian/tmp/usr/share/man/man1/scriptmgr3.1 2 | debian/tmp/usr/share/man/man1/qadmin.1 3 | debian/tmp/usr/share/man/man1/londiste3.1 4 | debian/tmp/usr/share/man/man1/queue_mover3.1 5 | debian/tmp/usr/share/man/man1/queue_splitter3.1 6 | debian/tmp/usr/share/man/man1/simple_consumer3.1 7 | debian/tmp/usr/share/man/man1/simple_local_consumer3.1 8 | -------------------------------------------------------------------------------- /debian/skytools3.postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # postinst script for #PACKAGE# 3 | # 4 | # see: dh_installdeb(1) 5 | 6 | set -e 7 | 8 | # summary of how this script can be called: 9 | # * `configure' 10 | # * `abort-upgrade' 11 | # * `abort-remove' `in-favour' 12 | # 13 | # * `abort-remove' 14 | # * `abort-deconfigure' `in-favour' 15 | # `removing' 16 | # 17 | # for details, see http://www.debian.org/doc/debian-policy/ or 18 | # the debian-policy package 19 | 20 | 21 | case "$1" in 22 | configure) 23 | if getent passwd skytools > /dev/null; then 24 | echo user skytools already exists 25 | else 26 | adduser --system --no-create-home --home /var/lib/skytools --group --disabled-login skytools 27 | fi 28 | 29 | # care for transient data directories 30 | install -o skytools -g skytools -d /var/log/skytools /var/run/skytools 31 | 32 | # scripts alternatives 33 | bin=/usr/bin 34 | man=/usr/share/man/man1 35 | for f in londiste scriptmgr queue_mover 
queue_splitter 36 | do 37 | update-alternatives --install $bin/${f} $f $bin/${f}3 3 \ 38 | --slave $man/${f}.1.gz $f.1 $man/${f}3.1.gz || exit 1 39 | done 40 | ;; 41 | esac 42 | 43 | # dh_installdeb will replace this with shell code automatically 44 | # generated by other debhelper scripts. 45 | 46 | #DEBHELPER# 47 | 48 | exit 0 49 | -------------------------------------------------------------------------------- /debian/skytools3.prerm: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | case $1 in 4 | configure) 5 | for f in londiste scriptmgr queue_mover queue_splitter; do 6 | update-alternatives --remove $f /usr/bin/${f}3 || exit 1 ; 7 | done;; 8 | esac 9 | 10 | #DEBHELPER# 11 | -------------------------------------------------------------------------------- /debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (native) 2 | -------------------------------------------------------------------------------- /doc/common.config.txt: -------------------------------------------------------------------------------- 1 | 2 | === Common configuration parameters === 3 | 4 | job_name:: 5 | Name of the particular job the script does. The script will log under this name 6 | to logdb/logserver. The name is also used as the default PgQ consumer name. 7 | It should be unique. 8 | 9 | pidfile:: 10 | Location of the pid file. If not given, the script is not allowed to daemonize. 11 | 12 | logfile:: 13 | Location of the log file. 14 | 15 | loop_delay:: 16 | For a continuously running process, how long to sleep after each work loop, 17 | in seconds. Default: 1. 18 | 19 | connection_lifetime:: 20 | Close and reconnect older database connections. 21 | 22 | use_skylog:: 23 | Use skylog.ini based logging setup. 24 | 25 | ifdef::pgq[] 26 | 27 | === Common PgQ consumer parameters === 28 | 29 | queue_name:: 30 | Queue name to attach to. 31 | No default. 32 | 33 | consumer_name:: 34 | Consumer ID to use when registering. 35 | Default: %(job_name)s 36 | 37 | endif::pgq[] 38 | 39 | -------------------------------------------------------------------------------- /doc/common.switches.txt: -------------------------------------------------------------------------------- 1 | 2 | The following switches are common to all skytools.DBScript-based 3 | Python programs. 4 | 5 | -h, --help:: 6 | show help message and exit 7 | 8 | -q, --quiet:: 9 | make the program silent 10 | 11 | -v, --verbose:: 12 | make the program more verbose 13 | 14 | -d, --daemon:: 15 | make the program run in the background 16 | 17 | --ini:: 18 | show a commented template config file. 19 | 20 | The following switches are used to control an already running process. 21 | The pidfile is read from the config, then the signal is sent to the process 22 | id specified there. 23 | 24 | -r, --reload:: 25 | reload config (send SIGHUP) 26 | 27 | -s, --stop:: 28 | stop program safely (send SIGINT) 29 | 30 | -k, --kill:: 31 | kill program immediately (send SIGTERM) 32 | 33 | -------------------------------------------------------------------------------- /doc/pgq-nodupes.txt: -------------------------------------------------------------------------------- 1 | = Avoiding duplicate events = 2 | 3 | It is pretty burdensome to check whether an event has already been processed, 4 | especially when moving data in bulk. Here is how this can be avoided. 5 | 6 | First, the consumer must guarantee that it processes all events of a batch in one transaction. 7 | 8 | The consumer itself can tag events for retry, but then it must be able to handle them later.
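For reference, the core per-batch loop that the sections below build on looks roughly like this (a minimal sketch; the queue name 'myqueue', consumer name 'myconsumer' and the batch_id placeholder are illustrative, not part of this document):

  -- register once
  SELECT pgq.register_consumer('myqueue', 'myconsumer');

  -- repeated per batch
  BEGIN;
  SELECT pgq.next_batch('myqueue', 'myconsumer');   -- returns batch_id, or NULL if there is no work
  SELECT * FROM pgq.get_batch_events(:batch_id);    -- process these events here
  SELECT pgq.finish_batch(:batch_id);               -- in the same transaction as the processing
  COMMIT;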
9 | 10 | 11 | == Only one db == 12 | 13 | If the PgQ queue and the event data handling happen in the same database, 14 | the consumer must simply call pgq.finish_batch() inside the event-processing 15 | transaction. 16 | 17 | == Several databases == 18 | 19 | If the event processing happens in a different database, the consumer 20 | must store the batch_id in the destination database, inside the same 21 | transaction in which the event processing happens. 22 | 23 | - Only after committing it can the consumer call pgq.finish_batch() in the queue database 24 | and commit that. 25 | 26 | - As the batches come in sequence, there is no need to remember a full log of batch_ids; 27 | it is enough to keep the latest batch_id. 28 | 29 | - Then at the start of every batch, the consumer can check whether the batch_id already 30 | exists in the destination database, and if it does, just tag the batch done 31 | without processing it. 32 | 33 | With this, there is no need for the consumer to check for already processed 34 | events. 35 | 36 | == Note == 37 | 38 | This assumes the event processing is transactional - failures 39 | will be rolled back. If event processing includes communication with 40 | the world outside the database, e.g. sending email, such handling won't work. 41 | -------------------------------------------------------------------------------- /doc/simple_consumer3.txt: -------------------------------------------------------------------------------- 1 | 2 | = simple_consumer3(1) = 3 | 4 | == NAME == 5 | 6 | simple_consumer3 - PgQ consumer that executes query for each event 7 | 8 | == SYNOPSIS == 9 | 10 | simple_consumer3.py [switches] config.ini 11 | 12 | == DESCRIPTION == 13 | 14 | For each event in a batch it will execute a query, filling event 15 | values into it. 16 | 17 | Transactionality: the query is executed in autocommit mode and 18 | no batch tracking is done. That means that on failure the 19 | whole batch is fetched and all events are processed again. 20 | 21 | == CONFIG == 22 | 23 | Run `simple_consumer3 --ini` to see a commented config template. 24 | 25 | == COMMAND LINE SWITCHES == 26 | 27 | include::common.switches.txt[] 28 | 29 | -------------------------------------------------------------------------------- /doc/simple_local_consumer3.txt: -------------------------------------------------------------------------------- 1 | 2 | = simple_local_consumer3(1) = 3 | 4 | == NAME == 5 | 6 | simple_local_consumer3 - PgQ consumer that executes query for each row 7 | 8 | == SYNOPSIS == 9 | 10 | simple_local_consumer3.py [switches] config.ini 11 | 12 | == DESCRIPTION == 13 | 14 | For each event in a batch it will execute a query, filling event 15 | values into it. 16 | 17 | Transactionality: the query is executed in autocommit mode and 18 | the completed batch is tracked in a local file, so the consumer can be switched 19 | between nodes in a cascaded queue. 20 | 21 | == CONFIG == 22 | 23 | Run `simple_local_consumer3 --ini` to see a commented config template. 24 | 25 | == COMMAND LINE SWITCHES == 26 | 27 | include::common.switches.txt[] 28 | 29 | -------------------------------------------------------------------------------- /doc/skytools_upgrade.txt: -------------------------------------------------------------------------------- 1 | = skytools_upgrade(1) = 2 | 3 | == NAME == 4 | 5 | skytools_upgrade - utility for upgrading Skytools code in databases. 6 | 7 | == SYNOPSIS == 8 | 9 | skytools_upgrade.py [switches] connstr [connstr ...] 10 | 11 | == DESCRIPTION == 12 | 13 | It connects to the given database(s), then looks for the following schemas: 14 | 15 | pgq:: 16 | Main PgQ code.
17 | pgq_ext:: 18 | PgQ batch/event tracking in remote database. 19 | londiste:: 20 | Londiste replication. 21 | 22 | If a schema exists, its version is detected by querying the .version() 23 | function under that schema. If the function does not exist, some 24 | built-in heuristics differentiate between the 2.1.4 and 25 | 2.1.5 versions of the schemas. 26 | 27 | If the detected version is older than the current one, the schema is upgraded 28 | by applying the upgrade scripts in order. 29 | 30 | == COMMAND LINE SWITCHES == 31 | 32 | include::common.switches.txt[] 33 | 34 | Options specific to skytools_upgrade: 35 | 36 | --all:: 37 | Upgrade all databases. 38 | 39 | --not-really:: 40 | Don't actually do anything. 41 | -------------------------------------------------------------------------------- /doc/sql-grants.txt: -------------------------------------------------------------------------------- 1 | 2 | = SQL permissions (draft) = 3 | 4 | == Setup == 5 | 6 | Currently the following no-login roles are created during upgrade: 7 | `pgq_reader`, `pgq_writer`, `pgq_admin`, `londiste_reader`, `londiste_writer`. 8 | 9 | Actual grants are not applied to functions; instead, the default 10 | `public:execute` grants are kept. New grants can be applied 11 | manually: 12 | 13 | newgrants_.sql:: 14 | applies new rights, drops old public access 15 | 16 | oldgrants_.sql:: 17 | restores old rights - public execute privilege on all functions 18 | 19 | == New roles == 20 | 21 | pgq_reader:: 22 | Can consume queues (source-side) 23 | 24 | pgq_writer:: 25 | Can write into queues (source-side / dest-side) 26 | Can use `pgq_node`/`pgq_ext` schema as regular 27 | consumer (dest-side) 28 | 29 | pgq_admin:: 30 | Admin operations on queues, required for CascadedWorker on dest-side. 31 | Member of `pgq_reader` and `pgq_writer`. 32 | 33 | londiste_reader:: 34 | Member of `pgq_reader`, needs additional read access to tables. 35 | (source-side) 36 | 37 | londiste_writer:: 38 | Member of `pgq_admin`, needs additional write access to tables. 39 | (dest-side) 40 | 41 | -------------------------------------------------------------------------------- /misc/Cindent: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | PARAM="-npro -kr -i8 -ts8 -sob -l120 -ss -ncs -cp1" 4 | PARAM="-npro -kr -i8 -ts8 -nsob -l80 -ss -ncs -cp1 -il0" 5 | PARAM="-npro -kr -i8 -ts8 -nsob -hnl -l110 -ss -ncs -cp1 -il0" 6 | 7 | for t in Datum PgqTriggerEvent TriggerData uint8 uint32 uint64 \ 8 | StringInfo Oid TransactionId 9 | do 10 | PARAM="$PARAM -T $t" 11 | done 12 | 13 | echo indent $PARAM "$@" 14 | indent $PARAM "$@" 15 | 16 | -------------------------------------------------------------------------------- /misc/checkver.sh: -------------------------------------------------------------------------------- 1 | #!
/bin/sh 2 | 3 | err=0 4 | 5 | for s in pgq pgq_node pgq_coop londiste pgq_ext; do 6 | code_hash=$(git log --raw -n 1 sql/$s/functions | head -1) 7 | fn="sql/$s/functions/$s.version.sql" 8 | ver_hash=$(git log --raw -n 1 "$fn" | head -1) 9 | test "${code_hash}" = "${ver_hash}" || echo "$s has code changes, needs new version" 10 | 11 | ver_func=$(sed -n "s/.*return *'\(.*\)';/\1/;T;p" $fn) 12 | ver_control=$(sed -n "s/default_version = '\(.*\)'/\1/;T;p" sql/$s/$s.control) 13 | ver_make=$(sed -n "s/EXT_VERSION = \(.*\)/\1/;T;p" sql/$s/Makefile) 14 | 15 | if test "${ver_func}|${ver_control}" = "${ver_make}|${ver_make}"; then 16 | echo "$s: $ver_control" 17 | else 18 | echo "$s: version mismatch" 19 | echo " Makefile: $ver_make" 20 | echo " version(): $ver_func" 21 | echo " control: $ver_control" 22 | err=1 23 | fi 24 | done 25 | 26 | exit $err 27 | 28 | -------------------------------------------------------------------------------- /misc/docheck.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | PYTHONPATH=python:$PYTHONPATH 4 | export PYTHONPATH 5 | 6 | if test "$1" = ""; then 7 | for f in \ 8 | python/skytools/*.py \ 9 | python/pgq/*.py \ 10 | python/pgq/cascade/*.py \ 11 | python/londiste/*.py \ 12 | python/*.py \ 13 | scripts/*.py 14 | do 15 | pychecker --config misc/pychecker.rc "$f" 16 | done 17 | else 18 | for f in "$@"; do 19 | pychecker --config misc/pychecker.rc "$f" 20 | done 21 | fi 22 | -------------------------------------------------------------------------------- /misc/extra.css: -------------------------------------------------------------------------------- 1 | 2 | /* extra.css: make code blocks more different */ 3 | div.literalblock { 4 | border: 1px solid silver; 5 | background: #f4f4f4; 6 | padding: 0.5em; 7 | } 8 | /* eof extra.css */ 9 | 10 | -------------------------------------------------------------------------------- /misc/fixman.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import sys,re 4 | 5 | # hacks to force empty lines into manpage 6 | ln1 = r"\1\2" 7 | xml = sys.stdin.read() 8 | xml = re.sub(r"(\s*)(\s*)(= 0: 8 | print "-a pgq" 9 | 10 | -------------------------------------------------------------------------------- /misc/kwcheck.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import sys 4 | import re 5 | 6 | import pkgloader 7 | pkgloader.require('skytools', '3.0') 8 | import skytools.quoting 9 | 10 | kwmap = skytools.quoting._ident_kwmap 11 | 12 | fn = "/opt/src/pgsql/postgresql/src/include/parser/kwlist.h" 13 | if len(sys.argv) == 2: 14 | fn = sys.argv[1] 15 | 16 | rc = re.compile(r'PG_KEYWORD[(]"(.*)" , \s* \w+ , \s* (\w+) [)]', re.X) 17 | 18 | data = open(fn, 'r').read() 19 | full_map = {} 20 | cur_map = {} 21 | print "== new ==" 22 | for kw, cat in rc.findall(data): 23 | full_map[kw] = 1 24 | if cat == 'UNRESERVED_KEYWORD': 25 | continue 26 | if cat == 'COL_NAME_KEYWORD': 27 | continue 28 | cur_map[kw] = 1 29 | if kw not in kwmap: 30 | print kw, cat 31 | kwmap[kw] = 1 32 | 33 | print "== obsolete ==" 34 | kws = kwmap.keys() 35 | kws.sort() 36 | for k in kws: 37 | if k not in full_map: 38 | print k, '(not in full_map)' 39 | elif k not in cur_map: 40 | print k, '(not in cur_map)' 41 | 42 | print "== full list ==" 43 | ln = "" 44 | for k in kws: 45 | ln += '"%s":1, ' % k 46 | if len(ln) > 70: 47 | print ln.strip() 48 | ln = "" 49 | print ln.strip() 50 | 51 | -------------------------------------------------------------------------------- /misc/run.lint.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | bdir=`echo build/lib.*` 4 | #cd python 5 | #export PYTHONPATH=.:../$bdir:$PYTHONPATH 6 | #echo $PYTHONPATH 7 | 8 | #cd $bdir 9 | cd python 10 | export PYTHONPATH=.:$PYTHONPATH 11 | pylint -i yes --rcfile=../misc/lint.rc -E skytools pgq londiste 12 | 13 | -------------------------------------------------------------------------------- /python/conf/wal-master.ini: -------------------------------------------------------------------------------- 1 | [walmgr] 2 | job_name = wal-master 3 | logfile = ~/log/%(job_name)s.log 4 | pidfile = ~/pid/%(job_name)s.pid 5 | use_skylog = 1 6 | 7 | master_db = dbname=template1 8 | master_data = /var/lib/postgresql/8.3/main 9 | master_config = /etc/postgresql/8.3/main/postgresql.conf 10 | master_bin = /usr/lib/postgresql/8.3/bin 11 | 12 | # set this only if you can afford database restarts during setup and stop. 
13 | #master_restart_cmd = /etc/init.d/postgresql-8.3 restart 14 | 15 | slave = slave-host 16 | slave_config = /var/lib/postgresql/conf/wal-slave.ini 17 | 18 | walmgr_data = /var/lib/postgresql/walshipping 19 | completed_wals = %(walmgr_data)s/logs.complete 20 | partial_wals = %(walmgr_data)s/logs.partial 21 | full_backup = %(walmgr_data)s/data.master 22 | config_backup = %(walmgr_data)s/config.backup 23 | 24 | # syncdaemon update frequency 25 | loop_delay = 10.0 26 | # use record based shipping available since 8.2 27 | use_xlog_functions = 0 28 | 29 | # pass -z to rsync, useful on low bandwidth links 30 | compression = 0 31 | 32 | # keep symlinks for pg_xlog and pg_log 33 | keep_symlinks = 1 34 | 35 | # tell walmgr to set wal_level to hot_standby during setup 36 | #hot_standby = 1 37 | 38 | # periodic sync 39 | #command_interval = 600 40 | #periodic_command = /var/lib/postgresql/walshipping/periodic.sh 41 | 42 | -------------------------------------------------------------------------------- /python/conf/wal-slave.ini: -------------------------------------------------------------------------------- 1 | [walmgr] 2 | job_name = wal-slave 3 | logfile = ~/log/%(job_name)s.log 4 | use_skylog = 1 5 | 6 | slave_data = /var/lib/postgresql/8.3/main 7 | slave_bin = /usr/lib/postgresql/8.3/bin 8 | slave_stop_cmd = /etc/init.d/postgresql-8.3 stop 9 | slave_start_cmd = /etc/init.d/postgresql-8.3 start 10 | slave_config_dir = /etc/postgresql/8.3/main 11 | 12 | # alternative pg_xlog directory for slave, symlinked to pg_xlog on restore 13 | #slave_pg_xlog = /vol2/pg_xlog 14 | 15 | walmgr_data = ~/walshipping 16 | completed_wals = %(walmgr_data)s/logs.complete 17 | partial_wals = %(walmgr_data)s/logs.partial 18 | full_backup = %(walmgr_data)s/data.master 19 | config_backup = %(walmgr_data)s/config.backup 20 | 21 | backup_datadir = yes 22 | keep_backups = 0 23 | archive_command = 24 | 25 | # primary database connect string for hot standby -- enabling 26 | # this will cause the slave to be started in hot standby mode. 
27 | #primary_conninfo = host=master port=5432 user=postgres 28 | 29 | -------------------------------------------------------------------------------- /python/londiste/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | """Replication on top of PgQ.""" 3 | 4 | __pychecker__ = 'no-miximport' 5 | 6 | import londiste.playback 7 | import londiste.compare 8 | import londiste.setup 9 | import londiste.table_copy 10 | import londiste.repair 11 | import londiste.handler 12 | 13 | from londiste.playback import * 14 | from londiste.compare import * 15 | from londiste.setup import * 16 | from londiste.table_copy import * 17 | from londiste.repair import * 18 | from londiste.handler import * 19 | 20 | __all__ = ( 21 | londiste.playback.__all__ + 22 | londiste.compare.__all__ + 23 | londiste.handler.__all__ + 24 | londiste.setup.__all__ + 25 | londiste.table_copy.__all__ + 26 | londiste.repair.__all__ ) 27 | 28 | -------------------------------------------------------------------------------- /python/londiste/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | # handlers module 2 | import new 3 | import sys 4 | 5 | DEFAULT_HANDLERS = [ 6 | 'londiste.handlers.qtable', 7 | 'londiste.handlers.applyfn', 8 | 'londiste.handlers.shard', 9 | 'londiste.handlers.multimaster', 10 | 'londiste.handlers.vtable', 11 | 12 | 'londiste.handlers.bulk', 13 | 'londiste.handlers.dispatch', 14 | ] 15 | 16 | def handler_args(name, cls): 17 | """Handler arguments initialization decorator 18 | 19 | Define successor for handler class cls with func as argument generator 20 | """ 21 | def wrapper(func): 22 | def _init_override(self, table_name, args, dest_table): 23 | cls.__init__(self, table_name, func(args.copy()), dest_table) 24 | dct = {'__init__': _init_override, 'handler_name': name} 25 | module = sys.modules[cls.__module__] 26 | newname = '%s_%s' % (cls.__name__, name.replace('.','_')) 27 | newcls = new.classobj(newname, (cls,), dct) 28 | setattr(module, newname, newcls) 29 | module.__londiste_handlers__.append(newcls) 30 | module.__all__.append(newname) 31 | return func 32 | return wrapper 33 | 34 | def update(*p): 35 | """ Update dicts given in params with its predecessor param dict 36 | in reverse order """ 37 | return reduce(lambda x, y: x.update(y) or x, 38 | (p[i] for i in range(len(p)-1,-1,-1)), {}) 39 | -------------------------------------------------------------------------------- /python/londiste/handlers/applyfn.py: -------------------------------------------------------------------------------- 1 | """ 2 | Send all events to a DB function. 3 | """ 4 | 5 | import skytools 6 | from londiste.handler import BaseHandler 7 | 8 | __all__ = ['ApplyFuncHandler'] 9 | 10 | class ApplyFuncHandler(BaseHandler): 11 | """Call DB function to apply event. 
12 | 13 | Parameters: 14 | func_name=NAME - database function name 15 | func_conf=CONF - database function conf 16 | """ 17 | handler_name = 'applyfn' 18 | 19 | def prepare_batch(self, batch_info, dst_curs): 20 | self.cur_tick = batch_info['tick_id'] 21 | 22 | def process_event(self, ev, sql_queue_func, qfunc_arg): 23 | """Apply the event by calling the configured DB function.""" 24 | fn = self.args.get('func_name') 25 | fnconf = self.args.get('func_conf', '') 26 | 27 | args = [fnconf, self.cur_tick, 28 | ev.ev_id, ev.ev_time, 29 | ev.ev_txid, ev.ev_retry, 30 | ev.ev_type, ev.ev_data, 31 | ev.ev_extra1, ev.ev_extra2, 32 | ev.ev_extra3, ev.ev_extra4] 33 | 34 | qfn = skytools.quote_fqident(fn) 35 | qargs = [skytools.quote_literal(a) for a in args] 36 | sql = "select %s(%s);" % (qfn, ', '.join(qargs)) 37 | self.log.debug('applyfn.sql: %s', sql) 38 | sql_queue_func(sql, qfunc_arg) 39 | 40 | #------------------------------------------------------------------------------ 41 | # register handler class 42 | #------------------------------------------------------------------------------ 43 | 44 | __londiste_handlers__ = [ApplyFuncHandler] 45 | -------------------------------------------------------------------------------- /python/londiste/handlers/multimaster.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | """ 4 | Handler for replica with multiple master nodes. 5 | 6 | Can only handle initial copy from one master. Add other masters with 7 | expect-sync option. 8 | 9 | NB! needs the merge_on_time function to be installed in the database first. 10 | """ 11 | 12 | import skytools 13 | from londiste.handlers.applyfn import ApplyFuncHandler 14 | from londiste.handlers import update 15 | 16 | __all__ = ['MultimasterHandler'] 17 | 18 | class MultimasterHandler(ApplyFuncHandler): 19 | __doc__ = __doc__ 20 | handler_name = 'multimaster' 21 | 22 | def __init__(self, table_name, args, dest_table): 23 | """Init per-batch table data cache.""" 24 | conf = args.copy() 25 | # remove multimaster-specific args from conf 26 | for name in ['func_name','func_conf']: 27 | if name in conf: 28 | conf.pop(name) 29 | conf = skytools.db_urlencode(conf) 30 | args = update(args, {'func_name': 'merge_on_time', 'func_conf': conf}) 31 | ApplyFuncHandler.__init__(self, table_name, args, dest_table) 32 | 33 | def _check_args (self, args): 34 | pass # any arg can be passed 35 | 36 | def add(self, trigger_arg_list): 37 | """Create SKIP and BEFORE INSERT trigger""" 38 | trigger_arg_list.append('no_merge') 39 | 40 | 41 | #------------------------------------------------------------------------------ 42 | # register handler class 43 | #------------------------------------------------------------------------------ 44 | 45 | __londiste_handlers__ = [MultimasterHandler] 46 | -------------------------------------------------------------------------------- /python/londiste/handlers/vtable.py: -------------------------------------------------------------------------------- 1 | """Virtual Table handler. 2 | 3 | Hack to get local=t for a table, but without processing any events.
4 | """ 5 | 6 | from londiste.handler import BaseHandler 7 | 8 | __all__ = ['VirtualTableHandler', 'FakeLocalHandler'] 9 | 10 | class VirtualTableHandler(BaseHandler): 11 | __doc__ = __doc__ 12 | handler_name = 'vtable' 13 | 14 | def add(self, trigger_arg_list): 15 | trigger_arg_list.append('virtual_table') 16 | 17 | def needs_table(self): 18 | return False 19 | 20 | class FakeLocalHandler(VirtualTableHandler): 21 | """Deprecated compat name for vtable.""" 22 | handler_name = 'fake_local' 23 | 24 | __londiste_handlers__ = [VirtualTableHandler, FakeLocalHandler] 25 | -------------------------------------------------------------------------------- /python/pgq/__init__.py: -------------------------------------------------------------------------------- 1 | """PgQ framework for Python.""" 2 | 3 | __pychecker__ = 'no-miximport' 4 | 5 | import pgq.event 6 | import pgq.consumer 7 | import pgq.remoteconsumer 8 | import pgq.producer 9 | 10 | import pgq.status 11 | 12 | import pgq.cascade 13 | import pgq.cascade.nodeinfo 14 | import pgq.cascade.admin 15 | import pgq.cascade.consumer 16 | import pgq.cascade.worker 17 | 18 | from pgq.event import * 19 | from pgq.consumer import * 20 | from pgq.coopconsumer import * 21 | from pgq.remoteconsumer import * 22 | from pgq.localconsumer import * 23 | from pgq.producer import * 24 | 25 | from pgq.status import * 26 | 27 | from pgq.cascade.nodeinfo import * 28 | from pgq.cascade.admin import * 29 | from pgq.cascade.consumer import * 30 | from pgq.cascade.worker import * 31 | 32 | __all__ = ( 33 | pgq.event.__all__ + 34 | pgq.consumer.__all__ + 35 | pgq.coopconsumer.__all__ + 36 | pgq.remoteconsumer.__all__ + 37 | pgq.localconsumer.__all__ + 38 | pgq.cascade.nodeinfo.__all__ + 39 | pgq.cascade.admin.__all__ + 40 | pgq.cascade.consumer.__all__ + 41 | pgq.cascade.worker.__all__ + 42 | pgq.producer.__all__ + 43 | pgq.status.__all__ ) 44 | 45 | 46 | -------------------------------------------------------------------------------- /python/pgq/cascade/__init__.py: -------------------------------------------------------------------------------- 1 | """Cascaded Queue support.""" 2 | 3 | -------------------------------------------------------------------------------- /python/pgq/producer.py: -------------------------------------------------------------------------------- 1 | 2 | """PgQ producer helpers for Python. 
3 | """ 4 | 5 | import skytools 6 | 7 | __all__ = ['bulk_insert_events', 'insert_event'] 8 | 9 | _fldmap = { 10 | 'id': 'ev_id', 11 | 'time': 'ev_time', 12 | 'type': 'ev_type', 13 | 'data': 'ev_data', 14 | 'extra1': 'ev_extra1', 15 | 'extra2': 'ev_extra2', 16 | 'extra3': 'ev_extra3', 17 | 'extra4': 'ev_extra4', 18 | 19 | 'ev_id': 'ev_id', 20 | 'ev_time': 'ev_time', 21 | 'ev_type': 'ev_type', 22 | 'ev_data': 'ev_data', 23 | 'ev_extra1': 'ev_extra1', 24 | 'ev_extra2': 'ev_extra2', 25 | 'ev_extra3': 'ev_extra3', 26 | 'ev_extra4': 'ev_extra4', 27 | } 28 | 29 | def bulk_insert_events(curs, rows, fields, queue_name): 30 | q = "select pgq.current_event_table(%s)" 31 | curs.execute(q, [queue_name]) 32 | tbl = curs.fetchone()[0] 33 | db_fields = map(_fldmap.get, fields) 34 | skytools.magic_insert(curs, tbl, rows, db_fields) 35 | 36 | def insert_event(curs, queue, ev_type, ev_data, 37 | extra1=None, extra2=None, 38 | extra3=None, extra4=None): 39 | q = "select pgq.insert_event(%s, %s, %s, %s, %s, %s, %s)" 40 | curs.execute(q, [queue, ev_type, ev_data, 41 | extra1, extra2, extra3, extra4]) 42 | return curs.fetchone()[0] 43 | 44 | -------------------------------------------------------------------------------- /python/setadm.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | """SetAdmin launcher. 4 | """ 5 | 6 | import sys 7 | 8 | import pkgloader 9 | pkgloader.require('skytools', '3.0') 10 | 11 | import pgq.cascade.admin 12 | 13 | if __name__ == '__main__': 14 | script = pgq.cascade.admin.CascadeAdmin('cascade_admin', 'node_db', sys.argv[1:], worker_setup = False) 15 | script.start() 16 | 17 | -------------------------------------------------------------------------------- /python/skytools/gzlog.py: -------------------------------------------------------------------------------- 1 | 2 | """Atomic append of gzipped data. 3 | 4 | The point is - if several gzip streams are concatenated, 5 | they are read back as one whole stream. 6 | """ 7 | 8 | import gzip 9 | from cStringIO import StringIO 10 | 11 | __all__ = ['gzip_append'] 12 | 13 | # 14 | # gzip storage 15 | # 16 | def gzip_append(filename, data, level = 6): 17 | """Append a block of data to file with safety checks.""" 18 | 19 | # compress data 20 | buf = StringIO() 21 | g = gzip.GzipFile(fileobj = buf, compresslevel = level, mode = "w") 22 | g.write(data) 23 | g.close() 24 | zdata = buf.getvalue() 25 | 26 | # append, safely 27 | f = open(filename, "a+", 0) 28 | f.seek(0, 2) 29 | pos = f.tell() 30 | try: 31 | f.write(zdata) 32 | f.close() 33 | except Exception, ex: 34 | # rollback on error 35 | f.seek(pos, 0) 36 | f.truncate() 37 | f.close() 38 | raise ex 39 | -------------------------------------------------------------------------------- /python/skytools/installer_config.py.in: -------------------------------------------------------------------------------- 1 | 2 | """SQL script locations.""" 3 | 4 | __all__ = ['sql_locations'] 5 | 6 | sql_locations = [ 7 | "@SQLDIR@", 8 | ] 9 | 10 | package_version = "@PACKAGE_VERSION@" 11 | 12 | skylog = @SKYLOG@ 13 | 14 | -------------------------------------------------------------------------------- /scripts/queue_mover.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | """This script simply mover events from one queue to another. 
4 | 5 | Config parameters:: 6 | 7 | ## Parameters for queue_mover 8 | 9 | src_db = dbname=sourcedb 10 | dst_db = dbname=targetdb 11 | 12 | dst_queue_name = dest_queue 13 | """ 14 | 15 | import sys, os 16 | 17 | import pkgloader 18 | pkgloader.require('skytools', '3.0') 19 | 20 | import pgq 21 | 22 | class QueueMover(pgq.SerialConsumer): 23 | __doc__ = __doc__ 24 | 25 | def __init__(self, args): 26 | pgq.SerialConsumer.__init__(self, "queue_mover3", "src_db", "dst_db", args) 27 | self.dst_queue_name = self.cf.get("dst_queue_name") 28 | 29 | def process_remote_batch(self, db, batch_id, ev_list, dst_db): 30 | 31 | # load data 32 | rows = [] 33 | for ev in ev_list: 34 | data = [ev.type, ev.data, ev.extra1, ev.extra2, ev.extra3, ev.extra4, ev.time] 35 | rows.append(data) 36 | fields = ['type', 'data', 'extra1', 'extra2', 'extra3', 'extra4', 'time'] 37 | 38 | # insert data 39 | curs = dst_db.cursor() 40 | pgq.bulk_insert_events(curs, rows, fields, self.dst_queue_name) 41 | 42 | if __name__ == '__main__': 43 | script = QueueMover(sys.argv[1:]) 44 | script.start() 45 | 46 | -------------------------------------------------------------------------------- /setup_pkgloader.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from distutils.core import setup 4 | 5 | setup( 6 | name = "pkgloader", 7 | license = "ISC", 8 | version = '1.0', 9 | maintainer = "Marko Kreen", 10 | maintainer_email = "markokr@gmail.com", 11 | package_dir = {'': 'python'}, 12 | py_modules = ['pkgloader'], 13 | ) 14 | 15 | -------------------------------------------------------------------------------- /sql/Makefile: -------------------------------------------------------------------------------- 1 | 2 | include ../config.mak 3 | 4 | SUBDIRS = londiste pgq pgq_coop pgq_ext pgq_node ticker txid 5 | 6 | all install clean distclean installcheck test: 7 | for dir in $(SUBDIRS); do \ 8 | $(MAKE) -C $$dir $@ \ 9 | DESTDIR="$(DESTDIR)" \ 10 | PG_CONFIG="$(PG_CONFIG)" \ 11 | PG_CPPFLAGS="$(PG_CPPFLAGS)" \ 12 | PYTHON="$(PYTHON)" \ 13 | || exit $?; \ 14 | done 15 | 16 | -------------------------------------------------------------------------------- /sql/conflicthandler/Makefile: -------------------------------------------------------------------------------- 1 | 2 | REGRESS = test_merge 3 | REGRESS_OPTS = --load-language=plpgsql --load-language=plpythonu 4 | 5 | PG_CONFIG = pg_config 6 | PGXS = $(shell $(PG_CONFIG) --pgxs) 7 | include $(PGXS) 8 | 9 | test: 10 | make installcheck || { less regression.diffs ; exit 1; } 11 | 12 | ack: 13 | cp results/* expected/ 14 | 15 | -------------------------------------------------------------------------------- /sql/conflicthandler/README: -------------------------------------------------------------------------------- 1 | 2 | Merge function to be used with londiste 'applyfn' handler. 
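The merge_on_time() function must be present in the target database before the handler is
used. Assuming plpythonu is available there, it can be loaded with something like the
following (the database name is only a placeholder), after which the table is registered
as shown below:

    psql -d targetdb -f merge_on_time.sql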
3 | 4 | londiste3 add-table foo --handler=applyfn --handler-arg="func_name=merge_on_time" --handler-arg="func_conf=timefield=modified_date" 5 | 6 | -------------------------------------------------------------------------------- /sql/conflicthandler/merge_on_time.sql: -------------------------------------------------------------------------------- 1 | create or replace function merge_on_time( 2 | fn_conf text, 3 | cur_tick text, 4 | ev_id text, 5 | ev_time text, 6 | ev_txid text, 7 | ev_retry text, 8 | ev_type text, 9 | ev_data text, 10 | ev_extra1 text, 11 | ev_extra2 text, 12 | ev_extra3 text, 13 | ev_extra4 text) 14 | returns text as $$ 15 | # callback function for londiste applyfn handler 16 | try: 17 | import pkgloader 18 | pkgloader.require('skytools', '3.0') 19 | from skytools.plpy_applyrow import ts_conflict_handler 20 | args = [fn_conf, ev_type, ev_data, ev_extra1, ev_extra2, ev_extra3, ev_extra4] 21 | return ts_conflict_handler(SD, args) 22 | except: 23 | import traceback 24 | for ln in traceback.format_exc().split('\n'): 25 | if ln: 26 | plpy.warning(ln) 27 | raise 28 | 29 | $$ language plpythonu; 30 | 31 | -- select merge_on_time('timefield=modified_date', 'I:id_ccard', 'key_user=foo&id_ccard=1&modified_date=2005-01-01', 'ccdb.ccard', '', '', ''); -------------------------------------------------------------------------------- /sql/conflicthandler/sql/test_merge.sql: -------------------------------------------------------------------------------- 1 | 2 | \set ECHO none 3 | \i merge_on_time.sql 4 | \set ECHO all 5 | 6 | set DateStyle='ISO'; 7 | 8 | create table mergetest ( 9 | intcol int4, 10 | txtcol text, 11 | timecol timestamp 12 | ); 13 | 14 | -- insert to empty 15 | select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v1&timecol=2010-09-09+12:12', 'mergetest', null, null, null); 16 | select * from mergetest; 17 | 18 | -- insert to with time earlier 19 | select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v2&timecol=2010-09-08+12:12', 'mergetest', null, null, null); 20 | select * from mergetest; 21 | 22 | -- insert to with time later 23 | select merge_on_time('timefield=timecol', null, null, null, null, null, 'I:intcol', 'intcol=5&txtcol=v3&timecol=2010-09-10+12:12', 'mergetest', null, null, null); 24 | select * from mergetest; 25 | 26 | -------------------------------------------------------------------------------- /sql/dispatch/Makefile: -------------------------------------------------------------------------------- 1 | 2 | REGRESS = test_create_part 3 | REGRESS_OPTS = --load-language=plpgsql --load-language=plpythonu 4 | 5 | PG_CONFIG = pg_config 6 | PGXS = $(shell $(PG_CONFIG) --pgxs) 7 | include $(PGXS) 8 | 9 | test: 10 | make installcheck || { less regression.diffs ; exit 1; } 11 | 12 | ack: 13 | cp results/* expected/ 14 | 15 | -------------------------------------------------------------------------------- /sql/dispatch/sql/test_create_part.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | \set ECHO none 4 | set log_error_verbosity = 'terse'; 5 | set client_min_messages = 'warning'; 6 | 7 | \i create_partition.sql 8 | \set ECHO all 9 | 10 | drop role if exists ptest1; 11 | drop role if exists ptest2; 12 | create group ptest1; 13 | create group ptest2; 14 | 15 | create table events ( 16 | id int4 primary key, 17 | txt text not null, 18 | ctime timestamptz not null default now(), 19 | someval int4 check (someval > 0) 20 | ); 
20 | ); 21 | create index ctime_idx on events (ctime); 22 | 23 | create rule ignore_dups AS 24 | on insert to events 25 | where (exists (select 1 from events 26 | where (events.id = new.id))) 27 | do instead nothing; 28 | 29 | 30 | 31 | grant select,delete on events to ptest1; 32 | grant select,update,delete on events to ptest2 with grant option; 33 | 34 | select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01', 'month'); 35 | select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamptz, 'month'); 36 | 37 | select create_partition('events', 'events_2011_01', 'id', 'ctime', '2011-01-01'::timestamp, 'month'); 38 | 39 | select count(*) from pg_indexes where schemaname='public' and tablename = 'events_2011_01'; 40 | select count(*) from pg_constraint where conrelid = 'public.events_2011_01'::regclass; 41 | select count(*) from pg_rules where schemaname = 'public' and tablename = 'events_2011_01'; 42 | 43 | -- \d events_2011_01 44 | -- \dp events 45 | -- \dp events_2011_01 46 | 47 | -------------------------------------------------------------------------------- /sql/logtriga/Makefile: -------------------------------------------------------------------------------- 1 | 2 | MODULE_big = logtriga 3 | SRCS = logtriga.c textbuf.c 4 | OBJS = $(SRCS:.c=.o) 5 | DATA_built = logtriga.sql 6 | 7 | REGRESS = logtriga 8 | 9 | PG_CONFIG = pg_config 10 | PGXS = $(shell $(PG_CONFIG) --pgxs) 11 | include $(PGXS) 12 | 13 | test: install 14 | make installcheck || { less regression.diffs; exit 1; } 15 | 16 | 17 | -------------------------------------------------------------------------------- /sql/logtriga/README.logtriga: -------------------------------------------------------------------------------- 1 | 2 | logtriga - generic table changes logger 3 | ======================================= 4 | 5 | logtriga provides a generic table change logging trigger. 6 | It prepares a partial SQL statement describing the change and 7 | passes it to a user-supplied query. 8 | 9 | Usage 10 | ----- 11 | 12 | CREATE TRIGGER foo_log AFTER INSERT OR UPDATE OR DELETE ON foo_tbl 13 | FOR EACH ROW EXECUTE PROCEDURE logtriga(column_types, query); 14 | 15 | Here column_types is a string where each character defines the type of 16 | the corresponding column. Known types: 17 | 18 | * k - one of the primary key columns of the table. 19 | * v - data column 20 | * i - uninteresting column, to be ignored. 21 | 22 | The trigger function prepares 2 string arguments for the query and executes it. 23 | 24 | * $1 - Operation type: I/U/D. 25 | * $2 - Partial SQL for event playback. 26 | 27 | * INSERT INTO FOO_TBL (field, list) values (val1, val2) 28 | * UPDATE FOO_TBL SET field1 = val1, field2 = val2 where key1 = kval1 29 | * DELETE FROM FOO_TBL WHERE key1 = keyval1 30 | 31 | The upper-case part is left out. 32 | 33 | Example 34 | ------- 35 | 36 | The following query emulates Slony-I behaviour: 37 | 38 | insert into SL_SCHEMA.sl_log_1 39 | (log_origin, log_xid, log_tableid, 40 | log_actionseq, log_cmdtype, log_cmddata) 41 | values (CLUSTER_IDENT, SL_SCHEMA.getCurrentXid(), TABLE_OID, 42 | nextval('SL_SCHEMA.sl_action_seq'), $1, $2) 43 | 44 | The upper-case strings should be replaced with actual values 45 | on trigger creation.
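A more self-contained sketch, with made-up table and log-table names, of a trigger that
logs changes of a two-column table (first column being the primary key) into a plain
log table:

    CREATE TABLE foo_log (op text, data text);

    CREATE TRIGGER foo_log AFTER INSERT OR UPDATE OR DELETE ON foo_tbl
    FOR EACH ROW EXECUTE PROCEDURE logtriga('kv',
        'insert into foo_log (op, data) values ($1, $2)');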
46 | 47 | 48 | -------------------------------------------------------------------------------- /sql/logtriga/logtriga.sql.in: -------------------------------------------------------------------------------- 1 | 2 | -- usage: logtriga(flds, query) 3 | -- 4 | -- query should include 2 args: 5 | -- $1 - for op type I/U/D, 6 | -- $2 - for op data 7 | 8 | CREATE OR REPLACE FUNCTION logtriga() RETURNS trigger 9 | AS 'MODULE_PATHNAME', 'logtriga' LANGUAGE C; 10 | 11 | -------------------------------------------------------------------------------- /sql/logtriga/textbuf.h: -------------------------------------------------------------------------------- 1 | struct TBuf; 2 | 3 | typedef struct TBuf TBuf; 4 | 5 | TBuf *tbuf_alloc(int start_size); 6 | void tbuf_free(TBuf *tbuf); 7 | int tbuf_get_size(TBuf *tbuf); 8 | void tbuf_reset(TBuf *tbuf); 9 | 10 | const text *tbuf_look_text(TBuf *tbuf); 11 | const char *tbuf_look_cstring(TBuf *tbuf); 12 | 13 | void tbuf_append_cstring(TBuf *tbuf, const char *str); 14 | void tbuf_append_text(TBuf *tbuf, const text *str); 15 | void tbuf_append_char(TBuf *tbuf, char chr); 16 | 17 | text *tbuf_steal_text(TBuf *tbuf); 18 | 19 | void tbuf_encode_cstring(TBuf *tbuf, 20 | const char *str, 21 | const char *encoding); 22 | 23 | void tbuf_encode_data(TBuf *tbuf, 24 | const uint8 *data, int len, 25 | const char *encoding); 26 | 27 | -------------------------------------------------------------------------------- /sql/londiste/Makefile: -------------------------------------------------------------------------------- 1 | 2 | EXTENSION = londiste 3 | 4 | EXT_VERSION = 3.2.4 5 | EXT_OLD_VERSIONS = 3.1 3.1.1 3.1.3 3.1.4 3.1.6 3.2 3.2.3 6 | 7 | base_regress = londiste_provider londiste_subscriber \ 8 | londiste_fkeys londiste_execute londiste_seqs londiste_merge \ 9 | londiste_leaf londiste_create_part 10 | 11 | Contrib_regress = init_noext $(base_regress) 12 | Extension_regress = init_ext $(base_regress) 13 | 14 | include ../common-pgxs.mk 15 | 16 | dox: cleandox 17 | mkdir -p docs/html 18 | mkdir -p docs/sql 19 | $(CATSQL) --ndoc structure/tables.sql > docs/sql/schema.sql 20 | $(CATSQL) --ndoc structure/functions.sql > docs/sql/functions.sql 21 | $(NDOC) $(NDOCARGS) 22 | 23 | -------------------------------------------------------------------------------- /sql/londiste/expected/init_ext.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | create extension londiste from 'unpackaged'; 8 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; 9 | dumpable 10 | ---------- 11 | 4 12 | (1 row) 13 | 14 | drop extension londiste; 15 | create extension londiste; 16 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; 17 | dumpable 18 | ---------- 19 | 4 20 | (1 row) 21 | 22 | -------------------------------------------------------------------------------- /sql/londiste/expected/init_ext_1.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | upgrade_schema 3 | ---------------- 4 | 2 5 | (1 row) 6 | 7 | create extension londiste from 'unpackaged'; 8 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; 9 | dumpable 10 | ---------- 11 | 4 12 | (1 row) 13 | 14 | drop extension londiste; 15 | create extension londiste; 16 | select array_length(extconfig, 1) as dumpable 
from pg_catalog.pg_extension where extname = 'londiste'; 17 | dumpable 18 | ---------- 19 | 4 20 | (1 row) 21 | 22 | -------------------------------------------------------------------------------- /sql/londiste/expected/init_noext.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | upgrade_schema 8 | ---------------- 9 | 0 10 | (1 row) 11 | 12 | upgrade_schema 13 | ---------------- 14 | 0 15 | (1 row) 16 | 17 | -------------------------------------------------------------------------------- /sql/londiste/expected/init_noext_1.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | upgrade_schema 8 | ---------------- 9 | 0 10 | (1 row) 11 | 12 | upgrade_schema 13 | ---------------- 14 | 0 15 | (1 row) 16 | 17 | -------------------------------------------------------------------------------- /sql/londiste/expected/londiste_install.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | upgrade_schema 8 | ---------------- 9 | 0 10 | (1 row) 11 | 12 | upgrade_schema 13 | ---------------- 14 | 0 15 | (1 row) 16 | 17 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.drop_obsolete_partitions.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.drop_obsolete_partitions 3 | ( 4 | in i_parent_table text, 5 | in i_retention_period interval, 6 | in i_partition_period text 7 | ) 8 | returns setof text 9 | as $$ 10 | ------------------------------------------------------------------------------- 11 | -- Function: londiste.drop_obsolete_partitions(3) 12 | -- 13 | -- Drop obsolete partitions of partition-by-date parent table. 14 | -- 15 | -- Parameters: 16 | -- i_parent_table Master table from which partitions are inherited 17 | -- i_retention_period How long to keep partitions around 18 | -- i_partition_period One of: year, month, day, hour 19 | -- 20 | -- Returns: 21 | -- Names of partitions dropped 22 | ------------------------------------------------------------------------------- 23 | declare 24 | _part text; 25 | begin 26 | for _part in 27 | select londiste.list_obsolete_partitions (i_parent_table, i_retention_period, i_partition_period) 28 | loop 29 | execute 'drop table '|| _part; 30 | return next _part; 31 | end loop; 32 | end; 33 | $$ language plpgsql; 34 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.execute_finish.sql: -------------------------------------------------------------------------------- 1 | create or replace function londiste.execute_finish( 2 | in i_queue_name text, 3 | in i_file_name text, 4 | out ret_code int4, 5 | out ret_note text) 6 | as $$ 7 | -- ---------------------------------------------------------------------- 8 | -- Function: londiste.execute_finish(2) 9 | -- 10 | -- Finish execution of DDL. Should be called at the 11 | -- end of the transaction that does the SQL execution. 12 | -- 13 | -- Called-by: 14 | -- Londiste setup tool on root, replay on branches/leafs. 15 | -- 16 | -- Returns: 17 | -- 200 - Proceed. 18 | -- 404 - Current entry not found, execute_start() was not called? 
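--
-- Note:
--      On the root node, finishing also inserts an 'EXECUTE' event into the
--      queue, so that branch/leaf nodes will replay the same script.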
19 | -- ---------------------------------------------------------------------- 20 | declare 21 | is_root boolean; 22 | sql text; 23 | attrs text; 24 | begin 25 | is_root := pgq_node.is_root_node(i_queue_name); 26 | 27 | select execute_sql, execute_attrs 28 | into sql, attrs 29 | from londiste.applied_execute 30 | where execute_file = i_file_name; 31 | if not found then 32 | select 404, 'execute_file called without execute_start' 33 | into ret_code, ret_note; 34 | return; 35 | end if; 36 | 37 | if is_root then 38 | perform pgq.insert_event(i_queue_name, 'EXECUTE', sql, i_file_name, attrs, null, null); 39 | end if; 40 | 41 | select 200, 'Execute finished: ' || i_file_name into ret_code, ret_note; 42 | return; 43 | end; 44 | $$ language plpgsql strict; 45 | 46 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.find_column_types.sql: -------------------------------------------------------------------------------- 1 | create or replace function londiste.find_column_types(tbl text) 2 | returns text as $$ 3 | -- ---------------------------------------------------------------------- 4 | -- Function: londiste.find_column_types(1) 5 | -- 6 | -- Returns column type string for logtriga(). 7 | -- 8 | -- Parameters: 9 | -- tbl - fqname 10 | -- 11 | -- Returns: 12 | -- String of 'kv'. 13 | -- ---------------------------------------------------------------------- 14 | declare 15 | res text; 16 | col record; 17 | tbl_oid oid; 18 | begin 19 | tbl_oid := londiste.find_table_oid(tbl); 20 | res := ''; 21 | for col in 22 | SELECT CASE WHEN k.attname IS NOT NULL THEN 'k' ELSE 'v' END AS type 23 | FROM pg_attribute a LEFT JOIN ( 24 | SELECT k.attname FROM pg_index i, pg_attribute k 25 | WHERE i.indrelid = tbl_oid AND k.attrelid = i.indexrelid 26 | AND i.indisprimary AND k.attnum > 0 AND NOT k.attisdropped 27 | ) k ON (k.attname = a.attname) 28 | WHERE a.attrelid = tbl_oid AND a.attnum > 0 AND NOT a.attisdropped 29 | ORDER BY a.attnum 30 | loop 31 | res := res || col.type; 32 | end loop; 33 | 34 | return res; 35 | end; 36 | $$ language plpgsql strict stable; 37 | 38 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.get_seq_list.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.get_seq_list( 3 | in i_queue_name text, 4 | out seq_name text, 5 | out last_value int8, 6 | out local boolean) 7 | returns setof record as $$ 8 | -- ---------------------------------------------------------------------- 9 | -- Function: londiste.get_seq_list(1) 10 | -- 11 | -- Returns registered seqs on this Londiste node.
12 | -- 13 | -- Result fields: 14 | -- seq_name - fully qualified name of sequence 15 | -- last_value - last globally published value 16 | -- local - is locally registered 17 | -- ---------------------------------------------------------------------- 18 | declare 19 | rec record; 20 | begin 21 | for seq_name, last_value, local in 22 | select s.seq_name, s.last_value, s.local from londiste.seq_info s 23 | where s.queue_name = i_queue_name 24 | order by s.nr, s.seq_name 25 | loop 26 | return next; 27 | end loop; 28 | return; 29 | end; 30 | $$ language plpgsql strict; 31 | 32 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.global_remove_seq.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.global_remove_seq( 3 | in i_queue_name text, in i_seq_name text, 4 | out ret_code int4, out ret_note text) 5 | as $$ 6 | -- ---------------------------------------------------------------------- 7 | -- Function: londiste.global_remove_seq(2) 8 | -- 9 | -- Removes sequence registration in set. 10 | -- 11 | -- Called by: 12 | -- - On root by londiste.local_remove_seq() 13 | -- - Elsewhere by consumer receiving seq remove event 14 | -- 15 | -- Returns: 16 | -- 200 - OK 17 | -- 400 - not found 18 | -- ---------------------------------------------------------------------- 19 | declare 20 | fq_name text; 21 | begin 22 | fq_name := londiste.make_fqname(i_seq_name); 23 | delete from londiste.seq_info 24 | where queue_name = i_queue_name 25 | and seq_name = fq_name; 26 | if not found then 27 | select 400, 'Sequence not found: '||fq_name into ret_code, ret_note; 28 | return; 29 | end if; 30 | if pgq_node.is_root_node(i_queue_name) then 31 | perform londiste.root_notify_change(i_queue_name, 'londiste.remove-seq', fq_name); 32 | end if; 33 | select 200, 'Sequence removed: '||fq_name into ret_code, ret_note; 34 | return; 35 | end; 36 | $$ language plpgsql strict; 37 | 38 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.global_remove_table.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.global_remove_table( 3 | in i_queue_name text, in i_table_name text, 4 | out ret_code int4, out ret_note text) 5 | as $$ 6 | -- ---------------------------------------------------------------------- 7 | -- Function: londiste.global_remove_table(2) 8 | -- 9 | -- Removes tables registration in set. 10 | -- 11 | -- Means that nodes cannot attach to this table anymore. 
12 | -- 13 | -- Called by: 14 | -- - On root by londiste.local_remove_table() 15 | -- - Elsewhere by consumer receiving table remove event 16 | -- 17 | -- Returns: 18 | -- 200 - OK 19 | -- 400 - not found 20 | -- ---------------------------------------------------------------------- 21 | declare 22 | fq_table_name text; 23 | begin 24 | fq_table_name := londiste.make_fqname(i_table_name); 25 | if not pgq_node.is_root_node(i_queue_name) then 26 | perform londiste.local_remove_table(i_queue_name, fq_table_name); 27 | end if; 28 | delete from londiste.table_info 29 | where queue_name = i_queue_name 30 | and table_name = fq_table_name; 31 | if not found then 32 | select 400, 'Table not found: ' || fq_table_name 33 | into ret_code, ret_note; 34 | return; 35 | end if; 36 | select 200, 'Table removed: ' || i_table_name 37 | into ret_code, ret_note; 38 | return; 39 | end; 40 | $$ language plpgsql strict; 41 | 42 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.is_replica_func.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.is_replica_func(func_oid oid) 3 | returns boolean as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: londiste.is_replica_func(1) 6 | -- 7 | -- Returns true if function is a PgQ-based replication functions. 8 | -- This also means it takes queue name as first argument. 9 | -- ---------------------------------------------------------------------- 10 | select count(1) > 0 11 | from pg_proc f join pg_namespace n on (n.oid = f.pronamespace) 12 | where f.oid = $1 and n.nspname = 'pgq' and f.proname in ('sqltriga', 'logutriga'); 13 | $$ language sql strict stable; 14 | 15 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.local_remove_seq.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.local_remove_seq( 3 | in i_queue_name text, in i_seq_name text, 4 | out ret_code int4, out ret_note text) 5 | as $$ 6 | -- ---------------------------------------------------------------------- 7 | -- Function: londiste.local_remove_seq(2) 8 | -- 9 | -- Remove sequence. 
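-- On the root node the sequence is removed from the whole set via
-- londiste.global_remove_seq(); on other nodes only the local
-- registration flag is cleared.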
10 | -- 11 | -- Parameters: 12 | -- i_queue_name - set name 13 | -- i_seq_name - sequence name 14 | -- 15 | -- Returns: 16 | -- 200 - OK 17 | -- 404 - Sequence not found 18 | -- ---------------------------------------------------------------------- 19 | declare 20 | fqname text; 21 | begin 22 | fqname := londiste.make_fqname(i_seq_name); 23 | if pgq_node.is_root_node(i_queue_name) then 24 | select f.ret_code, f.ret_note 25 | into ret_code, ret_note 26 | from londiste.global_remove_seq(i_queue_name, fqname) f; 27 | return; 28 | end if; 29 | update londiste.seq_info 30 | set local = false 31 | where queue_name = i_queue_name 32 | and seq_name = fqname 33 | and local; 34 | if not found then 35 | select 404, 'Sequence not found: '||fqname into ret_code, ret_note; 36 | return; 37 | end if; 38 | 39 | select 200, 'Sequence removed: '||fqname into ret_code, ret_note; 40 | return; 41 | end; 42 | $$ language plpgsql strict; 43 | 44 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.local_set_skip_truncate.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.local_set_skip_truncate( 3 | in i_queue_name text, 4 | in i_table text, 5 | in i_value bool, 6 | out ret_code int4, 7 | out ret_note text) 8 | returns record as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: londiste.local_set_skip_truncate(3) 11 | -- 12 | -- Change skip_truncate flag for table. 13 | -- ---------------------------------------------------------------------- 14 | begin 15 | update londiste.table_info 16 | set skip_truncate = i_value 17 | where queue_name = i_queue_name 18 | and table_name = i_table; 19 | if found then 20 | select 200, 'skip_truncate=' || i_value::text 21 | into ret_code, ret_note; 22 | else 23 | select 404, 'table not found: ' || i_table 24 | into ret_code, ret_note; 25 | end if; 26 | return; 27 | end; 28 | $$ language plpgsql; 29 | 30 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.local_set_table_attrs.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.local_set_table_attrs( 3 | in i_queue_name text, 4 | in i_table_name text, 5 | in i_table_attrs text, 6 | out ret_code int4, 7 | out ret_note text) 8 | as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: londiste.local_set_table_attrs(3) 11 | -- 12 | -- Store urlencoded table attributes. 
13 | -- 14 | -- Parameters: 15 | -- i_queue_name - cascaded queue name 16 | -- i_table - table name 17 | -- i_table_attrs - urlencoded attributes 18 | -- ---------------------------------------------------------------------- 19 | begin 20 | update londiste.table_info 21 | set table_attrs = i_table_attrs 22 | where queue_name = i_queue_name 23 | and table_name = i_table_name 24 | and local; 25 | if found then 26 | select 200, i_table_name || ': Table attributes stored' 27 | into ret_code, ret_note; 28 | else 29 | select 404, 'no such local table: ' || i_table_name 30 | into ret_code, ret_note; 31 | end if; 32 | return; 33 | end; 34 | $$ language plpgsql; 35 | 36 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.local_set_table_state.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.local_set_table_state( 3 | in i_queue_name text, 4 | in i_table_name text, 5 | in i_snapshot text, 6 | in i_merge_state text, 7 | out ret_code int4, 8 | out ret_note text) 9 | as $$ 10 | -- ---------------------------------------------------------------------- 11 | -- Function: londiste.local_set_table_state(4) 12 | -- 13 | -- Change table state. 14 | -- 15 | -- Parameters: 16 | -- i_queue_name - cascaded queue name 17 | -- i_table - table name 18 | -- i_snapshot - optional remote snapshot info 19 | -- i_merge_state - merge state 20 | -- ---------------------------------------------------------------------- 21 | declare 22 | _tbl text; 23 | begin 24 | _tbl = londiste.make_fqname(i_table_name); 25 | 26 | update londiste.table_info 27 | set custom_snapshot = i_snapshot, 28 | merge_state = i_merge_state 29 | where queue_name = i_queue_name 30 | and table_name = _tbl 31 | and local; 32 | if not found then 33 | select 404, 'No such table: ' || _tbl 34 | into ret_code, ret_note; 35 | return; 36 | end if; 37 | 38 | select 200, 'Table ' || _tbl || ' state set to ' 39 | || coalesce(quote_literal(i_merge_state), 'NULL') 40 | into ret_code, ret_note; 41 | return; 42 | end; 43 | $$ language plpgsql; 44 | 45 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.local_set_table_struct.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.local_set_table_struct( 3 | in i_queue_name text, 4 | in i_table_name text, 5 | in i_dropped_ddl text, 6 | out ret_code int4, 7 | out ret_note text) 8 | as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: londiste.local_set_table_struct(3) 11 | -- 12 | -- Store dropped table struct temporarily. 
13 | -- 14 | -- Parameters: 15 | -- i_queue_name - cascaded queue name 16 | -- i_table_name - table name 17 | -- i_dropped_ddl - dropped DDL statements 18 | -- ---------------------------------------------------------------------- 19 | begin 20 | update londiste.table_info 21 | set dropped_ddl = i_dropped_ddl 22 | where queue_name = i_queue_name 23 | and table_name = i_table_name 24 | and local; 25 | if found then 26 | select 200, 'Table struct stored' 27 | into ret_code, ret_note; 28 | else 29 | select 404, 'no such local table: '||i_table_name 30 | into ret_code, ret_note; 31 | 32 | end if; 33 | return; 34 | end; 35 | $$ language plpgsql; 36 | 37 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.make_fqname.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.make_fqname(i_name text) 3 | returns text as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: londiste.make_fqname(1) 6 | -- 7 | -- Convert a name to a schema-qualified one. 8 | -- 9 | -- First dot is taken as schema separator. 10 | -- 11 | -- If schema is missing, 'public' is assumed. 12 | -- 13 | -- Parameters: 14 | -- i_name - object name. 15 | -- 16 | -- Returns: 17 | -- Schema qualified name. 18 | -- ---------------------------------------------------------------------- 19 | begin 20 | if position('.' in i_name) > 0 then 21 | return i_name; 22 | else 23 | return 'public.' || i_name; 24 | end if; 25 | end; 26 | $$ language plpgsql strict immutable; 27 | 28 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.periodic_maintenance.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.periodic_maintenance() 3 | returns integer as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: londiste.periodic_maintenance(0) 6 | -- 7 | -- Clean up stale data; currently removes old EXECUTE entries. 8 | -- ---------------------------------------------------------------------- 9 | begin 10 | 11 | -- clean old EXECUTE entries 12 | delete from londiste.applied_execute 13 | where execute_time < now() - '3 months'::interval; 14 | 15 | return 0; 16 | end; 17 | $$ language plpgsql; -- need admin access 18 | 19 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.quote_fqname.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.quote_fqname(i_name text) 3 | returns text as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: londiste.quote_fqname(1) 6 | -- 7 | -- Quote fully-qualified object name for SQL. 8 | -- 9 | -- First dot is taken as schema separator. 10 | -- 11 | -- If schema is missing, 'public' is assumed. 12 | -- 13 | -- Parameters: 14 | -- i_name - fully qualified object name. 15 | -- 16 | -- Returns: 17 | -- Quoted name. 18 | -- ---------------------------------------------------------------------- 19 | declare 20 | res text; 21 | pos integer; 22 | s text; 23 | n text; 24 | begin 25 | pos := position('.' in i_name); 26 | if pos > 0 then 27 | s := substring(i_name for pos - 1); 28 | n := substring(i_name from pos + 1); 29 | else 30 | s := 'public'; 31 | n := i_name; 32 | end if; 33 | return quote_ident(s) || '.'
|| quote_ident(n); 34 | end; 35 | $$ language plpgsql strict immutable; 36 | 37 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.root_notify_change.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.root_notify_change(i_queue_name text, i_ev_type text, i_ev_data text) 3 | returns integer as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: londiste.root_notify_change(3) 6 | -- 7 | -- Send event about change in root downstream. 8 | -- ---------------------------------------------------------------------- 9 | declare 10 | que text; 11 | ntype text; 12 | begin 13 | 14 | if not coalesce(pgq_node.is_root_node(i_queue_name), false) then 15 | raise exception 'only root node can send events'; 16 | end if; 17 | perform pgq.insert_event(i_queue_name, i_ev_type, i_ev_data); 18 | 19 | return 1; 20 | end; 21 | $$ language plpgsql; 22 | 23 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.split_fqname.sql: -------------------------------------------------------------------------------- 1 | create or replace function londiste.split_fqname( 2 | in i_fqname text, 3 | out schema_part text, 4 | out name_part text) 5 | as $$ 6 | -- ---------------------------------------------------------------------- 7 | -- Function: londiste.split_fqname(1) 8 | -- 9 | -- Split fqname to schema and name parts. 10 | -- 11 | -- First dot is taken as schema separator. 12 | -- 13 | -- If schema is missing, 'public' is assumed. 14 | -- 15 | -- Parameters: 16 | -- i_fqname - object name. 17 | -- ---------------------------------------------------------------------- 18 | declare 19 | dot integer; 20 | begin 21 | dot = position('.' in i_fqname); 22 | if dot > 0 then 23 | schema_part = substring(i_fqname for dot - 1); 24 | name_part = substring(i_fqname from dot + 1); 25 | else 26 | schema_part = 'public'; 27 | name_part = i_fqname; 28 | end if; 29 | return; 30 | end; 31 | $$ language plpgsql strict immutable; 32 | 33 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.table_info_trigger.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.table_info_trigger() 3 | returns trigger as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: londiste.table_info_trigger(0) 6 | -- 7 | -- Trigger on londiste.table_info. Cleans triggers from tables 8 | -- when table is removed from londiste.table_info. 9 | -- ---------------------------------------------------------------------- 10 | begin 11 | if TG_OP = 'DELETE' then 12 | perform londiste.drop_table_triggers(OLD.queue_name, OLD.table_name); 13 | end if; 14 | return OLD; 15 | end; 16 | $$ language plpgsql; 17 | 18 | -------------------------------------------------------------------------------- /sql/londiste/functions/londiste.version.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function londiste.version() 3 | returns text as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: londiste.version(0) 6 | -- 7 | -- Returns version string for londiste. ATM it is based on SkyTools 8 | -- version and only bumped when database code changes. 
9 | -- ---------------------------------------------------------------------- 10 | begin 11 | return '3.2.4'; 12 | end; 13 | $$ language plpgsql; 14 | 15 | -------------------------------------------------------------------------------- /sql/londiste/londiste.control: -------------------------------------------------------------------------------- 1 | # Londiste extensions 2 | comment = 'Londiste Replication' 3 | default_version = '3.2.4' 4 | relocatable = false 5 | superuser = true 6 | schema = 'pg_catalog' 7 | requires = 'pgq_node' 8 | 9 | -------------------------------------------------------------------------------- /sql/londiste/sql/init_ext.sql: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | 3 | set log_error_verbosity = 'terse'; 4 | set client_min_messages = 'fatal'; 5 | create language plpgsql; 6 | set client_min_messages = 'warning'; 7 | 8 | create extension pgq; 9 | create extension pgq_node; 10 | 11 | \i londiste.sql 12 | 13 | \set ECHO all 14 | 15 | create extension londiste from 'unpackaged'; 16 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; 17 | 18 | drop extension londiste; 19 | 20 | create extension londiste; 21 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'londiste'; 22 | 23 | -------------------------------------------------------------------------------- /sql/londiste/sql/init_noext.sql: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | 3 | set log_error_verbosity = 'terse'; 4 | set client_min_messages = 'fatal'; 5 | create language plpgsql; 6 | set client_min_messages = 'warning'; 7 | 8 | -- \i ../txid/txid.sql 9 | \i ../pgq/pgq.sql 10 | \i ../pgq_node/pgq_node.sql 11 | 12 | \i londiste.sql 13 | 14 | \set ECHO all 15 | 16 | -------------------------------------------------------------------------------- /sql/londiste/sql/londiste_execute.sql: -------------------------------------------------------------------------------- 1 | 2 | set log_error_verbosity = 'terse'; 3 | 4 | select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', false); 5 | select * from londiste.execute_start('branch_set', 'DDL-A.sql', 'drop all', false); 6 | 7 | select * from londiste.execute_finish('branch_set', 'DDL-A.sql'); 8 | select * from londiste.execute_finish('branch_set', 'DDL-A.sql'); 9 | select * from londiste.execute_finish('branch_set', 'DDL-XXX.sql'); 10 | 11 | select * from londiste.execute_start('branch_set', 'DDL-B.sql', 'drop all', true); 12 | select * from londiste.execute_start('branch_set', 'DDL-B.sql', 'drop all', true); 13 | 14 | 15 | 16 | select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); 17 | select * from londiste.execute_start('aset', 'DDL-root.sql', 'drop all', true); 18 | select * from londiste.execute_finish('aset', 'DDL-root.sql'); 19 | select * from londiste.execute_finish('aset', 'DDL-root.sql'); 20 | 21 | -------------------------------------------------------------------------------- /sql/londiste/sql/londiste_install.sql: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | 3 | set log_error_verbosity = 'terse'; 4 | set client_min_messages = 'fatal'; 5 | create language plpgsql; 6 | set client_min_messages = 'warning'; 7 | 8 | -- \i ../txid/txid.sql 9 | \i ../pgq/pgq.sql 10 | \i ../pgq_node/pgq_node.sql 11 | 12 | \i londiste.sql 13 | 14 | \set ECHO all 15 | 
16 | -------------------------------------------------------------------------------- /sql/londiste/sql/londiste_subscriber.sql: -------------------------------------------------------------------------------- 1 | 2 | set client_min_messages = 'warning'; 3 | \set VERBOSITY 'terse' 4 | 5 | -- 6 | -- tables 7 | -- 8 | create table slavedata ( 9 | id serial primary key, 10 | data text 11 | ); 12 | 13 | select current_database(); 14 | 15 | select * from pgq_node.register_location('branch_set', 'snode', 'dbname=db', false); 16 | select * from pgq_node.register_location('branch_set', 'pnode', 'dbname=db2', false); 17 | select * from pgq_node.create_node('branch_set', 'branch', 'snode', 'londiste_branch', 'pnode', 100, null::text); 18 | 19 | select * from londiste.local_show_missing('branch_set'); 20 | 21 | select * from londiste.local_add_table('branch_set', 'public.slavedata'); 22 | select * from londiste.global_add_table('branch_set', 'public.slavedata'); 23 | select * from londiste.local_add_table('branch_set', 'public.slavedata'); 24 | select * from londiste.global_add_table('branch_set', 'public.tmp'); 25 | select * from londiste.get_table_list('branch_set'); 26 | 27 | select * from londiste.local_set_table_state('branch_set', 'public.slavedata', null, 'in-copy'); 28 | select * from londiste.get_table_list('branch_set'); 29 | 30 | select * from londiste.global_remove_table('branch_set', 'public.tmp'); 31 | select * from londiste.local_remove_table('branch_set', 'public.slavedata'); 32 | select * from londiste.local_remove_table('branch_set', 'public.slavedata'); 33 | select * from londiste.get_table_list('branch_set'); 34 | 35 | select * from londiste.local_show_missing('branch_set'); 36 | 37 | -------------------------------------------------------------------------------- /sql/londiste/structure/ext_postproc.sql: -------------------------------------------------------------------------------- 1 | 2 | -- tag data objects as dumpable 3 | 4 | SELECT pg_catalog.pg_extension_config_dump('londiste.table_info', ''); 5 | SELECT pg_catalog.pg_extension_config_dump('londiste.seq_info', ''); 6 | SELECT pg_catalog.pg_extension_config_dump('londiste.applied_execute', ''); 7 | SELECT pg_catalog.pg_extension_config_dump('londiste.pending_fkeys', ''); 8 | 9 | 10 | -------------------------------------------------------------------------------- /sql/londiste/structure/ext_unpackaged.sql: -------------------------------------------------------------------------------- 1 | ALTER EXTENSION londiste ADD SCHEMA londiste; 2 | 3 | ALTER EXTENSION londiste ADD TABLE londiste.table_info; 4 | ALTER EXTENSION londiste ADD TABLE londiste.seq_info; 5 | ALTER EXTENSION londiste ADD TABLE londiste.applied_execute; 6 | ALTER EXTENSION londiste ADD TABLE londiste.pending_fkeys; 7 | 8 | -------------------------------------------------------------------------------- /sql/londiste/structure/grants.sql: -------------------------------------------------------------------------------- 1 | 2 | grant usage on schema londiste to public; 3 | grant select on londiste.table_info to public; 4 | grant select on londiste.seq_info to public; 5 | grant select on londiste.pending_fkeys to public; 6 | grant select on londiste.applied_execute to public; 7 | 8 | -------------------------------------------------------------------------------- /sql/londiste/structure/install.sql: -------------------------------------------------------------------------------- 1 | \i structure/tables.sql 2 | \i structure/functions.sql 3 | \i 
structure/triggers.sql 4 | \i structure/grants.sql 5 | -------------------------------------------------------------------------------- /sql/londiste/structure/triggers.sql: -------------------------------------------------------------------------------- 1 | 2 | create trigger table_info_trigger_sync before delete on londiste.table_info 3 | for each row execute procedure londiste.table_info_trigger(); 4 | 5 | -------------------------------------------------------------------------------- /sql/londiste/structure/upgrade.sql: -------------------------------------------------------------------------------- 1 | \i structure/functions.sql 2 | -------------------------------------------------------------------------------- /sql/pgq/README.pgq: -------------------------------------------------------------------------------- 1 | 2 | Schema overview 3 | =============== 4 | 5 | pgq.consumer consumer name <> id mapping 6 | pgq.queue queue information 7 | pgq.subscription consumer registrations 8 | pgq.tick snapshots that group events into batches 9 | pgq.retry_queue events to be retried 10 | pgq.failed_queue events that have failed 11 | pgq.event_* data tables 12 | 13 | -------------------------------------------------------------------------------- /sql/pgq/expected/clean.out: -------------------------------------------------------------------------------- 1 | \set VERBOSITY 'terse' 2 | set client_min_messages = 'warning'; 3 | drop schema pgq cascade; 4 | drop sequence tmptest_seq; 5 | drop table custom_expr; 6 | drop table custom_expr2; 7 | drop table custom_fields; 8 | drop table custom_fields2; 9 | drop table custom_pkey; 10 | drop table deny_test; 11 | drop table nopkey; 12 | drop table nopkey2; 13 | drop table rtest; 14 | drop table if exists trunctrg1; 15 | drop table if exists trunctrg2; 16 | drop table ucustom_pkey; 17 | drop table udata; 18 | drop table when_test; 19 | -------------------------------------------------------------------------------- /sql/pgq/expected/pgq_init_ext.out: -------------------------------------------------------------------------------- 1 | -- create noext schema 2 | \set ECHO none 3 | upgrade_schema 4 | ---------------- 5 | 0 6 | (1 row) 7 | 8 | create_queue 9 | -------------- 10 | 1 11 | (1 row) 12 | 13 | -- convert to extension 14 | create extension pgq from 'unpackaged'; 15 | select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; 16 | array_length 17 | -------------- 18 | 7 19 | (1 row) 20 | 21 | select pgq.create_queue('testqueue2'); 22 | create_queue 23 | -------------- 24 | 1 25 | (1 row) 26 | 27 | --drop extension pgq; -- will fail 28 | select pgq.drop_queue('testqueue2'); 29 | drop_queue 30 | ------------ 31 | 1 32 | (1 row) 33 | 34 | select pgq.drop_queue('testqueue1'); 35 | drop_queue 36 | ------------ 37 | 1 38 | (1 row) 39 | 40 | -- drop schema failure 41 | drop extension pgq; 42 | -- create clean schema 43 | create extension pgq; 44 | select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; 45 | array_length 46 | -------------- 47 | 7 48 | (1 row) 49 | 50 | -------------------------------------------------------------------------------- /sql/pgq/expected/pgq_init_noext.out: -------------------------------------------------------------------------------- 1 | \set ECHO none 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | -------------------------------------------------------------------------------- /sql/pgq/expected/pgq_init_upgrade.out: 
-------------------------------------------------------------------------------- 1 | \set ECHO none 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | upgrade_schema 8 | ---------------- 9 | 0 10 | (1 row) 11 | 12 | -------------------------------------------------------------------------------- /sql/pgq/expected/pgq_init_upgrade_1.out: -------------------------------------------------------------------------------- 1 | \set ECHO none 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | upgrade_schema 8 | ---------------- 9 | 3 10 | (1 row) 11 | 12 | -------------------------------------------------------------------------------- /sql/pgq/expected/pgq_perms.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | drop role if exists pgq_test_producer; 3 | drop role if exists pgq_test_consumer; 4 | drop role if exists pgq_test_admin; 5 | create role pgq_test_consumer with login in role pgq_reader; 6 | create role pgq_test_producer with login in role pgq_writer; 7 | create role pgq_test_admin with login in role pgq_admin; 8 | \c - pgq_test_admin 9 | select * from pgq.create_queue('pqueue'); -- ok 10 | create_queue 11 | -------------- 12 | 1 13 | (1 row) 14 | 15 | \c - pgq_test_producer 16 | select * from pgq.create_queue('pqueue'); -- fail 17 | ERROR: permission denied for function create_queue 18 | select * from pgq.insert_event('pqueue', 'test', 'data'); -- ok 19 | insert_event 20 | -------------- 21 | 1 22 | (1 row) 23 | 24 | select * from pgq.register_consumer('pqueue', 'prod'); -- fail 25 | ERROR: permission denied for function register_consumer 26 | \c - pgq_test_consumer 27 | select * from pgq.create_queue('pqueue'); -- fail 28 | ERROR: permission denied for function create_queue 29 | select * from pgq.insert_event('pqueue', 'test', 'data'); -- fail 30 | ERROR: permission denied for function insert_event 31 | select * from pgq.register_consumer('pqueue', 'cons'); -- ok 32 | register_consumer 33 | ------------------- 34 | 1 35 | (1 row) 36 | 37 | select * from pgq.next_batch('pqueue', 'cons'); -- ok 38 | next_batch 39 | ------------ 40 | 41 | (1 row) 42 | 43 | -------------------------------------------------------------------------------- /sql/pgq/expected/trunctrg.out: -------------------------------------------------------------------------------- 1 | \set VERBOSITY 'terse' 2 | set client_min_messages = 'warning'; 3 | -- test sqltriga truncate 4 | create table trunctrg1 ( 5 | dat1 text not null primary key, 6 | dat2 int2 not null, 7 | dat3 text 8 | ); 9 | create trigger trunc1_trig after truncate on trunctrg1 10 | for each statement execute procedure pgq.sqltriga('que3'); 11 | truncate trunctrg1; 12 | WARNING: insert_event(que3, R, , public.trunctrg1) 13 | -- test logutriga truncate 14 | create table trunctrg2 ( 15 | dat1 text not null primary key, 16 | dat2 int2 not null, 17 | dat3 text 18 | ); 19 | create trigger trunc2_trig after truncate on trunctrg2 20 | for each statement execute procedure pgq.logutriga('que3'); 21 | truncate trunctrg2; 22 | WARNING: insert_event(que3, R, , public.trunctrg2) 23 | -- test deny 24 | create trigger deny_triga2 after truncate on trunctrg2 25 | for each statement execute procedure pgq.logutriga('noqueue', 'deny'); 26 | truncate trunctrg2; 27 | ERROR: Table 'public.trunctrg2' to queue 'noqueue': change not allowed (R) 28 | -------------------------------------------------------------------------------- /sql/pgq/functions/pgq.current_event_table.sql: 
-------------------------------------------------------------------------------- 1 | create or replace function pgq.current_event_table(x_queue_name text) 2 | returns text as $$ 3 | -- ---------------------------------------------------------------------- 4 | -- Function: pgq.current_event_table(1) 5 | -- 6 | -- Return the active event table for a particular queue. 7 | -- Events can be added to it without going via functions, 8 | -- e.g. by COPY. 9 | -- 10 | -- If the queue is disabled and GUC session_replication_role <> 'replica' 11 | -- then an exception is raised. 12 | -- 13 | -- In other words, the event table of a disabled queue 14 | -- is returned only on a replica. 15 | -- 16 | -- Note: 17 | -- The result is valid only during the current transaction. 18 | -- 19 | -- Permissions: 20 | -- Actual insertion requires superuser access. 21 | -- 22 | -- Parameters: 23 | -- x_queue_name - Queue name. 24 | -- ---------------------------------------------------------------------- 25 | declare 26 | res text; 27 | disabled boolean; 28 | begin 29 | select queue_data_pfx || '_' || queue_cur_table::text, 30 | queue_disable_insert 31 | into res, disabled 32 | from pgq.queue where queue_name = x_queue_name; 33 | if not found then 34 | raise exception 'Event queue not found'; 35 | end if; 36 | if disabled then 37 | if current_setting('session_replication_role') <> 'replica' then 38 | raise exception 'Writing to queue disabled'; 39 | end if; 40 | end if; 41 | return res; 42 | end; 43 | $$ language plpgsql; -- no perms needed 44 | 45 | -------------------------------------------------------------------------------- /sql/pgq/functions/pgq.finish_batch.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq.finish_batch( 3 | x_batch_id bigint) 4 | returns integer as $$ 5 | -- ---------------------------------------------------------------------- 6 | -- Function: pgq.finish_batch(1) 7 | -- 8 | -- Closes a batch. No more operations can be done with events 9 | -- of this batch. 10 | -- 11 | -- Parameters: 12 | -- x_batch_id - id of batch. 13 | -- 14 | -- Returns: 15 | -- 1 if batch was found, 0 otherwise. 16 | -- Calls: 17 | -- None 18 | -- Tables directly manipulated: 19 | -- update - pgq.subscription 20 | -- ---------------------------------------------------------------------- 21 | begin 22 | update pgq.subscription 23 | set sub_active = now(), 24 | sub_last_tick = sub_next_tick, 25 | sub_next_tick = null, 26 | sub_batch = null 27 | where sub_batch = x_batch_id; 28 | if not found then 29 | raise warning 'finish_batch: batch % not found', x_batch_id; 30 | return 0; 31 | end if; 32 | 33 | return 1; 34 | end; 35 | $$ language plpgsql security definer; 36 | 37 | -------------------------------------------------------------------------------- /sql/pgq/functions/pgq.get_batch_events.sql: -------------------------------------------------------------------------------- 1 | create or replace function pgq.get_batch_events( 2 | in x_batch_id bigint, 3 | out ev_id bigint, 4 | out ev_time timestamptz, 5 | out ev_txid bigint, 6 | out ev_retry int4, 7 | out ev_type text, 8 | out ev_data text, 9 | out ev_extra1 text, 10 | out ev_extra2 text, 11 | out ev_extra3 text, 12 | out ev_extra4 text) 13 | returns setof record as $$ 14 | -- ---------------------------------------------------------------------- 15 | -- Function: pgq.get_batch_events(1) 16 | -- 17 | -- Get all events in batch. 18 | -- 19 | -- Parameters: 20 | -- x_batch_id - ID of active batch.
21 | -- 22 | -- Returns: 23 | -- List of events. 24 | -- ---------------------------------------------------------------------- 25 | declare 26 | sql text; 27 | begin 28 | sql := pgq.batch_event_sql(x_batch_id); 29 | for ev_id, ev_time, ev_txid, ev_retry, ev_type, ev_data, 30 | ev_extra1, ev_extra2, ev_extra3, ev_extra4 31 | in execute sql 32 | loop 33 | return next; 34 | end loop; 35 | return; 36 | end; 37 | $$ language plpgsql; -- no perms needed 38 | 39 | 40 | -------------------------------------------------------------------------------- /sql/pgq/functions/pgq.quote_fqname.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq.quote_fqname(i_name text) 3 | returns text as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: pgq.quote_fqname(1) 6 | -- 7 | -- Quote fully-qualified object name for SQL. 8 | -- 9 | -- First dot is taken as schema separator. 10 | -- 11 | -- If schema is missing, 'public' is assumed. 12 | -- 13 | -- Parameters: 14 | -- i_name - fully qualified object name. 15 | -- 16 | -- Returns: 17 | -- Quoted name. 18 | -- ---------------------------------------------------------------------- 19 | declare 20 | res text; 21 | pos integer; 22 | s text; 23 | n text; 24 | begin 25 | pos := position('.' in i_name); 26 | if pos > 0 then 27 | s := substring(i_name for pos - 1); 28 | n := substring(i_name from pos + 1); 29 | else 30 | s := 'public'; 31 | n := i_name; 32 | end if; 33 | return quote_ident(s) || '.' || quote_ident(n); 34 | end; 35 | $$ language plpgsql strict immutable; 36 | 37 | -------------------------------------------------------------------------------- /sql/pgq/functions/pgq.tune_storage.sql: -------------------------------------------------------------------------------- 1 | create or replace function pgq.tune_storage(i_queue_name text) 2 | returns integer as $$ 3 | -- ---------------------------------------------------------------------- 4 | -- Function: pgq.tune_storage(1) 5 | -- 6 | -- Tunes storage settings for queue data tables. 7 | -- ---------------------------------------------------------------------- 8 | declare 9 | tbl text; 10 | tbloid oid; 11 | q record; 12 | i int4; 13 | sql text; 14 | pgver int4; 15 | begin 16 | pgver := current_setting('server_version_num'); 17 | 18 | select * into q 19 | from pgq.queue where queue_name = i_queue_name; 20 | if not found then 21 | return 0; 22 | end if; 23 | 24 | for i in 0 ..
(q.queue_ntables - 1) loop 25 | tbl := q.queue_data_pfx || '_' || i::text; 26 | 27 | -- set fillfactor 28 | sql := 'alter table ' || tbl || ' set (fillfactor = 100'; 29 | 30 | -- autovacuum for 8.4+ 31 | if pgver >= 80400 then 32 | sql := sql || ', autovacuum_enabled=off, toast.autovacuum_enabled =off'; 33 | end if; 34 | sql := sql || ')'; 35 | execute sql; 36 | 37 | -- autovacuum for 8.3 38 | if pgver < 80400 then 39 | tbloid := tbl::regclass::oid; 40 | delete from pg_catalog.pg_autovacuum where vacrelid = tbloid; 41 | insert into pg_catalog.pg_autovacuum values (tbloid, false, -1,-1,-1,-1,-1,-1,-1,-1); 42 | end if; 43 | end loop; 44 | 45 | return 1; 46 | end; 47 | $$ language plpgsql strict; 48 | 49 | -------------------------------------------------------------------------------- /sql/pgq/functions/pgq.upgrade_schema.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq.upgrade_schema() 3 | returns int4 as $$ 4 | -- updates table structure if necessary 5 | declare 6 | cnt int4 = 0; 7 | begin 8 | 9 | -- pgq.subscription.sub_last_tick: NOT NULL -> NULL 10 | perform 1 from information_schema.columns 11 | where table_schema = 'pgq' 12 | and table_name = 'subscription' 13 | and column_name ='sub_last_tick' 14 | and is_nullable = 'NO'; 15 | if found then 16 | alter table pgq.subscription 17 | alter column sub_last_tick 18 | drop not null; 19 | cnt := cnt + 1; 20 | end if; 21 | 22 | -- create roles 23 | perform 1 from pg_catalog.pg_roles where rolname = 'pgq_reader'; 24 | if not found then 25 | create role pgq_reader; 26 | cnt := cnt + 1; 27 | end if; 28 | perform 1 from pg_catalog.pg_roles where rolname = 'pgq_writer'; 29 | if not found then 30 | create role pgq_writer; 31 | cnt := cnt + 1; 32 | end if; 33 | perform 1 from pg_catalog.pg_roles where rolname = 'pgq_admin'; 34 | if not found then 35 | create role pgq_admin in role pgq_reader, pgq_writer; 36 | cnt := cnt + 1; 37 | end if; 38 | 39 | return cnt; 40 | end; 41 | $$ language plpgsql; 42 | 43 | 44 | -------------------------------------------------------------------------------- /sql/pgq/functions/pgq.version.sql: -------------------------------------------------------------------------------- 1 | create or replace function pgq.version() 2 | returns text as $$ 3 | -- ---------------------------------------------------------------------- 4 | -- Function: pgq.version(0) 5 | -- 6 | -- Returns version string for pgq. ATM it is based on SkyTools 7 | -- version and only bumped when database code changes. 8 | -- ---------------------------------------------------------------------- 9 | begin 10 | return '3.2.6'; 11 | end; 12 | $$ language plpgsql; 13 | 14 | -------------------------------------------------------------------------------- /sql/pgq/lowlevel/Makefile: -------------------------------------------------------------------------------- 1 | 2 | MODULE_big = pgq_lowlevel 3 | DATA = pgq_lowlevel.sql 4 | 5 | SRCS = insert_event.c 6 | OBJS = $(SRCS:.c=.o) 7 | 8 | PG_CONFIG = pg_config 9 | PGXS = $(shell $(PG_CONFIG) --pgxs) 10 | include $(PGXS) 11 | 12 | -------------------------------------------------------------------------------- /sql/pgq/lowlevel/pgq_lowlevel.sql: -------------------------------------------------------------------------------- 1 | 2 | -- ---------------------------------------------------------------------- 3 | -- Function: pgq.insert_event_raw(11) 4 | -- 5 | -- Actual event insertion. Used also by retry queue maintenance. 
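--
-- Example (a sketch; queue, type and payload values below are placeholders):
-- a direct low-level insert, letting ev_id come from the sequence, would be
--
--     select pgq.insert_event_raw('myqueue', null, now(),
--                 null, null, 'mytype', 'payload data',
--                 null, null, null, null);
--
-- Regular producers should use the pgq.insert_event() wrappers instead.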
6 | -- 7 | -- Parameters: 8 | -- queue_name - Name of the queue 9 | -- ev_id - Event ID. If NULL, will be taken from seq. 10 | -- ev_time - Event creation time. 11 | -- ev_owner - Subscription ID when retry event. If NULL, the event is for everybody. 12 | -- ev_retry - Retry count. NULL for first-time events. 13 | -- ev_type - user data 14 | -- ev_data - user data 15 | -- ev_extra1 - user data 16 | -- ev_extra2 - user data 17 | -- ev_extra3 - user data 18 | -- ev_extra4 - user data 19 | -- 20 | -- Returns: 21 | -- Event ID. 22 | -- ---------------------------------------------------------------------- 23 | CREATE OR REPLACE FUNCTION pgq.insert_event_raw( 24 | queue_name text, ev_id bigint, ev_time timestamptz, 25 | ev_owner integer, ev_retry integer, ev_type text, ev_data text, 26 | ev_extra1 text, ev_extra2 text, ev_extra3 text, ev_extra4 text) 27 | RETURNS int8 AS '$libdir/pgq_lowlevel', 'pgq_insert_event_raw' LANGUAGE C; 28 | 29 | -------------------------------------------------------------------------------- /sql/pgq/pgq.control: -------------------------------------------------------------------------------- 1 | # pgq extension 2 | comment = 'Generic queue for PostgreSQL' 3 | default_version = '3.2.6' 4 | relocatable = false 5 | superuser = true 6 | schema = 'pg_catalog' 7 | 8 | -------------------------------------------------------------------------------- /sql/pgq/sql/clean.sql: -------------------------------------------------------------------------------- 1 | \set VERBOSITY 'terse' 2 | set client_min_messages = 'warning'; 3 | 4 | drop schema pgq cascade; 5 | 6 | drop sequence tmptest_seq; 7 | 8 | drop table custom_expr; 9 | drop table custom_expr2; 10 | drop table custom_fields; 11 | drop table custom_fields2; 12 | drop table custom_pkey; 13 | drop table deny_test; 14 | drop table nopkey; 15 | drop table nopkey2; 16 | drop table rtest; 17 | drop table if exists trunctrg1; 18 | drop table if exists trunctrg2; 19 | drop table ucustom_pkey; 20 | drop table udata; 21 | drop table when_test; 22 | 23 | -------------------------------------------------------------------------------- /sql/pgq/sql/pgq_init_ext.sql: -------------------------------------------------------------------------------- 1 | 2 | -- create noext schema 3 | \set ECHO none 4 | \set VERBOSITY 'terse' 5 | set client_min_messages = 'warning'; 6 | \i structure/install.sql 7 | select pgq.create_queue('testqueue1'); 8 | \set ECHO all 9 | -- convert to extension 10 | create extension pgq from 'unpackaged'; 11 | select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; 12 | 13 | select pgq.create_queue('testqueue2'); 14 | --drop extension pgq; -- will fail 15 | select pgq.drop_queue('testqueue2'); 16 | select pgq.drop_queue('testqueue1'); 17 | 18 | -- drop schema failure 19 | drop extension pgq; 20 | 21 | -- create clean schema 22 | create extension pgq; 23 | 24 | select array_length(extconfig, 1) from pg_catalog.pg_extension where extname = 'pgq'; 25 | 26 | -------------------------------------------------------------------------------- /sql/pgq/sql/pgq_init_noext.sql: -------------------------------------------------------------------------------- 1 | 2 | \set ECHO none 3 | \set VERBOSITY 'terse' 4 | set client_min_messages = 'warning'; 5 | -- \i ../txid/txid.sql 6 | -- \i pgq.sql 7 | \i structure/install.sql 8 | 9 | \set ECHO all 10 | 11 | -------------------------------------------------------------------------------- /sql/pgq/sql/pgq_init_upgrade.sql: 
-------------------------------------------------------------------------------- 1 | \set ECHO none 2 | \set VERBOSITY 'terse' 3 | set client_min_messages = 'warning'; 4 | \i ../../upgrade/final/pgq_core_2.1.13.sql 5 | \i ../../upgrade/final/pgq.upgrade_2.1_to_3.0.sql 6 | \i pgq.upgrade.sql 7 | \set ECHO all 8 | 9 | -------------------------------------------------------------------------------- /sql/pgq/sql/pgq_perms.sql: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | \set VERBOSITY 'terse' 3 | set client_min_messages = 'warning'; 4 | 5 | -- drop public perms 6 | \i structure/newgrants_pgq.sql 7 | 8 | -- select proname, proacl from pg_proc p, pg_namespace n where n.nspname = 'pgq' and p.pronamespace = n.oid; 9 | 10 | \set ECHO all 11 | 12 | drop role if exists pgq_test_producer; 13 | drop role if exists pgq_test_consumer; 14 | drop role if exists pgq_test_admin; 15 | 16 | create role pgq_test_consumer with login in role pgq_reader; 17 | create role pgq_test_producer with login in role pgq_writer; 18 | create role pgq_test_admin with login in role pgq_admin; 19 | 20 | 21 | \c - pgq_test_admin 22 | 23 | select * from pgq.create_queue('pqueue'); -- ok 24 | 25 | \c - pgq_test_producer 26 | 27 | select * from pgq.create_queue('pqueue'); -- fail 28 | 29 | select * from pgq.insert_event('pqueue', 'test', 'data'); -- ok 30 | 31 | select * from pgq.register_consumer('pqueue', 'prod'); -- fail 32 | 33 | \c - pgq_test_consumer 34 | 35 | select * from pgq.create_queue('pqueue'); -- fail 36 | select * from pgq.insert_event('pqueue', 'test', 'data'); -- fail 37 | select * from pgq.register_consumer('pqueue', 'cons'); -- ok 38 | select * from pgq.next_batch('pqueue', 'cons'); -- ok 39 | 40 | -------------------------------------------------------------------------------- /sql/pgq/sql/trunctrg.sql: -------------------------------------------------------------------------------- 1 | \set VERBOSITY 'terse' 2 | set client_min_messages = 'warning'; 3 | 4 | -- test sqltriga truncate 5 | create table trunctrg1 ( 6 | dat1 text not null primary key, 7 | dat2 int2 not null, 8 | dat3 text 9 | ); 10 | create trigger trunc1_trig after truncate on trunctrg1 11 | for each statement execute procedure pgq.sqltriga('que3'); 12 | truncate trunctrg1; 13 | 14 | 15 | -- test logutriga truncate 16 | create table trunctrg2 ( 17 | dat1 text not null primary key, 18 | dat2 int2 not null, 19 | dat3 text 20 | ); 21 | create trigger trunc2_trig after truncate on trunctrg2 22 | for each statement execute procedure pgq.logutriga('que3'); 23 | truncate trunctrg2; 24 | 25 | -- test deny 26 | create trigger deny_triga2 after truncate on trunctrg2 27 | for each statement execute procedure pgq.logutriga('noqueue', 'deny'); 28 | truncate trunctrg2; 29 | 30 | -------------------------------------------------------------------------------- /sql/pgq/structure/ext_postproc.sql: -------------------------------------------------------------------------------- 1 | 2 | -- tag data objects as dumpable 3 | 4 | SELECT pg_catalog.pg_extension_config_dump('pgq.queue', ''); 5 | SELECT pg_catalog.pg_extension_config_dump('pgq.consumer', ''); 6 | SELECT pg_catalog.pg_extension_config_dump('pgq.tick', ''); 7 | SELECT pg_catalog.pg_extension_config_dump('pgq.subscription', ''); 8 | SELECT pg_catalog.pg_extension_config_dump('pgq.event_template', ''); 9 | SELECT pg_catalog.pg_extension_config_dump('pgq.retry_queue', ''); 10 | 11 | -- This needs pg_dump 9.1.7+ 12 | SELECT 
pg_catalog.pg_extension_config_dump('pgq.batch_id_seq', ''); 13 | 14 | -------------------------------------------------------------------------------- /sql/pgq/structure/ext_unpackaged.sql: -------------------------------------------------------------------------------- 1 | 2 | ALTER EXTENSION pgq ADD SCHEMA pgq; 3 | 4 | ALTER EXTENSION pgq ADD TABLE pgq.queue; 5 | ALTER EXTENSION pgq ADD TABLE pgq.consumer; 6 | ALTER EXTENSION pgq ADD TABLE pgq.tick; 7 | ALTER EXTENSION pgq ADD TABLE pgq.subscription; 8 | ALTER EXTENSION pgq ADD TABLE pgq.event_template; 9 | ALTER EXTENSION pgq ADD TABLE pgq.retry_queue; 10 | 11 | ALTER EXTENSION pgq ADD SEQUENCE pgq.batch_id_seq; 12 | 13 | -------------------------------------------------------------------------------- /sql/pgq/structure/func_internal.sql: -------------------------------------------------------------------------------- 1 | -- Section: Internal Functions 2 | 3 | -- install & launch schema upgrade 4 | \i functions/pgq.upgrade_schema.sql 5 | select pgq.upgrade_schema(); 6 | 7 | -- Group: Low-level event handling 8 | 9 | \i functions/pgq.batch_event_sql.sql 10 | \i functions/pgq.batch_event_tables.sql 11 | \i functions/pgq.event_retry_raw.sql 12 | \i functions/pgq.find_tick_helper.sql 13 | 14 | -- \i functions/pgq.insert_event_raw.sql 15 | \i lowlevel/pgq_lowlevel.sql 16 | 17 | -- Group: Ticker 18 | 19 | \i functions/pgq.ticker.sql 20 | 21 | -- Group: Periodic maintenence 22 | 23 | \i functions/pgq.maint_retry_events.sql 24 | \i functions/pgq.maint_rotate_tables.sql 25 | \i functions/pgq.maint_tables_to_vacuum.sql 26 | \i functions/pgq.maint_operations.sql 27 | 28 | -- Group: Random utility functions 29 | 30 | \i functions/pgq.grant_perms.sql 31 | \i functions/pgq.tune_storage.sql 32 | \i functions/pgq.force_tick.sql 33 | \i functions/pgq.seq_funcs.sql 34 | \i functions/pgq.quote_fqname.sql 35 | 36 | -------------------------------------------------------------------------------- /sql/pgq/structure/grants.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | grant usage on schema pgq to public; 4 | 5 | -- old default grants 6 | grant select on table pgq.consumer to public; 7 | grant select on table pgq.queue to public; 8 | grant select on table pgq.tick to public; 9 | grant select on table pgq.queue to public; 10 | grant select on table pgq.subscription to public; 11 | grant select on table pgq.event_template to public; 12 | grant select on table pgq.retry_queue to public; 13 | 14 | -------------------------------------------------------------------------------- /sql/pgq/structure/install.sql: -------------------------------------------------------------------------------- 1 | 2 | \i structure/tables.sql 3 | \i structure/func_internal.sql 4 | \i structure/func_public.sql 5 | \i structure/triggers.sql 6 | \i structure/grants.sql 7 | 8 | -------------------------------------------------------------------------------- /sql/pgq/structure/triggers.sql: -------------------------------------------------------------------------------- 1 | 2 | -- Section: Public Triggers 3 | 4 | -- Group: Trigger Functions 5 | 6 | -- \i triggers/pgq.logutriga.sql 7 | \i triggers/pgq_triggers.sql 8 | 9 | -------------------------------------------------------------------------------- /sql/pgq/structure/uninstall_pgq.sql: -------------------------------------------------------------------------------- 1 | 2 | -- brute-force uninstall 3 | drop schema pgq cascade; 4 | 5 | 
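For reference, the structure scripts above form the non-extension install path used by the regression tests. A minimal psql session might look like this (a sketch; run from the sql/pgq directory, the queue name is a placeholder):

    \i structure/install.sql           -- tables, functions, triggers, grants
    select pgq.create_queue('myqueue');
    -- ... produce and consume events ...
    \i structure/uninstall_pgq.sql     -- brute-force cleanup: drop schema pgq cascade

On PostgreSQL 9.1+ the same code can instead be packaged as an extension, as the pgq_init_ext test above does with create extension pgq (or create extension pgq from 'unpackaged' for an existing schema).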
-------------------------------------------------------------------------------- /sql/pgq/structure/upgrade.sql: -------------------------------------------------------------------------------- 1 | \i structure/func_internal.sql 2 | \i structure/func_public.sql 3 | \i structure/triggers.sql 4 | -------------------------------------------------------------------------------- /sql/pgq/triggers/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | MODULE_big = pgq_triggers 4 | SRCS = logtriga.c logutriga.c sqltriga.c \ 5 | common.c makesql.c stringutil.c \ 6 | parsesql.c qbuilder.c 7 | OBJS = $(SRCS:.c=.o) 8 | DATA = pgq_triggers.sql 9 | 10 | PG_CONFIG = pg_config 11 | PGXS = $(shell $(PG_CONFIG) --pgxs) 12 | include $(PGXS) 13 | 14 | cs: 15 | cscope -b -f .cscope.out *.c 16 | 17 | -------------------------------------------------------------------------------- /sql/pgq/triggers/parsesql.h: -------------------------------------------------------------------------------- 1 | 2 | /* multi-char tokens */ 3 | enum SqlToken { 4 | T_SPACE = 257, 5 | T_STRING, 6 | T_NUMBER, 7 | T_WORD, 8 | T_FQIDENT, 9 | }; 10 | 11 | int sql_tokenizer(const char *sql, int *len_p, bool stdstr); 12 | 13 | -------------------------------------------------------------------------------- /sql/pgq/triggers/qbuilder.h: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | 4 | /* 5 | * Callbacks that to argument name/type/value lookups. 6 | */ 7 | struct QueryBuilderOps { 8 | /* returns name index or < 0 if unknown. str is not null-terminated */ 9 | int (*name_lookup)(void *arg, const char *str, int len); 10 | 11 | /* returns type oid for nr that .name_lookup returned */ 12 | Oid (*type_lookup)(void *arg, int nr); 13 | 14 | /* returns value for nr that .name_lookup returned */ 15 | Datum (*value_lookup)(void *arg, int nr, bool *isnull); 16 | }; 17 | 18 | /* 19 | * Parsed query 20 | */ 21 | struct QueryBuilder { 22 | StringInfoData sql; 23 | bool stdstr; 24 | const struct QueryBuilderOps *op; 25 | 26 | void *plan; 27 | 28 | int nargs; 29 | int maxargs; 30 | int *arg_map; 31 | }; 32 | 33 | struct QueryBuilder *qb_create(const struct QueryBuilderOps *ops, MemoryContext ctx); 34 | void qb_add_raw(struct QueryBuilder *q, const char *str, int len); 35 | void qb_add_parse(struct QueryBuilder *q, const char *str, void *arg); 36 | void qb_free(struct QueryBuilder *q); 37 | 38 | void qb_prepare(struct QueryBuilder *q, void *arg); 39 | int qb_execute(struct QueryBuilder *q, void *arg); 40 | 41 | -------------------------------------------------------------------------------- /sql/pgq/triggers/stringutil.h: -------------------------------------------------------------------------------- 1 | 2 | enum PgqEncode { 3 | TBUF_QUOTE_IDENT, 4 | TBUF_QUOTE_LITERAL, 5 | TBUF_QUOTE_URLENC, 6 | }; 7 | 8 | StringInfo pgq_init_varbuf(void); 9 | Datum pgq_finish_varbuf(StringInfo buf); 10 | bool pgq_strlist_contains(const char *liststr, const char *str); 11 | void pgq_encode_cstring(StringInfo tbuf, const char *str, enum PgqEncode encoding); 12 | -------------------------------------------------------------------------------- /sql/pgq_coop/Makefile: -------------------------------------------------------------------------------- 1 | 2 | EXTENSION = pgq_coop 3 | 4 | EXT_VERSION = 3.1.1 5 | EXT_OLD_VERSIONS = 3.1 6 | 7 | Contrib_regress = pgq_coop_init_noext pgq_coop_test 8 | Extension_regress = pgq_coop_init_ext pgq_coop_test 9 | 10 | include 
../common-pgxs.mk 11 | 12 | # 13 | # docs 14 | # 15 | dox: cleandox $(SRCS) 16 | mkdir -p docs/html 17 | mkdir -p docs/sql 18 | $(CATSQL) --ndoc structure/functions.sql > docs/sql/functions.sql 19 | $(NDOC) $(NDOCARGS) 20 | 21 | -------------------------------------------------------------------------------- /sql/pgq_coop/expected/pgq_coop_init_ext.out: -------------------------------------------------------------------------------- 1 | create extension pgq; 2 | \set ECHO none 3 | create extension pgq_coop from 'unpackaged'; 4 | drop extension pgq_coop; 5 | create extension pgq_coop; 6 | -------------------------------------------------------------------------------- /sql/pgq_coop/expected/pgq_coop_init_noext.out: -------------------------------------------------------------------------------- 1 | \set ECHO none 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | -------------------------------------------------------------------------------- /sql/pgq_coop/functions/pgq_coop.finish_batch.sql: -------------------------------------------------------------------------------- 1 | create or replace function pgq_coop.finish_batch( 2 | i_batch_id bigint) 3 | returns integer as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: pgq_coop.finish_batch(1) 6 | -- 7 | -- Closes a batch. 8 | -- 9 | -- Parameters: 10 | -- i_batch_id - id of the batch to be closed 11 | -- 12 | -- Returns: 13 | -- 1 if success (batch was found), 0 otherwise 14 | -- Calls: 15 | -- None 16 | -- Tables directly manipulated: 17 | -- update - pgq.subscription 18 | -- ---------------------------------------------------------------------- 19 | begin 20 | -- we are dealing with subconsumer, so nullify all tick info 21 | -- tick columns for master consumer contain adequate data 22 | update pgq.subscription 23 | set sub_active = now(), 24 | sub_last_tick = null, 25 | sub_next_tick = null, 26 | sub_batch = null 27 | where sub_batch = i_batch_id; 28 | if not found then 29 | raise warning 'coop_finish_batch: batch % not found', i_batch_id; 30 | return 0; 31 | else 32 | return 1; 33 | end if; 34 | end; 35 | $$ language plpgsql security definer; 36 | 37 | -------------------------------------------------------------------------------- /sql/pgq_coop/functions/pgq_coop.version.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_coop.version() 3 | returns text as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: pgq_coop.version(0) 6 | -- 7 | -- Returns version string for pgq_coop. ATM it is based on SkyTools version 8 | -- and only bumped when database code changes. 
9 | -- ---------------------------------------------------------------------- 10 | begin 11 | return '3.1.1'; 12 | end; 13 | $$ language plpgsql; 14 | 15 | -------------------------------------------------------------------------------- /sql/pgq_coop/pgq_coop.control: -------------------------------------------------------------------------------- 1 | # pgq_coop 2 | comment = 'Cooperative queue consuming for PgQ' 3 | default_version = '3.1.1' 4 | relocatable = false 5 | superuser = true 6 | schema = 'pg_catalog' 7 | requires = 'pgq' 8 | -------------------------------------------------------------------------------- /sql/pgq_coop/sql/pgq_coop_init_ext.sql: -------------------------------------------------------------------------------- 1 | 2 | create extension pgq; 3 | 4 | \set ECHO none 5 | \i structure/install.sql 6 | \set ECHO all 7 | 8 | create extension pgq_coop from 'unpackaged'; 9 | drop extension pgq_coop; 10 | 11 | create extension pgq_coop; 12 | 13 | -------------------------------------------------------------------------------- /sql/pgq_coop/sql/pgq_coop_init_noext.sql: -------------------------------------------------------------------------------- 1 | 2 | \set ECHO none 3 | \i ../pgq/pgq.sql 4 | \i structure/schema.sql 5 | \i structure/functions.sql 6 | \set ECHO all 7 | 8 | -------------------------------------------------------------------------------- /sql/pgq_coop/structure/ext_postproc.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markokr/skytools/11afc5210a92d4a2ccf701e4f92138488c31a3cb/sql/pgq_coop/structure/ext_postproc.sql -------------------------------------------------------------------------------- /sql/pgq_coop/structure/ext_unpackaged.sql: -------------------------------------------------------------------------------- 1 | 2 | ALTER EXTENSION pgq_coop ADD SCHEMA pgq_coop; 3 | 4 | -------------------------------------------------------------------------------- /sql/pgq_coop/structure/functions.sql: -------------------------------------------------------------------------------- 1 | 2 | -- ---------------------------------------------------------------------- 3 | -- Section: Functions 4 | -- 5 | -- Overview: 6 | -- 7 | -- The usual flow of a cooperative consumer is to 8 | -- 9 | -- 1. register itself as a subconsumer for a queue: 10 | -- pgq_coop.register_subconsumer() 11 | -- 12 | -- And then run a loop doing 13 | -- 14 | -- 2A. pgq_coop.next_batch() 15 | -- 16 | -- 2B. pgq_coop.finish_batch() 17 | -- 18 | -- Once the cooperative (or sub-)consumer is done, it should unregister 19 | -- itself before exiting 20 | -- 21 | -- 3.
pgq_coop.unregister_subconsumer() 22 | -- 23 | -- 24 | -- ---------------------------------------------------------------------- 25 | 26 | -- Group: Subconsumer registration 27 | \i functions/pgq_coop.register_subconsumer.sql 28 | \i functions/pgq_coop.unregister_subconsumer.sql 29 | 30 | -- Group: Event processing 31 | \i functions/pgq_coop.next_batch.sql 32 | \i functions/pgq_coop.finish_batch.sql 33 | 34 | -- Group: General Info 35 | \i functions/pgq_coop.version.sql 36 | 37 | -------------------------------------------------------------------------------- /sql/pgq_coop/structure/grants.ini: -------------------------------------------------------------------------------- 1 | [GrantFu] 2 | roles = pgq_reader, pgq_writer, pgq_admin, public 3 | 4 | [1.consumer] 5 | on.functions = %(pgq_coop_fns)s 6 | pgq_reader = execute 7 | 8 | [2.public] 9 | on.functions = pgq_coop.version() 10 | public = execute 11 | 12 | [DEFAULT] 13 | pgq_coop_fns = 14 | pgq_coop.register_subconsumer(text, text, text), 15 | pgq_coop.unregister_subconsumer(text, text, text, integer), 16 | pgq_coop.next_batch(text, text, text), 17 | pgq_coop.next_batch(text, text, text, interval), 18 | pgq_coop.next_batch_custom(text, text, text, interval, int4, interval), 19 | pgq_coop.next_batch_custom(text, text, text, interval, int4, interval, interval), 20 | pgq_coop.finish_batch(bigint) 21 | 22 | -------------------------------------------------------------------------------- /sql/pgq_coop/structure/grants.sql: -------------------------------------------------------------------------------- 1 | 2 | GRANT usage ON SCHEMA pgq_coop TO public; 3 | 4 | -------------------------------------------------------------------------------- /sql/pgq_coop/structure/install.sql: -------------------------------------------------------------------------------- 1 | \i structure/schema.sql 2 | \i structure/functions.sql 3 | \i structure/grants.sql 4 | -------------------------------------------------------------------------------- /sql/pgq_coop/structure/schema.sql: -------------------------------------------------------------------------------- 1 | 2 | create schema pgq_coop; 3 | 4 | -------------------------------------------------------------------------------- /sql/pgq_coop/structure/upgrade.sql: -------------------------------------------------------------------------------- 1 | \i structure/functions.sql 2 | -------------------------------------------------------------------------------- /sql/pgq_ext/Makefile: -------------------------------------------------------------------------------- 1 | 2 | EXTENSION = pgq_ext 3 | 4 | EXT_VERSION = 3.1 5 | EXT_OLD_VERSIONS = 6 | 7 | Contrib_regress = init_noext test_pgq_ext test_upgrade 8 | Extension_regress = init_ext test_pgq_ext 9 | 10 | DOCS = README.pgq_ext 11 | 12 | include ../common-pgxs.mk 13 | 14 | dox: cleandox $(SRCS) 15 | mkdir -p docs/html 16 | mkdir -p docs/sql 17 | $(CATSQL) --ndoc structure/tables.sql > docs/sql/schema.sql 18 | $(CATSQL) --ndoc structure/upgrade.sql > docs/sql/functions.sql 19 | $(NDOC) $(NDOCARGS) 20 | 21 | -------------------------------------------------------------------------------- /sql/pgq_ext/README.pgq_ext: -------------------------------------------------------------------------------- 1 | 2 | Track processed batches and events in target DB 3 | ================================================ 4 | 5 | Batch tracking is OK. 6 | 7 | Event tracking is OK if consumer does not use retry queue. 
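The calls below are meant to make batch replay idempotent. A sketch of the
usual pattern (consumer name and batch id are placeholders), run inside the
same transaction that applies the batch:

    select pgq_ext.is_batch_done('myconsumer', 12345);   -- true means: skip it
    -- ... apply the events ...
    select pgq_ext.set_batch_done('myconsumer', 12345);  -- tag batch as processed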
8 | 9 | Batch tracking 10 | -------------- 11 | 12 | is_batch_done(consumer, batch) 13 | 14 | returns: 15 | 16 | true - batch is done already 17 | false - batch is not done yet 18 | 19 | set_batch_done(consumer, batch) 20 | 21 | returns: 22 | 23 | true - tagging successful, batch was not done yet 24 | false - batch was done already 25 | 26 | Event tracking 27 | -------------- 28 | 29 | is_event_done(consumer, batch, event) 30 | 31 | returns: 32 | 33 | true - event is done 34 | false - event is not done yet 35 | 36 | 37 | set_event_done(consumer, batch, event) 38 | 39 | returns: 40 | 41 | true - tagging was successful, event was not done 42 | false - event is done already 43 | 44 | 45 | Fastvacuum 46 | ---------- 47 | 48 | pgq_ext.completed_batch 49 | pgq_ext.completed_event 50 | pgq_ext.completed_tick 51 | pgq_ext.partial_batch 52 | 53 | -------------------------------------------------------------------------------- /sql/pgq_ext/expected/init_ext.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | upgrade_schema 3 | ---------------- 4 | 4 5 | (1 row) 6 | 7 | create extension pgq_ext from 'unpackaged'; 8 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; 9 | dumpable 10 | ---------- 11 | 4 12 | (1 row) 13 | 14 | drop extension pgq_ext; 15 | create extension pgq_ext; 16 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; 17 | dumpable 18 | ---------- 19 | 4 20 | (1 row) 21 | 22 | -------------------------------------------------------------------------------- /sql/pgq_ext/expected/init_noext.out: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | upgrade_schema 3 | ---------------- 4 | 4 5 | (1 row) 6 | 7 | -------------------------------------------------------------------------------- /sql/pgq_ext/functions/pgq_ext.version.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_ext.version() 3 | returns text as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: pgq_ext.version(0) 6 | -- 7 | -- Returns version string for pgq_ext. ATM it is based on SkyTools version 8 | -- and only bumped when database code changes.
9 | -- ---------------------------------------------------------------------- 10 | begin 11 | return '3.1'; 12 | end; 13 | $$ language plpgsql; 14 | 15 | -------------------------------------------------------------------------------- /sql/pgq_ext/pgq_ext.control: -------------------------------------------------------------------------------- 1 | # pgq_ext 2 | comment = 'Target-side batch tracking infrastructure' 3 | default_version = '3.1' 4 | relocatable = false 5 | superuser = true 6 | schema = 'pg_catalog' 7 | 8 | -------------------------------------------------------------------------------- /sql/pgq_ext/sql/init_ext.sql: -------------------------------------------------------------------------------- 1 | 2 | \set ECHO off 3 | \i structure/install.sql 4 | \set ECHO all 5 | create extension pgq_ext from 'unpackaged'; 6 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; 7 | drop extension pgq_ext; 8 | 9 | create extension pgq_ext; 10 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_ext'; 11 | 12 | -------------------------------------------------------------------------------- /sql/pgq_ext/sql/init_noext.sql: -------------------------------------------------------------------------------- 1 | \set ECHO off 2 | \i structure/install.sql 3 | 4 | -------------------------------------------------------------------------------- /sql/pgq_ext/sql/test_pgq_ext.sql: -------------------------------------------------------------------------------- 1 | -- 2 | -- test batch tracking 3 | -- 4 | select pgq_ext.is_batch_done('c', 1); 5 | select pgq_ext.set_batch_done('c', 1); 6 | select pgq_ext.is_batch_done('c', 1); 7 | select pgq_ext.set_batch_done('c', 1); 8 | select pgq_ext.is_batch_done('c', 2); 9 | select pgq_ext.set_batch_done('c', 2); 10 | 11 | -- 12 | -- test event tracking 13 | -- 14 | select pgq_ext.is_batch_done('c', 3); 15 | select pgq_ext.is_event_done('c', 3, 101); 16 | select pgq_ext.set_event_done('c', 3, 101); 17 | select pgq_ext.is_event_done('c', 3, 101); 18 | select pgq_ext.set_event_done('c', 3, 101); 19 | select pgq_ext.set_batch_done('c', 3); 20 | select * from pgq_ext.completed_event order by 1,2; 21 | 22 | -- 23 | -- test tick tracking 24 | -- 25 | select pgq_ext.get_last_tick('c'); 26 | select pgq_ext.set_last_tick('c', 1); 27 | select pgq_ext.get_last_tick('c'); 28 | select pgq_ext.set_last_tick('c', 2); 29 | select pgq_ext.get_last_tick('c'); 30 | select pgq_ext.set_last_tick('c', NULL); 31 | select pgq_ext.get_last_tick('c'); 32 | 33 | -------------------------------------------------------------------------------- /sql/pgq_ext/sql/test_upgrade.sql: -------------------------------------------------------------------------------- 1 | 2 | \set ECHO off 3 | 4 | set log_error_verbosity = 'terse'; 5 | set client_min_messages = 'fatal'; 6 | create language plpgsql; 7 | set client_min_messages = 'warning'; 8 | 9 | drop schema pgq_ext cascade; 10 | \i sql/old_ext.sql 11 | \i structure/upgrade.sql 12 | \set ECHO all 13 | 14 | -- 15 | -- test batch tracking 16 | -- 17 | select pgq_ext.is_batch_done('c', 1); 18 | select pgq_ext.set_batch_done('c', 1); 19 | select pgq_ext.is_batch_done('c', 1); 20 | select pgq_ext.set_batch_done('c', 1); 21 | select pgq_ext.is_batch_done('c', 2); 22 | select pgq_ext.set_batch_done('c', 2); 23 | 24 | -- 25 | -- test event tracking 26 | -- 27 | select pgq_ext.is_batch_done('c', 3); 28 | select pgq_ext.is_event_done('c', 3, 101); 29 | select 
pgq_ext.set_event_done('c', 3, 101); 30 | select pgq_ext.is_event_done('c', 3, 101); 31 | select pgq_ext.set_event_done('c', 3, 101); 32 | select pgq_ext.set_batch_done('c', 3); 33 | select * from pgq_ext.completed_event order by 1,2; 34 | 35 | -- 36 | -- test tick tracking 37 | -- 38 | select pgq_ext.get_last_tick('c'); 39 | select pgq_ext.set_last_tick('c', 1); 40 | select pgq_ext.get_last_tick('c'); 41 | select pgq_ext.set_last_tick('c', 2); 42 | select pgq_ext.get_last_tick('c'); 43 | select pgq_ext.set_last_tick('c', NULL); 44 | select pgq_ext.get_last_tick('c'); 45 | 46 | -------------------------------------------------------------------------------- /sql/pgq_ext/structure/ext_postproc.sql: -------------------------------------------------------------------------------- 1 | 2 | -- tag data objects as dumpable 3 | 4 | SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_tick', ''); 5 | SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_batch', ''); 6 | SELECT pg_catalog.pg_extension_config_dump('pgq_ext.completed_event', ''); 7 | SELECT pg_catalog.pg_extension_config_dump('pgq_ext.partial_batch', ''); 8 | 9 | -------------------------------------------------------------------------------- /sql/pgq_ext/structure/ext_unpackaged.sql: -------------------------------------------------------------------------------- 1 | 2 | ALTER EXTENSION pgq_ext ADD SCHEMA pgq_ext; 3 | 4 | ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_tick; 5 | ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_batch; 6 | ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.completed_event; 7 | ALTER EXTENSION pgq_ext ADD TABLE pgq_ext.partial_batch; 8 | 9 | -------------------------------------------------------------------------------- /sql/pgq_ext/structure/grants.ini: -------------------------------------------------------------------------------- 1 | [GrantFu] 2 | roles = pgq_writer, public 3 | 4 | [1.public] 5 | on.functions = pgq_ext.version() 6 | public = execute 7 | 8 | [2.pgq_ext] 9 | on.functions = %(pgq_ext_fns)s 10 | pgq_writer = execute 11 | 12 | 13 | [DEFAULT] 14 | pgq_ext_fns = 15 | pgq_ext.upgrade_schema(), 16 | pgq_ext.is_batch_done(text, text, bigint), 17 | pgq_ext.is_batch_done(text, bigint), 18 | pgq_ext.set_batch_done(text, text, bigint), 19 | pgq_ext.set_batch_done(text, bigint), 20 | pgq_ext.is_event_done(text, text, bigint, bigint), 21 | pgq_ext.is_event_done(text, bigint, bigint), 22 | pgq_ext.set_event_done(text, text, bigint, bigint), 23 | pgq_ext.set_event_done(text, bigint, bigint), 24 | pgq_ext.get_last_tick(text, text), 25 | pgq_ext.get_last_tick(text), 26 | pgq_ext.set_last_tick(text, text, bigint), 27 | pgq_ext.set_last_tick(text, bigint) 28 | 29 | -------------------------------------------------------------------------------- /sql/pgq_ext/structure/grants.sql: -------------------------------------------------------------------------------- 1 | 2 | grant usage on schema pgq_ext to public; 3 | 4 | -------------------------------------------------------------------------------- /sql/pgq_ext/structure/install.sql: -------------------------------------------------------------------------------- 1 | \i structure/tables.sql 2 | \i structure/upgrade.sql 3 | \i structure/grants.sql 4 | 5 | -------------------------------------------------------------------------------- /sql/pgq_ext/structure/upgrade.sql: -------------------------------------------------------------------------------- 1 | -- 2 | -- Section: Functions 3 | -- 4 | 5 | \i functions/pgq_ext.upgrade_schema.sql 6 | 7 | 
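-- install & launch schema upgrade (in-place table fixes) before loading the function groups below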
select pgq_ext.upgrade_schema(); 8 | 9 | -- Group: track batches via batch id 10 | \i functions/pgq_ext.is_batch_done.sql 11 | \i functions/pgq_ext.set_batch_done.sql 12 | 13 | -- Group: track batches via tick id 14 | \i functions/pgq_ext.get_last_tick.sql 15 | \i functions/pgq_ext.set_last_tick.sql 16 | 17 | 18 | -- Group: Track events separately 19 | \i functions/pgq_ext.is_event_done.sql 20 | \i functions/pgq_ext.set_event_done.sql 21 | 22 | -- Group: Schema info 23 | \i functions/pgq_ext.version.sql 24 | 25 | -------------------------------------------------------------------------------- /sql/pgq_node/Makefile: -------------------------------------------------------------------------------- 1 | 2 | EXTENSION = pgq_node 3 | 4 | EXT_VERSION = 3.2.5 5 | EXT_OLD_VERSIONS = 3.1 3.1.3 3.1.6 3.2 6 | 7 | Extension_regress = pgq_node_init_ext pgq_node_test 8 | Contrib_regress = pgq_node_init_noext pgq_node_test 9 | 10 | include ../common-pgxs.mk 11 | 12 | # 13 | # docs 14 | # 15 | 16 | dox: cleandox $(SRCS) 17 | mkdir -p docs/html 18 | mkdir -p docs/sql 19 | $(CATSQL) --ndoc structure/tables.sql > docs/sql/pgq_node.sql 20 | $(CATSQL) --ndoc structure/functions.sql > docs/sql/functions.sql 21 | $(NDOC) $(NDOCARGS) 22 | 23 | -------------------------------------------------------------------------------- /sql/pgq_node/expected/pgq_node_init_ext.out: -------------------------------------------------------------------------------- 1 | create extension pgq; 2 | \set ECHO none 3 | upgrade_schema 4 | ---------------- 5 | 0 6 | (1 row) 7 | 8 | create extension pgq_node from unpackaged; 9 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; 10 | dumpable 11 | ---------- 12 | 4 13 | (1 row) 14 | 15 | drop extension pgq_node; 16 | create extension pgq_node; 17 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; 18 | dumpable 19 | ---------- 20 | 4 21 | (1 row) 22 | 23 | -------------------------------------------------------------------------------- /sql/pgq_node/expected/pgq_node_init_noext.out: -------------------------------------------------------------------------------- 1 | \set ECHO none 2 | upgrade_schema 3 | ---------------- 4 | 0 5 | (1 row) 6 | 7 | upgrade_schema 8 | ---------------- 9 | 0 10 | (1 row) 11 | 12 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.get_consumer_info.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.get_consumer_info( 3 | in i_queue_name text, 4 | 5 | out consumer_name text, 6 | out provider_node text, 7 | out last_tick_id int8, 8 | out paused boolean, 9 | out uptodate boolean, 10 | out cur_error text) 11 | returns setof record as $$ 12 | -- ---------------------------------------------------------------------- 13 | -- Function: pgq_node.get_consumer_info(1) 14 | -- 15 | -- Get consumer list that work on the local node. 
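--
-- Example (a sketch; the queue name is a placeholder):
--
--     select * from pgq_node.get_consumer_info('myqueue');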
16 | -- 17 | -- Parameters: 18 | -- i_queue_name - cascaded queue name 19 | -- 20 | -- Returns: 21 | -- consumer_name - cascaded consumer name 22 | -- provider_node - node from where the consumer reads from 23 | -- last_tick_id - last committed tick 24 | -- paused - if consumer is paused 25 | -- uptodate - if consumer is uptodate 26 | -- cur_error - failure reason 27 | -- ---------------------------------------------------------------------- 28 | begin 29 | for consumer_name, provider_node, last_tick_id, paused, uptodate, cur_error in 30 | select s.consumer_name, s.provider_node, s.last_tick_id, 31 | s.paused, s.uptodate, s.cur_error 32 | from pgq_node.local_state s 33 | where s.queue_name = i_queue_name 34 | order by 1 35 | loop 36 | return next; 37 | end loop; 38 | return; 39 | end; 40 | $$ language plpgsql security definer; 41 | 42 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.get_queue_locations.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.get_queue_locations( 3 | in i_queue_name text, 4 | 5 | out node_name text, 6 | out node_location text, 7 | out dead boolean 8 | ) returns setof record as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: pgq_node.get_queue_locations(1) 11 | -- 12 | -- Get node list for the queue. 13 | -- 14 | -- Parameters: 15 | -- i_queue_name - queue name 16 | -- 17 | -- Returns: 18 | -- node_name - node name 19 | -- node_location - libpq connect string for the node 20 | -- dead - whether the node should be considered dead 21 | -- ---------------------------------------------------------------------- 22 | begin 23 | for node_name, node_location, dead in 24 | select l.node_name, l.node_location, l.dead 25 | from pgq_node.node_location l 26 | where l.queue_name = i_queue_name 27 | loop 28 | return next; 29 | end loop; 30 | return; 31 | end; 32 | $$ language plpgsql security definer; 33 | 34 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.get_subscriber_info.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.get_subscriber_info( 3 | in i_queue_name text, 4 | 5 | out node_name text, 6 | out worker_name text, 7 | out node_watermark int8) 8 | returns setof record as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: pgq_node.get_subscriber_info(1) 11 | -- 12 | -- Get subscriber list for the local node. 13 | -- 14 | -- It may be out-of-date, due to in-progress 15 | -- administrative change. 16 | -- Node's local provider info ( pgq_node.get_node_info() or pgq_node.get_worker_state(1) ) 17 | -- is the authoritative source. 
18 | -- 19 | -- Parameters: 20 | -- i_queue_name - cascaded queue name 21 | -- 22 | -- Returns: 23 | -- node_name - node name that uses current node as provider 24 | -- worker_name - consumer that maintains remote node 25 | -- node_watermark - lowest tick_id on subscriber 26 | -- ---------------------------------------------------------------------- 27 | declare 28 | _watermark_name text; 29 | begin 30 | for node_name, worker_name, _watermark_name in 31 | select s.subscriber_node, s.worker_name, s.watermark_name 32 | from pgq_node.subscriber_info s 33 | where s.queue_name = i_queue_name 34 | order by 1 35 | loop 36 | select last_tick into node_watermark 37 | from pgq.get_consumer_info(i_queue_name, _watermark_name); 38 | return next; 39 | end loop; 40 | return; 41 | end; 42 | $$ language plpgsql security definer; 43 | 44 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.is_leaf_node.sql: -------------------------------------------------------------------------------- 1 | create or replace function pgq_node.is_leaf_node(i_queue_name text) 2 | returns bool as $$ 3 | -- ---------------------------------------------------------------------- 4 | -- Function: pgq_node.is_leaf_node(1) 5 | -- 6 | -- Checks if node is leaf. 7 | -- 8 | -- Parameters: 9 | -- i_queue_name - queue name 10 | -- Returns: 11 | -- true - if this is the leaf node for the queue 12 | -- ---------------------------------------------------------------------- 13 | declare 14 | res bool; 15 | begin 16 | select n.node_type = 'leaf' into res 17 | from pgq_node.node_info n 18 | where n.queue_name = i_queue_name; 19 | if not found then 20 | raise exception 'queue does not exist: %', i_queue_name; 21 | end if; 22 | return res; 23 | end; 24 | $$ language plpgsql; 25 | 26 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.is_root_node.sql: -------------------------------------------------------------------------------- 1 | create or replace function pgq_node.is_root_node(i_queue_name text) 2 | returns bool as $$ 3 | -- ---------------------------------------------------------------------- 4 | -- Function: pgq_node.is_root_node(1) 5 | -- 6 | -- Checks if node is root. 7 | -- 8 | -- Parameters: 9 | -- i_queue_name - queue name 10 | -- Returns: 11 | -- true - if this is the root node for the queue 12 | -- ---------------------------------------------------------------------- 13 | declare 14 | res bool; 15 | begin 16 | select n.node_type = 'root' into res 17 | from pgq_node.node_info n 18 | where n.queue_name = i_queue_name; 19 | if not found then 20 | raise exception 'queue does not exist: %', i_queue_name; 21 | end if; 22 | return res; 23 | end; 24 | $$ language plpgsql; 25 | 26 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.maint_watermark.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.maint_watermark(i_queue_name text) 3 | returns int4 as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: pgq_node.maint_watermark(1) 6 | -- 7 | -- Move global watermark on root node.
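--
-- Example (a sketch; the queue name is a placeholder): normally called
-- periodically by pgqd, but can also be run by hand on the root node:
--
--     select pgq_node.maint_watermark('myqueue');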
8 | -- 9 | -- Returns: 10 | -- 0 - tells pgqd to call just once 11 | -- ---------------------------------------------------------------------- 12 | declare 13 | _lag interval; 14 | begin 15 | perform 1 from pgq_node.node_info 16 | where queue_name = i_queue_name 17 | and node_type = 'root' 18 | for update; 19 | if not found then 20 | return 0; 21 | end if; 22 | 23 | select lag into _lag from pgq.get_consumer_info(i_queue_name, '.global_watermark'); 24 | if _lag >= '5 minutes'::interval then 25 | perform pgq_node.set_global_watermark(i_queue_name, NULL); 26 | end if; 27 | 28 | return 0; 29 | end; 30 | $$ language plpgsql; 31 | 32 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.set_consumer_completed.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.set_consumer_completed( 3 | in i_queue_name text, 4 | in i_consumer_name text, 5 | in i_tick_id int8, 6 | out ret_code int4, 7 | out ret_note text) 8 | as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: pgq_node.set_consumer_completed(3) 11 | -- 12 | -- Set last completed tick id for the cascaded consumer, 13 | -- i.e. the last tick it has committed on the local node. 14 | -- 15 | -- Parameters: 16 | -- i_queue_name - cascaded queue name 17 | -- i_consumer_name - cascaded consumer name 18 | -- i_tick_id - tick id 19 | -- Returns: 20 | -- 100 - ok 21 | -- 404 - consumer not known 22 | -- ---------------------------------------------------------------------- 23 | begin 24 | update pgq_node.local_state 25 | set last_tick_id = i_tick_id, 26 | cur_error = NULL 27 | where queue_name = i_queue_name 28 | and consumer_name = i_consumer_name; 29 | if found then 30 | select 100, 'Consumer ' || i_consumer_name || ' completed tick = ' || i_tick_id::text 31 | into ret_code, ret_note; 32 | else 33 | select 404, 'Consumer not known: ' 34 | || i_queue_name || '/' || i_consumer_name 35 | into ret_code, ret_note; 36 | end if; 37 | return; 38 | end; 39 | $$ language plpgsql security definer; 40 | 41 | 42 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.set_consumer_error.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.set_consumer_error( 3 | in i_queue_name text, 4 | in i_consumer_name text, 5 | in i_error_msg text, 6 | out ret_code int4, 7 | out ret_note text) 8 | as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: pgq_node.set_consumer_error(3) 11 | -- 12 | -- If batch processing fails, the consumer can store its last error in the db.
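--
-- Example (a sketch; queue, consumer and message values are placeholders):
--
--     select * from pgq_node.set_consumer_error('myqueue', 'myconsumer',
--                 'batch apply failed: duplicate key');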
13 | -- Returns: 14 | -- 100 - ok 15 | -- 101 - consumer not known 16 | -- ---------------------------------------------------------------------- 17 | begin 18 | update pgq_node.local_state 19 | set cur_error = i_error_msg 20 | where queue_name = i_queue_name 21 | and consumer_name = i_consumer_name; 22 | if found then 23 | select 100, 'Consumer ' || i_consumer_name || ' error = ' || i_error_msg 24 | into ret_code, ret_note; 25 | else 26 | select 101, 'Consumer not known, ignoring: ' 27 | || i_queue_name || '/' || i_consumer_name 28 | into ret_code, ret_note; 29 | end if; 30 | return; 31 | end; 32 | $$ language plpgsql security definer; 33 | 34 | 35 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.set_consumer_uptodate.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.set_consumer_uptodate( 3 | in i_queue_name text, 4 | in i_consumer_name text, 5 | in i_uptodate boolean, 6 | out ret_code int4, 7 | out ret_note text) 8 | returns record as $$ 9 | -- ---------------------------------------------------------------------- 10 | -- Function: pgq_node.set_consumer_uptodate(3) 11 | -- 12 | -- Set the consumer's uptodate flag. 13 | -- 14 | -- Parameters: 15 | -- i_queue_name - queue name 16 | -- i_consumer_name - consumer name 17 | -- i_uptodate - new flag state 18 | -- 19 | -- Returns: 20 | -- 200 - ok 21 | -- 404 - consumer not known 22 | -- ---------------------------------------------------------------------- 23 | begin 24 | update pgq_node.local_state 25 | set uptodate = i_uptodate 26 | where queue_name = i_queue_name 27 | and consumer_name = i_consumer_name; 28 | if found then 29 | select 200, 'Consumer uptodate = ' || i_uptodate::int4::text 30 | into ret_code, ret_note; 31 | else 32 | select 404, 'Consumer not known: ' 33 | || i_queue_name || '/' || i_consumer_name 34 | into ret_code, ret_note; 35 | end if; 36 | return; 37 | end; 38 | $$ language plpgsql security definer; 39 | 40 | 41 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.set_node_attrs.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.set_node_attrs( 3 | in i_queue_name text, 4 | in i_node_attrs text, 5 | out ret_code int4, 6 | out ret_note text) 7 | returns record as $$ 8 | -- ---------------------------------------------------------------------- 9 | -- Function: pgq_node.set_node_attrs(2) 10 | -- 11 | -- Set node attributes.
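--
-- Example (a sketch; the attribute key is a made-up placeholder, the value
-- string is an urlencoded key=value list):
--
--     select * from pgq_node.set_node_attrs('myqueue', 'myflag=1');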
12 | -- 13 | -- Parameters: 14 | -- i_node_name - cascaded queue name 15 | -- i_node_attrs - urlencoded node attrs 16 | -- 17 | -- Returns: 18 | -- 200 - ok 19 | -- 404 - node not found 20 | -- ---------------------------------------------------------------------- 21 | begin 22 | update pgq_node.node_info 23 | set node_attrs = i_node_attrs 24 | where queue_name = i_queue_name; 25 | if not found then 26 | select 404, 'Node not found' into ret_code, ret_note; 27 | return; 28 | end if; 29 | 30 | select 200, 'Node attributes updated' 31 | into ret_code, ret_note; 32 | return; 33 | end; 34 | $$ language plpgsql security definer; 35 | 36 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.unregister_consumer.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.unregister_consumer( 3 | in i_queue_name text, 4 | in i_consumer_name text, 5 | out ret_code int4, 6 | out ret_note text) 7 | returns record as $$ 8 | -- ---------------------------------------------------------------------- 9 | -- Function: pgq_node.unregister_consumer(2) 10 | -- 11 | -- Unregister cascaded consumer from local node. 12 | -- 13 | -- Parameters: 14 | -- i_queue_name - cascaded queue name 15 | -- i_consumer_name - cascaded consumer name 16 | -- 17 | -- Returns: 18 | -- ret_code - error code 19 | -- 200 - ok 20 | -- 404 - no such queue 21 | -- ret_note - description 22 | -- ---------------------------------------------------------------------- 23 | begin 24 | perform 1 from pgq_node.node_info where queue_name = i_queue_name 25 | for update; 26 | if not found then 27 | select 404, 'Unknown queue: ' || i_queue_name into ret_code, ret_note; 28 | return; 29 | end if; 30 | 31 | delete from pgq_node.local_state 32 | where queue_name = i_queue_name 33 | and consumer_name = i_consumer_name; 34 | 35 | select 200, 'Consumer '||i_consumer_name||' unregistered from '||i_queue_name 36 | into ret_code, ret_note; 37 | return; 38 | end; 39 | $$ language plpgsql security definer; 40 | 41 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.unregister_subscriber.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.unregister_subscriber( 3 | in i_queue_name text, 4 | in i_remote_node_name text, 5 | out ret_code int4, 6 | out ret_note text) 7 | returns record as $$ 8 | -- ---------------------------------------------------------------------- 9 | -- Function: pgq_node.unregister_subscriber(2) 10 | -- 11 | -- Unsubscribe remote node from local node. 12 | -- 13 | -- Parameters: 14 | -- i_queue_name - set name 15 | -- i_remote_node_name - node name 16 | -- 17 | -- Returns: 18 | -- ret_code - error code 19 | -- ret_note - description 20 | -- ---------------------------------------------------------------------- 21 | declare 22 | n_wm_name text; 23 | worker_name text; 24 | begin 25 | n_wm_name := '.' 
|| i_remote_node_name || '.watermark'; 26 | select s.worker_name into worker_name from pgq_node.subscriber_info s 27 | where queue_name = i_queue_name and subscriber_node = i_remote_node_name; 28 | if not found then 29 | select 304, 'Subscriber not found' into ret_code, ret_note; 30 | return; 31 | end if; 32 | 33 | delete from pgq_node.subscriber_info 34 | where queue_name = i_queue_name 35 | and subscriber_node = i_remote_node_name; 36 | 37 | perform pgq.unregister_consumer(i_queue_name, n_wm_name); 38 | perform pgq.unregister_consumer(i_queue_name, worker_name); 39 | 40 | select 200, 'Subscriber unregistered: '||i_remote_node_name 41 | into ret_code, ret_note; 42 | return; 43 | end; 44 | $$ language plpgsql security definer; 45 | 46 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.upgrade_schema.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.upgrade_schema() 3 | returns int4 as $$ 4 | -- updates table structure if necessary 5 | declare 6 | cnt int4 = 0; 7 | begin 8 | -- node_info.node_attrs 9 | perform 1 from information_schema.columns 10 | where table_schema = 'pgq_node' 11 | and table_name = 'node_info' 12 | and column_name = 'node_attrs'; 13 | if not found then 14 | alter table pgq_node.node_info add column node_attrs text; 15 | cnt := cnt + 1; 16 | end if; 17 | 18 | return cnt; 19 | end; 20 | $$ language plpgsql; 21 | 22 | -------------------------------------------------------------------------------- /sql/pgq_node/functions/pgq_node.version.sql: -------------------------------------------------------------------------------- 1 | 2 | create or replace function pgq_node.version() 3 | returns text as $$ 4 | -- ---------------------------------------------------------------------- 5 | -- Function: pgq_node.version(0) 6 | -- 7 | -- Returns version string for pgq_node. ATM it is based on SkyTools 8 | -- version and only bumped when database code changes. 
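--
-- Usage sketch (illustrative only): the function takes no arguments and
-- simply returns the constant defined below.
--
--     select pgq_node.version();   -- -> '3.2.5'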
9 | -- ---------------------------------------------------------------------- 10 | begin 11 | return '3.2.5'; 12 | end; 13 | $$ language plpgsql; 14 | 15 | -------------------------------------------------------------------------------- /sql/pgq_node/pgq_node.control: -------------------------------------------------------------------------------- 1 | # pgq_node 2 | comment = 'Cascaded queue infrastructure' 3 | default_version = '3.2.5' 4 | relocatable = false 5 | superuser = true 6 | schema = 'pg_catalog' 7 | requires = 'pgq' 8 | -------------------------------------------------------------------------------- /sql/pgq_node/sql/pgq_node_init_ext.sql: -------------------------------------------------------------------------------- 1 | 2 | create extension pgq; 3 | 4 | \set ECHO none 5 | \i structure/install.sql 6 | \set ECHO all 7 | create extension pgq_node from unpackaged; 8 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; 9 | drop extension pgq_node; 10 | 11 | create extension pgq_node; 12 | select array_length(extconfig, 1) as dumpable from pg_catalog.pg_extension where extname = 'pgq_node'; 13 | 14 | -------------------------------------------------------------------------------- /sql/pgq_node/sql/pgq_node_init_noext.sql: -------------------------------------------------------------------------------- 1 | 2 | \set ECHO none 3 | \i ../pgq/pgq.sql 4 | \i structure/tables.sql 5 | \i structure/functions.sql 6 | 7 | -------------------------------------------------------------------------------- /sql/pgq_node/structure/ext_postproc.sql: -------------------------------------------------------------------------------- 1 | 2 | -- tag data objects as dumpable 3 | 4 | SELECT pg_catalog.pg_extension_config_dump('pgq_node.node_location', ''); 5 | SELECT pg_catalog.pg_extension_config_dump('pgq_node.node_info', ''); 6 | SELECT pg_catalog.pg_extension_config_dump('pgq_node.local_state', ''); 7 | SELECT pg_catalog.pg_extension_config_dump('pgq_node.subscriber_info', ''); 8 | 9 | 10 | -------------------------------------------------------------------------------- /sql/pgq_node/structure/ext_unpackaged.sql: -------------------------------------------------------------------------------- 1 | 2 | ALTER EXTENSION pgq_node ADD SCHEMA pgq_node; 3 | 4 | ALTER EXTENSION pgq_node ADD TABLE pgq_node.node_location; 5 | ALTER EXTENSION pgq_node ADD TABLE pgq_node.node_info; 6 | ALTER EXTENSION pgq_node ADD TABLE pgq_node.local_state; 7 | ALTER EXTENSION pgq_node ADD TABLE pgq_node.subscriber_info; 8 | 9 | -------------------------------------------------------------------------------- /sql/pgq_node/structure/grants.sql: -------------------------------------------------------------------------------- 1 | 2 | grant usage on schema pgq_node to public; 3 | 4 | -------------------------------------------------------------------------------- /sql/pgq_node/structure/install.sql: -------------------------------------------------------------------------------- 1 | 2 | \i structure/tables.sql 3 | \i structure/functions.sql 4 | \i structure/grants.sql 5 | 6 | -------------------------------------------------------------------------------- /sql/pgq_node/structure/upgrade.sql: -------------------------------------------------------------------------------- 1 | \i structure/functions.sql 2 | -------------------------------------------------------------------------------- /sql/ticker/Makefile: 
-------------------------------------------------------------------------------- 1 | 2 | include ../../config.mak 3 | 4 | PG_INCDIR = $(shell $(PG_CONFIG) --includedir) 5 | PG_LIBDIR = $(shell $(PG_CONFIG) --libdir) 6 | 7 | bin_PROGRAMS = pgqd 8 | 9 | pgqd_SOURCES = pgqd.c maint.c ticker.c retry.c pgqd.h 10 | nodist_pgqd_SOURCES = pgqd.ini.h 11 | pgqd_CPPFLAGS = -I$(PG_INCDIR) 12 | pgqd_LDFLAGS = -L$(PG_LIBDIR) 13 | pgqd_LIBS = -lpq -lm 14 | 15 | pgqd_EMBED_LIBUSUAL = 1 16 | USUAL_DIR = ../../lib 17 | AM_FEATURES = libusual 18 | 19 | EXTRA_DIST = pgqd.ini 20 | CLEANFILES = pgqd.ini.h 21 | 22 | include $(USUAL_DIR)/mk/antimake.mk 23 | 24 | pgqd.ini.h: pgqd.ini 25 | sed -e 's/.*/"&\\n"/' $< > $@ 26 | 27 | install: install-conf 28 | install-conf: 29 | mkdir -p '$(DESTDIR)$(docdir)/conf' 30 | $(INSTALL) -m 644 pgqd.ini '$(DESTDIR)$(docdir)/conf/pgqd.ini.templ' 31 | 32 | tags: 33 | ctags *.[ch] ../../lib/usual/*.[ch] 34 | 35 | -------------------------------------------------------------------------------- /sql/ticker/pgqd.ini: -------------------------------------------------------------------------------- 1 | [pgqd] 2 | 3 | # where to log 4 | logfile = ~/log/pgqd.log 5 | 6 | # pidfile 7 | pidfile = ~/pid/pgqd.pid 8 | 9 | ## optional parameters ## 10 | 11 | # libpq connect string without dbname= 12 | #base_connstr = 13 | 14 | # startup db to query other databases 15 | #initial_database = template1 16 | 17 | # limit ticker to specific databases 18 | #database_list = 19 | 20 | # log into syslog 21 | #syslog = 1 22 | #syslog_ident = pgqd 23 | 24 | ## optional timeouts ## 25 | 26 | # how often to check for new databases 27 | #check_period = 60 28 | 29 | # how often to flush retry queue 30 | #retry_period = 30 31 | 32 | # how often to do maintenance 33 | #maint_period = 120 34 | 35 | # how often to run ticker 36 | #ticker_period = 1 37 | 38 | -------------------------------------------------------------------------------- /sql/txid/Makefile: -------------------------------------------------------------------------------- 1 | 2 | PG_CONFIG = pg_config 3 | PGXS = $(shell $(PG_CONFIG) --pgxs) 4 | 5 | PGVER := $(shell $(PG_CONFIG) --version | sed 's/PostgreSQL //') 6 | 7 | ifeq ($(PGVER),) 8 | $(error Failed to get Postgres version) 9 | else 10 | # postgres >= 8.2 manages epoch itself, so skip epoch tables 11 | pg83 = $(shell test $(PGVER) "<" "8.3" && echo "false" || echo "true") 12 | pg82 = $(shell test $(PGVER) "<" "8.2" && echo "false" || echo "true") 13 | endif 14 | 15 | ifeq ($(pg83),true) # we have 8.3 with internal txid 16 | 17 | # install empty txid.sql 18 | DATA_built = txid.sql 19 | include $(PGXS) 20 | txid.sql: txid.internal.sql 21 | cp $< $@ 22 | 23 | else # 8.2 or 8.1 24 | # 25 | # pg < 8.3 needs this module 26 | # 27 | MODULE_big = txid 28 | SRCS = txid.c epoch.c 29 | OBJS = $(SRCS:.c=.o) 30 | REGRESS = txid 31 | REGRESS_OPTS = --load-language=plpgsql 32 | DATA = uninstall_txid.sql 33 | DOCS = README.txid 34 | DATA_built = txid.sql 35 | EXTRA_CLEAN = txid.sql.in 36 | 37 | # PGXS build procedure 38 | include $(PGXS) 39 | 40 | ifeq ($(pg82),true) 41 | # 8.2 tracks epoch internally 42 | TXID_SQL = txid.std.sql 43 | else 44 | # 8.1 needs epoch-tracking code 45 | TXID_SQL = txid.std.sql txid.schema.sql 46 | endif # ! 8.2 47 | 48 | # additional deps 49 | txid.o: txid.h 50 | epoch.o: txid.h 51 | 52 | txid.sql.in: $(TXID_SQL) 53 | cat $(TXID_SQL) > $@ 54 | 55 | endif # !
8.3 56 | 57 | test: install 58 | make installcheck || { less regression.diffs; exit 1; } 59 | 60 | -------------------------------------------------------------------------------- /sql/txid/sql/txid.sql: -------------------------------------------------------------------------------- 1 | -- init 2 | \set ECHO none 3 | \i txid.sql 4 | \set ECHO all 5 | 6 | -- i/o 7 | select '12:13:'::txid_snapshot; 8 | select '12:13:1,2'::txid_snapshot; 9 | 10 | -- errors 11 | select '31:12:'::txid_snapshot; 12 | select '0:1:'::txid_snapshot; 13 | select '12:13:0'::txid_snapshot; 14 | select '12:13:2,1'::txid_snapshot; 15 | 16 | create table snapshot_test ( 17 | nr integer, 18 | snap txid_snapshot 19 | ); 20 | 21 | insert into snapshot_test values (1, '12:13:'); 22 | insert into snapshot_test values (2, '12:20:13,15,18'); 23 | insert into snapshot_test values (3, '100001:100009:100005,100007,100008'); 24 | 25 | select snap from snapshot_test order by nr; 26 | 27 | select txid_snapshot_xmin(snap), 28 | txid_snapshot_xmax(snap), 29 | txid_snapshot_xip(snap) 30 | from snapshot_test order by nr; 31 | 32 | select id, txid_visible_in_snapshot(id, snap) 33 | from snapshot_test, generate_series(11, 21) id 34 | where nr = 2; 35 | 36 | -- test current values also 37 | select txid_current() >= txid_snapshot_xmin(txid_current_snapshot()); 38 | -- select txid_current_txid() < txid_snapshot_xmax(txid_current_snapshot()); 39 | 40 | -- select txid_in_snapshot(txid_current_txid(), txid_current_snapshot()), 41 | -- txid_not_in_snapshot(txid_current_txid(), txid_current_snapshot()); 42 | 43 | -------------------------------------------------------------------------------- /sql/txid/txid.h: -------------------------------------------------------------------------------- 1 | #ifndef _TXID_H_ 2 | #define _TXID_H_ 3 | 4 | #define MAX_INT64 0x7FFFFFFFFFFFFFFFLL 5 | 6 | /* Use unsigned variant internally */ 7 | typedef uint64 txid; 8 | 9 | typedef struct 10 | { 11 | int32 __varsz; /* should not be touched directly */ 12 | uint32 nxip; 13 | txid xmin; 14 | txid xmax; 15 | txid xip[1]; 16 | } TxidSnapshot; 17 | 18 | #define TXID_SNAPSHOT_SIZE(nxip) (offsetof(TxidSnapshot, xip) + sizeof(txid) * nxip) 19 | 20 | typedef struct { 21 | uint64 last_value; 22 | uint64 epoch; 23 | } TxidEpoch; 24 | 25 | /* internal functions */ 26 | void txid_load_epoch(TxidEpoch *state, int try_write); 27 | txid txid_convert_xid(TransactionId xid, TxidEpoch *state); 28 | 29 | /* public functions */ 30 | Datum txid_current(PG_FUNCTION_ARGS); 31 | Datum txid_current_snapshot(PG_FUNCTION_ARGS); 32 | 33 | Datum txid_snapshot_in(PG_FUNCTION_ARGS); 34 | Datum txid_snapshot_out(PG_FUNCTION_ARGS); 35 | Datum txid_snapshot_recv(PG_FUNCTION_ARGS); 36 | Datum txid_snapshot_send(PG_FUNCTION_ARGS); 37 | 38 | Datum txid_snapshot_xmin(PG_FUNCTION_ARGS); 39 | Datum txid_snapshot_xmax(PG_FUNCTION_ARGS); 40 | Datum txid_snapshot_xip(PG_FUNCTION_ARGS); 41 | Datum txid_visible_in_snapshot(PG_FUNCTION_ARGS); 42 | 43 | Datum txid_snapshot_active(PG_FUNCTION_ARGS); 44 | Datum txid_in_snapshot(PG_FUNCTION_ARGS); 45 | Datum txid_not_in_snapshot(PG_FUNCTION_ARGS); 46 | 47 | 48 | #endif /* _TXID_H_ */ 49 | 50 | -------------------------------------------------------------------------------- /sql/txid/txid.internal.sql: -------------------------------------------------------------------------------- 1 | -- txid is included in 8.3 2 | -------------------------------------------------------------------------------- /sql/txid/txid.schema.sql: 
-------------------------------------------------------------------------------- 1 | -- ---------- 2 | -- txid.sql 3 | -- 4 | -- SQL script for loading the transaction ID compatible datatype 5 | -- 6 | -- Copyright (c) 2003-2004, PostgreSQL Global Development Group 7 | -- Author: Jan Wieck, Afilias USA INC. 8 | -- 9 | -- ---------- 10 | 11 | -- 12 | -- now the epoch storage 13 | -- 14 | 15 | CREATE SCHEMA txid; 16 | 17 | -- remember txid settings 18 | -- use bigint so we can do arithmetic with it 19 | create table txid.epoch ( 20 | epoch bigint, 21 | last_value bigint 22 | ); 23 | 24 | -- make sure there exist exactly one row 25 | insert into txid.epoch values (0, 1); 26 | 27 | 28 | -- then protect it 29 | create function txid.epoch_guard() 30 | returns trigger as $$ 31 | begin 32 | if TG_OP = 'UPDATE' then 33 | -- epoch: allow only small increase 34 | if NEW.epoch > OLD.epoch and NEW.epoch < (OLD.epoch + 3) then 35 | return NEW; 36 | end if; 37 | -- last_value: allow only increase 38 | if NEW.epoch = OLD.epoch and NEW.last_value > OLD.last_value then 39 | return NEW; 40 | end if; 41 | end if; 42 | raise exception 'bad operation on txid.epoch'; 43 | end; 44 | $$ language plpgsql; 45 | 46 | -- the trigger 47 | create trigger epoch_guard_trigger 48 | before insert or update or delete on txid.epoch 49 | for each row execute procedure txid.epoch_guard(); 50 | 51 | -------------------------------------------------------------------------------- /sql/txid/uninstall_txid.sql: -------------------------------------------------------------------------------- 1 | 2 | DROP DOMAIN txid; 3 | DROP TYPE txid_snapshot cascade; 4 | DROP SCHEMA txid CASCADE; 5 | DROP FUNCTION txid_current(); 6 | 7 | 8 | -------------------------------------------------------------------------------- /sql/ztestall.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | 3 | set -e 4 | 5 | for pg in pg83 pg84 pg90 pg91 pg92 pg93; do 6 | for mod in pgq pgq_coop pgq_node pgq_ext londiste; do 7 | echo " #### $pg/$mod ####" 8 | $pg make -s -C $mod clean test 9 | done 10 | done 11 | 12 | -------------------------------------------------------------------------------- /tests/cascade/conf/nop_consumer.ini: -------------------------------------------------------------------------------- 1 | [nop_consumer] 2 | 3 | job_name = nop_consumer 4 | 5 | #_provider_db = dbname=db1 6 | dst_db = dbname=db2 7 | 8 | logfile = log/%(job_name)s.log 9 | pidfile = pid/%(job_name)s.pid 10 | 11 | queue_name = fooqueue 12 | 13 | -------------------------------------------------------------------------------- /tests/cascade/conf/setadm.ini: -------------------------------------------------------------------------------- 1 | [cascade_admin] 2 | 3 | node_db = dbname=db2 4 | 5 | queue_name = fooqueue 6 | 7 | -------------------------------------------------------------------------------- /tests/cascade/conf/ticker_branch.ini: -------------------------------------------------------------------------------- 1 | [pgqadm] 2 | 3 | job_name = ticker_branch 4 | 5 | db = dbname=db_branch 6 | 7 | # how often to run maintenance [minutes] 8 | maint_delay_min = 1 9 | 10 | # how often to check for activity [secs] 11 | loop_delay = 0.5 12 | 13 | logfile = log/%(job_name)s.log 14 | pidfile = pid/%(job_name)s.pid 15 | 16 | use_skylog = 0 17 | 18 | connection_lifetime = 21 19 | 20 | queue_refresh_period = 10 21 | 22 | -------------------------------------------------------------------------------- /tests/cascade/conf/ticker_db1.ini: -------------------------------------------------------------------------------- 1 | [pgqadm] 2 | job_name = ticker_db1 3 | db = dbname=db1 4 | loop_delay = 0.5 5 | logfile = log/%(job_name)s.log 6 | pidfile = pid/%(job_name)s.pid 7 | 8 | -------------------------------------------------------------------------------- /tests/cascade/conf/ticker_db2.ini: -------------------------------------------------------------------------------- 1 | [pgqadm] 2 | job_name = ticker_db2 3 | db = dbname=db2 4 | loop_delay = 0.5 5 | logfile = log/%(job_name)s.log 6 | pidfile = pid/%(job_name)s.pid 7 | 8 | -------------------------------------------------------------------------------- /tests/cascade/conf/ticker_db3.ini: -------------------------------------------------------------------------------- 1 | [pgqadm] 2 | job_name = ticker_db3 3 | db = dbname=db3 4 | loop_delay = 0.5 5 | logfile = log/%(job_name)s.log 6 | pidfile = pid/%(job_name)s.pid 7 | 8 | -------------------------------------------------------------------------------- /tests/cascade/conf/worker_db1.ini: -------------------------------------------------------------------------------- 1 | [nop_worker] 2 | 3 | job_name = node1_worker 4 | 5 | dst_db = dbname=db1 6 | 7 | logfile = log/%(job_name)s.log 8 | pidfile = pid/%(job_name)s.pid 9 | 10 | queue_name = fooqueue 11 | 12 | -------------------------------------------------------------------------------- /tests/cascade/conf/worker_db2.ini: -------------------------------------------------------------------------------- 1 | [nop_worker] 2 | 3 | job_name = node2_worker 4 | 5 | dst_db = dbname=db2 6 | 7 | logfile = log/%(job_name)s.log 8 | pidfile = pid/%(job_name)s.pid 9 | 10 | queue_name = fooqueue 11 | 12 | -------------------------------------------------------------------------------- /tests/cascade/conf/worker_db3.ini: 
-------------------------------------------------------------------------------- 1 | [nop_worker] 2 | 3 | job_name = node3_worker 4 | 5 | dst_db = dbname=db3 6 | 7 | logfile = log/%(job_name)s.log 8 | pidfile = pid/%(job_name)s.pid 9 | 10 | queue_name = fooqueue 11 | 12 | -------------------------------------------------------------------------------- /tests/cascade/init.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | . ../env.sh 4 | 5 | mkdir -p log pid 6 | 7 | dropdb db1 8 | dropdb db2 9 | dropdb db3 10 | 11 | createdb db1 12 | createdb db2 13 | createdb db3 14 | 15 | pgqadm conf/ticker_db1.ini install 16 | pgqadm conf/ticker_db2.ini install 17 | pgqadm conf/ticker_db3.ini install 18 | 19 | -------------------------------------------------------------------------------- /tests/cascade/plainconsumer.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import sys, time, skytools 4 | 5 | from pgq.cascade.consumer import CascadedConsumer 6 | 7 | class PlainCascadedConsumer(CascadedConsumer): 8 | def process_remote_event(self, src_curs, dst_curs, ev): 9 | ev.tag_done() 10 | 11 | if __name__ == '__main__': 12 | script = PlainCascadedConsumer('nop_consumer', 'dst_db', sys.argv[1:]) 13 | script.start() 14 | 15 | -------------------------------------------------------------------------------- /tests/cascade/plainworker.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | import sys, time, skytools 4 | 5 | from pgq.cascade.worker import CascadedWorker 6 | 7 | class PlainCascadedWorker(CascadedWorker): 8 | def process_remote_event(self, src_curs, dst_curs, ev): 9 | self.log.info("got events: %s / %s" % (ev.ev_type, ev.ev_data)) 10 | ev.tag_done() 11 | 12 | if __name__ == '__main__': 13 | script = PlainCascadedWorker('nop_worker', 'dst_db', sys.argv[1:]) 14 | script.start() 15 | 16 | -------------------------------------------------------------------------------- /tests/cascade/status.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | . ../env.sh 4 | 5 | pgqadm conf/ticker_db1.ini status 6 | pgqadm conf/ticker_db2.ini status 7 | pgqadm conf/ticker_db3.ini status 8 | 9 | setadm -v conf/setadm.ini status 10 | 11 | 12 | -------------------------------------------------------------------------------- /tests/cascade/zcheck.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | grep -E 'ERR|WARN|CRIT' log/*.log || echo "All OK" 4 | 5 | -------------------------------------------------------------------------------- /tests/cascade/zstop.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | #. ../env.sh 4 | 5 | for p in pid/*.pid*; do 6 | test -f "$p" || continue 7 | pid=`cat "$p"` 8 | test -d "/proc/$pid" || { 9 | rm -f "$p" 10 | continue 11 | } 12 | kill "$pid" 13 | done 14 | 15 | -------------------------------------------------------------------------------- /tests/cascade/ztest.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | . 
../env.sh 4 | 5 | ./plainconsumer.py -v conf/nop_consumer.ini --register 6 | ./plainconsumer.py -v conf/nop_consumer.ini 7 | 8 | -------------------------------------------------------------------------------- /tests/env.sh: -------------------------------------------------------------------------------- 1 | 2 | for dir in . .. ../.. 3 | do 4 | config=$dir/config.mak 5 | test -f $config && break 6 | done 7 | 8 | pfx=`grep ^prefix $config | awk '{ print $3}'` 9 | pyver=`python -V 2>&1 | sed 's/Python \([0-9]*.[0-9]*\).*/\1/'` 10 | PYTHONPATH=$pfx/lib/python$pyver/site-packages:$PYTHONPATH 11 | PATH=$pfx/bin:$PATH 12 | #PYTHONPATH=../../python:$PYTHONPATH 13 | #PATH=../../python:../../python/bin:../../scripts:$PATH 14 | #LD_LIBRARY_PATH=/opt/apps/py26/lib:$LD_LIBRARY_PATH 15 | #PATH=/opt/apps/py26/bin:$PATH 16 | export PYTHONPATH PATH LD_LIBRARY_PATH PATH 17 | 18 | PGHOST=localhost 19 | export PGHOST 20 | 21 | 22 | -------------------------------------------------------------------------------- /tests/handler/init.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | . ../env.sh 4 | 5 | for db in hsrc hdst; do 6 | echo dropdb $db 7 | dropdb $db 8 | done 9 | 10 | echo createdb hsrc 11 | createdb hsrc --encoding=sql_ascii --template=template0 12 | 13 | echo createdb hdst 14 | createdb hdst --encoding=utf-8 --template=template0 15 | -------------------------------------------------------------------------------- /tests/localconsumer/init.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | . ../env.sh 4 | 5 | mkdir -p log pid 6 | 7 | dropdb qdb 8 | createdb qdb 9 | 10 | -------------------------------------------------------------------------------- /tests/localconsumer/regen.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | . 
../testlib.sh 4 | 5 | for db in qdb; do 6 | cleardb $db 7 | done 8 | 9 | rm -f log/*.log 10 | mkdir -p state 11 | rm -f state/* 12 | 13 | set -e 14 | 15 | title LocalConsumer test 16 | 17 | title2 Initialization 18 | 19 | msg Install PgQ 20 | 21 | run_qadmin qdb "install pgq;" 22 | run_qadmin qdb "create queue test_queue;" 23 | 24 | msg Run ticker 25 | 26 | cat_file conf/pgqd.ini < $@ 16 | 17 | final/londiste.upgrade_2.1_to_3.1.sql: src/londiste.2to3.sql ../sql/londiste/londiste.sql 18 | echo "begin;" > $@ 19 | cat src/londiste.2to3.sql >> $@ 20 | grep -v 'create schema' ../sql/londiste/londiste.sql >> $@ 21 | echo "commit;" >> $@ 22 | 23 | PSQL = psql -q 24 | 25 | ltest: ../sql/pgq_node/pgq_node.sql 26 | $(PSQL) -d postgres -c "drop database if exists londiste_upgrade_test" 27 | $(PSQL) -d postgres -c "create database londiste_upgrade_test" 28 | $(PSQL) -d londiste_upgrade_test -f final/pgq_core_2.1.13.sql 29 | $(PSQL) -d londiste_upgrade_test -f final/londiste.2.1.12.sql 30 | $(PSQL) -d londiste_upgrade_test -f final/pgq.upgrade_2.1_to_3.0.sql 31 | $(PSQL) -d londiste_upgrade_test -f ../sql/pgq_node/pgq_node.sql 32 | $(PSQL) -d londiste_upgrade_test -f final/londiste.upgrade_2.1_to_3.1.sql 33 | 34 | -------------------------------------------------------------------------------- /upgrade/final/v2.1.5_pgq_ext.sql: -------------------------------------------------------------------------------- 1 | 2 | begin; 3 | 4 | 5 | 6 | create or replace function pgq_ext.get_last_tick(a_consumer text) 7 | returns int8 as $$ 8 | declare 9 | res int8; 10 | begin 11 | select last_tick_id into res 12 | from pgq_ext.completed_tick 13 | where consumer_id = a_consumer; 14 | return res; 15 | end; 16 | $$ language plpgsql security definer; 17 | 18 | create or replace function pgq_ext.set_last_tick(a_consumer text, a_tick_id bigint) 19 | returns integer as $$ 20 | begin 21 | if a_tick_id is null then 22 | delete from pgq_ext.completed_tick 23 | where consumer_id = a_consumer; 24 | else 25 | update pgq_ext.completed_tick 26 | set last_tick_id = a_tick_id 27 | where consumer_id = a_consumer; 28 | if not found then 29 | insert into pgq_ext.completed_tick (consumer_id, last_tick_id) 30 | values (a_consumer, a_tick_id); 31 | end if; 32 | end if; 33 | 34 | return 1; 35 | end; 36 | $$ language plpgsql security definer; 37 | 38 | 39 | 40 | end; 41 | 42 | 43 | -------------------------------------------------------------------------------- /upgrade/final/v2.1.6_londiste.sql: -------------------------------------------------------------------------------- 1 | 2 | begin; 3 | 4 | 5 | 6 | create or replace function londiste.version() 7 | returns text as $$ 8 | begin 9 | return '2.1.6'; 10 | end; 11 | $$ language plpgsql; 12 | 13 | 14 | 15 | end; 16 | 17 | 18 | -------------------------------------------------------------------------------- /upgrade/final/v2.1.6_pgq_ext.sql: -------------------------------------------------------------------------------- 1 | 2 | begin; 3 | 4 | 5 | 6 | create or replace function pgq_ext.version() 7 | returns text as $$ 8 | begin 9 | return '2.1.6'; 10 | end; 11 | $$ language plpgsql; 12 | 13 | 14 | 15 | end; 16 | 17 | 18 | -------------------------------------------------------------------------------- /upgrade/src/londiste.2to3.sql: -------------------------------------------------------------------------------- 1 | 2 | drop function if exists londiste.find_table_fkeys(text); 3 | 4 | -------------------------------------------------------------------------------- 
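A rough usage sketch for the pgq_ext.set_last_tick / pgq_ext.get_last_tick pair shown above in /upgrade/final/v2.1.5_pgq_ext.sql; the consumer name and tick id below are invented for illustration and are not part of the repository:

    select pgq_ext.set_last_tick('demo_consumer', 12345);   -- remember position
    select pgq_ext.get_last_tick('demo_consumer');          -- -> 12345
    select pgq_ext.set_last_tick('demo_consumer', NULL);    -- forget stored position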
/upgrade/src/v2.1.5_londiste.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | 3 | create table londiste.subscriber_pending_fkeys( 4 | from_table text not null, 5 | to_table text not null, 6 | fkey_name text not null, 7 | fkey_def text not null, 8 | 9 | primary key (from_table, fkey_name) 10 | ); 11 | 12 | create table londiste.subscriber_pending_triggers ( 13 | table_name text not null, 14 | trigger_name text not null, 15 | trigger_def text not null, 16 | 17 | primary key (table_name, trigger_name) 18 | ); 19 | 20 | -- drop function londiste.denytrigger(); 21 | 22 | \i ../sql/londiste/functions/londiste.find_table_fkeys.sql 23 | \i ../sql/londiste/functions/londiste.find_table_triggers.sql 24 | \i ../sql/londiste/functions/londiste.find_column_types.sql 25 | \i ../sql/londiste/functions/londiste.subscriber_fkeys_funcs.sql 26 | \i ../sql/londiste/functions/londiste.subscriber_trigger_funcs.sql 27 | \i ../sql/londiste/functions/londiste.quote_fqname.sql 28 | 29 | \i ../sql/londiste/functions/londiste.find_table_oid.sql 30 | \i ../sql/londiste/functions/londiste.get_last_tick.sql 31 | \i ../sql/londiste/functions/londiste.provider_add_table.sql 32 | \i ../sql/londiste/functions/londiste.provider_create_trigger.sql 33 | \i ../sql/londiste/functions/londiste.provider_notify_change.sql 34 | \i ../sql/londiste/functions/londiste.provider_remove_table.sql 35 | \i ../sql/londiste/functions/londiste.set_last_tick.sql 36 | \i ../sql/londiste/functions/londiste.subscriber_remove_table.sql 37 | 38 | \i ../sql/londiste/structure/grants.sql 39 | 40 | end; 41 | 42 | -------------------------------------------------------------------------------- /upgrade/src/v2.1.5_pgq_core.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | 3 | alter table pgq.subscription add constraint subscription_ukey unique (sub_queue, sub_consumer); 4 | create index rq_retry_owner_idx on pgq.retry_queue (ev_owner, ev_id); 5 | 6 | \i ../sql/pgq/functions/pgq.current_event_table.sql 7 | \i ../sql/pgq/functions/pgq.event_failed.sql 8 | \i ../sql/pgq/functions/pgq.event_retry.sql 9 | \i ../sql/pgq/functions/pgq.force_tick.sql 10 | \i ../sql/pgq/functions/pgq.grant_perms.sql 11 | \i ../sql/pgq/functions/pgq.insert_event.sql 12 | \i ../sql/pgq/functions/pgq.maint_tables_to_vacuum.sql 13 | \i ../sql/pgq/functions/pgq.next_batch.sql 14 | \i ../sql/pgq/functions/pgq.register_consumer.sql 15 | \i ../sql/pgq/functions/pgq.version.sql 16 | \i ../sql/pgq/structure/grants.sql 17 | 18 | end; 19 | 20 | -------------------------------------------------------------------------------- /upgrade/src/v2.1.5_pgq_ext.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | 3 | \i ../sql/pgq_ext/functions/track_tick.sql 4 | 5 | end; 6 | 7 | -------------------------------------------------------------------------------- /upgrade/src/v2.1.6_londiste.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | 3 | \i ../sql/londiste/functions/londiste.version.sql 4 | 5 | end; 6 | 7 | -------------------------------------------------------------------------------- /upgrade/src/v2.1.6_pgq_ext.sql: -------------------------------------------------------------------------------- 1 | begin; 2 | 3 | \i ../sql/pgq_ext/functions/version.sql 4 | 5 | end; 6 | 7 | --------------------------------------------------------------------------------
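After applying the v2.1.6 wrapper scripts above, a quick sanity check is to ask each schema for its version; the final/v2.1.6_* scripts shown earlier define both functions to return '2.1.6'. An illustrative check, run against the upgraded database:

    select londiste.version();   -- expected: '2.1.6'
    select pgq_ext.version();    -- expected: '2.1.6'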