├── .final_builds ├── jobs │ ├── pgpool │ │ └── index.yml │ ├── postgres │ │ └── index.yml │ ├── smoke-tests │ │ └── index.yml │ └── vip │ │ └── index.yml ├── license │ └── index.yml └── packages │ ├── haproxy │ └── index.yml │ ├── keepalived │ └── index.yml │ ├── pgpool2 │ └── index.yml │ ├── pgrt │ └── index.yml │ └── postgres │ └── index.yml ├── .gitignore ├── LICENSE ├── README.md ├── Rakefile ├── ci ├── pipeline.yml ├── repipe ├── scripts │ ├── rc │ ├── shipit │ ├── testflight │ ├── update-blob │ └── update-manifest └── settings.yml ├── config ├── blobs.yml └── final.yml ├── docs └── ha.png ├── jobs ├── .gitkeep ├── postgres │ ├── monit │ ├── spec │ └── templates │ │ ├── bin │ │ ├── ctl │ │ ├── functions │ │ ├── healthy │ │ ├── monitor │ │ ├── recover │ │ └── tune │ │ ├── config │ │ ├── .gitkeep │ │ ├── hba.conf │ │ ├── ident.conf │ │ └── postgresql.conf │ │ ├── data │ │ └── properties.sh.erb │ │ ├── envrc │ │ ├── helpers │ │ ├── ctl_setup.sh │ │ └── ctl_utils.sh │ │ └── sql │ │ └── setup-replication.sql ├── smoke-tests │ ├── monit │ ├── spec │ └── templates │ │ ├── bin │ │ └── run │ │ ├── helpers │ │ ├── ctl_setup.sh │ │ └── ctl_utils.sh │ │ └── sql │ │ ├── .gitkeep │ │ └── pgbench_cleanup.sql └── vip │ ├── monit │ ├── spec │ └── templates │ ├── bin │ ├── haproxy │ └── keepalived │ ├── config │ ├── haproxy.conf │ └── keepalived.tpl │ ├── helpers │ ├── ctl_setup.sh │ └── ctl_utils.sh │ ├── properties.sh.erb │ └── ssl_redirect.map.erb ├── manifests ├── ha.yml └── postgres.yml ├── packages ├── .gitkeep ├── haproxy │ ├── packaging │ └── spec ├── keepalived │ ├── packaging │ └── spec ├── pgrt │ ├── packaging │ └── spec └── postgres │ ├── packaging │ └── spec ├── releases └── postgres │ ├── index.yml │ ├── postgres-1.0.0.yml │ ├── postgres-1.0.2.yml │ ├── postgres-1.0.3.yml │ ├── postgres-1.0.4.yml │ ├── postgres-1.1.0.yml │ ├── postgres-2.0.0.yml │ ├── postgres-3.0.0.yml │ ├── postgres-3.1.0.yml │ ├── postgres-3.1.1.yml │ ├── postgres-3.1.2.yml │ ├── 
postgres-3.1.3.yml │ ├── postgres-3.1.4.yml │ ├── postgres-3.1.5.yml │ ├── postgres-3.2.0.yml │ ├── postgres-3.2.1.yml │ └── postgres-3.2.2.yml ├── src ├── .gitkeep ├── common │ └── utils.sh └── hatop │ └── hatop └── templates ├── deployment.yml ├── infrastructure-aws-ec2.yml ├── infrastructure-warden.yml ├── jobs.yml ├── make_manifest └── stub.yml /.final_builds/jobs/pgpool/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 232e1d6a1d504df42c31c34c845c65c79e7cb38c: 3 | version: 232e1d6a1d504df42c31c34c845c65c79e7cb38c 4 | blobstore_id: 287191ac-8b17-4795-bbef-1527ae811d1c 5 | sha1: a8e1020712f0885e4187db3d9ee932d342a28cf8 6 | 31e75c9a2d3f6c1406887319def9b7c27fe8360d: 7 | version: 31e75c9a2d3f6c1406887319def9b7c27fe8360d 8 | blobstore_id: 8c81c773-1e3e-4499-bfb4-524f4de985a0 9 | sha1: d6f9f0441c4a1d026b15db9372fc9147b393b9bd 10 | 39c430611966dd13d9ecc4c24c1d5b94470c89bf: 11 | version: 39c430611966dd13d9ecc4c24c1d5b94470c89bf 12 | blobstore_id: cd20891c-daeb-4858-6eb5-342596107a61 13 | sha1: bf8b7118ba780b6810b79a9443c55491ad4f5b42 14 | 751805a78a400009ef2961bf21850abc29bee910: 15 | version: 751805a78a400009ef2961bf21850abc29bee910 16 | blobstore_id: ea79856c-c214-44ea-9364-e8036ad66db5 17 | sha1: 1ee0636ea84eaa538d96b3b548b795647b264b9c 18 | ee5697f0f1d54eccc2ad49034f0a8a5ae95e2306: 19 | version: ee5697f0f1d54eccc2ad49034f0a8a5ae95e2306 20 | blobstore_id: 179050b5-9f73-4ae9-8d11-401355ca7153 21 | sha1: 407405699f75c9ea62536fc55f16416b06c20a42 22 | format-version: "2" 23 | -------------------------------------------------------------------------------- /.final_builds/jobs/postgres/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 06ff778a048b81cf830872c70cb1b5ece3c1b8e9: 3 | version: 06ff778a048b81cf830872c70cb1b5ece3c1b8e9 4 | blobstore_id: 75263957-af02-45a2-bdad-cd94d87da805 5 | sha1: 90508786e65252aee2dadcc8868856c19a6a23da 6 | 
30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121: 7 | version: 30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121 8 | blobstore_id: 4f25e6c3-c3ea-464d-5dd1-18799c505ac8 9 | sha1: sha256:1285cea7b26518a7117b2863aac26caefe5c177ff213af99c49a5daa4e1435a1 10 | 56b55ff4547348601a728ac54e550a5824de56c3: 11 | version: 56b55ff4547348601a728ac54e550a5824de56c3 12 | blobstore_id: 130dadb9-0dd2-4e81-b1fa-532e7229c19f 13 | sha1: 088a6265d744c7218f30201475b39f413ff25cdd 14 | 63e9e86100fb9717a0ce0e3049a678d72a82351f: 15 | version: 63e9e86100fb9717a0ce0e3049a678d72a82351f 16 | blobstore_id: 7f19061a-14e0-4ee4-5418-f201d163621e 17 | sha1: e292d83d742f5245cf75b8cda37bb640957139e8 18 | 6617cb6161f5f193be537a007af6348ff93d65e2: 19 | version: 6617cb6161f5f193be537a007af6348ff93d65e2 20 | blobstore_id: b43b7d4c-bf8f-4f2f-7eca-45c5f232271f 21 | sha1: 9b6d141fec86702308d00b88e25653b9d7e591c9 22 | 6cb5f1dbf79cb4ec531cd392465c037eebe4b28f: 23 | version: 6cb5f1dbf79cb4ec531cd392465c037eebe4b28f 24 | blobstore_id: cafd863e-b3f7-4e29-8dbe-ba7cb93977a6 25 | sha1: a25a1831f6ca704c04e1afe4d4c708a0ba0c2b10 26 | 6fc4ada1355fd55e6c5c095086492aa93526b1e7: 27 | version: 6fc4ada1355fd55e6c5c095086492aa93526b1e7 28 | blobstore_id: 0f2030c8-4532-4ab1-8f0e-efba220b6d02 29 | sha1: 3a282cf7abc7211cc6141dbfcbb7b5b5eca028fa 30 | 75eb1be9a33cecc9541b6f2945656296b1587ab6: 31 | version: 75eb1be9a33cecc9541b6f2945656296b1587ab6 32 | blobstore_id: 5a4a0ab9-fdd5-4838-be34-9891f9dbd631 33 | sha1: 93607d91627cefd5f5a8b9fb539b966ab81b22ee 34 | a0e2b8b8995b9b2d563b1ce7094e7e77a8fba94b: 35 | version: a0e2b8b8995b9b2d563b1ce7094e7e77a8fba94b 36 | blobstore_id: 5f0e6d8b-5e36-4f7b-68da-259ba2da7427 37 | sha1: 7d2affaf1484251ad45ec9d2a42c95427fcd9561 38 | d736b096cbe7854bf0cb11b6471ba7fac89eaf8e: 39 | version: d736b096cbe7854bf0cb11b6471ba7fac89eaf8e 40 | blobstore_id: 39b9ef4e-a315-4992-6762-d0633ba86888 41 | sha1: 45fc0124c13056a8f93896bace65391138d83220 42 | 
d849b3c95be04e2cbae3d827b059f3340cc3d277: 43 | version: d849b3c95be04e2cbae3d827b059f3340cc3d277 44 | blobstore_id: afd522f1-6e79-47ee-4193-a25363e2345d 45 | sha1: 51422ea48f17d7fb652e79e8004bd3e7935c54d8 46 | f1cca9cdac7f4345a37f4b9cd6d432010fc3304e: 47 | version: f1cca9cdac7f4345a37f4b9cd6d432010fc3304e 48 | blobstore_id: 79978426-6d29-48a2-488e-5006a9c31114 49 | sha1: 84e0a6dbdc3223f27874789bee2c3acb5f278789 50 | fce369182274f4e57c23e970f320f86e206e0add: 51 | version: fce369182274f4e57c23e970f320f86e206e0add 52 | blobstore_id: a316ae12-5ca3-422c-62f5-71f1b8a4f811 53 | sha1: 4eb8583bb0ebe0811cfb68a7aa00468723023446 54 | format-version: "2" 55 | -------------------------------------------------------------------------------- /.final_builds/jobs/smoke-tests/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 173de226b3f33ec19362e3d869879f3e2d932c4ea78fb51b4c3b1ec534638002: 3 | version: 173de226b3f33ec19362e3d869879f3e2d932c4ea78fb51b4c3b1ec534638002 4 | blobstore_id: ab44d161-6803-45ca-7bcb-ac095105e55d 5 | sha1: sha256:8c9ec2763bbf9a240faff5b5829777f7d26904662bac9477fada00f559fa5ac7 6 | a08dc385aacde0fd9a3526177551a4de293fa9056e8f2228110e6c083fdca7c9: 7 | version: a08dc385aacde0fd9a3526177551a4de293fa9056e8f2228110e6c083fdca7c9 8 | blobstore_id: 1cae5f9f-dcd2-412c-4f2f-23566b8b2021 9 | sha1: sha256:a246ef95120c757dd8740856b29735f70429b02fc6db1edd1c093b5a9f42b84e 10 | a73a6108927abdadff05042a5df17916f18a9090: 11 | version: a73a6108927abdadff05042a5df17916f18a9090 12 | blobstore_id: 6ec329df-d977-4942-a212-1915673c4dbe 13 | sha1: 928187a4409cec9f5681c034e921e81a554a40b5 14 | ad59026e232d3ea9d4ddf1149272efc4d62758c9: 15 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 16 | blobstore_id: 2fec31cb-a4d9-498c-5306-351955aa4439 17 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 18 | c70d7c4abb65cb5dc064416c60021dee6db11e57: 19 | version: c70d7c4abb65cb5dc064416c60021dee6db11e57 20 | blobstore_id: 
79c8c266-2da7-4e1c-825e-421069ace3fc 21 | sha1: ea74f9da8949de680e930a9d05b8f2167058b218 22 | format-version: "2" 23 | -------------------------------------------------------------------------------- /.final_builds/jobs/vip/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 2a03d5631458cd5894ed1277ac1c6599d0a2a762: 3 | version: 2a03d5631458cd5894ed1277ac1c6599d0a2a762 4 | blobstore_id: 8904144f-0eb4-49f5-471c-fc60ed67b031 5 | sha1: 07c219d066537dbcbb74bdd5d7c0c080028dee2c 6 | 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f: 7 | version: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 8 | blobstore_id: dcce33fd-c1d3-4dfc-78d8-b096808ed1f7 9 | sha1: e2392d30b7c2a66017697b206934247fe109ab06 10 | 2d7e5195e36368e5bb4a6ce645ef7db8d449ff7e: 11 | version: 2d7e5195e36368e5bb4a6ce645ef7db8d449ff7e 12 | blobstore_id: a3c532a8-0ff7-4008-4ff0-67cd26578ac1 13 | sha1: c4e9f824dde586d8d7e0aec8ce18705711c1149e 14 | 44e341f4fcfd1964fee13f884eed592fd5aec23f59204d0fcd5ef6b506d684cc: 15 | version: 44e341f4fcfd1964fee13f884eed592fd5aec23f59204d0fcd5ef6b506d684cc 16 | blobstore_id: 54892143-3ae2-468f-5248-77b10b8aabfb 17 | sha1: sha256:23c1ba4923b6e871b1b64bdd9b8318b47631f9b578c7650372ec69a17cfa6d35 18 | c532352928175597d9e2e6968a7c0cc9eec4877a76cefde1152c8b3efefaabe4: 19 | version: c532352928175597d9e2e6968a7c0cc9eec4877a76cefde1152c8b3efefaabe4 20 | blobstore_id: 108443c5-f694-4964-6a2b-cc6d5997e214 21 | sha1: sha256:65ed036a08ac3fdd232071853807d02c87d42be9ecd9539aa94f8f94d94e315c 22 | format-version: "2" 23 | -------------------------------------------------------------------------------- /.final_builds/license/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b: 3 | version: 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b 4 | blobstore_id: cba305c5-fa9b-4076-7567-8dbe469fed33 5 | sha1: 
sha256:00c81214e4d7e2af778b3f2d6e07f2ed1bf8b08d6ee3116b959dc9b654f38492 6 | f0f6caa3fde87a6d14a5faaf366b31155ccf78a0: 7 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 8 | blobstore_id: 319450a6-42be-4a13-b90b-8c095da99aae 9 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 10 | format-version: "2" 11 | -------------------------------------------------------------------------------- /.final_builds/packages/haproxy/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 3840aff7102cd05e3d84387908236db5c82bd444dc84b38dbb7d4e54ea71d808: 3 | version: 3840aff7102cd05e3d84387908236db5c82bd444dc84b38dbb7d4e54ea71d808 4 | blobstore_id: 803565e1-cca7-4c00-4747-11f19dc7a516 5 | sha1: sha256:8c70dbcdbf12d2d49f9dfd13d2953f4caa193a2015df97992a72572cf630b7f2 6 | 888c05456b91cb06db85659acc774fcabbfd09e7: 7 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 8 | blobstore_id: 82fe746c-f9c0-430c-4d31-8af802eea71d 9 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 10 | aa51a5ba97479ee10e1583d0c9f33b56cded83c44d1fc6500a73d5bfb91083b4: 11 | version: aa51a5ba97479ee10e1583d0c9f33b56cded83c44d1fc6500a73d5bfb91083b4 12 | blobstore_id: 64d92037-e6fe-4f03-7b9c-c43910d86332 13 | sha1: sha256:03af57399c9efc0c0cff0cc95c3bc2764e9617b2178a3bff8f08db8451ca1a08 14 | acda58c18cc39e1099e3ba5570b783b33b9d079b5e30b25ede18bcc75c825534: 15 | version: acda58c18cc39e1099e3ba5570b783b33b9d079b5e30b25ede18bcc75c825534 16 | blobstore_id: 0119dace-bf91-46d3-73e8-3fb8fb47b458 17 | sha1: sha256:2227c9dfb0b0dd55012e20448a9a14920b05ad83e4503b111378b7c5d219bfbc 18 | format-version: "2" 19 | -------------------------------------------------------------------------------- /.final_builds/packages/keepalived/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733: 3 | version: 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733 4 | 
blobstore_id: 90c3c7fe-f909-4bd8-789e-1c2019a8dd3a 5 | sha1: sha256:fc5e7c66d1e098ee30229a32b77937cc1a47f89ebd3599653bae1e08ae074f1f 6 | a8e18e8e2a5159e933377c8d8af36978a1f4cbf0: 7 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 8 | blobstore_id: 848ff052-7477-4eee-7d6c-255ebeba13b8 9 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 10 | format-version: "2" 11 | -------------------------------------------------------------------------------- /.final_builds/packages/pgpool2/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 0989b207209bea0bdcbffb90b09046a68415ae28: 3 | version: 0989b207209bea0bdcbffb90b09046a68415ae28 4 | blobstore_id: 7ff76edb-b191-440c-aca2-ba306d71b1b5 5 | sha1: 53703c74c0fd49022250bb164c2abf9bcfc41117 6 | 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac: 7 | version: 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac 8 | blobstore_id: 28fb3771-001c-410d-9583-0bb24607a522 9 | sha1: ed1d3ee8879106c56a615b80bf145fe2a240ec31 10 | 31ffabc1a00122bf9f62956e0fa26c959ea7c07a: 11 | version: 31ffabc1a00122bf9f62956e0fa26c959ea7c07a 12 | blobstore_id: 4b72a923-3a49-4de8-ab1a-3b8209c3cbef 13 | sha1: 8ad38de0edd185f917b53c40141ec6d70ebb2f68 14 | 39ae1587bb89bc5df75cce7a05e01e9e9ffb579e: 15 | version: 39ae1587bb89bc5df75cce7a05e01e9e9ffb579e 16 | blobstore_id: dd35930a-60bc-40d3-7440-5b2eb68e1134 17 | sha1: 10b5d639f85974af413d8482eee403ae747d25cf 18 | format-version: "2" 19 | -------------------------------------------------------------------------------- /.final_builds/packages/pgrt/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775: 3 | version: bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775 4 | blobstore_id: ad424651-89e0-4a26-65ed-3927f7c19953 5 | sha1: sha256:131df4a02f3eb3ee589d25ee1c7746171e74e5801fd1244ded292fc5412a47e4 6 | dafda7c29675f507adc4effbee68094c3d5dabce: 7 | 
version: dafda7c29675f507adc4effbee68094c3d5dabce 8 | blobstore_id: 12117420-ca1a-44bf-a7a6-114ac6702a79 9 | sha1: fadd380592639abf60204e71210b85e66e0dbd53 10 | fdec5588f5c2c34039820660d73ae0055709f8bc: 11 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 12 | blobstore_id: 1b20fd1e-a073-4b09-703b-91c5258d3599 13 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 14 | format-version: "2" 15 | -------------------------------------------------------------------------------- /.final_builds/packages/postgres/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 230580030604fb50dc88971df00a58918e454936353c81595d99c20caae02885: 3 | version: 230580030604fb50dc88971df00a58918e454936353c81595d99c20caae02885 4 | blobstore_id: eadb1f05-487b-4735-5e25-854bd6c02bc2 5 | sha1: sha256:4c9fd4747c904712e6fd96ba8b29e07dcc248e316c125a31facbacbc98851729 6 | 664fd4f2accda02570a30290d1d45a0abc459f9c: 7 | version: 664fd4f2accda02570a30290d1d45a0abc459f9c 8 | blobstore_id: d3e346c0-18a8-49ce-a2cf-dfda7e152ad3 9 | sha1: 1272d0a8297c300647cf7446537117b742222022 10 | a72141d28d97c02c4348633d31995953e75a6183: 11 | version: a72141d28d97c02c4348633d31995953e75a6183 12 | blobstore_id: fb4bf171-3b11-479f-87f6-8530f9fc4582 13 | sha1: 1df80253697ca51dca59b294a057bb68d6fb5d9f 14 | a9ef943bde149989a4f5077a14ed16b5b1b29100: 15 | version: a9ef943bde149989a4f5077a14ed16b5b1b29100 16 | blobstore_id: 751eedac-dfeb-4a4c-9d60-fd50d9616798 17 | sha1: 03645d7cac757e492dbea3599d9cfd2d8c7592dd 18 | afffbdce123957c1fd39f7697356808668ff3748640fccc629c8024baf2adcbe: 19 | version: afffbdce123957c1fd39f7697356808668ff3748640fccc629c8024baf2adcbe 20 | blobstore_id: ab39857b-b5bd-4a68-4e68-014034fd460c 21 | sha1: sha256:33504b684ea0cbdec04b94ea1e0f2e3880f3338b2cb2500a29081d041fd5d0a8 22 | d6c6d82bf172de3708c80986055f26b855f03f70afb269af8ddf7fbf1749746e: 23 | version: d6c6d82bf172de3708c80986055f26b855f03f70afb269af8ddf7fbf1749746e 24 | blobstore_id: 
a748c19c-e821-4b9b-4826-b51dd2212f0a 25 | sha1: sha256:5450082278257aa1255e083b1a9f5c4ace19dd6ca3b87c9bafef76f3dad3c72f 26 | f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b: 27 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 28 | blobstore_id: 79faf840-2568-46c8-7440-ea2bc059f5f5 29 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 30 | format-version: "2" 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | config/dev.yml 2 | config/private.yml 3 | config/settings.yml 4 | releases/**/*.tgz 5 | dev_releases 6 | blobs/* 7 | .blobs 8 | .dev_builds 9 | .vagrant 10 | .idea 11 | .DS_Store 12 | .final_builds/jobs/**/*.tgz 13 | .final_builds/packages/**/*.tgz 14 | *.swp 15 | *~ 16 | *# 17 | #* 18 | tmp 19 | my*.yml 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 James Hunt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to 7 | deal in the Software without restriction, including without limitation the 8 | rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 9 | sell copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 | IN THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PostgreSQL, BOSH-Style 2 | 3 | This BOSH release packages up PostgreSQL so that you can deploy it 4 | on BOSH. It supports standalone, clustered, and HA 5 | configurations. 6 | 7 | # Supported Topologies 8 | 9 | ## Standalone Configuration 10 | 11 | For a single, standalone PostgreSQL node, you only need the 12 | `postgres` job: 13 | 14 | ``` 15 | instance_groups: 16 | - name: db 17 | jobs: 18 | - name: postgres 19 | release: postgres 20 | ``` 21 | 22 | ## Clustered Configuration 23 | 24 | To enable replication, deploy multiple nodes and set the 25 | `postgres.replication.enabled` property to "yes": 26 | 27 | ``` 28 | instance_groups: 29 | - name: db 30 | instances: 4 31 | jobs: 32 | - name: postgres 33 | release: postgres 34 | properties: 35 | replication: 36 | enabled: true 37 | ``` 38 | 39 | In replicated mode, the bootstrap VM will assume the role of 40 | master, and the remaining nodes will replicate from it, forming a 41 | star topology. No special query routing is done in this 42 | configuration; applications that wish to make use of read replicas 43 | must do so explicitly. 44 | 45 | Promotion of a replica is left to the operator. 46 | 47 | ## HA Configuration 48 | 49 | For a highly-available, single-IP pair of PostgreSQL nodes, the 50 | `vip` job can be added. Note that you *must* deploy exactly two 51 | instances, or HA won't work. Replication must also be enabled. 
52 | 53 | ``` 54 | instance_groups: 55 | - name: db 56 | jobs: 57 | - name: postgres 58 | release: postgres 59 | properties: 60 | replication: 61 | enabled: true # don't forget this! 62 | 63 | - name: vip 64 | release: postgres 65 | properties: 66 | vip: 10.3.4.5 67 | ``` 68 | 69 | HA is implemented with automatic failover, if you set 70 | `postgres.replication.enabled` to true. 71 | 72 | On bootstrap, if there is no data directory, the postgres job will 73 | revert to a normal, index-based setup. The first node will assume 74 | the role of the master, and the second will become a replica. 75 | 76 | Once the data directory has been populated, future restarts of the 77 | postgres job will attempt to contact the other node to see if it 78 | is a master. If the other node responds, and reports itself as a 79 | master, the local node will attempt a `pg_basebackup` from the 80 | master and assume the role of a replica. 81 | 82 | If the other node doesn't respond, or reports itself as a replica, 83 | the local node will keep trying, for up to 84 | `postgres.replication.grace` seconds, at which point it will 85 | assume the mantle of leadership and become the master node, 86 | using its current data directory as the canonical truth. 87 | 88 | Each node then starts up a `monitor` process; this process is 89 | responsible for ultimately promoting a local replica to be a 90 | master, in the event that the real master goes offline. It works 91 | like this: 92 | 93 | 1. Busy-loop (via 1-second sleeps) until the local postgres 94 | instance is available on its configured port. This prevents 95 | monitor from trying to restart the postgres while it is 96 | running a replica `pg_basebackup`. 97 | 98 | 2. Busy-loop (again via 1-second sleeps) for as long as the 99 | local postgres is a master. 100 | 101 | 3. 
Busy-loop (again via 1-second sleeps), checking the master 102 | status of the other postgres node, until it detects that 103 | either the master node has gone away (via a connection 104 | timeout), or the master node has somehow become a replica. 105 | 106 | 4. Promote the local postgres node to a master. 107 | 108 | Intelligent routing can be done by colocating the `haproxy` and 109 | `keepalived` jobs on the instance groups with `postgres`. HAproxy 110 | is configured with an external check that will only treat the 111 | master postgres node as healthy. This ensures that either load 112 | balancer node will only ever route to the write master. 113 | 114 | The `keepalived` node trades a VRRP VIP between the `haproxy` 115 | instances. This ensures that the cluster can be accessed over a 116 | single, fixed IP address. Each keepalived process watches its own 117 | haproxy process; if it notices haproxy is down, it will terminate, 118 | to allow the VIP to fail over to the other node, which is assumed 119 | to be healthy. 120 | 121 | It is possible to "instance-up" a single postgres node deploy to a 122 | HA cluster by adding the `vip` job and changing postgres `instances` 123 | to 2. More information about this can be found in `manifests/ha.yml` 124 | 125 | For backup purposes, a route is exposed through haproxy which 126 | routes directly to the read-only replica for backup jobs. By default 127 | it is port `7432`, but is also configurable via `vip.readonly_port` 128 | 129 | Here's a diagram: 130 | 131 | ![High Availability Diagram](docs/ha.png) 132 | 133 | The following parameters affect high availability: 134 | 135 | - `postgres.replication.enabled` - Enables replication, which is 136 | necessary for HA. Defaults to `false`. 137 | 138 | - `postgres.replication.grace` - How many seconds to wait for 139 | the other node to report itself as a master, during boot. 140 | Defaults to `15`. 
141 | 142 | - `postgres.replication.connect_timeout` - How many seconds to 143 | allow a `psql` health check to attempt to connect to the other 144 | node before considering it a failure. The lower this value, 145 | the faster your cluster will failover, but the higher a risk 146 | of accidental failover and split-brain. Defaults to `5`. 147 | 148 | - `postgres.replication.psql_error_count` - How many failed PSQL 149 | commands allowed before considering it a failure. The health 150 | checks are PSQL commands executed every second. Poor network 151 | conditions may result in a "Connection dropped" PSQL error. 152 | The lower this value, the higher potential for accidental 153 | failover and split-brain. Defaults to `3`. 154 | 155 | - `vip.readonly_port` - Which port to access the read-only node 156 | of the cluster. Defaults to `7432`. 157 | 158 | - `vip.vip` - Which IP to use as a VIP that is traded between the 159 | two nodes. 160 | 161 | ### HA Failure Modes 162 | 163 | Our HA solution is focused on preventing downtime in the face of 164 | upgrades or other single-node failure. As such, we do not attempt to 165 | solve scenarios where the two databases cannot communicate with one 166 | another (e.g. network partition). In this case, it is possible that the 167 | replica believes the master to be down, and will promote itself to be 168 | master. The Postgres servers are then in a state of "split-brain" and 169 | requests to the DB will be split between the two nodes. 170 | 171 | To mitigate this, each node checks to see who is master. If both 172 | nodes are master (split-brain), both immediately shut down to prevent 173 | inconsistent data states. *This will result in downtime*. But we 174 | believe downtime is preferable to inconsistent database states. 
175 | 176 | However, this mitigation is not a silver bullet; it is possible that 177 | prolonged network outage between the two nodes will prevent them from 178 | checking who is master, and will continue to operate in split-brain 179 | fashion. We do not attempt to solve this. 180 | 181 | ### Recovery From Failure Mode 182 | 183 | After the database has been validated, and a node to become master 184 | is chosen, SSH into the node via `bosh ssh postgres/#` and then 185 | execute `/var/vcap/jobs/postgres/bin/recover` as root. This node 186 | will then become master. 187 | 188 | Once the script executes successfully, then SSH into the other node 189 | via `bosh ssh postgres/#` and then execute 190 | `/var/vcap/jobs/postgres/bin/recover` as root. This node will then 191 | replicate from the new master. 192 | 193 | You will now have a nominal Postgres running. -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | desc "Generates a properties file for each job based on properties.X.Y used in templates" 2 | task :job_properties do 3 | require "fileutils" 4 | Dir["jobs/*"].each do |path| 5 | puts "Searching job #{File.basename(path)}..." 6 | FileUtils.chdir(path) do 7 | properties = [] 8 | Dir["templates/*.erb"].each do |template_path| 9 | properties |= File.read(template_path).scan(/\bproperties\.[\w\.]*\b/) 10 | puts properties.join("\n") 11 | File.open("properties", "w") { |file| file << properties.join("\n") } 12 | end 13 | end 14 | end 15 | end -------------------------------------------------------------------------------- /ci/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # ci/pipeline.yml 4 | # 5 | # Pipeline structure file for a BOSH Release pipeline 6 | # 7 | # DO NOT MAKE CHANGES TO THIS FILE. Instead, modify 8 | # ci/settings.yml and override what needs overridden. 
9 | # This uses spruce, so you have some options there. 10 | # 11 | # author: James Hunt 12 | # created: 2016-03-30 13 | 14 | meta: 15 | name: (( param "Please name your pipeline" )) 16 | release: (( grab meta.name )) 17 | target: (( param "Please identify the name of the target Concourse CI" )) 18 | url: (( param "Please specify the full url of the target Concourse CI" )) 19 | pipeline: (( concat meta.name "-boshrelease" )) 20 | manifest: 21 | path: (( concat "manifests/" meta.name ".yml" )) 22 | 23 | git: 24 | email: (( param "Please provide the git email for automated commits" )) 25 | name: (( param "Please provide the git name for automated commits" )) 26 | 27 | image: 28 | name: starkandwayne/concourse 29 | tag: latest 30 | 31 | aws: 32 | bucket: (( concat meta.pipeline "-pipeline" )) 33 | region_name: us-east-1 34 | access_key: (( param "Please set your AWS Access Key ID for your pipeline S3 Bucket" )) 35 | secret_key: (( param "Please set your AWS Secret Key ID for your pipeline S3 Bucket" )) 36 | 37 | github: 38 | uri: (( concat "git@github.com:" meta.github.owner "/" meta.github.repo )) 39 | owner: (( param "Please specify the name of the user / organization that owns the Github repository" )) 40 | repo: (( param "Please specify the name of the Github repository" )) 41 | branch: master 42 | private_key: (( param "Please generate an SSH Deployment Key for this repo and specify it here" )) 43 | access_token: (( param "Please generate a Personal Access Token to be used for creating github releases (do you have a ci-bot?)" )) 44 | 45 | bosh-lite: 46 | target: (( param "Please specify the BOSH target URI for the bosh-lite to run test deployments against" )) 47 | cacert: (( param "Please specify the BOSH Director Root CA cert" )) 48 | username: admin 49 | password: (( param "Please specify the BOSH Director admin password" )) 50 | deployment: (( concat meta.name "-testflight" )) 51 | 52 | shout: 53 | topic: (( concat meta.name "-pipeline" )) 54 | url: (( param 
"Please specify the Shout! endpoint" )) 55 | username: (( param "Please specify the Shout! operations username" )) 56 | password: (( param "Please specify the Shout! operations password" )) 57 | 58 | metadata: 59 | build-team-name: $BUILD_TEAM_NAME 60 | build-job-name: $BUILD_JOB_NAME 61 | build-pipeline-name: $BUILD_PIPELINE_NAME 62 | 63 | links: 64 | pipeline: (( concat meta.url "/teams/$BUILD_TEAM_NAME/pipelines/$BUILD_PIPELINE_NAME" )) 65 | build: (( concat meta.shout.links.pipeline "/jobs/$BUILD_JOB_NAME/builds/$BUILD_NAME" )) 66 | 67 | groups: 68 | - name: (( grab meta.pipeline )) 69 | jobs: 70 | - testflight 71 | - pre 72 | - rc 73 | - shipit 74 | - major 75 | - minor 76 | 77 | jobs: 78 | - name: testflight 79 | public: true 80 | serial: true 81 | plan: 82 | - name: main 83 | do: 84 | - name: get 85 | in_parallel: 86 | - { get: git, trigger: true } 87 | - name: testflights 88 | in_parallel: 89 | - name: testflight 90 | task: testflight 91 | config: 92 | platform: linux 93 | image_resource: 94 | type: docker-image 95 | source: 96 | repository: (( grab meta.image.name )) 97 | tag: (( grab meta.image.tag )) 98 | inputs: 99 | - { name: git } 100 | run: 101 | path: ./git/ci/scripts/testflight 102 | args: [] 103 | params: 104 | REPO_ROOT: git 105 | BOSH_ENVIRONMENT: (( grab meta.bosh-lite.target )) 106 | BOSH_CA_CERT: (( grab meta.bosh-lite.cacert )) 107 | BOSH_CLIENT: (( grab meta.bosh-lite.username )) 108 | BOSH_CLIENT_SECRET: (( grab meta.bosh-lite.password )) 109 | BOSH_DEPLOYMENT: (( grab meta.bosh-lite.deployment )) 110 | TEST_ERRANDS: (( grab meta.test-errands || meta.test-errand || ~ )) 111 | AWS_ACCESS_KEY: (( grab meta.aws.access_key )) 112 | AWS_SECRET_KEY: (( grab meta.aws.secret_key )) 113 | MANIFEST_PATH: (( grab meta.manifest.path )) 114 | on_success: 115 | put: notify 116 | params: 117 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 118 | message: tests job '$BUILD_JOB_NAME' succeeded. 
119 | ok: yes 120 | link: (( grab meta.shout.links.build )) 121 | on_failure: 122 | put: notify 123 | params: 124 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 125 | message: tests job '$BUILD_JOB_NAME' failed. 126 | ok: no 127 | link: (( grab meta.shout.links.build )) 128 | 129 | - name: pre 130 | public: true 131 | serial: true 132 | plan: 133 | - do: 134 | - in_parallel: 135 | - { get: git, trigger: true, passed: [testflight] } 136 | - { get: version, trigger: true } 137 | - task: release-notes 138 | config: 139 | platform: linux 140 | image_resource: 141 | type: docker-image 142 | source: 143 | repository: (( grab meta.image.name )) 144 | tag: (( grab meta.image.tag )) 145 | inputs: 146 | - { name: git } 147 | run: 148 | path: sh 149 | args: 150 | - -ce 151 | - | 152 | cd git 153 | if [ -f ci/release_notes.md ]; then 154 | echo "###### RELEASE NOTES ###############" 155 | echo 156 | cat ci/release_notes.md 157 | echo 158 | echo "########################################" 159 | echo 160 | else 161 | echo "NO RELEASE NOTES HAVE BEEN WRITTEN" 162 | echo "You *might* want to do that before" 163 | echo "hitting (+) on that shipit job..." 164 | echo 165 | fi 166 | on_success: 167 | put: notify 168 | params: 169 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 170 | message: release candidate job 'pre' succeeded. 171 | ok: yes 172 | link: (( grab meta.shout.links.build )) 173 | on_failure: 174 | put: notify 175 | params: 176 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 177 | message: release candidate job 'pre' failed (which is unusual). 
178 | ok: no 179 | link: (( grab meta.shout.links.build )) 180 | 181 | - name: rc 182 | public: true 183 | serial: true 184 | plan: 185 | - do: 186 | - in_parallel: 187 | - { get: git, trigger: true, passed: [pre] } 188 | - { get: version, trigger: false, passed: [pre], params: {pre: rc} } 189 | - put: version 190 | params: {file: version/number} 191 | on_success: 192 | put: notify 193 | params: 194 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 195 | message: release candidate job 'rc' succeeded. 196 | ok: yes 197 | link: (( grab meta.shout.links.build )) 198 | on_failure: 199 | put: notify 200 | params: 201 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 202 | message: release candidate job 'rc' failed (which is unusual). 203 | ok: no 204 | link: (( grab meta.shout.links.build )) 205 | 206 | - name: minor 207 | public: true 208 | plan: 209 | - do: 210 | - { get: version, trigger: false, params: {bump: minor} } 211 | - { put: version, params: {file: version/number} } 212 | on_success: 213 | put: notify 214 | params: 215 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 216 | message: minor version bump job 'minor' succeeded. 217 | ok: yes 218 | link: (( grab meta.shout.links.build )) 219 | on_failure: 220 | put: notify 221 | params: 222 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 223 | message: minor version bump job 'minor' failed (which is unusual). 224 | ok: no 225 | link: (( grab meta.shout.links.build )) 226 | 227 | - name: major 228 | public: true 229 | plan: 230 | - do: 231 | - { get: version, trigger: false, params: {bump: major} } 232 | - { put: version, params: {file: version/number} } 233 | on_success: 234 | put: notify 235 | params: 236 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 237 | message: major version bump job '$BUILD_JOB_NAME' succeeded. 
238 | ok: no 239 | link: (( grab meta.shout.links.build )) 240 | on_failure: 241 | put: notify 242 | params: 243 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 244 | message: major version bump job '$BUILD_JOB_NAME' failed (which is unusual). 245 | ok: no 246 | link: (( grab meta.shout.links.build )) 247 | 248 | - name: shipit 249 | public: true 250 | serial: true 251 | plan: 252 | - do: 253 | - name: inputs 254 | in_parallel: 255 | - { get: version, passed: [rc], params: {bump: final} } 256 | - { get: git, passed: [rc] } 257 | - name: release 258 | task: release 259 | config: 260 | platform: linux 261 | image_resource: 262 | type: docker-image 263 | source: 264 | repository: (( grab meta.image.name )) 265 | tag: (( grab meta.image.tag )) 266 | inputs: 267 | - name: version 268 | - name: git 269 | outputs: 270 | - name: gh 271 | - name: pushme 272 | - name: notifications 273 | run: 274 | path: ./git/ci/scripts/shipit 275 | args: [] 276 | params: 277 | REPO_ROOT: git 278 | VERSION_FROM: version/number 279 | RELEASE_ROOT: gh 280 | REPO_OUT: pushme 281 | BRANCH: (( grab meta.github.branch )) 282 | GITHUB_OWNER: (( grab meta.github.owner )) 283 | GIT_EMAIL: (( grab meta.git.email )) 284 | GIT_NAME: (( grab meta.git.name )) 285 | AWS_ACCESS_KEY: (( grab meta.aws.access_key )) 286 | AWS_SECRET_KEY: (( grab meta.aws.secret_key )) 287 | 288 | - name: upload-git 289 | put: git 290 | params: 291 | rebase: true 292 | repository: pushme/git 293 | - name: tarball 294 | put: s3-tarball 295 | params: 296 | file: (( concat "gh/artifacts/" meta.name "-*.tgz" )) 297 | - name: github-release 298 | put: github 299 | params: 300 | name: gh/name 301 | tag: gh/tag 302 | body: gh/notes.md 303 | globs: [gh/artifacts/*] 304 | - name: version-bump 305 | put: version 306 | params: 307 | bump: patch 308 | - name: notify 309 | in_parallel: 310 | - put: notify 311 | params: 312 | method: announce 313 | file: notifications/message 314 | link: (( concat meta.github.uri "/releases" )) 
315 | 316 | on_success: 317 | put: notify 318 | params: 319 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 320 | message: release job '$BUILD_JOB_NAME' succeeded. 321 | ok: yes 322 | link: (( grab meta.shout.links.build )) 323 | on_failure: 324 | put: notify 325 | params: 326 | topic: (( concat meta.shout.topic "-$BUILD_JOB_NAME" )) 327 | message: release job '$BUILD_JOB_NAME' failed. 328 | ok: no 329 | link: (( grab meta.shout.links.build )) 330 | 331 | resource_types: 332 | - name: shout-notification 333 | type: docker-image 334 | source: 335 | repository: huntprod/shout-resource 336 | 337 | resources: 338 | - name: git 339 | type: git 340 | source: 341 | uri: (( grab meta.github.uri )) 342 | branch: (( grab meta.github.branch )) 343 | private_key: (( grab meta.github.private_key )) 344 | 345 | - name: version 346 | type: semver 347 | source : 348 | driver: s3 349 | bucket: (( grab meta.aws.bucket )) 350 | region_name: (( grab meta.aws.region_name )) 351 | key: version 352 | access_key_id: (( grab meta.aws.access_key )) 353 | secret_access_key: (( grab meta.aws.secret_key )) 354 | initial_version: (( grab meta.initial_version || "0.0.1" )) 355 | 356 | - name: notify 357 | type: shout-notification 358 | source: 359 | topic: (( grab meta.shout.topic )) 360 | url: (( grab meta.shout.url )) 361 | username: (( grab meta.shout.username )) 362 | password: (( grab meta.shout.password )) 363 | 364 | - name: github 365 | type: github-release 366 | source: 367 | user: (( grab meta.github.owner )) 368 | repository: (( grab meta.github.repo )) 369 | access_token: (( grab meta.github.access_token )) 370 | 371 | - name: s3-tarball 372 | type: s3 373 | source: 374 | bucket: (( grab meta.aws.bucket )) 375 | region_name: (( grab meta.aws.region_name )) 376 | regexp: (( concat meta.name "-(.*).tgz" )) 377 | access_key_id: (( grab meta.aws.access_key )) 378 | secret_access_key: (( grab meta.aws.secret_key )) 379 | 
-------------------------------------------------------------------------------- /ci/repipe: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # ci/repipe 4 | # 5 | # Script for merging together pipeline configuration files 6 | # (via Spruce!) and configuring Concourse. 7 | # 8 | # author: James Hunt 9 | # Dennis Bell 10 | # created: 2016-03-04 11 | 12 | need_command() { 13 | local cmd=${1:?need_command() - no command name given} 14 | 15 | if [[ ! -x "$(command -v $cmd)" ]]; then 16 | echo >&2 "${cmd} is not installed." 17 | if [[ "${cmd}" == "spruce" ]]; then 18 | echo >&2 "Please download it from https://github.com/geofffranks/spruce/releases" 19 | fi 20 | exit 2 21 | fi 22 | } 23 | 24 | NO_FLY= 25 | SAVE_MANIFEST= 26 | VALIDATE_PIPELINE= 27 | NON_INTERACTIVE= 28 | 29 | cleanup() { 30 | rm -f save-manifest.yml 31 | if [[ -n ${SAVE_MANIFEST} && -e .deploy.yml ]]; then 32 | mv .deploy.yml save-manifest.yml 33 | fi 34 | rm -f .deploy.yml 35 | } 36 | 37 | usage() { 38 | echo Command line arguments: 39 | echo "no-fly Do not execute any fly commands" 40 | echo "save-manifest Save manifest to file save-manifest" 41 | echo "validate Validatei pipeline instead of set pipeline" 42 | echo "validate-strict Validate pipeline with strict mode" 43 | echo "non-interactivet Run set-pipeline in non-interactive mode" 44 | } 45 | 46 | for arg do 47 | case "${arg}" in 48 | no-fly|no_fly) NO_FLY="yes" ;; 49 | save-manifest|save_manifest) SAVE_MANIFEST="yes" ;; 50 | validate) VALIDATE_PIPELINE="normal" ;; 51 | validate-strict|validate_strict) VALIDATE_PIPELINE="strict" ;; 52 | non-interactive|non_interactive) NON_INTERACTIVE="--non-interactive" ;; 53 | help|-h|--help) usage; exit 0 ;; 54 | *) echo Invalid argument 55 | usage 56 | exit 1 57 | esac 58 | done 59 | 60 | cd $(dirname $BASH_SOURCE[0]) 61 | echo "Working in $(pwd)" 62 | need_command spruce 63 | 64 | # Allow for target-specific settings 65 | settings_file="$(ls -1 
settings.yml ${CONCOURSE_TARGET:+"settings-${CONCOURSE_TARGET}.yml"} 2>/dev/null | tail -n1)" 66 | if [[ -z "$settings_file" ]] 67 | then 68 | echo >&2 "Missing local settings in ci/settings.yml${CONCOURSE_TARGET:+" or ci/settings-${CONCOURSE_TARGET}.yml"}!" 69 | exit 1 70 | fi 71 | 72 | echo >&2 "Using settings found in ${settings_file}" 73 | 74 | set -e 75 | trap "cleanup" QUIT TERM EXIT INT 76 | spruce merge pipeline.yml ${settings_file} > .deploy.yml 77 | PIPELINE=$(spruce json .deploy.yml | jq -r '.meta.pipeline // ""') 78 | if [[ -z ${PIPELINE} ]]; then 79 | echo >&2 "Missing pipeline name in ci/settings.yml!" 80 | exit 1 81 | fi 82 | 83 | TARGET_FROM_SETTINGS=$(spruce json .deploy.yml | jq -r '.meta.target // ""') 84 | if [[ -z ${CONCOURSE_TARGET} ]]; then 85 | TARGET=${TARGET_FROM_SETTINGS} 86 | elif [[ "$CONCOURSE_TARGET" != "$TARGET_FROM_SETTINGS" ]] 87 | then 88 | echo >&2 "Target in {$settings_file} differs from target in \$CONCOURSE_TARGET" 89 | echo >&2 " \$CONCOURSE_TARGET: $CONCOURSE_TARGET" 90 | echo >&2 " Target in file: $TARGET_FROM_SETTINGS" 91 | exit 1 92 | else 93 | TARGET=${CONCOURSE_TARGET} 94 | fi 95 | 96 | if [[ -z ${TARGET} ]]; then 97 | echo >&2 "Missing Concourse Target in ci/settings.yml!" 
98 | exit 1 99 | fi 100 | 101 | fly_cmd="${FLY_CMD:-fly}" 102 | 103 | [[ -n ${NO_FLY} ]] && { echo no fly execution requested ; exit 0; } 104 | 105 | case "${VALIDATE_PIPELINE}" in 106 | normal) fly_opts="validate-pipeline" ;; 107 | strict) fly_opts="validate-pipeline --strict" ;; 108 | *) fly_opts="set-pipeline ${NON_INTERACTIVE} --pipeline ${PIPELINE}" ;; 109 | esac 110 | 111 | set +x 112 | $fly_cmd --target ${TARGET} ${fly_opts} --config .deploy.yml 113 | [[ -n ${VALIDATE_PIPELINE} ]] && exit 0 114 | $fly_cmd --target ${TARGET} unpause-pipeline --pipeline ${PIPELINE} 115 | -------------------------------------------------------------------------------- /ci/scripts/rc: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # ci/scripts/rc 5 | # 6 | # Script for doing some last-minute, pre-shipit 7 | # checks, like verifying that we actually wrote 8 | # release notes for the next cut. 9 | # 10 | # author: James Hunt 11 | # created: 2017-03-23 12 | # 13 | 14 | ROOT=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd ) 15 | cd $ROOT 16 | 17 | if [[ $(git log -1 --format=%aE) = "ci@starkandwayne.com" ]]; then 18 | echo "Skipping Release Notes check; commit was from CI Bot:" 19 | git --no-pager log -1 20 | exit 0 21 | fi 22 | 23 | if [[ ! -s ci/release_notes.md ]]; then 24 | cat < 9 | 10 | set -eu 11 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 12 | 13 | header() { 14 | echo 15 | echo "###############################################" 16 | echo 17 | echo $* 18 | echo 19 | } 20 | 21 | ###### 22 | ###### 23 | ###### 24 | header "Checking the Concourse Pipeline Environment" 25 | envok=0 26 | checkenv() { 27 | local name=$1 28 | local val=$2 29 | 30 | if [[ -z "$val" ]]; then 31 | echo >&2 "The $name variable must be set." 
32 | envok=1 33 | fi 34 | } 35 | checkenv REPO_ROOT "${REPO_ROOT:-}" 36 | checkenv RELEASE_ROOT "${RELEASE_ROOT:-}" 37 | checkenv REPO_OUT "${REPO_OUT:-}" 38 | checkenv BRANCH "${BRANCH:-}" 39 | checkenv GITHUB_OWNER "${GITHUB_OWNER:-}" 40 | checkenv VERSION_FROM "${VERSION_FROM:-}" 41 | checkenv AWS_ACCESS_KEY "${AWS_ACCESS_KEY:-}" 42 | checkenv AWS_SECRET_KEY "${AWS_SECRET_KEY:-}" 43 | checkenv GIT_EMAIL "${GIT_EMAIL:-}" 44 | checkenv GIT_NAME "${GIT_NAME:-}" 45 | if [[ $envok != 0 ]]; then 46 | echo >&2 "Is your Concourse Pipeline misconfigured?" 47 | exit 1 48 | fi 49 | 50 | VERSION=$(cat ${VERSION_FROM}) 51 | if [[ -z "${VERSION}" ]]; then 52 | echo >&2 "Version file (${VERSION_FROM}) was empty." 53 | exit 1 54 | fi 55 | 56 | if [[ ! -f ${REPO_ROOT}/ci/release_notes.md ]]; then 57 | echo >&2 "ci/release_notes.md not found. Did you forget to write them?" 58 | exit 1 59 | fi 60 | 61 | ############################################################### 62 | 63 | cd ${REPO_ROOT} 64 | RELEASE_NAME=$(bosh int config/final.yml --path /final_name) 65 | 66 | ###### 67 | ###### 68 | ###### 69 | header "Configuring blobstore (AWS)" 70 | cat > config/private.yml < ${RELEASE_ROOT}/tag 94 | echo "${RELEASE_NAME} v${VERSION}" > ${RELEASE_ROOT}/name 95 | mv ${REPO_ROOT}/releases/*/*-${VERSION}.tgz ${RELEASE_ROOT}/artifacts 96 | mv ${REPO_ROOT}/ci/release_notes.md ${RELEASE_ROOT}/notes.md 97 | cat >> ${RELEASE_ROOT}/notes.md < ${RELEASE_ROOT}/notification < $MANIFEST_PATH < notifications/message 151 | echo "https://github.com/${GITHUB_OWNER}/${RELEASE_NAME}-boshrelease/releases/tag/v${VERSION}" > notifications/link 152 | 153 | echo 154 | echo 155 | echo 156 | echo "SUCCESS" 157 | exit 0 158 | -------------------------------------------------------------------------------- /ci/scripts/testflight: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # ci/scripts/testflight 4 | # 5 | # Script for testing a BOSH release 6 | # 7 | # 
author: James Hunt 8 | 9 | set -eu 10 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 11 | 12 | header() { 13 | echo 14 | echo "###############################################" 15 | echo 16 | echo $* 17 | echo 18 | } 19 | 20 | ###### 21 | ###### 22 | ###### 23 | header "Checking the Concourse Pipeline Environment" 24 | envok=0 25 | checkenv() { 26 | local name=$1 27 | local val=$2 28 | 29 | if [[ -z "$val" ]]; then 30 | echo >&2 "The $name variable must be set." 31 | envok=1 32 | fi 33 | } 34 | checkenv BOSH_ENVIRONMENT "${BOSH_ENVIRONMENT:-}" 35 | checkenv BOSH_CA_CERT "${BOSH_CA_CERT:-}" 36 | checkenv BOSH_CLIENT "${BOSH_CLIENT:-}" 37 | checkenv BOSH_CLIENT_SECRET "${BOSH_CLIENT_SECRET:-}" 38 | checkenv BOSH_DEPLOYMENT "${BOSH_DEPLOYMENT:-}" 39 | checkenv MANIFEST_PATH "${MANIFEST_PATH:-}" 40 | checkenv AWS_ACCESS_KEY "${AWS_ACCESS_KEY:-}" 41 | checkenv AWS_SECRET_KEY "${AWS_SECRET_KEY:-}" 42 | checkenv REPO_ROOT "${REPO_ROOT:-}" 43 | if [[ $envok != 0 ]]; then 44 | echo >&2 "Is your Concourse Pipeline misconfigured?" 45 | exit 1 46 | fi 47 | 48 | cd $REPO_ROOT 49 | if [[ ! -f ${MANIFEST_PATH} ]]; then 50 | echo "Deployment manifest ${MANIFEST_PATH} does not exist" 51 | exit 1 52 | fi 53 | 54 | ###### 55 | ###### 56 | ###### 57 | header "Cleaning up any previous '${BOSH_DEPLOYMENT}' deployments..." 58 | bosh -n delete-deployment --force || true 59 | 60 | ###### 61 | ###### 62 | ###### 63 | header "Creating candidate BOSH release..." 64 | bosh -n reset-release # in case dev_releases/ is in repo accidentally 65 | cat > config/private.yml < overrides.yml 125 | 126 | ###### 127 | ###### 128 | ###### 129 | header "Deploying to ${BOSH_ENVIRONMENT}..." 
130 | bosh -n deploy $MANIFEST_PATH -o overrides.yml 131 | 132 | ###### 133 | ###### 134 | ###### 135 | if [[ -n ${TEST_ERRAND:-} ]]; then 136 | header "Running '${TEST_ERRAND}' errand" 137 | bosh -n run-errand ${TEST_ERRAND} 138 | fi 139 | 140 | ###### 141 | ###### 142 | ###### 143 | header "Cleaning up testflight '${BOSH_DEPLOYMENT}' deployment..." 144 | bosh -n delete-deployment --force 145 | 146 | echo 147 | echo 148 | echo 149 | echo "SUCCESS" 150 | exit 0 151 | -------------------------------------------------------------------------------- /ci/scripts/update-blob: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # ci/scripts/update-blob 4 | # 5 | # Script for automatically updating a BOSH blob 6 | # in a BOSH release, for use in a pipeline. 7 | # 8 | # author: James Hunt 9 | 10 | set -eu 11 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 12 | 13 | header() { 14 | echo 15 | echo "###############################################" 16 | echo 17 | echo $* 18 | echo 19 | } 20 | 21 | ###### 22 | ###### 23 | ###### 24 | header "Checking Concourse Pipeline Environment" 25 | envok=0 26 | checkenv() { 27 | local name=$1 28 | local val=$2 29 | 30 | if [[ -z "$val" ]]; then 31 | echo >&2 "The $name variable must be set." 32 | envok=1 33 | fi 34 | } 35 | checkenv REPO_ROOT "${REPO_ROOT:-}" 36 | checkenv BLOB_DIR "${BLOB_DIR:-}" 37 | checkenv BLOB_NAME "${BLOB_NAME:-}" 38 | checkenv BLOB_BINARY "${BLOB_BINARY:-}" 39 | checkenv BLOB_CLEANUP "${BLOB_CLEANUP:-}" 40 | checkenv BLOB_DESTINATION "${BLOB_DESTINATION:-}" 41 | checkenv AWS_ACCESS_KEY "${AWS_ACCESS_KEY:-}" 42 | checkenv AWS_SECRET_KEY "${AWS_SECRET_KEY:-}" 43 | if [[ $envok != 0 ]]; then 44 | echo >&2 "Is your Concourse Pipeline misconfigured?" 
45 | exit 1 46 | fi 47 | 48 | VERSION=$(cat ${BLOB_DIR}/version) 49 | 50 | ###### 51 | ###### 52 | ###### 53 | header "Configuring blobstore (AWS)" 54 | pushd ${REPO_ROOT} 55 | cat >config/private.yml < ../${BLOB_DIR}/${BLOB_BINARY} 81 | else 82 | curl -Lsk $(printf ${BLOB_SOURCE} ${VERSION}) > ../${BLOB_DIR}/${BLOB_BINARY} 83 | fi 84 | fi 85 | 86 | # remove things that github-release puts in the directory that we don't want. 87 | rm -f ../${BLOB_DIR}/tag \ 88 | ../${BLOB_DIR}/body \ 89 | ../${BLOB_DIR}/version 90 | 91 | ls -lah ../${BLOB_DIR} 92 | 93 | ###### 94 | ###### 95 | ###### 96 | header "Updating blobs" 97 | # expand ${VERSION} env var into file path 98 | eval "blob_dst=${BLOB_DESTINATION}" 99 | if [[ "$BLOB_BINARY" == "*" ]]; then 100 | for blob_src in ../${BLOB_DIR}/${BLOB_BINARY}; do 101 | base=${blob_src##*/} 102 | bosh add-blob "${blob_src}" "${blob_dst}/${base}" 103 | done 104 | else 105 | bosh add-blob "../${BLOB_DIR}/${BLOB_BINARY}" "${blob_dst}" 106 | fi 107 | 108 | ###### 109 | ###### 110 | ###### 111 | header "Uploading blobs to blobstore (AWS)" 112 | bosh -n upload-blobs 113 | rm config/private.yml 114 | popd 115 | 116 | ###### 117 | ###### 118 | ###### 119 | header "Commiting and pushing changes" 120 | if [[ -n "$(cd ${REPO_ROOT}; git status --porcelain)" ]]; then 121 | pushd ${REPO_ROOT} 122 | cat >>ci/release_notes.md < $MANIFEST_PATH < 8 | check process monitor 9 | with pidfile /var/vcap/sys/run/postgres/monitor.pid 10 | start program "/var/vcap/jobs/postgres/bin/monitor start" with timeout 60 seconds 11 | stop program "/var/vcap/jobs/postgres/bin/monitor stop" 12 | group vcap 13 | <% end %> 14 | -------------------------------------------------------------------------------- /jobs/postgres/spec: -------------------------------------------------------------------------------- 1 | --- 2 | name: postgres 3 | packages: 4 | - postgres 5 | templates: 6 | bin/ctl: bin/ctl 7 | bin/tune: bin/tune 8 | bin/healthy: bin/healthy 9 | bin/monitor: 
bin/monitor 10 | 11 | bin/recover: bin/recover 12 | 13 | bin/functions: bin/functions 14 | 15 | data/properties.sh.erb: data/properties.sh 16 | helpers/ctl_setup.sh: helpers/ctl_setup.sh 17 | helpers/ctl_utils.sh: helpers/ctl_utils.sh 18 | envrc: envrc 19 | 20 | config/postgresql.conf: config/postgresql.conf 21 | config/hba.conf: config/hba.conf 22 | config/ident.conf: config/ident.conf 23 | 24 | sql/setup-replication.sql: sql/setup-replication.sql 25 | 26 | provides: 27 | - name: db 28 | type: postgresql 29 | properties: 30 | - postgres.config 31 | - postgres.users 32 | - postgres.databases 33 | 34 | consumes: 35 | - name: db 36 | type: postgresql 37 | 38 | 39 | properties: 40 | postgres.config: 41 | description: A map of postgresql.conf configuration directives, keyed by name. 42 | default: 43 | port: 6432 44 | 45 | postgres.hba: 46 | description: A list of pg_hba.conf configuration acls. See Section 19.1 of the PostgreSQL manual. 47 | default: [] 48 | 49 | postgres.ident: 50 | description: A list of pg_ident.conf configuration entries See Section 19.2 of the PostgreSQL manual. 51 | default: [] 52 | 53 | postgres.tuning.enable: 54 | description: Enable experimental Kernel Tuning code, utilizing values from postgres.tuning.* properties hierarchy. THIS IS EXPERIMENTAL. 55 | default: false 56 | postgres.tuning.force: 57 | description: Whether or not to adhere strictly to specified and/or calculated tuning values. If set to false (the default), tuning parameters are taken as guides, usually in the form of minimums. 58 | default: false 59 | 60 | # NOTE: Neither SHMMIN nor SHMSEG are modifiable on Linux (at least not without a 61 | # kernel reconfigure / recompile) so they are not represented here. 62 | # 63 | postgres.tuning.ipc.shmmax: 64 | description: Maximum size of shared memory segment (bytes) - at least 1kB (more if running many copies of the server). 
65 | default: auto 66 | postgres.tuning.ipc.shmall: 67 | description: Total amount of shared memory available (bytes or pages) - this should normally be left at 'auto'. 68 | default: auto 69 | postgres.tuning.ipc.shmmni: 70 | description: Maximum number of shared memory segments system-wide - like postgres.tuning.ipc.shmseg plus room for other applications. 71 | default: auto 72 | 73 | # NOTE: Neither SEMMAP nor SEMVMX are modifiable on Linux (at least not without a 74 | # kernel reconfigure / recompile) so they are not represented here. 75 | # 76 | # HOWEVER, SEMOPM *is* configurable, so even though the PostgreSQL documentation 77 | # doesn't mention it explicitly in section 17.1, it's available for tuning. 78 | # 79 | postgres.tuning.ipc.semmni: 80 | description: Maximum number of semaphore identifiers (i.e., sets) - at least ceil((max_connections + autovacuum_max_workers + 4) / 16). 81 | default: auto 82 | postgres.tuning.ipc.semmns: 83 | description: Maximum number of semaphores system-wide - ceil((max_connections + autovacuum_max_workers + 4) / 16) * 17 plus room for other applications. 84 | default: auto 85 | postgres.tuning.ipc.semmsl: 86 | description: Maximum number of semaphores per set - at least 17. 87 | default: 17 88 | postgres.tuning.ipc.semopm: 89 | description: The maximum number of operations that may be specified in a semop(2) call. 90 | default: auto 91 | 92 | postgres.tuning.limits.fds: 93 | description: The maximum number of open files (including sockets) that any given process can have. 94 | default: auto 95 | 96 | postgres.replication.enabled: 97 | description: Whether or not to enable streaming PostgreSQL replication 98 | default: false 99 | postgres.replication.master: 100 | description: IP address of the preferred master node (should be the 0th postgres node's IP) 101 | default: ~ 102 | postgres.replication.grace: 103 | description: Grace period (in seconds) to look for an existing PostgreSQL master node on boot. 
104 | default: 15 105 | postgres.replication.connect_timeout: 106 | description: How long (in seconds) to wait before timing out a failover health check from one node to the other. 107 | default: 5 108 | postgres.replication.psql_error_count: 109 | description: How many failed attempts to check the other node's status before assuming failure. 110 | default: 3 111 | 112 | postgres.users: 113 | description: "A list of {username: ..., password: ...} objects for defining PostgreSQL users. Setting the 'admin:' key on a user will make them a superuser." 114 | default: [] 115 | 116 | postgres.databases: 117 | description: A list of databases to create in Postgres. 118 | default: [] 119 | example: | 120 | postgres: 121 | databases: 122 | - name: animals 123 | users: 124 | - porcupine 125 | - hedgehog 126 | extensions: # optional array of extensions to enable on this database 127 | - citext -------------------------------------------------------------------------------- /jobs/postgres/templates/bin/ctl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | <% 4 | 5 | masterip = link("db").instances.first.address 6 | replication = p('postgres.replication.enabled', false) && masterip != '' 7 | master = replication && spec.index == 0 8 | port = p('postgres.config.port', (replication ? 6432 : 5432)) 9 | facility = replication ? (master ? 
'repl:master' : 'repl:slave') : 'standalone' 10 | 11 | %> 12 | # first, some diagnostic info 13 | # --------------------------- 14 | # masterip | '<%= masterip %>' 15 | # spec.index | <%= spec.index %> 16 | # port | <%= port %> 17 | # replication | <%= replication %> 18 | # master | <%= master %> 19 | # 20 | 21 | # Setup env vars and folders for the webapp_ctl script 22 | source /var/vcap/jobs/postgres/helpers/ctl_setup.sh 'postgres' 23 | source /var/vcap/jobs/postgres/bin/functions 24 | exec >>$LOG_DIR/$JOB_NAME.log 2>&1 25 | 26 | # datadir is where PostgreSQL will store the database files. This really 27 | # *shouuld* be a persistent disk, unless you know what you are doing. 28 | datadir=/var/vcap/store/postgres/db 29 | 30 | # construct the list of options to pass to pg_ctl. These mostly get passed 31 | # onto the `postgres` process on startup, but the `-p` flag is used by the 32 | # "wait" feature (-w) to ensure that the database has finished booting. 33 | opts="-p <%= port %>" 34 | opts="${opts} -c external_pid_file=${PIDFILE}" 35 | opts="${opts} -c data_directory=${datadir}" 36 | opts="${opts} -c config_file=${JOB_DIR}/config/postgresql.conf" 37 | opts="${opts} -c hba_file=${JOB_DIR}/config/hba.conf" 38 | opts="${opts} -c ident_file=${JOB_DIR}/config/ident.conf" 39 | 40 | <% if p('postgres.tuning.enable') %> 41 | # set some ulimits... 42 | ulimit -v unlimited 43 | <% if p('postgres.tuning.limits.fds') != 'auto' 44 | %>ulimit -n <%= p('postgres.tuning.limits.fds') %> 45 | <% end 46 | end %> 47 | 48 | # 49 | # Bootstrap a new postgres installation, where we may not 50 | # have a data directory (usually, we don't). This is the 51 | # only time we honor masterip and the BOSH configuration 52 | # as far as who is master, because we have no prior state. 53 | # 54 | bootstrap_postgres() { 55 | echo >&2 "[<%= facility %>] bootstrapping this postgres instance" 56 | 57 | # do we need to init the database (as a replica or a master)? 58 | if [[ ! 
-d ${datadir} ]]; then 59 | <% if replication && master %> 60 | echo >&2 "[<%= facility %>] DATADIR (${datadir}) not found; initializing" 61 | chpst -u vcap:vcap pg_ctl -D ${datadir} initdb \ 62 | || rm -rf ${datadir} 63 | <% elsif replication && !master %> 64 | echo >&2 "[<%= facility %>] DATADIR (${datadir}) not found; seeding from preferred master (<%= masterip %>)" 65 | recover_postgres <%= masterip %> 66 | <% else %> 67 | echo >&2 "[<%= facility %>] DATADIR (${datadir}) not found; initializing" 68 | chpst -u vcap:vcap pg_ctl -D ${datadir} initdb \ 69 | || rm -rf ${datadir} 70 | <% end %> 71 | fi 72 | } 73 | 74 | start_postgres() { 75 | echo >&2 "[<%= facility %>] setting up location for WAL archives" 76 | mkdir -p ${datadir}/archive 77 | chown -R vcap:vcap ${datadir}/archive 78 | 79 | echo >&2 "[<%= facility %>] starting postgres..." 80 | chpst -u vcap:vcap pg_ctl -o "${opts}" -D ${datadir} -w -t 30 start 81 | } 82 | 83 | populate_postgres() { 84 | echo >&2 "[<%= facility %>] populating postgres instance with users and databases..." 
85 | (set +e 86 | cat < 102 | echo "[postgres] setting up user '<%= user['username'] %>'" 103 | <% admin = '-DRS' %> 104 | <% admin = '-drs' if user["admin"] %> 105 | 106 | createuser -U vcap -p <%= port %> <%=admin %> -El <%= user['username'] %> 107 | createdb -U vcap -p <%= port %> -O <%= user['username'] %> <%= user['username'] %> 108 | 109 | psql -p <%= port %> -U vcap postgres -c "ALTER ROLE \"<%= user['username'] %>\" WITH PASSWORD '<%= user['password'] %>'" 110 | psql -p <%= port %> -U vcap postgres -c "GRANT ALL PRIVILEGES ON DATABASE \"<%= user['username'] %>\" TO \"<%= user['username'] %>\"" 111 | echo 112 | 113 | <% end %> 114 | 115 | <% p('postgres.databases', []).each do |database| %> 116 | echo "[postgres] setting up database '<%= database['name'] %>'" 117 | 118 | createdb -U vcap -p <%= port %> -O vcap <%= database['name'] %> 119 | <% database['users'].each do |user| %> 120 | psql -p <%= port %> -U vcap postgres -c "GRANT ALL PRIVILEGES ON DATABASE \"<%= database['name'] %>\" TO \"<%= user %>\"" 121 | <% end %> 122 | 123 | <% if database["extensions"] 124 | database["extensions"].each do |ext| %> 125 | echo "Trying to install <%= ext %>..." 126 | psql -p <%= port %> -U vcap "<%= database['name'] %>" -c "CREATE EXTENSION IF NOT EXISTS <%= ext %>" 127 | <% end 128 | end %> 129 | echo 130 | 131 | <% end %> 132 | 133 | ) 2>&1 | grep -v 'already exists' 134 | } 135 | 136 | setup_replication() { 137 | <% if replication && master %> 138 | echo "[<%= facility %>] setting up replication" 139 | psql -p <%= port %> -U vcap postgres \ 140 | < ${JOB_DIR}/sql/setup-replication.sql 141 | <% end %> 142 | return 143 | } 144 | 145 | MASTER_IP= 146 | wait_for_master() { 147 | <% if replication %> 148 | echo >&2 "[<%= facility %>] looking for an existing postgres master node..." 
149 | # loop through our peer IPs, and attempt to connect to them on pg ports 150 | # FIXME: put a timeout loop on these 151 | for x in $(seq 1 <%= p('postgres.replication.grace') %>); do 152 | <% link("db").instances.reject { |the| the.address == spec.address }.each do |peer| %> 153 | echo "[<%= facility %>] checking to see if <%= peer.address %>:<%= port %> is a master..." 154 | if is_master <%= peer.address %> <%= port %>; then 155 | echo "[<%= facility %>] master is <%= peer.address %>" 156 | MASTER_IP=<%= peer.address %> 157 | return 158 | fi 159 | <% end %> 160 | 161 | sleep 1 162 | done 163 | <% else %> 164 | MASTER_IP= 165 | <% end %> 166 | return 167 | } 168 | 169 | recover_postgres() { 170 | from=${1:?recover_postgres() needs the IP address of the master to recover from} 171 | echo >&2 "[<%= facility %>] recovering as a replica of ${from}" 172 | 173 | echo >&2 "[<%= facility %>] removing existing datadir (if any) in ${datadir}" 174 | rm -rf ${datadir} 175 | mkdir -p ${datadir} -m 0700 176 | chown vcap:vcap ${datadir} 177 | 178 | echo >&2 "[<%= facility %>] performing a pg_basebackup from ${from}:<%= port %>" 179 | chpst -u vcap:vcap pg_basebackup -h ${from} -p <%= port %> -D ${datadir} -P -U replication --xlog-method=stream -R \ 180 | || rm -rf ${datadir} 181 | 182 | echo >&2 "[repl:master] RECOVERED" 183 | } 184 | 185 | case $1 in 186 | 187 | start) 188 | pid_guard $PIDFILE $JOB_NAME 189 | 190 | <% if p('postgres.tuning.enable') %> 191 | # tune that kernel! 192 | echo >&2 "[<%= facility %>] tuning the Linux kernel" 193 | COMMIT_TUNING=1 ${JOB_DIR}/bin/tune 194 | <% end %> 195 | 196 | # check if we have initialized already, and need to determine who 197 | # the database master is, or if we can rely on bootstrap configuration 198 | # (from BOSH) to select one. 199 | # 200 | if [[ ! 
-d ${datadir} ]]; then 201 | # initialize per BOSH configuration (masterip and the db link) 202 | bootstrap_postgres 203 | start_postgres 204 | populate_postgres 205 | setup_replication 206 | 207 | else 208 | # determine who our master is; 209 | wait_for_master 210 | 211 | if [[ -n "$MASTER_IP" ]]; then 212 | # we are not the master; we have another master. 213 | # recover ourselves from that master via IP. 214 | recover_postgres $MASTER_IP 215 | start_postgres 216 | 217 | else 218 | # we are the master, we get to (re-)create dbs and users. 219 | # and validate our replication configuration. 220 | start_postgres 221 | populate_postgres 222 | setup_replication 223 | fi 224 | fi 225 | ;; 226 | 227 | stop) 228 | chpst -u vcap:vcap pg_ctl -o "${opts}" -D ${datadir} -w -t 30 -m fast stop 229 | ;; 230 | 231 | *) 232 | echo "Usage: postgres {start|stop}" 233 | 234 | ;; 235 | 236 | esac 237 | exit 0 238 | -------------------------------------------------------------------------------- /jobs/postgres/templates/bin/functions: -------------------------------------------------------------------------------- 1 | # to be sourced 2 | 3 | is_master() { 4 | local opts="-U vcap" 5 | if [[ -n "${1:-}" ]]; then 6 | opts="$opts -h $1" 7 | fi 8 | if [[ -n "${2:-}" ]]; then 9 | opts="$opts -p $2" 10 | else 11 | opts="$opts -p <%= p('postgres.config')["port"] || 6432 %>" 12 | fi 13 | 14 | # psql can experience transient issues (like connection reset) 15 | # make is_master more resilient against these kinds of errors 16 | error_tolerance=<%= p('postgres.replication.psql_error_count') %> 17 | error_count=0 18 | while (( $error_count < $error_tolerance )) ; do 19 | tf=$(echo $(psql $opts postgres -t -c 'SELECT pg_is_in_recovery()' 2>&1)); 20 | 21 | if [[ "$tf" == "f" ]]; then 22 | return 0 23 | elif [[ "$tf" == "t" || "$tf" =~ (could not connect to server)|(starting up) ]]; then 24 | return 1 25 | else 26 | echo "[monitor] received unexpected response from postgres DB while checking 
master/replica status:" 27 | echo "[monitor] $tf" 28 | ((error_count++)) 29 | echo "[monitor] will attempt to check master/replica status again (check $error_count of $error_tolerance)" 30 | sleep 1 31 | continue 32 | fi 33 | done 34 | # we errored out <%= p('postgres.replication.psql_error_count') %> times, return that other node is not master 35 | echo "[monitor] couldn't determine who was master or replica due to postgres errors. assuming i'm master." 36 | return 1 37 | } -------------------------------------------------------------------------------- /jobs/postgres/templates/bin/healthy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | # Setup env vars and folders for the webapp_ctl script 5 | exec >/var/vcap/sys/log/postgres/health.$3.$4.log 2>&1 6 | source /var/vcap/jobs/postgres/bin/functions 7 | 8 | echo "today is $(date); checking $3:$4 for writability" 9 | psql -U vcap -h $3 -p $4 postgres -c 'SELECT pg_is_in_recovery()' 10 | 11 | if [ $HAPROXY_PROXY_NAME == "postgres-read-replica" ]; then 12 | if is_master $3 $4; then 13 | exit 1 14 | else 15 | exit 0 16 | fi 17 | else 18 | if is_master $3 $4; then 19 | exit 0 20 | else 21 | exit 1 22 | fi 23 | fi -------------------------------------------------------------------------------- /jobs/postgres/templates/bin/monitor: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | <% replication = p('postgres.replication.enabled', false) && link("db").instances.size() > 1 %> 4 | <% port = p('postgres.config.port', (replication ? 6432 : 5432)) %> 5 | 6 | # Setup env vars and folders for the webapp_ctl script 7 | source /var/vcap/jobs/postgres/helpers/ctl_setup.sh postgres 8 | source /var/vcap/jobs/postgres/bin/functions 9 | exec >>$LOG_DIR/postgres.log 2>&1 10 | 11 | # datadir is where PostgreSQL will store the database files. 
This really 12 | # *shouuld* be a persistent disk, unless you know what you are doing. 13 | datadir=/var/vcap/store/postgres/db 14 | 15 | promote() { 16 | # construct the list of options to pass to pg_ctl. These mostly get passed 17 | # onto the `postgres` process on startup, but the `-p` flag is used by the 18 | # "wait" feature (-w) to ensure that the database has finished booting. 19 | opts="-p <%= port %>" 20 | opts="${opts} -c external_pid_file=${RUN_DIR}/postgres.pid" 21 | opts="${opts} -c data_directory=${datadir}" 22 | opts="${opts} -c config_file=${JOB_DIR}/config/postgresql.conf" 23 | opts="${opts} -c hba_file=${JOB_DIR}/config/hba.conf" 24 | opts="${opts} -c ident_file=${JOB_DIR}/config/ident.conf" 25 | 26 | mkdir -p ${datadir}/archive 27 | chown -R vcap:vcap ${datadir}/archive 28 | 29 | echo >&2 "[monitor] promoting postgres to a master..." 30 | chpst -u vcap:vcap pg_ctl -D ${datadir} promote 31 | } 32 | 33 | case $1 in 34 | start) 35 | pid_guard $RUN_DIR/monitor.pid monitor 36 | echo $$ > $RUN_DIR/monitor.pid 37 | 38 | <% link("db").instances.reject { |the| the.address == spec.address }.each do |peer| %> 39 | MASTER_IP=<%= peer.address %> 40 | <% end %> 41 | 42 | # how long to wait for psql to do its work while health checking 43 | export PGCONNECT_TIMEOUT=<%= p('postgres.replication.connect_timeout') %> 44 | 45 | # wait for the local node to be up before we ever try to promote it 46 | echo "[monitor] waiting for local postgres node to become available on :<%= port %>" 47 | while ! psql -p <%= port %> -U vcap postgres -c 'SELECT 1' >/dev/null 2>&1; do 48 | sleep 1 49 | done 50 | 51 | echo "[monitor] entering main loop..." 52 | # loop indefinitely 53 | while true; do 54 | sleep 1 55 | if is_master; then 56 | if ! is_master $MASTER_IP <%= port %>; then 57 | continue 58 | else 59 | echo "[monitor] split-brain detected. both nodes are master. 
shutting down postgres, haproxy, and monitor to prevent inconsistent data" 60 | sleep 2 # done to ensure the other node notices split-brain as well. 61 | /var/vcap/bosh/bin/monit stop postgres 62 | /var/vcap/bosh/bin/monit stop haproxy 63 | /var/vcap/bosh/bin/monit stop monitor 64 | rm -f $RUN_DIR/monitor.pid 65 | exit 0 66 | fi 67 | fi 68 | 69 | # we are a replica, determine who we talk to 70 | if [[ -z ${MASTER_IP:-} ]]; then 71 | echo "[monitor] unable to determine master node; bailing out" 72 | exit 1 73 | fi 74 | if ! is_master $MASTER_IP <%= port %>; then 75 | unset PGCONNECT_TIMEOUT 76 | promote 77 | rm -f $RUN_DIR/monitor.pid 78 | exit 0 79 | fi 80 | done 81 | ;; 82 | 83 | stop) 84 | kill_and_wait $RUN_DIR/monitor.pid 85 | ;; 86 | 87 | *) 88 | echo "Usage: monitor {start|stop}" 89 | ;; 90 | 91 | esac 92 | exit 0 93 | -------------------------------------------------------------------------------- /jobs/postgres/templates/bin/recover: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -u # report the usage of uninitialized variables 3 | 4 | if [ "$EUID" != 0 ] 5 | then echo "Please run recovery as root" 6 | exit 1 7 | fi 8 | 9 | running_processes=$(ps cax | grep -Po "(haproxy)|(postgres)|(monitor)") 10 | if [[ ! -z "$running_processes" ]]; then 11 | echo "Services are currently running on this node that should've been stopped." 12 | echo "Currently running services that should not be running:" 13 | echo "$running_processes" 14 | exit 1 15 | else 16 | /var/vcap/bosh/bin/monit start monitor 17 | /var/vcap/bosh/bin/monit start postgres 18 | /var/vcap/bosh/bin/monit start haproxy 19 | EXITSTATUS=0 20 | sleep 2 21 | if [[ $(ps cax | grep -Pzo "(?s)^(?=.*\monitor\b)(?=.*\bpostgres\b)(?=.*\bhaproxy\b).*$") ]] ; then 22 | echo "Failed to start procceses." 23 | /var/vcap/bosh/bin/monit status 24 | exit 1 25 | else 26 | echo "All processes have been started." 
27 | exit 0 28 | fi 29 | fi 30 | -------------------------------------------------------------------------------- /jobs/postgres/templates/bin/tune: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | force() { 4 | return <%= p('postgres.tuning.force') ? 0 : 1 %> 5 | } 6 | toobig() { 7 | # 64-bit signed integers max out at 9,223,372,036,854,775,807, 8 | # a.k.a. 9 billion billion, or 9 * 10^19. THEREFORE if we have 9 | # a string of more than 18 characters, chances are it's too big 10 | # to deal with in any meaningful way. 11 | # 12 | test ${#1} -gt 18 13 | return $? 14 | } 15 | max() { 16 | if force; then echo $1 17 | elif toobig $1; then echo $1 18 | elif toobig $2; then echo $2 19 | elif [[ $1 -gt $2 ]]; then echo $1 20 | else echo $2 21 | fi 22 | } 23 | 24 | unit() { 25 | # usage: unit $value $base $suffix 26 | local value=$1 27 | local base=$2 28 | local units=" $3" 29 | local suffix=$4 30 | 31 | if toobig $value; then 32 | echo "*unfathomable*" # warden / garden does this 33 | return 34 | fi 35 | 36 | while [[ ${#units} -gt 1 && $value -gt $base ]]; do 37 | value=$(( value / base )) 38 | units=${units:1} 39 | done 40 | echo "${value} ${units:0:1}${suffix}" 41 | } 42 | 43 | summarize() { 44 | local from=$2 45 | local to=$3 46 | if [[ -n $4 ]]; then 47 | # called as `summarize key $a $b 1024 KMG b 48 | from=$(unit $from $4 $5 $6) 49 | to=$(unit $to $4 $5 $6) 50 | fi 51 | printf "%20s %16s -> %16s" $1 "$from" "$to" 52 | if [[ "$2" = "$3" ]]; then 53 | echo " (no change)" 54 | else 55 | echo " OK" 56 | fi 57 | } 58 | 59 | ##################################### 60 | 61 | 62 | 63 | 64 | ###### ## ## ## ## 65 | ## ## ## ## ### ### 66 | ## ## ## #### #### 67 | ###### ######### ## ### ## 68 | ## ## ## ## ## 69 | ## ## ## ## ## ## 70 | ###### ## ## ## ## 71 | 72 | cur_shmmax=$(cat /proc/sys/kernel/shmmax) 73 | cur_shmall=$(cat /proc/sys/kernel/shmall) 74 | cur_shmmni=$(cat /proc/sys/kernel/shmmni) 75 | 76 | 
##################################### 77 | # SHMAX 78 | 79 | shmmax="<%= p('postgres.tuning.ipc.shmmax') %>" 80 | if [[ "${shmmax}" == "auto" || shmmax < 1024 ]]; then 81 | shmmax=1024 # MINIMUM per PostgreSQL 17.1 82 | fi 83 | shmmax=$(max ${shmmax} ${cur_shmmax}) 84 | 85 | ##################################### 86 | # SHMALL 87 | 88 | shmall="<%= p('postgres.tuning.ipc.shmall') %>" 89 | if [[ "${shmall}" == "auto" ]]; then 90 | pagesize=$(getconf PAGE_SIZE) 91 | shmall=$(( (shmmax + (pagesize - 1)) / pagesize )) 92 | fi 93 | shmall=$(max ${shmall} ${cur_shmall}) 94 | 95 | ##################################### 96 | # SHMMNI 97 | 98 | shmmni="<%= p('postgres.tuning.ipc.shmmni') %>" 99 | if [[ "${shmmni}" == "auto" ]]; then 100 | shmmni=$(cat /proc/sys/kernel/shmmni) 101 | fi 102 | shmmni=$(max ${shmmni} ${cur_shmmni}) 103 | 104 | 105 | 106 | 107 | ###### ######## ## ## 108 | ## ## ## ### ### 109 | ## ## #### #### 110 | ###### ###### ## ### ## 111 | ## ## ## ## 112 | ## ## ## ## ## 113 | ###### ######## ## ## 114 | 115 | read cur_semmsl \ 116 | cur_semmns \ 117 | cur_semopm \ 118 | cur_semmni \ 119 | < /proc/sys/kernel/sem 120 | 121 | ##################################### 122 | # SEMMNI 123 | 124 | semmni="<%= p('postgres.tuning.ipc.semmni') %>" 125 | if [[ "${semmni}" == "auto" ]]; then 126 | max_connections=<%= p('postgres.config.max_connections', 100) %> 127 | autovac_workers=<%= p('postgres.config.autovacuum_max_workers', 3) %> 128 | semmni=$(( (max_connections + autovac_workers + 3 + (16 - 1)) / 16 )) 129 | fi 130 | semmni=$(max ${semmni} ${cur_semmni}) 131 | 132 | ##################################### 133 | # SEMMNS 134 | 135 | semmns="<%= p('postgres.tuning.ipc.semmns') %>" 136 | if [[ "${semmns}" == "auto" ]]; then 137 | max_connections=<%= p('postgres.config.max_connections', 100) %> 138 | autovac_workers=<%= p('postgres.config.autovacuum_max_workers', 3) %> 139 | semmns=$(( (max_connections + autovac_workers + 3 + (16 - 1)) / 16 * (17 + 3) )) 140 | fi 141 | 
# never tune *below* what the kernel is already configured for
semmns=$(max ${semmns} ${cur_semmns})

#####################################
# SEMMSL — max semaphores per semaphore set.

semmsl="<%= p('postgres.tuning.ipc.semmsl') %>"
# BUGFIX: was `${semmsl} < 17` — inside [[ ]], `<` is a *lexicographic string*
# comparison (so e.g. "5" < "17" is false, since '5' > '1'), and the
# documented minimum of 17 was never enforced. Use a numeric -lt test.
if [[ "${semmsl}" == "auto" || ${semmsl} -lt 17 ]]; then
  semmsl=17 # per PostgreSQL 17.1
fi
semmsl=$(max ${semmsl} ${cur_semmsl})

#####################################
# SEMOPM — max operations per semop(2) call.

semopm="<%= p('postgres.tuning.ipc.semopm') %>"
if [[ "${semopm}" == "auto" ]]; then
  semopm=500 # sane default, per Ubuntu
fi
semopm=$(max ${semopm} ${cur_semopm})




# print a current -> proposed table for every parameter
echo "TUNING KERNEL PARAMETERS"
echo
summarize SHMMAX ${cur_shmmax} ${shmmax} 1024 KMG B
summarize SHMALL ${cur_shmall} ${shmall} 1024 KMG B
summarize SHMMNI ${cur_shmmni} ${shmmni} 1000 kM
echo

summarize SEMMNI ${cur_semmni} ${semmni} 1000 kM
summarize SEMMNS ${cur_semmns} ${semmns} 1000 kM
summarize SEMMSL ${cur_semmsl} ${semmsl} 1000 kM
summarize SEMOPM ${cur_semopm} ${semopm} 1000 kM
echo


# Only write to /proc when the caller opted in (ctl runs us with
# COMMIT_TUNING=1); otherwise this is a dry run.
# ROBUSTNESS: ${COMMIT_TUNING:-} instead of ${COMMIT_TUNING}, so a manual
# invocation without the variable set cannot trip on an unbound variable.
if [[ -n ${COMMIT_TUNING:-} ]]; then
  echo "commiting changes to kernel configuration..."
  echo ${shmmax} > /proc/sys/kernel/shmmax
  echo ${shmall} > /proc/sys/kernel/shmall
  echo ${shmmni} > /proc/sys/kernel/shmmni
  # kernel expects all four SEM values written together, in this order
  echo ${semmsl} ${semmns} ${semopm} ${semmni} > /proc/sys/kernel/sem
  echo "done."

else
  echo "COMMIT_TUNING env var not set; skipping actual changes to kernel configuration..."
  echo "done (sort of)."
189 | fi 190 | 191 | echo 192 | -------------------------------------------------------------------------------- /jobs/postgres/templates/config/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry-community/postgres-boshrelease/265157cbcc5f31a1ea50f0371a1a8addcc656b7d/jobs/postgres/templates/config/.gitkeep -------------------------------------------------------------------------------- /jobs/postgres/templates/config/hba.conf: -------------------------------------------------------------------------------- 1 | # postgres hba configuration 2 | 3 | # base connectivity for psql 4 | local all vcap trust 5 | 6 | # base connectivity for pgpool 7 | local all all md5 8 | <% p("postgres.pgpool.pool", []).each do |ip| 9 | %>host all all <%= ip %>/32 md5 10 | <% end %> 11 | 12 | # replication hosts 13 | <% 14 | link("db").instances.each do |instance| 15 | if instance.address =~ /\b[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\b/ 16 | -%> 17 | 18 | host all all <%= instance.address %>/32 trust 19 | host replication replication <%= instance.address %>/32 trust 20 | <% else -%> 21 | host all all <%= instance.address %> trust 22 | host replication replication <%= instance.address %> trust 23 | <% end; end -%> 24 | 25 | # user-configured acls 26 | <% p("postgres.hba", []).each do |acl| %><%= acl %> 27 | <% end %> 28 | -------------------------------------------------------------------------------- /jobs/postgres/templates/config/ident.conf: -------------------------------------------------------------------------------- 1 | # postgres ident configuration 2 | <% 3 | if_p("postgres.ident") do |_| 4 | properties.postgres.ident.each do |entry| %> 5 | <%= entry %> 6 | <% 7 | end 8 | end 9 | %> 10 | -------------------------------------------------------------------------------- /jobs/postgres/templates/config/postgresql.conf: 
--------------------------------------------------------------------------------

# postgres main configuration
unix_socket_directories = '/var/vcap/sys/run/postgres/'
log_line_prefix = '%m: proc=%p,user=%u,db=%d,host=%r '

##
<%
pgs = link("db").instances
replication = p('postgres.replication.enabled', false) && pgs.size() > 1
master = replication && spec.index == 0

config = p('postgres.config', {})

# set up defaults for replication
config['listen_addresses'] ||= '0.0.0.0'
config["port"] ||= (replication ? 6432 : 5432)

if replication
  if pgs.size() != 2
    raise "PostgreSQL replication only works with two instances; not 1, and definitely not #{pgs.size()}"
  end

  config["wal_level"] ||= 'hot_standby'
  # BUGFIX: the key used to be "max_wal_senders " (trailing space), which never
  # matched an operator-supplied postgres.config.max_wal_senders; BOTH keys
  # were then rendered, and since this default was appended last and
  # PostgreSQL honors the *last* occurrence of a setting in postgresql.conf,
  # the hard-coded 5 silently overrode the operator's value.
  config["max_wal_senders"] ||= 5
  config["wal_keep_segments"] ||= 128
  config["archive_mode"] ||= 'on'
  config["archive_command"] ||= '/bin/true'
  config["hot_standby"] ||= 'on'
end

# Render a Ruby value in postgresql.conf syntax: booleans become on/off,
# integers are bare, everything else is single-quoted.
def val(v)
  return case
  when v == true; "on"
  when v == false; "off"
  when v.to_i == v; "#{v}"
  else; "'#{v}'"
  end
end

config.each do |key, value|
%><%= key %> = <%= val(value) %>
<% end %>
-------------------------------------------------------------------------------- /jobs/postgres/templates/data/properties.sh.erb: --------------------------------------------------------------------------------

#!/usr/bin/env bash

# job template binding variables

# job name & index of this VM within cluster
# e.g.
JOB_NAME=redis, JOB_INDEX=0 7 | export NAME='<%= name %>' 8 | export JOB_INDEX=<%= index %> 9 | # full job name, like redis/0 or webapp/3 10 | export JOB_FULL="$NAME/$JOB_INDEX" 11 | -------------------------------------------------------------------------------- /jobs/postgres/templates/envrc: -------------------------------------------------------------------------------- 1 | # envrc for postgres stuff 2 | job=postgres 3 | 4 | for bin in /var/vcap/jobs/${job}/packages/*/*bin; do 5 | PATH="${PATH}:${bin}" 6 | done 7 | export PATH 8 | 9 | for lib in /var/vcap/jobs/${job}/packages/*/*lib; do 10 | LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${lib}" 11 | done 12 | export LD_LIBRARY_PATH=${LD_LIBRARY_PATH#:} 13 | -------------------------------------------------------------------------------- /jobs/postgres/templates/helpers/ctl_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Setup env vars and folders for the ctl script 4 | # This helps keep the ctl script as readable 5 | # as possible 6 | 7 | # Usage options: 8 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh JOB_NAME OUTPUT_LABEL 9 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar 10 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar foobar 11 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar nginx 12 | 13 | set -e # exit immediately if a simple command exits with a non-zero status 14 | set -u # report the usage of uninitialized variables 15 | 16 | JOB_NAME=$1 17 | output_label=${2:-${JOB_NAME}} 18 | 19 | export JOB_DIR=/var/vcap/jobs/$JOB_NAME 20 | chmod 755 $JOB_DIR # to access file via symlink 21 | 22 | # Load some bosh deployment properties into env vars 23 | # Try to put all ERb into data/properties.sh.erb 24 | # incl $NAME, $JOB_INDEX, $WEBAPP_DIR 25 | source $JOB_DIR/data/properties.sh 26 | 27 | source $JOB_DIR/helpers/ctl_utils.sh 28 | redirect_output ${output_label} 29 | 30 | export 
HOME=${HOME:-/home/vcap} 31 | 32 | # Add all packages' /bin & /sbin into $PATH 33 | for package_bin_dir in $(ls -d /var/vcap/packages/*/*bin) 34 | do 35 | export PATH=${package_bin_dir}:$PATH 36 | done 37 | 38 | export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-''} # default to empty 39 | for package_bin_dir in $(ls -d /var/vcap/packages/*/lib) 40 | do 41 | export LD_LIBRARY_PATH=${package_bin_dir}:$LD_LIBRARY_PATH 42 | done 43 | 44 | # Setup log, run and tmp folders 45 | 46 | export RUN_DIR=/var/vcap/sys/run/$JOB_NAME 47 | export LOG_DIR=/var/vcap/sys/log/$JOB_NAME 48 | export TMP_DIR=/var/vcap/sys/tmp/$JOB_NAME 49 | export STORE_DIR=/var/vcap/store/$JOB_NAME 50 | for dir in $RUN_DIR $LOG_DIR $TMP_DIR $STORE_DIR 51 | do 52 | mkdir -p ${dir} 53 | chown vcap:vcap ${dir} 54 | chmod 775 ${dir} 55 | done 56 | export TMPDIR=$TMP_DIR 57 | 58 | export C_INCLUDE_PATH=/var/vcap/packages/mysqlclient/include/mysql:/var/vcap/packages/sqlite/include:/var/vcap/packages/libpq/include 59 | export LIBRARY_PATH=/var/vcap/packages/mysqlclient/lib/mysql:/var/vcap/packages/sqlite/lib:/var/vcap/packages/libpq/lib 60 | 61 | # consistent place for vendoring python libraries within package 62 | if [[ -d ${WEBAPP_DIR:-/xxxx} ]] 63 | then 64 | export PYTHONPATH=$WEBAPP_DIR/vendor/lib/python 65 | fi 66 | 67 | if [[ -d /var/vcap/packages/java7 ]] 68 | then 69 | export JAVA_HOME="/var/vcap/packages/java7" 70 | fi 71 | 72 | # setup CLASSPATH for all jars/ folders within packages 73 | export CLASSPATH=${CLASSPATH:-''} # default to empty 74 | for java_jar in $(ls -d /var/vcap/packages/*/*/*.jar) 75 | do 76 | export CLASSPATH=${java_jar}:$CLASSPATH 77 | done 78 | 79 | PIDFILE=$RUN_DIR/$output_label.pid 80 | 81 | echo '$PATH' $PATH 82 | -------------------------------------------------------------------------------- /jobs/postgres/templates/helpers/ctl_utils.sh: -------------------------------------------------------------------------------- 1 | # Helper functions used by ctl scripts 2 | 3 | # links a job 
file (probably a config file) into a package 4 | # Example usage: 5 | # link_job_file_to_package config/redis.yml [config/redis.yml] 6 | # link_job_file_to_package config/wp-config.php wp-config.php 7 | link_job_file_to_package() { 8 | source_job_file=$1 9 | target_package_file=${2:-$source_job_file} 10 | full_package_file=$WEBAPP_DIR/${target_package_file} 11 | 12 | link_job_file ${source_job_file} ${full_package_file} 13 | } 14 | 15 | # links a job file (probably a config file) somewhere 16 | # Example usage: 17 | # link_job_file config/bashrc /home/vcap/.bashrc 18 | link_job_file() { 19 | source_job_file=$1 20 | target_file=$2 21 | full_job_file=$JOB_DIR/${source_job_file} 22 | 23 | echo link_job_file ${full_job_file} ${target_file} 24 | if [[ ! -f ${full_job_file} ]] 25 | then 26 | echo "file to link ${full_job_file} does not exist" 27 | else 28 | # Create/recreate the symlink to current job file 29 | # If another process is using the file, it won't be 30 | # deleted, so don't attempt to create the symlink 31 | mkdir -p $(dirname ${target_file}) 32 | ln -nfs ${full_job_file} ${target_file} 33 | fi 34 | } 35 | 36 | # If loaded within monit ctl scripts then pipe output 37 | # If loaded from 'source ../utils.sh' then normal STDOUT 38 | redirect_output() { 39 | SCRIPT=$1 40 | mkdir -p /var/vcap/sys/log/monit 41 | exec 1>> /var/vcap/sys/log/monit/$SCRIPT.log 2>&1 42 | } 43 | 44 | pid_guard() { 45 | pidfile=$1 46 | name=$2 47 | 48 | if [ -f "$pidfile" ]; then 49 | pid=$(head -1 "$pidfile") 50 | 51 | if [ -n "$pid" ] && [ -e /proc/$pid ]; then 52 | echo "$name is already running, please stop it first" 53 | exit 1 54 | fi 55 | 56 | echo "Removing stale pidfile..." 
57 | rm $pidfile 58 | fi 59 | } 60 | 61 | wait_pid() { 62 | pid=$1 63 | try_kill=$2 64 | timeout=${3:-0} 65 | force=${4:-0} 66 | countdown=$(( $timeout * 10 )) 67 | 68 | echo wait_pid $pid $try_kill $timeout $force $countdown 69 | if [ -e /proc/$pid ]; then 70 | if [ "$try_kill" = "1" ]; then 71 | echo "Killing $pidfile: $pid " 72 | kill $pid 73 | fi 74 | while [ -e /proc/$pid ]; do 75 | sleep 0.1 76 | [ "$countdown" != '0' -a $(( $countdown % 10 )) = '0' ] && echo -n . 77 | if [ $timeout -gt 0 ]; then 78 | if [ $countdown -eq 0 ]; then 79 | if [ "$force" = "1" ]; then 80 | echo -ne "\nKill timed out, using kill -9 on $pid... " 81 | kill -9 $pid 82 | sleep 0.5 83 | fi 84 | break 85 | else 86 | countdown=$(( $countdown - 1 )) 87 | fi 88 | fi 89 | done 90 | if [ -e /proc/$pid ]; then 91 | echo "Timed Out" 92 | else 93 | echo "Stopped" 94 | fi 95 | else 96 | echo "Process $pid is not running" 97 | echo "Attempting to kill pid anyway..." 98 | kill $pid 99 | fi 100 | } 101 | 102 | wait_pidfile() { 103 | pidfile=$1 104 | try_kill=$2 105 | timeout=${3:-0} 106 | force=${4:-0} 107 | countdown=$(( $timeout * 10 )) 108 | 109 | if [ -f "$pidfile" ]; then 110 | pid=$(head -1 "$pidfile") 111 | if [ -z "$pid" ]; then 112 | echo "Unable to get pid from $pidfile" 113 | exit 1 114 | fi 115 | 116 | wait_pid $pid $try_kill $timeout $force 117 | 118 | rm -f $pidfile 119 | else 120 | echo "Pidfile $pidfile doesn't exist" 121 | fi 122 | } 123 | 124 | kill_and_wait() { 125 | pidfile=$1 126 | # Monit default timeout for start/stop is 30s 127 | # Append 'with timeout {n} seconds' to monit start/stop program configs 128 | timeout=${2:-25} 129 | force=${3:-1} 130 | if [[ -f ${pidfile} ]] 131 | then 132 | wait_pidfile $pidfile 1 $timeout $force 133 | else 134 | # TODO assume $1 is something to grep from 'ps ax' 135 | pid="$(ps auwwx | grep "$1" | awk '{print $2}')" 136 | wait_pid $pid 1 $timeout $force 137 | fi 138 | } 139 | 140 | check_nfs_mount() { 141 | opts=$1 142 | exports=$2 143 | 
mount_point=$3 144 | 145 | if grep -qs $mount_point /proc/mounts; then 146 | echo "Found NFS mount $mount_point" 147 | else 148 | echo "Mounting NFS..." 149 | mount $opts $exports $mount_point 150 | if [ $? != 0 ]; then 151 | echo "Cannot mount NFS from $exports to $mount_point, exiting..." 152 | exit 1 153 | fi 154 | fi 155 | } 156 | -------------------------------------------------------------------------------- /jobs/postgres/templates/sql/setup-replication.sql: -------------------------------------------------------------------------------- 1 | DO 2 | $body$ 3 | BEGIN 4 | IF NOT EXISTS (SELECT * FROM pg_catalog.pg_user WHERE usename = 'replication') THEN 5 | CREATE ROLE replication WITH REPLICATION LOGIN; 6 | END IF; 7 | END 8 | $body$; 9 | -------------------------------------------------------------------------------- /jobs/smoke-tests/monit: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry-community/postgres-boshrelease/265157cbcc5f31a1ea50f0371a1a8addcc656b7d/jobs/smoke-tests/monit -------------------------------------------------------------------------------- /jobs/smoke-tests/spec: -------------------------------------------------------------------------------- 1 | --- 2 | name: smoke-tests 3 | packages: 4 | - postgres 5 | - pgrt 6 | 7 | consumes: 8 | - name: db 9 | type: postgresql 10 | 11 | templates: 12 | bin/run: bin/run 13 | helpers/ctl_setup.sh: helpers/ctl_setup.sh 14 | helpers/ctl_utils.sh: helpers/ctl_utils.sh 15 | sql/pgbench_cleanup.sql: sql/pgbench_cleanup.sql 16 | 17 | properties: 18 | postgres.smoke-tests.target.port: 19 | description: Port to run pgbench against, for generating load. 20 | default: 5432 21 | postgres.smoke-tests.target.username: 22 | description: User to connect as, to run the pgbench load generation component. 23 | default: smoke-tests 24 | postgres.smoke-tests.target.password: 25 | description: Password of the smoke-test pgbench user. 
26 | postgres.smoke-tests.target.clients: 27 | description: How many separate database clients to simulate during the load test. 28 | default: 30 29 | 30 | postgres.smoke-tests.backend.port: 31 | description: Port that PostgreSQL itself (not HAProxy) is listening on, for replication health checking. 32 | default: 6432 33 | 34 | postgres.smoke-tests.thresholds.replication-lag: 35 | description: Maximum allowable replication lag for any single read slave from master (in kb). 36 | default: 64 37 | postgres.smoke-tests.replication.enabled: 38 | description: Whether or not replication is enabled for Postgres. 39 | default: false 40 | -------------------------------------------------------------------------------- /jobs/smoke-tests/templates/bin/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | <% 3 | master = link("db").instances.first.address 4 | slaves = [] 5 | link("db").instances.each do |instance| 6 | if instance.address != master 7 | slaves.push(instance.address) 8 | end 9 | end 10 | 11 | pgs = link("db").instances 12 | replication = p('postgres.smoke-tests.replication.enabled', false) && pgs.size() > 1 13 | clusterport = (replication ? p('postgres.smoke-tests.backend.port') : p('postgres.smoke-tests.target.port')) 14 | %> 15 | 16 | set -u # report the usage of uninitialized variables 17 | 18 | # Setup env vars and folders for the webapp_ctl script 19 | source /var/vcap/jobs/smoke-tests/helpers/ctl_setup.sh 'smoke-tests' 20 | 21 | check() { 22 | echo 23 | date 24 | pgrt -M <%= master %> \<% slaves.each do |slave| %> 25 | -S <%= slave %> \<% end %> 26 | -u <%= p('postgres.smoke-tests.target.username') %> \ 27 | -w <%= p('postgres.smoke-tests.target.password') %> \ 28 | -p <%= clusterport %> \ 29 | -l <%= p('postgres.smoke-tests.thresholds.replication-lag', 64) * 1024 %> 30 | return $? 
31 | } 32 | 33 | EXITSTATUS=0 34 | 35 | # create the .pgpass file 36 | rm -f ~/.pgpass 37 | echo "*:*:*:<%= p('postgres.smoke-tests.target.username') %>:<%= p('postgres.smoke-tests.target.password') %>" \ 38 | > ~/.pgpass 39 | chmod 0600 ~/.pgpass 40 | 41 | echo "INITIALIZING PGBENCH DATABASE" 42 | pgbench -h <%= master %> \ 43 | -p <%= p('postgres.smoke-tests.target.port') %> \ 44 | -U <%= p('postgres.smoke-tests.target.username') %> \ 45 | -i 2>&1 46 | 47 | echo "STARTING PGBENCH IN THE BACKGROUND" 48 | CPUS=$(grep -c ^processor /proc/cpuinfo) 49 | CLIENTS=<%= p('postgres.smoke-tests.target.clients') %> 50 | CLIENTS=$(( CLIENTS - (CLIENTS % CPUS) + CPUS )) 51 | time \ 52 | pgbench -h <%= master %> \ 53 | -p <%= p('postgres.smoke-tests.target.port') %> \ 54 | -U <%= p('postgres.smoke-tests.target.username') %> \ 55 | -c ${CLIENTS} -j ${CPUS} -T 125 -C 2>&1 & 56 | PGBENCH=$! 57 | 58 | for x in $(seq 1 120); do 59 | check || EXITSTATUS=$? 60 | sleep 1 61 | done 62 | 63 | echo 64 | echo "done with tests; waiting for pgbench to finish up" 65 | echo 66 | wait $PGBENCH 67 | 68 | echo 69 | echo "pgbench exited $?" 
70 | echo 71 | 72 | 73 | psql -h <%= master %> \ 74 | -p <%= p('postgres.smoke-tests.target.port') %> \ 75 | -U <%= p('postgres.smoke-tests.target.username') %> \ 76 | --file "/var/vcap/jobs/smoke-tests/sql/pgbench_cleanup.sql" 77 | 78 | echo "---------------------------------------------" 79 | echo "Errand smoke-tests is complete; exit status $EXITSTATUS" 80 | exit $EXITSTATUS 81 | -------------------------------------------------------------------------------- /jobs/smoke-tests/templates/helpers/ctl_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Setup env vars and folders for the ctl script 4 | # This helps keep the ctl script as readable 5 | # as possible 6 | 7 | # Usage options: 8 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh JOB_NAME OUTPUT_LABEL 9 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar 10 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar foobar 11 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar nginx 12 | 13 | set -e # exit immediately if a simple command exits with a non-zero status 14 | set -u # report the usage of uninitialized variables 15 | 16 | JOB_NAME=$1 17 | output_label=${2:-${JOB_NAME}} 18 | 19 | export JOB_DIR=/var/vcap/jobs/$JOB_NAME 20 | chmod 755 $JOB_DIR # to access file via symlink 21 | 22 | source $JOB_DIR/helpers/ctl_utils.sh 23 | 24 | # Add all packages' /bin & /sbin into $PATH 25 | for package_bin_dir in $(ls -d /var/vcap/packages/*/*bin) 26 | do 27 | export PATH=${package_bin_dir}:$PATH 28 | done 29 | 30 | export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-''} # default to empty 31 | for package_bin_dir in $(ls -d /var/vcap/packages/*/lib) 32 | do 33 | export LD_LIBRARY_PATH=${package_bin_dir}:$LD_LIBRARY_PATH 34 | done 35 | 36 | # Setup log, run and tmp folders 37 | 38 | export RUN_DIR=/var/vcap/sys/run/$JOB_NAME 39 | export LOG_DIR=/var/vcap/sys/log/$JOB_NAME 40 | export TMP_DIR=/var/vcap/sys/tmp/$JOB_NAME 41 | 
export STORE_DIR=/var/vcap/store/$JOB_NAME 42 | for dir in $RUN_DIR $LOG_DIR $TMP_DIR $STORE_DIR 43 | do 44 | mkdir -p ${dir} 45 | chown vcap:vcap ${dir} 46 | chmod 775 ${dir} 47 | done 48 | export TMPDIR=$TMP_DIR 49 | 50 | export C_INCLUDE_PATH=/var/vcap/packages/mysqlclient/include/mysql:/var/vcap/packages/sqlite/include:/var/vcap/packages/libpq/include 51 | export LIBRARY_PATH=/var/vcap/packages/mysqlclient/lib/mysql:/var/vcap/packages/sqlite/lib:/var/vcap/packages/libpq/lib 52 | 53 | echo '$PATH' $PATH 54 | -------------------------------------------------------------------------------- /jobs/smoke-tests/templates/helpers/ctl_utils.sh: -------------------------------------------------------------------------------- 1 | # Helper functions used by ctl scripts 2 | 3 | # links a job file (probably a config file) into a package 4 | # Example usage: 5 | # link_job_file_to_package config/redis.yml [config/redis.yml] 6 | # link_job_file_to_package config/wp-config.php wp-config.php 7 | link_job_file_to_package() { 8 | source_job_file=$1 9 | target_package_file=${2:-$source_job_file} 10 | full_package_file=$WEBAPP_DIR/${target_package_file} 11 | 12 | link_job_file ${source_job_file} ${full_package_file} 13 | } 14 | 15 | # links a job file (probably a config file) somewhere 16 | # Example usage: 17 | # link_job_file config/bashrc /home/vcap/.bashrc 18 | link_job_file() { 19 | source_job_file=$1 20 | target_file=$2 21 | full_job_file=$JOB_DIR/${source_job_file} 22 | 23 | echo link_job_file ${full_job_file} ${target_file} 24 | if [[ ! 
-f ${full_job_file} ]] 25 | then 26 | echo "file to link ${full_job_file} does not exist" 27 | else 28 | # Create/recreate the symlink to current job file 29 | # If another process is using the file, it won't be 30 | # deleted, so don't attempt to create the symlink 31 | mkdir -p $(dirname ${target_file}) 32 | ln -nfs ${full_job_file} ${target_file} 33 | fi 34 | } 35 | 36 | # If loaded within monit ctl scripts then pipe output 37 | # If loaded from 'source ../utils.sh' then normal STDOUT 38 | redirect_output() { 39 | SCRIPT=$1 40 | mkdir -p /var/vcap/sys/log/monit 41 | exec 1>> /var/vcap/sys/log/monit/$SCRIPT.log 2>&1 42 | } 43 | 44 | pid_guard() { 45 | pidfile=$1 46 | name=$2 47 | 48 | if [ -f "$pidfile" ]; then 49 | pid=$(head -1 "$pidfile") 50 | 51 | if [ -n "$pid" ] && [ -e /proc/$pid ]; then 52 | echo "$name is already running, please stop it first" 53 | exit 1 54 | fi 55 | 56 | echo "Removing stale pidfile..." 57 | rm $pidfile 58 | fi 59 | } 60 | 61 | wait_pid() { 62 | pid=$1 63 | try_kill=$2 64 | timeout=${3:-0} 65 | force=${4:-0} 66 | countdown=$(( $timeout * 10 )) 67 | 68 | echo wait_pid $pid $try_kill $timeout $force $countdown 69 | if [ -e /proc/$pid ]; then 70 | if [ "$try_kill" = "1" ]; then 71 | echo "Killing $pidfile: $pid " 72 | kill $pid 73 | fi 74 | while [ -e /proc/$pid ]; do 75 | sleep 0.1 76 | [ "$countdown" != '0' -a $(( $countdown % 10 )) = '0' ] && echo -n . 77 | if [ $timeout -gt 0 ]; then 78 | if [ $countdown -eq 0 ]; then 79 | if [ "$force" = "1" ]; then 80 | echo -ne "\nKill timed out, using kill -9 on $pid... " 81 | kill -9 $pid 82 | sleep 0.5 83 | fi 84 | break 85 | else 86 | countdown=$(( $countdown - 1 )) 87 | fi 88 | fi 89 | done 90 | if [ -e /proc/$pid ]; then 91 | echo "Timed Out" 92 | else 93 | echo "Stopped" 94 | fi 95 | else 96 | echo "Process $pid is not running" 97 | echo "Attempting to kill pid anyway..." 
98 | kill $pid 99 | fi 100 | } 101 | 102 | wait_pidfile() { 103 | pidfile=$1 104 | try_kill=$2 105 | timeout=${3:-0} 106 | force=${4:-0} 107 | countdown=$(( $timeout * 10 )) 108 | 109 | if [ -f "$pidfile" ]; then 110 | pid=$(head -1 "$pidfile") 111 | if [ -z "$pid" ]; then 112 | echo "Unable to get pid from $pidfile" 113 | exit 1 114 | fi 115 | 116 | wait_pid $pid $try_kill $timeout $force 117 | 118 | rm -f $pidfile 119 | else 120 | echo "Pidfile $pidfile doesn't exist" 121 | fi 122 | } 123 | 124 | kill_and_wait() { 125 | pidfile=$1 126 | # Monit default timeout for start/stop is 30s 127 | # Append 'with timeout {n} seconds' to monit start/stop program configs 128 | timeout=${2:-25} 129 | force=${3:-1} 130 | if [[ -f ${pidfile} ]] 131 | then 132 | wait_pidfile $pidfile 1 $timeout $force 133 | else 134 | # TODO assume $1 is something to grep from 'ps ax' 135 | pid="$(ps auwwx | grep "$1" | awk '{print $2}')" 136 | wait_pid $pid 1 $timeout $force 137 | fi 138 | } 139 | 140 | check_nfs_mount() { 141 | opts=$1 142 | exports=$2 143 | mount_point=$3 144 | 145 | if grep -qs $mount_point /proc/mounts; then 146 | echo "Found NFS mount $mount_point" 147 | else 148 | echo "Mounting NFS..." 149 | mount $opts $exports $mount_point 150 | if [ $? != 0 ]; then 151 | echo "Cannot mount NFS from $exports to $mount_point, exiting..." 
152 | exit 1 153 | fi 154 | fi 155 | } 156 | -------------------------------------------------------------------------------- /jobs/smoke-tests/templates/sql/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry-community/postgres-boshrelease/265157cbcc5f31a1ea50f0371a1a8addcc656b7d/jobs/smoke-tests/templates/sql/.gitkeep -------------------------------------------------------------------------------- /jobs/smoke-tests/templates/sql/pgbench_cleanup.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | DECLARE 3 | row pg_tables%ROWTYPE; 4 | BEGIN 5 | FOR row IN 6 | SELECT 7 | schemaname, 8 | tablename 9 | FROM 10 | pg_catalog.pg_tables 11 | WHERE 12 | tableowner = '<%= p('postgres.smoke-tests.target.username') %>' 13 | AND 14 | tablename LIKE 'pgbench\_%' 15 | LOOP 16 | EXECUTE 'DROP TABLE ' || quote_ident(row.schemaname) || '.' || quote_ident(row.tablename); 17 | END LOOP; 18 | END $$ 19 | LANGUAGE plpgsql; 20 | -------------------------------------------------------------------------------- /jobs/vip/monit: -------------------------------------------------------------------------------- 1 | check process haproxy 2 | with pidfile /var/vcap/sys/run/vip/haproxy.pid 3 | start program "/var/vcap/jobs/vip/bin/haproxy start" 4 | stop program "/var/vcap/jobs/vip/bin/haproxy stop" 5 | group vcap 6 | 7 | <% if p("keepalived.enabled") %> 8 | check process keepalived 9 | with pidfile /var/vcap/sys/run/vip/keepalived.pid 10 | start program "/var/vcap/jobs/vip/bin/keepalived start" 11 | stop program "/var/vcap/jobs/vip/bin/keepalived stop" 12 | group vcap 13 | <% end %> 14 | -------------------------------------------------------------------------------- /jobs/vip/spec: -------------------------------------------------------------------------------- 1 | --- 2 | name: vip 3 | packages: 4 | - haproxy 5 | - keepalived 6 | 7 | templates: 8 | bin/haproxy: 
bin/haproxy 9 | config/haproxy.conf: config/haproxy.conf 10 | 11 | bin/keepalived: bin/keepalived 12 | config/keepalived.tpl: config/keepalived.tpl 13 | 14 | helpers/ctl_setup.sh: helpers/ctl_setup.sh 15 | helpers/ctl_utils.sh: helpers/ctl_utils.sh 16 | properties.sh.erb: data/properties.sh 17 | 18 | consumes: 19 | - name: db 20 | type: postgresql 21 | 22 | properties: 23 | vip: 24 | description: Virtual IP address to trade between the nodes. This will be the public address of your cluster. This property is required. 25 | 26 | readonly_port: 27 | description: Port for external access that is routed directly to readonly Postgres server node 28 | default: 7432 29 | 30 | port: 31 | description: What port to listen on for incoming PostgreSQL connections 32 | default: 5432 33 | 34 | backend_port: 35 | description: What port to connect to on backend PostgreSQL nodes. 36 | default: 6432 37 | 38 | keepalived.enabled: 39 | description: By default keepalived will be enabled. If you wish to roll your own VRRP-like system, disable this. 40 | default: true 41 | 42 | keepalived.interface: 43 | description: What Linux kernel network interface to attach teh floating VIP to; or 'auto' to use the default. 44 | default: auto 45 | 46 | keepalived.virtual_router_id: 47 | description: The VRRP virtual router identifier (VRID). Must be a network-unique number in the range of 1 to 255. 
48 | default: 1 49 | 50 | ha_proxy.log_level: 51 | description: "Log level" 52 | default: "info" 53 | 54 | ha_proxy.connect_timeout: 55 | description: "Timeout waiting for connections to establish to a server (in seconds)" 56 | default: 5 57 | ha_proxy.client_timeout: 58 | description: "Timeout waiting for data from a client (in seconds)" 59 | default: 300 60 | ha_proxy.server_timeout: 61 | description: "Timeout waiting for data from a server (in seconds)" 62 | default: 300 63 | ha_proxy.queue_timeout: 64 | description: "Timeout for requests queued waiting for free connection slots (in seconds)" 65 | default: 30 66 | 67 | ha_proxy.stats_enable: 68 | description: "If true, haproxy will enable a socket for stats. You can see the stats on haproxy_ip:9000/haproxy_stats" 69 | default: false 70 | ha_proxy.stats_bind: 71 | description: "Define one or several listening addresses and/or ports in a frontend." 72 | default: "*:9000" 73 | ha_proxy.stats_user: 74 | description: "User name to authenticate haproxy stats" 75 | ha_proxy.stats_password: 76 | description: "Password to authenticate haproxy stats" 77 | ha_proxy.stats_uri: 78 | description: "URI used to access the stats UI." 
79 | default: "haproxy_stats" 80 | ha_proxy.trusted_stats_cidrs: 81 | description: "Trusted ip range that can access the stats UI" 82 | default: 0.0.0.0/32 83 | -------------------------------------------------------------------------------- /jobs/vip/templates/bin/haproxy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | exec >>/var/vcap/sys/log/vip/haproxy.log 2>&1 5 | source /var/vcap/jobs/vip/helpers/ctl_setup.sh vip haproxy 6 | PATH=/var/vcap/packages/haproxy/bin:$PATH 7 | 8 | NAME=haproxy 9 | DAEMON=$(which haproxy) 10 | CONFIG=/var/vcap/jobs/vip/config/haproxy.conf 11 | PID_FILE=/var/vcap/sys/run/vip/haproxy.pid 12 | 13 | test -x "${DAEMON}" 14 | test -f "${CONFIG}" 15 | 16 | RETVAL=0 17 | 18 | start_haproxy() { 19 | status_haproxy 20 | if [ "${RETVAL}" = 0 ]; then 21 | echo "HAProxy is currently running" 22 | else 23 | RETVAL=0 24 | mkdir -p /var/vcap/sys/run/vip 25 | chmod 0755 /var/vcap/sys/run/vip 26 | chown -R vcap:vcap /var/vcap/sys/run/vip 27 | 28 | echo "$(date): Starting HAProxy" 29 | set +e 30 | "${DAEMON}" -f "${CONFIG}" -D -p "${PID_FILE}" 0<&- 31 | RETVAL=$? 32 | set -e 33 | case "${RETVAL}" in 34 | 0) 35 | echo "$(date): Finished starting HAProxy" 36 | echo "SUCCESS" 37 | ;; 38 | *) 39 | echo "$(date): Errored starting HAProxy" 40 | echo "$(date): Errored starting HAProxy" >&2 41 | rm -f $PID_FILE 42 | echo "FAILED - check logs" 43 | RETVAL=1 44 | ;; 45 | esac 46 | fi 47 | } 48 | 49 | stop_haproxy() { 50 | status_haproxy 51 | if [ "${RETVAL}" = 0 ]; then 52 | set +e 53 | for pid in $(cat "${PID_FILE}"); do 54 | kill ${pid} || RETVAL=$? 
55 | done 56 | set -e 57 | if [ "${RETVAL}" = 0 ]; then 58 | rm -f $PID_FILE 59 | else 60 | echo "FAILED - check logs" 61 | fi 62 | else 63 | echo "HAProxy not running" 64 | fi 65 | } 66 | 67 | # RETVAL = 0 if running, != 0 if stopped 68 | status_haproxy() { 69 | if [ -f "${PID_FILE}" ]; then 70 | RETVAL=0 71 | set +e 72 | for pid in $(cat "${PID_FILE}"); do 73 | kill -0 "${pid}" > /dev/null 2>&1 || RETVAL=3 74 | done 75 | kill -0 $(cat "${PID_FILE}") > /dev/null 2>&1 76 | if [ $? != 0 ]; then 77 | RETVAL=3 78 | fi 79 | set -e 80 | else 81 | RETVAL=1 82 | fi 83 | } 84 | 85 | restart_haproxy() { 86 | stop_haproxy 87 | start_haproxy 88 | } 89 | 90 | case "$1" in 91 | start) 92 | echo -n "Starting haproxy: " 93 | start_haproxy 94 | echo "${NAME}." 95 | ;; 96 | stop) 97 | echo -n "Stopping haproxy: " 98 | stop_haproxy 99 | echo "${NAME}." 100 | ;; 101 | status) 102 | status_haproxy 103 | ;; 104 | force-reload|reload|restart) 105 | echo -n "Restarting haproxy: " 106 | restart_haproxy 107 | echo "${NAME}." 
108 | ;; 109 | *) 110 | echo "Usage: $0 {start|stop|restart|reload|force-reload}" >&2 111 | RETVAL=1 112 | ;; 113 | esac 114 | 115 | exit "${RETVAL}" 116 | -------------------------------------------------------------------------------- /jobs/vip/templates/bin/keepalived: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | APP_DIR=/var/vcap/jobs/vip 3 | PKG_DIR=/var/vcap/packages/keepalived 4 | BIN_DIR=${PKG_DIR}/bin 5 | SBIN_DIR=${PKG_DIR}/sbin 6 | CONF_DIR=${APP_DIR}/config 7 | RUN_DIR=/var/vcap/sys/run/vip 8 | LOG_DIR=/var/vcap/sys/log/vip 9 | 10 | exec >>${LOG_DIR}/keepalived.log 2>&1 11 | 12 | source /var/vcap/packages/keepalived/common/utils.sh 13 | 14 | interface_for_ip() { 15 | local ip=$1 16 | local network="" 17 | 18 | for route in $(ip route show scope link | cut -d " " -f1); do 19 | if [[ $(ip_in_network $route $ip) == 0 ]]; then 20 | network=$route 21 | break 22 | fi 23 | done 24 | 25 | if [[ -z $network ]]; then 26 | echo "Could not determine an interface for requested VIP: $ip" 27 | exit 1 28 | fi 29 | 30 | interface=$(ip route show scope link | grep "${network}" | cut -d " " -f3) 31 | if [[ -z ${interface} ]]; then 32 | echo "Could not find the previously found network ${network} in the routing tables. Something is very wrong" 33 | exit 1 34 | fi 35 | echo "${interface}" 36 | } 37 | 38 | binary_ip() { 39 | string_ip=$1 40 | i=1 41 | binary_ip=0 42 | for octet in $(echo "$string_ip" | tr -s "." 
" "); do 43 | binary_ip=$(($binary_ip + ($octet<<(32-(8*$i))))) 44 | i=$(($i+1)) 45 | done 46 | echo ${binary_ip} 47 | } 48 | 49 | binary_mask() { 50 | bits=$1 51 | full_mask=$(((2**32)-1)) 52 | mask=$((((2**32)-1)-((2**(32-$bits))-1))) 53 | echo $mask 54 | } 55 | 56 | ip_in_network() { 57 | network=$1 58 | target_ip=$2 59 | 60 | net_ip=$(echo $network | cut -d "/" -f1) 61 | net_mask=$(echo $network | cut -d "/" -f2) 62 | 63 | net_ip_binary=$(binary_ip "$net_ip") 64 | target_ip_binary=$(binary_ip "$target_ip") 65 | net_mask_binary=$(binary_mask "$net_mask") 66 | 67 | if [[ $(($net_ip_binary&$net_mask_binary)) == $(($target_ip_binary&$net_mask_binary)) ]]; then 68 | echo 0 69 | else 70 | echo 1 71 | fi 72 | } 73 | 74 | case $1 in 75 | 76 | start) 77 | echo "Starting keepalived... ($(date))" 78 | mkdir -p $RUN_DIR $LOG_DIR 79 | chown -R vcap:vcap $RUN_DIR $LOG_DIR 80 | 81 | <% if p("keepalived.interface") == "auto" -%> 82 | interface=$(interface_for_ip <%= p("vip") %>) 83 | if [[ $? != 0 || -z ${interface} ]]; then 84 | echo "Could not autodetect interface to use for <%= p("vip") %>. Cannot continue." 85 | exit 1 86 | fi 87 | if grep "interface auto" ${CONF_DIR}/keepalived.tpl > /dev/null; then 88 | sed "s/interface auto/interface ${interface}/" ${CONF_DIR}/keepalived.tpl > ${CONF_DIR}/keepalived.conf 89 | fi 90 | <% else -%> 91 | cp ${CONF_DIR}/keepalived.tpl ${CONF_DIR}/keepalived.conf 92 | <% end -%> 93 | 94 | $SBIN_DIR/keepalived -l -D -n \ 95 | -f $CONF_DIR/keepalived.conf \ 96 | --pid=$RUN_DIR/keepalived.pid \ 97 | --vrrp_pid=${RUN_DIR}/vrrp.pid \ 98 | --checkers_pid=${RUN_DIR}/checkers.pid \ 99 | ;; 100 | 101 | stop) 102 | echo "Stopping keepalived... 
($(date))" 103 | for PIDFILE in ${RUN_DIR}/*.pid; do 104 | pid=$(cat $PIDFILE) 105 | [[ -z "${pid}" ]] && continue 106 | kill "${pid}" 107 | wait_pid_death "${pid}" 25 108 | if pid_is_running "${pid}"; then 109 | kill -9 ${pid} 110 | fi 111 | rm -f $PIDFILE 112 | done 113 | 114 | ;; 115 | 116 | *) 117 | echo "Usage: keepalived {start|stop}" ;; 118 | 119 | esac 120 | -------------------------------------------------------------------------------- /jobs/vip/templates/config/haproxy.conf: -------------------------------------------------------------------------------- 1 | <% require "digest"%> 2 | 3 | global 4 | log 127.0.0.1 syslog <%= p('ha_proxy.log_level') %> 5 | daemon 6 | user vcap 7 | group vcap 8 | maxconn 64000 9 | spread-checks 4 10 | stats socket /var/vcap/sys/run/vip/haproxy.sock mode 600 level admin 11 | stats timeout 2m 12 | external-check 13 | 14 | defaults 15 | log global 16 | maxconn 64000 17 | option contstats 18 | timeout connect <%= p("ha_proxy.connect_timeout").to_i * 1000 %>ms 19 | timeout client <%= p("ha_proxy.client_timeout").to_i * 1000 %>ms 20 | timeout server <%= p("ha_proxy.server_timeout").to_i * 1000 %>ms 21 | timeout queue <%= p("ha_proxy.queue_timeout").to_i * 1000 %>ms 22 | 23 | 24 | <% if p("ha_proxy.stats_enable") %> 25 | listen stats 26 | bind <%= p("ha_proxy.stats_bind") %> 27 | acl private src <%= p("ha_proxy.trusted_stats_cidrs") %> 28 | http-request deny unless private 29 | mode http 30 | stats enable 31 | stats hide-version 32 | stats realm Haproxy\ Statistics 33 | stats uri /<%= p("ha_proxy.stats_uri") %> 34 | stats auth <%= p("ha_proxy.stats_user") %>:<%= p("ha_proxy.stats_password") %> 35 | <% end %> 36 | 37 | frontend postgres 38 | mode tcp 39 | bind :<%= p('port') %> 40 | default_backend postgres 41 | 42 | backend postgres 43 | mode tcp 44 | option external-check 45 | external-check command "/var/vcap/jobs/postgres/bin/healthy" 46 | external-check path "/var/vcap/packages/postgres/bin:/bin:/usr/bin:/sbin:/usr/sbin" 47 | 
<% link("db").instances.each_with_index do |peer, i| %> 48 | server node<%= i %> <%= peer.address %>:<%= p('backend_port') %> check inter 1000<% end %> 49 | 50 | 51 | frontend postgres-read-replica 52 | mode tcp 53 | bind :<%= p('readonly_port', '7432') %> 54 | default_backend postgres-read-replica 55 | 56 | backend postgres-read-replica 57 | mode tcp 58 | option external-check 59 | external-check command "/var/vcap/jobs/postgres/bin/healthy" 60 | external-check path "/var/vcap/packages/postgres/bin:/bin:/usr/bin:/sbin:/usr/sbin" 61 | <% link("db").instances.each_with_index do |peer, i| %> 62 | server node<%= i %> <%= peer.address %>:<%= p('backend_port') %> check inter 1000<% end %> 63 | -------------------------------------------------------------------------------- /jobs/vip/templates/config/keepalived.tpl: -------------------------------------------------------------------------------- 1 | global_defs { 2 | lvs_id <%= spec.name %> 3 | } 4 | 5 | vrrp_script check-postgres { 6 | script "killall -0 haproxy" 7 | interval 2 8 | weight 2 9 | } 10 | 11 | vrrp_instance <%= spec.name %>-postgres { 12 | <% if spec.bootstrap %> 13 | state MASTER 14 | priority 101 15 | <% else %> 16 | state SLAVE 17 | priority 100 18 | <% end %> 19 | interface <%= p('keepalived.interface') %> 20 | virtual_router_id <%= p('keepalived.virtual_router_id') %> 21 | 22 | virtual_ipaddress { 23 | <%= p('vip') %> 24 | } 25 | 26 | track_script { 27 | check-postgres 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /jobs/vip/templates/helpers/ctl_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Setup env vars and folders for the ctl script 4 | # This helps keep the ctl script as readable 5 | # as possible 6 | 7 | # Usage options: 8 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh JOB_NAME OUTPUT_LABEL 9 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar 10 | # 
source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar foobar 11 | # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar nginx 12 | 13 | set -e # exit immediately if a simple command exits with a non-zero status 14 | set -u # report the usage of uninitialized variables 15 | 16 | JOB_NAME=$1 17 | output_label=${2:-${JOB_NAME}} 18 | 19 | export JOB_DIR=/var/vcap/jobs/$JOB_NAME 20 | chmod 755 $JOB_DIR # to access file via symlink 21 | 22 | # Load some bosh deployment properties into env vars 23 | # Try to put all ERb into data/properties.sh.erb 24 | # incl $NAME, $JOB_INDEX, $WEBAPP_DIR 25 | source $JOB_DIR/data/properties.sh 26 | 27 | source $JOB_DIR/helpers/ctl_utils.sh 28 | redirect_output ${output_label} 29 | 30 | export HOME=${HOME:-/home/vcap} 31 | 32 | # Add all packages' /bin & /sbin into $PATH 33 | for package_bin_dir in $(ls -d /var/vcap/packages/*/*bin) 34 | do 35 | export PATH=${package_bin_dir}:$PATH 36 | done 37 | 38 | export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-''} # default to empty 39 | for package_bin_dir in $(ls -d /var/vcap/packages/*/lib) 40 | do 41 | export LD_LIBRARY_PATH=${package_bin_dir}:$LD_LIBRARY_PATH 42 | done 43 | 44 | # Setup log, run and tmp folders 45 | 46 | export RUN_DIR=/var/vcap/sys/run/$JOB_NAME 47 | export LOG_DIR=/var/vcap/sys/log/$JOB_NAME 48 | export TMP_DIR=/var/vcap/sys/tmp/$JOB_NAME 49 | export STORE_DIR=/var/vcap/store/$JOB_NAME 50 | for dir in $RUN_DIR $LOG_DIR $TMP_DIR $STORE_DIR 51 | do 52 | mkdir -p ${dir} 53 | chown vcap:vcap ${dir} 54 | chmod 775 ${dir} 55 | done 56 | export TMPDIR=$TMP_DIR 57 | 58 | export C_INCLUDE_PATH=/var/vcap/packages/mysqlclient/include/mysql:/var/vcap/packages/sqlite/include:/var/vcap/packages/libpq/include 59 | export LIBRARY_PATH=/var/vcap/packages/mysqlclient/lib/mysql:/var/vcap/packages/sqlite/lib:/var/vcap/packages/libpq/lib 60 | 61 | # consistent place for vendoring python libraries within package 62 | if [[ -d ${WEBAPP_DIR:-/xxxx} ]] 63 | then 64 | export 
PYTHONPATH=$WEBAPP_DIR/vendor/lib/python 65 | fi 66 | 67 | if [[ -d /var/vcap/packages/java7 ]] 68 | then 69 | export JAVA_HOME="/var/vcap/packages/java7" 70 | fi 71 | 72 | # setup CLASSPATH for all jars/ folders within packages 73 | export CLASSPATH=${CLASSPATH:-''} # default to empty 74 | for java_jar in $(ls -d /var/vcap/packages/*/*/*.jar) 75 | do 76 | export CLASSPATH=${java_jar}:$CLASSPATH 77 | done 78 | 79 | PIDFILE=$RUN_DIR/$output_label.pid 80 | 81 | echo '$PATH' $PATH 82 | -------------------------------------------------------------------------------- /jobs/vip/templates/helpers/ctl_utils.sh: -------------------------------------------------------------------------------- 1 | # Helper functions used by ctl scripts 2 | 3 | # links a job file (probably a config file) into a package 4 | # Example usage: 5 | # link_job_file_to_package config/redis.yml [config/redis.yml] 6 | # link_job_file_to_package config/wp-config.php wp-config.php 7 | link_job_file_to_package() { 8 | source_job_file=$1 9 | target_package_file=${2:-$source_job_file} 10 | full_package_file=$WEBAPP_DIR/${target_package_file} 11 | 12 | link_job_file ${source_job_file} ${full_package_file} 13 | } 14 | 15 | # links a job file (probably a config file) somewhere 16 | # Example usage: 17 | # link_job_file config/bashrc /home/vcap/.bashrc 18 | link_job_file() { 19 | source_job_file=$1 20 | target_file=$2 21 | full_job_file=$JOB_DIR/${source_job_file} 22 | 23 | echo link_job_file ${full_job_file} ${target_file} 24 | if [[ ! 
-f ${full_job_file} ]] 25 | then 26 | echo "file to link ${full_job_file} does not exist" 27 | else 28 | # Create/recreate the symlink to current job file 29 | # If another process is using the file, it won't be 30 | # deleted, so don't attempt to create the symlink 31 | mkdir -p $(dirname ${target_file}) 32 | ln -nfs ${full_job_file} ${target_file} 33 | fi 34 | } 35 | 36 | # If loaded within monit ctl scripts then pipe output 37 | # If loaded from 'source ../utils.sh' then normal STDOUT 38 | redirect_output() { 39 | SCRIPT=$1 40 | mkdir -p /var/vcap/sys/log/monit 41 | exec 1>> /var/vcap/sys/log/monit/$SCRIPT.log 42 | exec 2>> /var/vcap/sys/log/monit/$SCRIPT.err.log 43 | } 44 | 45 | pid_guard() { 46 | pidfile=$1 47 | name=$2 48 | 49 | if [ -f "$pidfile" ]; then 50 | pid=$(head -1 "$pidfile") 51 | 52 | if [ -n "$pid" ] && [ -e /proc/$pid ]; then 53 | echo "$name is already running, please stop it first" 54 | exit 1 55 | fi 56 | 57 | echo "Removing stale pidfile..." 58 | rm $pidfile 59 | fi 60 | } 61 | 62 | wait_pid() { 63 | pid=$1 64 | try_kill=$2 65 | timeout=${3:-0} 66 | force=${4:-0} 67 | countdown=$(( $timeout * 10 )) 68 | 69 | echo wait_pid $pid $try_kill $timeout $force $countdown 70 | if [ -e /proc/$pid ]; then 71 | if [ "$try_kill" = "1" ]; then 72 | echo "Killing $pidfile: $pid " 73 | kill $pid 74 | fi 75 | while [ -e /proc/$pid ]; do 76 | sleep 0.1 77 | [ "$countdown" != '0' -a $(( $countdown % 10 )) = '0' ] && echo -n . 78 | if [ $timeout -gt 0 ]; then 79 | if [ $countdown -eq 0 ]; then 80 | if [ "$force" = "1" ]; then 81 | echo -ne "\nKill timed out, using kill -9 on $pid... " 82 | kill -9 $pid 83 | sleep 0.5 84 | fi 85 | break 86 | else 87 | countdown=$(( $countdown - 1 )) 88 | fi 89 | fi 90 | done 91 | if [ -e /proc/$pid ]; then 92 | echo "Timed Out" 93 | else 94 | echo "Stopped" 95 | fi 96 | else 97 | echo "Process $pid is not running" 98 | echo "Attempting to kill pid anyway..." 
99 | kill $pid 100 | fi 101 | } 102 | 103 | wait_pidfile() { 104 | pidfile=$1 105 | try_kill=$2 106 | timeout=${3:-0} 107 | force=${4:-0} 108 | countdown=$(( $timeout * 10 )) 109 | 110 | if [ -f "$pidfile" ]; then 111 | pid=$(head -1 "$pidfile") 112 | if [ -z "$pid" ]; then 113 | echo "Unable to get pid from $pidfile" 114 | exit 1 115 | fi 116 | 117 | wait_pid $pid $try_kill $timeout $force 118 | 119 | rm -f $pidfile 120 | else 121 | echo "Pidfile $pidfile doesn't exist" 122 | fi 123 | } 124 | 125 | kill_and_wait() { 126 | pidfile=$1 127 | # Monit default timeout for start/stop is 30s 128 | # Append 'with timeout {n} seconds' to monit start/stop program configs 129 | timeout=${2:-25} 130 | force=${3:-1} 131 | if [[ -f ${pidfile} ]] 132 | then 133 | wait_pidfile $pidfile 1 $timeout $force 134 | else 135 | # TODO assume $1 is something to grep from 'ps ax' 136 | pid="$(ps auwwx | grep "$1" | awk '{print $2}')" 137 | wait_pid $pid 1 $timeout $force 138 | fi 139 | } 140 | 141 | check_nfs_mount() { 142 | opts=$1 143 | exports=$2 144 | mount_point=$3 145 | 146 | if grep -qs $mount_point /proc/mounts; then 147 | echo "Found NFS mount $mount_point" 148 | else 149 | echo "Mounting NFS..." 150 | mount $opts $exports $mount_point 151 | if [ $? != 0 ]; then 152 | echo "Cannot mount NFS from $exports to $mount_point, exiting..." 153 | exit 1 154 | fi 155 | fi 156 | } 157 | -------------------------------------------------------------------------------- /jobs/vip/templates/properties.sh.erb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # job template binding variables 4 | 5 | # job name & index of this VM within cluster 6 | # e.g. 
JOB_NAME=redis, JOB_INDEX=0 7 | export NAME='<%= name %>' 8 | export JOB_INDEX=<%= index %> 9 | # full job name, like redis/0 or webapp/3 10 | export JOB_FULL="$NAME/$JOB_INDEX" 11 | -------------------------------------------------------------------------------- /jobs/vip/templates/ssl_redirect.map.erb: -------------------------------------------------------------------------------- 1 | <% p("ha_proxy.https_redirect_domains").each do |domain| %> 2 | <%= domain %> true 3 | <% end %> 4 | -------------------------------------------------------------------------------- /manifests/ha.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: postgres 3 | 4 | instance_groups: 5 | - azs: 6 | - z1 7 | name: postgres 8 | networks: 9 | - name: default 10 | persistent_disk_type: default 11 | stemcell: default 12 | vm_type: default 13 | instances: 2 14 | jobs: 15 | - name: vip 16 | properties: 17 | vip: (( vip_ip )) 18 | release: postgres 19 | - name: postgres 20 | properties: 21 | postgres: 22 | config: 23 | max_connections: 500 24 | databases: 25 | - extensions: 26 | - citext 27 | name: diegodb 28 | users: 29 | - hank 30 | - extensions: 31 | - citext 32 | name: uaadb 33 | users: 34 | - hank 35 | - extensions: 36 | - citext 37 | name: ccdb 38 | users: 39 | - hank 40 | hba: 41 | - host all all 0.0.0.0/0 md5 42 | - host all all ::/0 md5 43 | replication: 44 | enabled: true 45 | users: 46 | - admin: true 47 | password: propane 48 | username: hank 49 | release: postgres 50 | 51 | 52 | - azs: 53 | - z1 54 | name: smoke-tests 55 | instances: 1 56 | lifecycle: errand 57 | networks: 58 | - name: default 59 | stemcell: default 60 | vm_type: default 61 | jobs: 62 | - name: smoke-tests 63 | properties: 64 | postgres: 65 | smoke-tests: 66 | backend: 67 | port: 6432 68 | target: 69 | port: 5432 70 | username: hank 71 | password: propane 72 | replication: 73 | enabled: true 74 | release: postgres 75 | 76 | stemcells: 77 | - alias: default 78 | 
os: ubuntu-xenial 79 | version: latest 80 | 81 | update: 82 | canaries: 1 83 | canary_watch_time: 1000-120000 84 | max_in_flight: 50 85 | serial: false 86 | update_watch_time: 1000-120000 87 | 88 | releases: 89 | - name: postgres 90 | version: 3.2.2 91 | url: https://github.com/cloudfoundry-community/postgres-boshrelease/releases/download/v3.2.2/postgres-3.2.2.tgz 92 | sha1: 7699715ed0b7ec129f60958e2864958030333cea 93 | -------------------------------------------------------------------------------- /manifests/postgres.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: postgres 3 | 4 | instance_groups: 5 | - name: postgres 6 | azs: [z1] 7 | networks: [{name: default }] 8 | persistent_disk: 2048 9 | stemcell: default 10 | vm_type: default 11 | instances: 1 12 | jobs: 13 | - name: postgres 14 | release: postgres 15 | properties: 16 | postgres: 17 | config: 18 | max_connections: 500 19 | databases: 20 | - name: diegodb 21 | extensions: [citext] 22 | users: [hank] 23 | 24 | - name: uaadb 25 | extensions: [citext] 26 | users: [hank] 27 | 28 | - name: ccdb 29 | extensions: [citext] 30 | users: [hank] 31 | 32 | hba: 33 | - host all all 0.0.0.0/0 md5 34 | - host all all ::/0 md5 35 | 36 | users: 37 | - username: hank 38 | password: propane 39 | admin: true 40 | 41 | - name: smoke-tests 42 | instances: 1 43 | azs: [z1] 44 | lifecycle: errand 45 | networks: [{name: default}] 46 | stemcell: default 47 | vm_type: default 48 | jobs: 49 | - name: smoke-tests 50 | release: postgres 51 | properties: 52 | postgres: 53 | smoke-tests: 54 | backend: 55 | port: 6432 56 | target: 57 | port: 5432 58 | username: hank 59 | password: propane 60 | 61 | stemcells: 62 | - alias: default 63 | os: ubuntu-xenial 64 | version: latest 65 | 66 | update: 67 | canaries: 1 68 | canary_watch_time: 1000-120000 69 | max_in_flight: 50 70 | serial: false 71 | update_watch_time: 1000-120000 72 | 73 | releases: 74 | - name: postgres 75 | version: 3.2.2 76 | 
url: https://github.com/cloudfoundry-community/postgres-boshrelease/releases/download/v3.2.2/postgres-3.2.2.tgz 77 | sha1: 7699715ed0b7ec129f60958e2864958030333cea 78 | -------------------------------------------------------------------------------- /packages/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry-community/postgres-boshrelease/265157cbcc5f31a1ea50f0371a1a8addcc656b7d/packages/.gitkeep -------------------------------------------------------------------------------- /packages/haproxy/packaging: -------------------------------------------------------------------------------- 1 | # abort script on any command that exit with a non zero value 2 | set -e 3 | 4 | PCRE_VERSION=10.39 5 | SOCAT_VERSION=1.7.3.4 6 | HAPROXY_VERSION=1.8.30 7 | 8 | mkdir ${BOSH_INSTALL_TARGET}/bin 9 | 10 | echo "Extracting pcre..." 11 | tar xjf haproxy/pcre2-${PCRE_VERSION}.tar.bz2 12 | pushd pcre2-${PCRE_VERSION} 13 | ./configure --prefix ${BOSH_INSTALL_TARGET} 14 | make 15 | make install 16 | popd 17 | 18 | echo "Installing socat..." 
19 | tar xjf haproxy/socat-${SOCAT_VERSION}.tar.bz2 20 | pushd socat-${SOCAT_VERSION} 21 | ./configure 22 | make 23 | cp socat ${BOSH_INSTALL_TARGET}/bin 24 | chmod 755 ${BOSH_INSTALL_TARGET}/bin/socat 25 | popd 26 | 27 | tar xf haproxy/haproxy-${HAPROXY_VERSION}.tar.gz 28 | pushd haproxy-${HAPROXY_VERSION} 29 | make TARGET=linux2628 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=yes USE_STATIC_PCRE2=1 USE_ZLIB=1 PCRE2DIR=${BOSH_INSTALL_TARGET} 30 | cp haproxy ${BOSH_INSTALL_TARGET}/bin/ 31 | chmod 755 ${BOSH_INSTALL_TARGET}/bin/haproxy 32 | popd 33 | 34 | cp hatop/hatop ${BOSH_INSTALL_TARGET}/bin/ 35 | chmod 755 ${BOSH_INSTALL_TARGET}/bin/hatop 36 | -------------------------------------------------------------------------------- /packages/haproxy/spec: -------------------------------------------------------------------------------- 1 | --- 2 | name: haproxy 3 | files: 4 | - haproxy/haproxy-*.tar.gz 5 | - haproxy/pcre2-*.tar.bz2 6 | - haproxy/socat-*.tar.bz2 7 | - hatop/hatop 8 | -------------------------------------------------------------------------------- /packages/keepalived/packaging: -------------------------------------------------------------------------------- 1 | # abort script on any command that exits with a non zero value 2 | set -e -x 3 | 4 | # Copy common utils 5 | mkdir -p ${BOSH_INSTALL_TARGET}/common 6 | cp -a ${BOSH_COMPILE_TARGET}/common/* ${BOSH_INSTALL_TARGET}/common 7 | 8 | #Source can be downloaded at : http://www.keepalived.org/software/keepalived-1.2.24.tar.gz 9 | tar xzvf keepalived/keepalived-1.2.24.tar.gz 10 | cd keepalived-1.2.24/ 11 | 12 | #compile keepalive 13 | ./configure --prefix=${BOSH_INSTALL_TARGET} 14 | make 15 | make install 16 | 17 | 18 | -------------------------------------------------------------------------------- /packages/keepalived/spec: -------------------------------------------------------------------------------- 1 | --- 2 | name: keepalived 3 | 4 | dependencies: [] 5 | 6 | files: 7 | - common/utils.sh 8 | - 
keepalived/keepalived-1.2.24.tar.gz 9 | 10 | -------------------------------------------------------------------------------- /packages/pgrt/packaging: -------------------------------------------------------------------------------- 1 | set -e # exit immediately if a simple command exits with a non-zero status 2 | set -u # report the usage of uninitialized variables 3 | 4 | # Detect # of CPUs so make jobs can be parallelized 5 | CPUS=$(grep -c ^processor /proc/cpuinfo) 6 | # Available variables 7 | # $BOSH_COMPILE_TARGET - where this package & spec'd source files are available 8 | # $BOSH_INSTALL_TARGET - where you copy/install files to be included in package 9 | export HOME=/var/vcap 10 | 11 | mkdir -p ${BOSH_INSTALL_TARGET}/bin 12 | cp pgrt/pgrt ${BOSH_INSTALL_TARGET}/bin 13 | chmod 0755 ${BOSH_INSTALL_TARGET}/bin/* 14 | -------------------------------------------------------------------------------- /packages/pgrt/spec: -------------------------------------------------------------------------------- 1 | --- 2 | name: pgrt 3 | dependencies: [] 4 | files: 5 | - pgrt/pgrt 6 | -------------------------------------------------------------------------------- /packages/postgres/packaging: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | export HOME=/var/vcap 6 | 7 | processors=$(grep -c ^processor /proc/cpuinfo) 8 | version="16.4" # https://ftp.postgresql.org/pub/source/v16.4/postgresql-16.4.tar.bz2 9 | 10 | tar -jxf postgres/postgresql-${version}.tar.bz2 11 | 12 | cd postgresql-${version}/ 13 | 14 | # Set the default socket directory to /var/vcap/sys/run/postgres 15 | sed -i -e 's%#define DEFAULT_PGSOCKET_DIR.*$%#define DEFAULT_PGSOCKET_DIR "/var/vcap/sys/run/postgres"%' \ 16 | src/include/pg_config_manual.h 17 | 18 | ./configure --prefix "${BOSH_INSTALL_TARGET}" 19 | 20 | make -j"${processors}" world 21 | 22 | make install-world 23 | 
-------------------------------------------------------------------------------- /packages/postgres/spec: -------------------------------------------------------------------------------- 1 | --- 2 | name: postgres 3 | 4 | dependencies: [] 5 | 6 | files: 7 | - postgres/postgresql-* 8 | -------------------------------------------------------------------------------- /releases/postgres/index.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | 173c692c-aa23-45fa-4023-f0197af00e9a: 3 | version: 3.2.1 4 | 37236237-db24-4d93-5b42-b1bd87506bde: 5 | version: 3.1.0 6 | 3c8040ef-3e65-4ff9-a17f-a1cde644a2d5: 7 | version: 1.0.0 8 | 514398a8-f8c1-4fdc-4c59-d7a2b8bd7617: 9 | version: 3.1.4 10 | 596d4bf3-5298-473e-4082-116710e43c9d: 11 | version: 3.1.5 12 | 643b684e-c6ed-44cf-84ae-56b8eb6a5482: 13 | version: 1.0.3 14 | 67f455ee-cf16-4758-76eb-92327cf7a4e1: 15 | version: 3.1.2 16 | 6ae3345e-abcd-407d-48b6-2446d78cb5b0: 17 | version: 3.2.2 18 | 73ca71e2-8a95-46da-68ef-41e6fea4ec56: 19 | version: 2.0.0 20 | 7945699a-ccf1-410d-9ba4-28da6e60c773: 21 | version: 1.1.0 22 | 8b5e44b0-b13f-489d-73b9-f2d62c7f12db: 23 | version: 3.0.0 24 | a47eef5c-53b8-415f-8497-a0c52b84741c: 25 | version: 1.0.2 26 | bd762da4-4f09-4367-9865-c5dab8fc4be8: 27 | version: 1.0.4 28 | cf497389-a963-4401-6f9a-bc629c5005ab: 29 | version: 3.1.3 30 | d2faa9b7-c7b0-46c6-4494-62b6499cd7e4: 31 | version: 3.2.0 32 | e294790c-f962-4697-59f1-a8dadf5d464b: 33 | version: 3.1.1 34 | format-version: "2" 35 | -------------------------------------------------------------------------------- /releases/postgres/postgres-1.0.0.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packages: 3 | - name: pgpool2 4 | version: 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac 5 | fingerprint: 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac 6 | sha1: ed1d3ee8879106c56a615b80bf145fe2a240ec31 7 | dependencies: 8 | - postgres 9 | - name: pgrt 10 | version: 
dafda7c29675f507adc4effbee68094c3d5dabce 11 | fingerprint: dafda7c29675f507adc4effbee68094c3d5dabce 12 | sha1: fadd380592639abf60204e71210b85e66e0dbd53 13 | dependencies: [] 14 | - name: postgres 15 | version: a72141d28d97c02c4348633d31995953e75a6183 16 | fingerprint: a72141d28d97c02c4348633d31995953e75a6183 17 | sha1: 1df80253697ca51dca59b294a057bb68d6fb5d9f 18 | dependencies: [] 19 | jobs: 20 | - name: pgpool 21 | version: ee5697f0f1d54eccc2ad49034f0a8a5ae95e2306 22 | fingerprint: ee5697f0f1d54eccc2ad49034f0a8a5ae95e2306 23 | sha1: 407405699f75c9ea62536fc55f16416b06c20a42 24 | - name: postgres 25 | version: 06ff778a048b81cf830872c70cb1b5ece3c1b8e9 26 | fingerprint: 06ff778a048b81cf830872c70cb1b5ece3c1b8e9 27 | sha1: 90508786e65252aee2dadcc8868856c19a6a23da 28 | - name: smoke-tests 29 | version: c70d7c4abb65cb5dc064416c60021dee6db11e57 30 | fingerprint: c70d7c4abb65cb5dc064416c60021dee6db11e57 31 | sha1: ea74f9da8949de680e930a9d05b8f2167058b218 32 | license: 33 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 34 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 35 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 36 | commit_hash: 591f743e 37 | uncommitted_changes: true 38 | name: postgres 39 | version: 1.0.0 40 | -------------------------------------------------------------------------------- /releases/postgres/postgres-1.0.2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packages: 3 | - name: pgpool2 4 | version: 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac 5 | fingerprint: 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac 6 | sha1: ed1d3ee8879106c56a615b80bf145fe2a240ec31 7 | dependencies: 8 | - postgres 9 | - name: pgrt 10 | version: dafda7c29675f507adc4effbee68094c3d5dabce 11 | fingerprint: dafda7c29675f507adc4effbee68094c3d5dabce 12 | sha1: fadd380592639abf60204e71210b85e66e0dbd53 13 | dependencies: [] 14 | - name: postgres 15 | version: a72141d28d97c02c4348633d31995953e75a6183 16 | fingerprint: 
a72141d28d97c02c4348633d31995953e75a6183 17 | sha1: 1df80253697ca51dca59b294a057bb68d6fb5d9f 18 | dependencies: [] 19 | jobs: 20 | - name: pgpool 21 | version: ee5697f0f1d54eccc2ad49034f0a8a5ae95e2306 22 | fingerprint: ee5697f0f1d54eccc2ad49034f0a8a5ae95e2306 23 | sha1: 407405699f75c9ea62536fc55f16416b06c20a42 24 | - name: postgres 25 | version: 75eb1be9a33cecc9541b6f2945656296b1587ab6 26 | fingerprint: 75eb1be9a33cecc9541b6f2945656296b1587ab6 27 | sha1: 93607d91627cefd5f5a8b9fb539b966ab81b22ee 28 | - name: smoke-tests 29 | version: c70d7c4abb65cb5dc064416c60021dee6db11e57 30 | fingerprint: c70d7c4abb65cb5dc064416c60021dee6db11e57 31 | sha1: ea74f9da8949de680e930a9d05b8f2167058b218 32 | license: 33 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 34 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 35 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 36 | commit_hash: 3866fac2 37 | uncommitted_changes: true 38 | name: postgres 39 | version: 1.0.2 40 | -------------------------------------------------------------------------------- /releases/postgres/postgres-1.0.3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packages: 3 | - name: pgrt 4 | version: dafda7c29675f507adc4effbee68094c3d5dabce 5 | fingerprint: dafda7c29675f507adc4effbee68094c3d5dabce 6 | sha1: fadd380592639abf60204e71210b85e66e0dbd53 7 | dependencies: [] 8 | - name: pgpool2 9 | version: 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac 10 | fingerprint: 19c016a7cdcaaa93c40e4bbb936f454a1b26ddac 11 | sha1: ed1d3ee8879106c56a615b80bf145fe2a240ec31 12 | dependencies: 13 | - postgres 14 | - name: postgres 15 | version: a9ef943bde149989a4f5077a14ed16b5b1b29100 16 | fingerprint: a9ef943bde149989a4f5077a14ed16b5b1b29100 17 | sha1: 03645d7cac757e492dbea3599d9cfd2d8c7592dd 18 | dependencies: [] 19 | jobs: 20 | - name: pgpool 21 | version: 232e1d6a1d504df42c31c34c845c65c79e7cb38c 22 | fingerprint: 232e1d6a1d504df42c31c34c845c65c79e7cb38c 23 | sha1: 
a8e1020712f0885e4187db3d9ee932d342a28cf8 24 | - name: postgres 25 | version: 6fc4ada1355fd55e6c5c095086492aa93526b1e7 26 | fingerprint: 6fc4ada1355fd55e6c5c095086492aa93526b1e7 27 | sha1: 3a282cf7abc7211cc6141dbfcbb7b5b5eca028fa 28 | - name: smoke-tests 29 | version: a73a6108927abdadff05042a5df17916f18a9090 30 | fingerprint: a73a6108927abdadff05042a5df17916f18a9090 31 | sha1: 928187a4409cec9f5681c034e921e81a554a40b5 32 | license: 33 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 34 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 35 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 36 | commit_hash: 71dfd61b 37 | uncommitted_changes: true 38 | name: postgres 39 | version: 1.0.3 40 | -------------------------------------------------------------------------------- /releases/postgres/postgres-1.0.4.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packages: 3 | - name: pgpool2 4 | version: 0989b207209bea0bdcbffb90b09046a68415ae28 5 | fingerprint: 0989b207209bea0bdcbffb90b09046a68415ae28 6 | sha1: 53703c74c0fd49022250bb164c2abf9bcfc41117 7 | dependencies: 8 | - postgres 9 | - name: pgrt 10 | version: dafda7c29675f507adc4effbee68094c3d5dabce 11 | fingerprint: dafda7c29675f507adc4effbee68094c3d5dabce 12 | sha1: fadd380592639abf60204e71210b85e66e0dbd53 13 | dependencies: [] 14 | - name: postgres 15 | version: 664fd4f2accda02570a30290d1d45a0abc459f9c 16 | fingerprint: 664fd4f2accda02570a30290d1d45a0abc459f9c 17 | sha1: 1272d0a8297c300647cf7446537117b742222022 18 | dependencies: [] 19 | jobs: 20 | - name: pgpool 21 | version: 751805a78a400009ef2961bf21850abc29bee910 22 | fingerprint: 751805a78a400009ef2961bf21850abc29bee910 23 | sha1: 1ee0636ea84eaa538d96b3b548b795647b264b9c 24 | - name: postgres 25 | version: 56b55ff4547348601a728ac54e550a5824de56c3 26 | fingerprint: 56b55ff4547348601a728ac54e550a5824de56c3 27 | sha1: 088a6265d744c7218f30201475b39f413ff25cdd 28 | - name: smoke-tests 29 | version: 
a73a6108927abdadff05042a5df17916f18a9090 30 | fingerprint: a73a6108927abdadff05042a5df17916f18a9090 31 | sha1: 928187a4409cec9f5681c034e921e81a554a40b5 32 | license: 33 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 34 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 35 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 36 | commit_hash: b9fe4ae8 37 | uncommitted_changes: true 38 | name: postgres 39 | version: 1.0.4 40 | -------------------------------------------------------------------------------- /releases/postgres/postgres-1.1.0.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packages: 3 | - name: pgrt 4 | version: dafda7c29675f507adc4effbee68094c3d5dabce 5 | fingerprint: dafda7c29675f507adc4effbee68094c3d5dabce 6 | sha1: fadd380592639abf60204e71210b85e66e0dbd53 7 | dependencies: [] 8 | - name: postgres 9 | version: 664fd4f2accda02570a30290d1d45a0abc459f9c 10 | fingerprint: 664fd4f2accda02570a30290d1d45a0abc459f9c 11 | sha1: 1272d0a8297c300647cf7446537117b742222022 12 | dependencies: [] 13 | - name: pgpool2 14 | version: 31ffabc1a00122bf9f62956e0fa26c959ea7c07a 15 | fingerprint: 31ffabc1a00122bf9f62956e0fa26c959ea7c07a 16 | sha1: 8ad38de0edd185f917b53c40141ec6d70ebb2f68 17 | dependencies: 18 | - postgres 19 | jobs: 20 | - name: pgpool 21 | version: 31e75c9a2d3f6c1406887319def9b7c27fe8360d 22 | fingerprint: 31e75c9a2d3f6c1406887319def9b7c27fe8360d 23 | sha1: d6f9f0441c4a1d026b15db9372fc9147b393b9bd 24 | - name: postgres 25 | version: 6cb5f1dbf79cb4ec531cd392465c037eebe4b28f 26 | fingerprint: 6cb5f1dbf79cb4ec531cd392465c037eebe4b28f 27 | sha1: a25a1831f6ca704c04e1afe4d4c708a0ba0c2b10 28 | - name: smoke-tests 29 | version: a73a6108927abdadff05042a5df17916f18a9090 30 | fingerprint: a73a6108927abdadff05042a5df17916f18a9090 31 | sha1: 928187a4409cec9f5681c034e921e81a554a40b5 32 | license: 33 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 34 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 35 | 
sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 36 | commit_hash: e3feb72e 37 | uncommitted_changes: true 38 | name: postgres 39 | version: 1.1.0 40 | -------------------------------------------------------------------------------- /releases/postgres/postgres-2.0.0.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 2.0.0 3 | commit_hash: 6d12c47 4 | uncommitted_changes: false 5 | jobs: 6 | - name: pgpool 7 | version: 39c430611966dd13d9ecc4c24c1d5b94470c89bf 8 | fingerprint: 39c430611966dd13d9ecc4c24c1d5b94470c89bf 9 | sha1: bf8b7118ba780b6810b79a9443c55491ad4f5b42 10 | - name: postgres 11 | version: a0e2b8b8995b9b2d563b1ce7094e7e77a8fba94b 12 | fingerprint: a0e2b8b8995b9b2d563b1ce7094e7e77a8fba94b 13 | sha1: 7d2affaf1484251ad45ec9d2a42c95427fcd9561 14 | - name: smoke-tests 15 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 16 | fingerprint: ad59026e232d3ea9d4ddf1149272efc4d62758c9 17 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 18 | packages: 19 | - name: pgpool2 20 | version: 39ae1587bb89bc5df75cce7a05e01e9e9ffb579e 21 | fingerprint: 39ae1587bb89bc5df75cce7a05e01e9e9ffb579e 22 | sha1: 10b5d639f85974af413d8482eee403ae747d25cf 23 | dependencies: 24 | - postgres 25 | - name: pgrt 26 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 27 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 28 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 29 | dependencies: [] 30 | - name: postgres 31 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 32 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 33 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 34 | dependencies: [] 35 | license: 36 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 37 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 38 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 39 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.0.0.yml: 
-------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.0.0 3 | commit_hash: eebae0b 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: f1cca9cdac7f4345a37f4b9cd6d432010fc3304e 8 | fingerprint: f1cca9cdac7f4345a37f4b9cd6d432010fc3304e 9 | sha1: 84e0a6dbdc3223f27874789bee2c3acb5f278789 10 | - name: smoke-tests 11 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 12 | fingerprint: ad59026e232d3ea9d4ddf1149272efc4d62758c9 13 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 14 | - name: vip 15 | version: 2a03d5631458cd5894ed1277ac1c6599d0a2a762 16 | fingerprint: 2a03d5631458cd5894ed1277ac1c6599d0a2a762 17 | sha1: 07c219d066537dbcbb74bdd5d7c0c080028dee2c 18 | packages: 19 | - name: haproxy 20 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 21 | fingerprint: 888c05456b91cb06db85659acc774fcabbfd09e7 22 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 23 | dependencies: [] 24 | - name: keepalived 25 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 26 | fingerprint: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 27 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 28 | dependencies: [] 29 | - name: pgrt 30 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 31 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 32 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 33 | dependencies: [] 34 | - name: postgres 35 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 36 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 37 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 38 | dependencies: [] 39 | license: 40 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 41 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 42 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 43 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.1.0.yml: -------------------------------------------------------------------------------- 1 | name: postgres 
2 | version: 3.1.0 3 | commit_hash: ca15cff 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: 63e9e86100fb9717a0ce0e3049a678d72a82351f 8 | fingerprint: 63e9e86100fb9717a0ce0e3049a678d72a82351f 9 | sha1: e292d83d742f5245cf75b8cda37bb640957139e8 10 | - name: smoke-tests 11 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 12 | fingerprint: ad59026e232d3ea9d4ddf1149272efc4d62758c9 13 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 14 | - name: vip 15 | version: 2d7e5195e36368e5bb4a6ce645ef7db8d449ff7e 16 | fingerprint: 2d7e5195e36368e5bb4a6ce645ef7db8d449ff7e 17 | sha1: c4e9f824dde586d8d7e0aec8ce18705711c1149e 18 | packages: 19 | - name: haproxy 20 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 21 | fingerprint: 888c05456b91cb06db85659acc774fcabbfd09e7 22 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 23 | dependencies: [] 24 | - name: keepalived 25 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 26 | fingerprint: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 27 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 28 | dependencies: [] 29 | - name: pgrt 30 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 31 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 32 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 33 | dependencies: [] 34 | - name: postgres 35 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 36 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 37 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 38 | dependencies: [] 39 | license: 40 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 41 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 42 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 43 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.1.1.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.1.1 3 | commit_hash: 636f93d 4 | uncommitted_changes: false 5 | jobs: 6 | - name: 
postgres 7 | version: 6617cb6161f5f193be537a007af6348ff93d65e2 8 | fingerprint: 6617cb6161f5f193be537a007af6348ff93d65e2 9 | sha1: 9b6d141fec86702308d00b88e25653b9d7e591c9 10 | - name: smoke-tests 11 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 12 | fingerprint: ad59026e232d3ea9d4ddf1149272efc4d62758c9 13 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 14 | - name: vip 15 | version: 2d7e5195e36368e5bb4a6ce645ef7db8d449ff7e 16 | fingerprint: 2d7e5195e36368e5bb4a6ce645ef7db8d449ff7e 17 | sha1: c4e9f824dde586d8d7e0aec8ce18705711c1149e 18 | packages: 19 | - name: haproxy 20 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 21 | fingerprint: 888c05456b91cb06db85659acc774fcabbfd09e7 22 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 23 | dependencies: [] 24 | - name: keepalived 25 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 26 | fingerprint: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 27 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 28 | dependencies: [] 29 | - name: pgrt 30 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 31 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 32 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 33 | dependencies: [] 34 | - name: postgres 35 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 36 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 37 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 38 | dependencies: [] 39 | license: 40 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 41 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 42 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 43 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.1.2.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.1.2 3 | commit_hash: 70e8e68 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: 6617cb6161f5f193be537a007af6348ff93d65e2 8 | fingerprint: 
6617cb6161f5f193be537a007af6348ff93d65e2 9 | sha1: 9b6d141fec86702308d00b88e25653b9d7e591c9 10 | - name: smoke-tests 11 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 12 | fingerprint: ad59026e232d3ea9d4ddf1149272efc4d62758c9 13 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 14 | - name: vip 15 | version: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 16 | fingerprint: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 17 | sha1: e2392d30b7c2a66017697b206934247fe109ab06 18 | packages: 19 | - name: haproxy 20 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 21 | fingerprint: 888c05456b91cb06db85659acc774fcabbfd09e7 22 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 23 | dependencies: [] 24 | - name: keepalived 25 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 26 | fingerprint: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 27 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 28 | dependencies: [] 29 | - name: pgrt 30 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 31 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 32 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 33 | dependencies: [] 34 | - name: postgres 35 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 36 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 37 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 38 | dependencies: [] 39 | license: 40 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 41 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 42 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 43 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.1.3.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.1.3 3 | commit_hash: 244d47c 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: fce369182274f4e57c23e970f320f86e206e0add 8 | fingerprint: fce369182274f4e57c23e970f320f86e206e0add 9 | sha1: 4eb8583bb0ebe0811cfb68a7aa00468723023446 10 | - 
name: smoke-tests 11 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 12 | fingerprint: ad59026e232d3ea9d4ddf1149272efc4d62758c9 13 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 14 | - name: vip 15 | version: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 16 | fingerprint: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 17 | sha1: e2392d30b7c2a66017697b206934247fe109ab06 18 | packages: 19 | - name: haproxy 20 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 21 | fingerprint: 888c05456b91cb06db85659acc774fcabbfd09e7 22 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 23 | dependencies: [] 24 | - name: keepalived 25 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 26 | fingerprint: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 27 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 28 | dependencies: [] 29 | - name: pgrt 30 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 31 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 32 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 33 | dependencies: [] 34 | - name: postgres 35 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 36 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 37 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 38 | dependencies: [] 39 | license: 40 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 41 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 42 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 43 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.1.4.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.1.4 3 | commit_hash: 68f2f75 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: d849b3c95be04e2cbae3d827b059f3340cc3d277 8 | fingerprint: d849b3c95be04e2cbae3d827b059f3340cc3d277 9 | sha1: 51422ea48f17d7fb652e79e8004bd3e7935c54d8 10 | - name: smoke-tests 11 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 12 | fingerprint: 
ad59026e232d3ea9d4ddf1149272efc4d62758c9 13 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 14 | - name: vip 15 | version: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 16 | fingerprint: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 17 | sha1: e2392d30b7c2a66017697b206934247fe109ab06 18 | packages: 19 | - name: haproxy 20 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 21 | fingerprint: 888c05456b91cb06db85659acc774fcabbfd09e7 22 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 23 | dependencies: [] 24 | - name: keepalived 25 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 26 | fingerprint: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 27 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 28 | dependencies: [] 29 | - name: pgrt 30 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 31 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 32 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 33 | dependencies: [] 34 | - name: postgres 35 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 36 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 37 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 38 | dependencies: [] 39 | license: 40 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 41 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 42 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 43 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.1.5.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.1.5 3 | commit_hash: 763bfda 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: d736b096cbe7854bf0cb11b6471ba7fac89eaf8e 8 | fingerprint: d736b096cbe7854bf0cb11b6471ba7fac89eaf8e 9 | sha1: 45fc0124c13056a8f93896bace65391138d83220 10 | - name: smoke-tests 11 | version: ad59026e232d3ea9d4ddf1149272efc4d62758c9 12 | fingerprint: ad59026e232d3ea9d4ddf1149272efc4d62758c9 13 | sha1: 2fa9c9d727486ae2a07793543a0a99e7531e26fe 14 | - 
name: vip 15 | version: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 16 | fingerprint: 2ca0e176c2c6a22b69ef76dd046c0056b828fa9f 17 | sha1: e2392d30b7c2a66017697b206934247fe109ab06 18 | packages: 19 | - name: haproxy 20 | version: 888c05456b91cb06db85659acc774fcabbfd09e7 21 | fingerprint: 888c05456b91cb06db85659acc774fcabbfd09e7 22 | sha1: 401b275ffde0759b3704752bdd371427ab65d81a 23 | dependencies: [] 24 | - name: keepalived 25 | version: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 26 | fingerprint: a8e18e8e2a5159e933377c8d8af36978a1f4cbf0 27 | sha1: 95a91213757f17fc42eb473a44ff7929945d39bc 28 | dependencies: [] 29 | - name: pgrt 30 | version: fdec5588f5c2c34039820660d73ae0055709f8bc 31 | fingerprint: fdec5588f5c2c34039820660d73ae0055709f8bc 32 | sha1: 893ef5ada4c01fceb214b57d3029feed81fced54 33 | dependencies: [] 34 | - name: postgres 35 | version: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 36 | fingerprint: f72ef4e8dc8355070f22f8fe99a3e6a1e8355a9b 37 | sha1: a8695b72e9f3f4af2a56dfa2e7dafced627d9506 38 | dependencies: [] 39 | license: 40 | version: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 41 | fingerprint: f0f6caa3fde87a6d14a5faaf366b31155ccf78a0 42 | sha1: 1cf953bbf657fc5bfb8bdc14f401435e25ed1665 43 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.2.0.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.2.0 3 | commit_hash: c051ada 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: 30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121 8 | fingerprint: 30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121 9 | sha1: sha256:1285cea7b26518a7117b2863aac26caefe5c177ff213af99c49a5daa4e1435a1 10 | packages: 11 | - postgres 12 | - name: smoke-tests 13 | version: a08dc385aacde0fd9a3526177551a4de293fa9056e8f2228110e6c083fdca7c9 14 | fingerprint: a08dc385aacde0fd9a3526177551a4de293fa9056e8f2228110e6c083fdca7c9 
15 | sha1: sha256:a246ef95120c757dd8740856b29735f70429b02fc6db1edd1c093b5a9f42b84e 16 | packages: 17 | - postgres 18 | - pgrt 19 | - name: vip 20 | version: c532352928175597d9e2e6968a7c0cc9eec4877a76cefde1152c8b3efefaabe4 21 | fingerprint: c532352928175597d9e2e6968a7c0cc9eec4877a76cefde1152c8b3efefaabe4 22 | sha1: sha256:65ed036a08ac3fdd232071853807d02c87d42be9ecd9539aa94f8f94d94e315c 23 | packages: 24 | - haproxy 25 | - keepalived 26 | packages: 27 | - name: haproxy 28 | version: aa51a5ba97479ee10e1583d0c9f33b56cded83c44d1fc6500a73d5bfb91083b4 29 | fingerprint: aa51a5ba97479ee10e1583d0c9f33b56cded83c44d1fc6500a73d5bfb91083b4 30 | sha1: sha256:03af57399c9efc0c0cff0cc95c3bc2764e9617b2178a3bff8f08db8451ca1a08 31 | dependencies: [] 32 | - name: keepalived 33 | version: 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733 34 | fingerprint: 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733 35 | sha1: sha256:fc5e7c66d1e098ee30229a32b77937cc1a47f89ebd3599653bae1e08ae074f1f 36 | dependencies: [] 37 | - name: pgrt 38 | version: bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775 39 | fingerprint: bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775 40 | sha1: sha256:131df4a02f3eb3ee589d25ee1c7746171e74e5801fd1244ded292fc5412a47e4 41 | dependencies: [] 42 | - name: postgres 43 | version: afffbdce123957c1fd39f7697356808668ff3748640fccc629c8024baf2adcbe 44 | fingerprint: afffbdce123957c1fd39f7697356808668ff3748640fccc629c8024baf2adcbe 45 | sha1: sha256:33504b684ea0cbdec04b94ea1e0f2e3880f3338b2cb2500a29081d041fd5d0a8 46 | dependencies: [] 47 | license: 48 | version: 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b 49 | fingerprint: 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b 50 | sha1: sha256:00c81214e4d7e2af778b3f2d6e07f2ed1bf8b08d6ee3116b959dc9b654f38492 51 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.2.1.yml: 
-------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.2.1 3 | commit_hash: 5f2ccb4 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: 30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121 8 | fingerprint: 30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121 9 | sha1: sha256:1285cea7b26518a7117b2863aac26caefe5c177ff213af99c49a5daa4e1435a1 10 | packages: 11 | - postgres 12 | - name: smoke-tests 13 | version: 173de226b3f33ec19362e3d869879f3e2d932c4ea78fb51b4c3b1ec534638002 14 | fingerprint: 173de226b3f33ec19362e3d869879f3e2d932c4ea78fb51b4c3b1ec534638002 15 | sha1: sha256:8c9ec2763bbf9a240faff5b5829777f7d26904662bac9477fada00f559fa5ac7 16 | packages: 17 | - postgres 18 | - pgrt 19 | - name: vip 20 | version: 44e341f4fcfd1964fee13f884eed592fd5aec23f59204d0fcd5ef6b506d684cc 21 | fingerprint: 44e341f4fcfd1964fee13f884eed592fd5aec23f59204d0fcd5ef6b506d684cc 22 | sha1: sha256:23c1ba4923b6e871b1b64bdd9b8318b47631f9b578c7650372ec69a17cfa6d35 23 | packages: 24 | - haproxy 25 | - keepalived 26 | packages: 27 | - name: haproxy 28 | version: acda58c18cc39e1099e3ba5570b783b33b9d079b5e30b25ede18bcc75c825534 29 | fingerprint: acda58c18cc39e1099e3ba5570b783b33b9d079b5e30b25ede18bcc75c825534 30 | sha1: sha256:2227c9dfb0b0dd55012e20448a9a14920b05ad83e4503b111378b7c5d219bfbc 31 | dependencies: [] 32 | - name: keepalived 33 | version: 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733 34 | fingerprint: 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733 35 | sha1: sha256:fc5e7c66d1e098ee30229a32b77937cc1a47f89ebd3599653bae1e08ae074f1f 36 | dependencies: [] 37 | - name: pgrt 38 | version: bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775 39 | fingerprint: bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775 40 | sha1: sha256:131df4a02f3eb3ee589d25ee1c7746171e74e5801fd1244ded292fc5412a47e4 41 | dependencies: [] 42 | - name: 
postgres 43 | version: 230580030604fb50dc88971df00a58918e454936353c81595d99c20caae02885 44 | fingerprint: 230580030604fb50dc88971df00a58918e454936353c81595d99c20caae02885 45 | sha1: sha256:4c9fd4747c904712e6fd96ba8b29e07dcc248e316c125a31facbacbc98851729 46 | dependencies: [] 47 | license: 48 | version: 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b 49 | fingerprint: 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b 50 | sha1: sha256:00c81214e4d7e2af778b3f2d6e07f2ed1bf8b08d6ee3116b959dc9b654f38492 51 | -------------------------------------------------------------------------------- /releases/postgres/postgres-3.2.2.yml: -------------------------------------------------------------------------------- 1 | name: postgres 2 | version: 3.2.2 3 | commit_hash: 202a7fb 4 | uncommitted_changes: false 5 | jobs: 6 | - name: postgres 7 | version: 30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121 8 | fingerprint: 30216c368dd387301996cae4a65700940f40c828b46ff101e03da6558da25121 9 | sha1: sha256:1285cea7b26518a7117b2863aac26caefe5c177ff213af99c49a5daa4e1435a1 10 | packages: 11 | - postgres 12 | - name: smoke-tests 13 | version: 173de226b3f33ec19362e3d869879f3e2d932c4ea78fb51b4c3b1ec534638002 14 | fingerprint: 173de226b3f33ec19362e3d869879f3e2d932c4ea78fb51b4c3b1ec534638002 15 | sha1: sha256:8c9ec2763bbf9a240faff5b5829777f7d26904662bac9477fada00f559fa5ac7 16 | packages: 17 | - postgres 18 | - pgrt 19 | - name: vip 20 | version: 44e341f4fcfd1964fee13f884eed592fd5aec23f59204d0fcd5ef6b506d684cc 21 | fingerprint: 44e341f4fcfd1964fee13f884eed592fd5aec23f59204d0fcd5ef6b506d684cc 22 | sha1: sha256:23c1ba4923b6e871b1b64bdd9b8318b47631f9b578c7650372ec69a17cfa6d35 23 | packages: 24 | - haproxy 25 | - keepalived 26 | packages: 27 | - name: haproxy 28 | version: 3840aff7102cd05e3d84387908236db5c82bd444dc84b38dbb7d4e54ea71d808 29 | fingerprint: 3840aff7102cd05e3d84387908236db5c82bd444dc84b38dbb7d4e54ea71d808 30 | sha1: 
sha256:8c70dbcdbf12d2d49f9dfd13d2953f4caa193a2015df97992a72572cf630b7f2 31 | dependencies: [] 32 | - name: keepalived 33 | version: 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733 34 | fingerprint: 14aa689351e2083295d9f29e17330f15c4b21e775773f3e1f393395b212dd733 35 | sha1: sha256:fc5e7c66d1e098ee30229a32b77937cc1a47f89ebd3599653bae1e08ae074f1f 36 | dependencies: [] 37 | - name: pgrt 38 | version: bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775 39 | fingerprint: bc2478b5e358ecc6fc2859e5e7125f6a562eaf21c290878b56571eca63db5775 40 | sha1: sha256:131df4a02f3eb3ee589d25ee1c7746171e74e5801fd1244ded292fc5412a47e4 41 | dependencies: [] 42 | - name: postgres 43 | version: d6c6d82bf172de3708c80986055f26b855f03f70afb269af8ddf7fbf1749746e 44 | fingerprint: d6c6d82bf172de3708c80986055f26b855f03f70afb269af8ddf7fbf1749746e 45 | sha1: sha256:5450082278257aa1255e083b1a9f5c4ace19dd6ca3b87c9bafef76f3dad3c72f 46 | dependencies: [] 47 | license: 48 | version: 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b 49 | fingerprint: 16e4c2be65fa47859c0a2f40863170aba8a4b005b074e5be267b855b01907b6b 50 | sha1: sha256:00c81214e4d7e2af778b3f2d6e07f2ed1bf8b08d6ee3116b959dc9b654f38492 51 | -------------------------------------------------------------------------------- /src/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry-community/postgres-boshrelease/265157cbcc5f31a1ea50f0371a1a8addcc656b7d/src/.gitkeep -------------------------------------------------------------------------------- /src/common/utils.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function pid_is_running() { 4 | declare pid="$1" 5 | ps -p "${pid}" >/dev/null 2>&1 6 | } 7 | 8 | # pid_guard 9 | # 10 | # @param pidfile 11 | # @param name [String] an arbitrary name that might show up in STDOUT on errors 12 | # 13 | # Run this 
before attempting to start new processes that may use the same :pidfile:. 14 | # If an old process is running on the pid found in the :pidfile:, exit 1. Otherwise, 15 | # remove the stale :pidfile: if it exists. 16 | # 17 | function pid_guard() { 18 | declare pidfile="$1" name="$2" 19 | 20 | echo "------------ STARTING $(basename "$0") at $(date) --------------" | tee /dev/stderr 21 | 22 | if [ ! -f "${pidfile}" ]; then 23 | return 0 24 | fi 25 | 26 | local pid 27 | pid=$(head -1 "${pidfile}") 28 | 29 | if pid_is_running "${pid}"; then 30 | echo "${name} is already running, please stop it first" 31 | exit 1 32 | fi 33 | 34 | echo "Removing stale pidfile" 35 | rm "${pidfile}" 36 | } 37 | 38 | # wait_pid_death 39 | # 40 | # @param pid 41 | # @param timeout 42 | # 43 | # Watch a :pid: for :timeout: seconds, waiting for it to die. 44 | # If it dies before :timeout:, exit 0. If not, exit 1. 45 | # 46 | # Note that this should be run in a subshell, so that the current 47 | # shell does not exit. 48 | # 49 | function wait_pid_death() { 50 | declare pid="$1" timeout="$2" 51 | 52 | local countdown 53 | countdown=$(( timeout * 10 )) 54 | 55 | while true; do 56 | if ! pid_is_running "${pid}"; then 57 | return 0 58 | fi 59 | 60 | if [ ${countdown} -le 0 ]; then 61 | return 1 62 | fi 63 | 64 | countdown=$(( countdown - 1 )) 65 | sleep 0.1 66 | done 67 | } 68 | 69 | # kill_and_wait 70 | # 71 | # @param pidfile 72 | # @param timeout [default 25s] 73 | # 74 | # For a pid found in :pidfile:, send a `kill -15` TERM, then wait for :timeout: seconds to 75 | # see if it dies on its own. If not, send it a `kill -9`. If the process does die, 76 | # exit 0 and remove the :pidfile:. If after all of this, the process does not actually 77 | # die, exit 1. 
78 | # 79 | # Note: 80 | # Monit default timeout for start/stop is 30s 81 | # Append 'with timeout {n} seconds' to monit start/stop program configs 82 | # 83 | function kill_and_wait() { 84 | declare pidfile="$1" timeout="${2:-25}" sigkill_on_timeout="${3:-1}" 85 | 86 | if [ ! -f "${pidfile}" ]; then 87 | echo "Pidfile ${pidfile} doesn't exist" 88 | exit 0 89 | fi 90 | 91 | local pid 92 | pid=$(head -1 "${pidfile}") 93 | 94 | if [ -z "${pid}" ]; then 95 | echo "Unable to get pid from ${pidfile}" 96 | exit 1 97 | fi 98 | 99 | if ! pid_is_running "${pid}"; then 100 | echo "Process ${pid} is not running" 101 | rm -f "${pidfile}" 102 | exit 0 103 | fi 104 | 105 | echo "Killing ${pidfile}: ${pid} " 106 | kill "${pid}" 107 | 108 | if ! wait_pid_death "${pid}" "${timeout}"; then 109 | if [ "${sigkill_on_timeout}" = "1" ]; then 110 | echo "Kill timed out, using kill -9 on ${pid}" 111 | kill -9 "${pid}" 112 | sleep 0.5 113 | fi 114 | fi 115 | 116 | if pid_is_running "${pid}"; then 117 | echo "Timed Out" 118 | exit 1 119 | else 120 | echo "Stopped" 121 | rm -f "${pidfile}" 122 | fi 123 | } 124 | 125 | running_in_container() { 126 | # look for a non-root cgroup 127 | grep --quiet --invert-match ':/$' /proc/self/cgroup 128 | } 129 | -------------------------------------------------------------------------------- /templates/deployment.yml: -------------------------------------------------------------------------------- 1 | meta: 2 | environment: (( param "please set environment" )) 3 | stemcell: (( param "please set stemcell" )) 4 | 5 | name: (( grab meta.environment )) 6 | 7 | director_uuid: (( param "please set director_uuid" )) 8 | 9 | releases: (( param "please set release" )) 10 | 11 | jobs: (( param "please set jobs" )) 12 | 13 | compilation: 14 | workers: 6 15 | network: postgres1 16 | reuse_compilation_vms: true 17 | cloud_properties: (( param "please set compilation cloud properties" )) 18 | 19 | update: 20 | canaries: 1 21 | max_in_flight: 1 22 | canary_watch_time: 
30000-60000 23 | update_watch_time: 5000-60000 24 | serial: false 25 | 26 | resource_pools: 27 | - name: small_z1 28 | network: postgres1 29 | stemcell: (( grab meta.stemcell )) 30 | cloud_properties: (( param "please set resource_pool cloud properties" )) 31 | -------------------------------------------------------------------------------- /templates/infrastructure-aws-ec2.yml: -------------------------------------------------------------------------------- 1 | meta: 2 | environment: (( param "please set meta environment" )) 3 | dns_root: (( param "please set meta dns_root" )) 4 | security_groups: (( param "please set meta security_groups" )) 5 | persistent_disk: 4096 6 | 7 | stemcell: 8 | name: bosh-aws-xen-hvm-ubuntu-trusty-go_agent 9 | version: latest 10 | 11 | jobs: 12 | - name: postgres 13 | networks: 14 | - name: postgres1 15 | persistent_disk: (( grab meta.persistent_disk )) 16 | 17 | compilation: 18 | cloud_properties: 19 | instance_type: m3.medium 20 | 21 | resource_pools: 22 | - name: small_z1 23 | cloud_properties: 24 | instance_type: m3.medium 25 | 26 | networks: 27 | - name: floating 28 | type: vip 29 | cloud_properties: {} 30 | - name: postgres1 31 | type: dynamic 32 | cloud_properties: 33 | security_groups: (( grab meta.security_groups )) 34 | -------------------------------------------------------------------------------- /templates/infrastructure-warden.yml: -------------------------------------------------------------------------------- 1 | meta: 2 | environment: postgres-warden 3 | 4 | stemcell: 5 | name: bosh-warden-boshlite-ubuntu-trusty-go_agent 6 | version: latest 7 | 8 | update: 9 | canaries: 1 10 | max_in_flight: 50 11 | canary_watch_time: 1000-30000 12 | update_watch_time: 1000-30000 13 | 14 | jobs: 15 | - name: postgres 16 | networks: 17 | - name: postgres1 18 | static_ips: (( static_ips 0 1 2 3 4 5 6 7 8 9 )) 19 | - name: smoke-tests 20 | networks: 21 | - name: postgres1 22 | 23 | compilation: 24 | cloud_properties: 25 | name: random 26 
| 27 | resource_pools: 28 | - name: small_z1 29 | cloud_properties: 30 | name: random 31 | 32 | networks: 33 | - name: postgres1 34 | type: manual 35 | subnets: 36 | - range: 10.244.232.0/24 37 | name: postgres1 38 | gateway: 10.244.232.1 39 | static: 40 | - 10.244.232.2 - 10.244.232.60 41 | -------------------------------------------------------------------------------- /templates/jobs.yml: -------------------------------------------------------------------------------- 1 | meta: 2 | environment: ~ 3 | 4 | update: 5 | canaries: 1 6 | max_in_flight: 50 7 | canary_watch_time: 1000-30000 8 | update_watch_time: 1000-30000 9 | serial: false 10 | 11 | jobs: 12 | - name: postgres 13 | templates: 14 | - { release: postgres, name: postgres } 15 | - { release: postgres, name: pgpool } 16 | instances: 3 17 | persistent_disk: 0 18 | resource_pool: small_z1 19 | networks: (( param "please set postgres networks" )) 20 | 21 | properties: 22 | postgres: 23 | config: 24 | port: 6432 25 | replication: 26 | enabled: true 27 | hba: 28 | - host all all 0.0.0.0/0 md5 29 | - host all all ::/0 md5 30 | pgpool: 31 | debug: true 32 | databases: 33 | - name: animals 34 | users: 35 | - porcupine 36 | - hedgehog 37 | extensions: 38 | - citext 39 | - pgcypto 40 | users: 41 | - username: porcupine 42 | password: quill 43 | - username: hedgehog 44 | password: frank 45 | admin: true 46 | - username: smoke-tests 47 | password: if-ya-got-em 48 | backend: 49 | port: 6432 50 | config: 51 | port: 5432 52 | enable_pool_hba: on 53 | hba: (( grab jobs.postgres.properties.postgres.hba )) 54 | pcp: 55 | system_password: secret 56 | 57 | - name: smoke-tests 58 | lifecycle: errand 59 | templates: 60 | - { release: postgres, name: smoke-tests } 61 | instances: 1 62 | persistent_disk: 0 63 | resource_pool: small_z1 64 | networks: (( param "please set smoke-tests networks" )) 65 | 66 | properties: 67 | postgres: 68 | smoke-tests: 69 | target: 70 | port: 5432 71 | username: smoke-tests 72 | password: 
if-ya-got-em 73 | backend: 74 | port: 6432 75 | 76 | 77 | 78 | networks: (( param "please set networks" )) 79 | 80 | properties: {} 81 | -------------------------------------------------------------------------------- /templates/make_manifest: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | template_prefix="postgres" 6 | STEMCELL_OS=${STEMCELL_OS:-ubuntu} 7 | 8 | infrastructure=$1 9 | 10 | fail() { 11 | echo >&2 $* 12 | } 13 | 14 | if [[ "$infrastructure" != "aws-ec2" && "$infrastructure" != "warden" ]] ; then 15 | fail "usage: ./make_manifest " 16 | exit 1 17 | fi 18 | 19 | case "${infrastructure}/${STEMCELL_OS}" in 20 | (warden/*) STEMCELL_URL="https://bosh.io/d/stemcells/bosh-warden-boshlite-ubuntu-trusty-go_agent" ;; 21 | (aws-ec2/ubuntu) STEMCELL_URL="https://bosh.io/d/stemcells/bosh-aws-xen-ubuntu-trusty-go_agent" ;; 22 | (aws-ec2/centos) STEMCELL_URL="https://bosh.io/d/stemcells/bosh-aws-xen-centos-7-go_agent" ;; 23 | (*) 24 | fail "Invalid infrastructure or OS specified." 25 | exit 1 26 | ;; 27 | esac 28 | 29 | shift 30 | 31 | BOSH_STATUS=$(bosh status) 32 | DIRECTOR_UUID=$(echo "$BOSH_STATUS" | grep UUID | awk '{print $2}') 33 | DIRECTOR_CPI=$(echo "$BOSH_STATUS" | grep CPI | awk '{print $2}' | sed -e 's/_cpi//') 34 | DIRECTOR_NAME=$(echo "$BOSH_STATUS" | grep Name | awk '{print $2}') 35 | NAME=$template_prefix-$infrastructure 36 | 37 | if [[ $DIRECTOR_NAME = "warden" && ${infrastructure} != "warden" ]]; then 38 | fail "Not targeting bosh-lite with warden CPI. Please make sure you have run 'bosh target' and are targeting a BOSH lite before running this script." 39 | exit 1 40 | fi 41 | 42 | if [[ $infrastructure = "aws-ec2" && ${DIRECTOR_CPI} != "aws" ]]; then 43 | fail "Not targeting an AWS BOSH. Please make sure you have run 'bosh target' and are targeting an AWS BOSH before running this script." 
44 | exit 1 45 | fi 46 | 47 | function latest_uploaded_stemcell { 48 | echo $(bosh stemcells | grep bosh | grep $STEMCELL_OS | awk -F'|' '{ print $2, $3 }' | sort -nr -k2 | head -n1 | awk '{ print $1 }') 49 | } 50 | 51 | STEMCELL=${STEMCELL:-$(latest_uploaded_stemcell)} 52 | if [[ -z ${STEMCELL} ]]; then 53 | echo 54 | echo "Uploading latest $DIRECTOR_CPI/$STEMCELL_OS stemcell..." 55 | echo " (from ${STEMCELL_URL})" 56 | bosh upload stemcell $STEMCELL_URL 57 | fi 58 | STEMCELL=${STEMCELL:-$(latest_uploaded_stemcell)} 59 | 60 | templates=$(dirname $0) 61 | release=$templates/.. 62 | tmpdir=$release/tmp 63 | 64 | mkdir -p $tmpdir 65 | cp $templates/stub.yml $tmpdir/stub-with-uuid.yml 66 | echo $DIRECTOR_NAME $DIRECTOR_CPI $DIRECTOR_UUID $STEMCELL 67 | perl -pi -e "s/PLACEHOLDER-DIRECTOR-UUID/$DIRECTOR_UUID/g" $tmpdir/stub-with-uuid.yml 68 | perl -pi -e "s/NAME/$NAME/g" $tmpdir/stub-with-uuid.yml 69 | perl -pi -e "s/STEMCELL/$STEMCELL/g" $tmpdir/stub-with-uuid.yml 70 | 71 | if ! [ -x "$(command -v spruce)" ]; then 72 | echo 'spruce is not installed. Please download at https://github.com/geofffranks/spruce/releases' >&2 73 | fi 74 | 75 | spruce merge --prune meta \ 76 | $templates/deployment.yml \ 77 | $templates/jobs.yml \ 78 | $templates/infrastructure-${infrastructure}.yml \ 79 | $tmpdir/stub-with-uuid.yml \ 80 | $* > $tmpdir/$NAME-manifest.yml 81 | 82 | bosh deployment $tmpdir/$NAME-manifest.yml 83 | bosh status 84 | -------------------------------------------------------------------------------- /templates/stub.yml: -------------------------------------------------------------------------------- 1 | --- 2 | meta: 3 | environment: NAME 4 | dns_root: microbosh 5 | security_groups: 6 | - postgres 7 | stemcell: 8 | name: STEMCELL 9 | 10 | director_uuid: PLACEHOLDER-DIRECTOR-UUID 11 | 12 | releases: 13 | - name: postgres 14 | version: latest 15 | --------------------------------------------------------------------------------