├── .clang-format ├── .github └── workflows │ └── main.yml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── docs ├── api.md ├── assets │ └── favicon.ico ├── contributing.md ├── index.md ├── installation.md └── requirements_docs.txt ├── mkdocs.yml ├── nix ├── bench.sql ├── nginx │ ├── conf │ │ ├── custom.conf │ │ └── nginx.conf │ └── logs │ │ └── error.log ├── nginxCustom.nix ├── nixops.nix ├── nixopsScripts.nix └── xpg.nix ├── pg_net.control.in ├── shell.nix ├── sql ├── pg_net--0.1--0.2.sql ├── pg_net--0.10.0--0.11.0.sql ├── pg_net--0.11.0--0.12.0.sql ├── pg_net--0.12.0--0.13.0.sql ├── pg_net--0.13.0--0.14.0.sql ├── pg_net--0.14.0--0.15.0.sql ├── pg_net--0.15.0--new.sql ├── pg_net--0.2--0.3.sql ├── pg_net--0.3--0.4.sql ├── pg_net--0.4--0.5.sql ├── pg_net--0.5--0.5.1.sql ├── pg_net--0.5.1--0.6.sql ├── pg_net--0.6--0.7.sql ├── pg_net--0.7--0.7.1.sql ├── pg_net--0.7.1--0.7.3.sql ├── pg_net--0.7.3--0.8.0.sql ├── pg_net--0.8.0--0.9.3.sql ├── pg_net--0.9.3--0.10.0.sql └── pg_net.sql ├── src ├── core.c ├── core.h ├── curl_prelude.h ├── errors.c ├── errors.h ├── event.c ├── event.h ├── pg_prelude.h ├── util.c ├── util.h └── worker.c └── test ├── conftest.py ├── init.conf ├── init.sql ├── test_engine.py ├── test_http_delete.py ├── test_http_errors.py ├── test_http_get_collect.py ├── test_http_headers.py ├── test_http_malformed_headers.py ├── test_http_params.py ├── test_http_post_collect.py ├── test_http_requests_deleted_after_ttl.py ├── test_http_timeout.py ├── test_privileges.py ├── test_user_db.py └── test_worker_error.py /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: LLVM 2 | IndentWidth: 4 3 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | 7 | test: 8 | runs-on: ubuntu-latest 9 | strategy: 
10 | matrix: 11 | pg-version: ['12', '13', '14', '15', '16', '17'] 12 | 13 | steps: 14 | - uses: actions/checkout@v4 15 | 16 | - name: Install Nix 17 | uses: cachix/install-nix-action@v30 18 | 19 | - name: Use Cachix Cache 20 | uses: cachix/cachix-action@v10 21 | with: 22 | name: nxpg 23 | authToken: ${{ secrets.CACHIX_AUTH_TOKEN }} 24 | 25 | - name: Build 26 | run: nix-shell --run "xpg -v ${{ matrix.pg-version }} build" 27 | 28 | - name: Run tests 29 | run: nix-shell --run "xpg -v ${{ matrix.pg-version }} test" 30 | 31 | # solve https://github.com/supabase/pg_net/pull/178#issuecomment-2722690110 32 | #test-on-macos: 33 | #runs-on: macos-15 34 | 35 | #strategy: 36 | #matrix: 37 | #pg-version: ['15'] 38 | 39 | #steps: 40 | #- uses: actions/checkout@v4 41 | 42 | #- name: Install Nix 43 | #uses: cachix/install-nix-action@v30 44 | #with: 45 | #nix_path: nixpkgs=channel:nixos-unstable 46 | 47 | #- name: Use Cachix Cache 48 | #uses: cachix/cachix-action@v10 49 | #with: 50 | #name: xpg 51 | #authToken: ${{ secrets.CACHIX_AUTH_TOKEN }} 52 | 53 | #- name: Build 54 | #run: nix-shell --run "xpg -v ${{ matrix.pg-version }} build" 55 | 56 | #- name: Run tests 57 | #run: nix-shell --run "nxpg -v ${{ matrix.pg-version }} test" 58 | 59 | coverage: 60 | 61 | runs-on: ubuntu-latest 62 | 63 | strategy: 64 | matrix: 65 | pg-version: ['17'] 66 | 67 | steps: 68 | - uses: actions/checkout@v4 69 | 70 | - name: Install Nix 71 | uses: cachix/install-nix-action@v30 72 | 73 | - name: Use Cachix Cache 74 | uses: cachix/cachix-action@v10 75 | with: 76 | name: nxpg 77 | authToken: ${{ secrets.CACHIX_AUTH_TOKEN }} 78 | 79 | - name: Coverage 80 | run: nix-shell --run "xpg -v ${{ matrix.pg-version }} coverage" 81 | 82 | - name: Send coverage to Coveralls 83 | uses: coverallsapp/github-action@v2.3.6 84 | with: 85 | github-token: ${{ secrets.GITHUB_TOKEN }} 86 | files: ./build-${{ matrix.pg-version }}/coverage.info 87 | -------------------------------------------------------------------------------- 
/.gitignore: -------------------------------------------------------------------------------- 1 | results/ 2 | .python-version 3 | venv/ 4 | site/ 5 | regression.* 6 | __pycache__/ 7 | .deployment.nixops* 8 | valgrindlog 9 | nix/nginx/logs/nginx.pid 10 | .history 11 | *.o 12 | *.bc 13 | *.control 14 | *.so 15 | *.gcno 16 | *.gcda 17 | coverage.info 18 | coverage_html 19 | nginx.pid 20 | tags 21 | net_worker.pid 22 | sql/pg_net--*.sql 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2021 Supabase 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 
182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SRC_DIR = src 2 | 3 | # the `-Wno`s quiet C90 warnings 4 | PG_CFLAGS = -std=c11 -Wextra -Wall -Werror \ 5 | -Wno-declaration-after-statement \ 6 | -Wno-vla \ 7 | -Wno-long-long 8 | ifeq ($(COVERAGE), 1) 9 | PG_CFLAGS += --coverage 10 | endif 11 | 12 | EXTENSION = pg_net 13 | EXTVERSION = 0.15.0 14 | 15 | DATA = $(wildcard sql/*--*.sql) 16 | 17 | EXTRA_CLEAN = sql/$(EXTENSION)--$(EXTVERSION).sql $(EXTENSION).control 18 | 19 | TESTS = $(wildcard test/sql/*.sql) 20 | REGRESS = $(patsubst test/sql/%.sql,%,$(TESTS)) 21 | REGRESS_OPTS = --use-existing --inputdir=test 22 | 23 | MODULE_big = $(EXTENSION) 24 | SRC = $(wildcard $(SRC_DIR)/*.c) 25 | 26 | ifdef BUILD_DIR 27 | OBJS = $(patsubst $(SRC_DIR)/%.c, $(BUILD_DIR)/%.o, $(SRC)) 28 | else 29 | OBJS = $(patsubst $(SRC_DIR)/%.c, src/%.o, $(SRC)) # if no BUILD_DIR, just build on src so standard PGXS `make` works 30 | endif 31 | 32 | PG_CONFIG = pg_config 33 | SHLIB_LINK = -lcurl 34 | 35 | # Find from system headers 36 | PG_CPPFLAGS := $(CPPFLAGS) -DEXTVERSION=\"$(EXTVERSION)\" 37 | 38 | all: sql/$(EXTENSION)--$(EXTVERSION).sql $(EXTENSION).control 39 | 40 | build: $(BUILD_DIR)/$(EXTENSION).so sql/$(EXTENSION)--$(EXTVERSION).sql $(EXTENSION).control 41 | 42 | $(BUILD_DIR)/.gitignore: sql/$(EXTENSION)--$(EXTVERSION).sql $(EXTENSION).control 43 | mkdir -p $(BUILD_DIR) 44 | cp $(EXTENSION).control $(BUILD_DIR) 45 
| cp sql/$(EXTENSION)--$(EXTVERSION).sql $(BUILD_DIR) 46 | echo "*" > $(BUILD_DIR)/.gitignore 47 | 48 | $(BUILD_DIR)/%.o: $(SRC_DIR)/%.c $(BUILD_DIR)/.gitignore 49 | $(CC) $(CPPFLAGS) $(CFLAGS) -c $< -o $@ 50 | 51 | $(BUILD_DIR)/$(EXTENSION).so: $(EXTENSION).so 52 | mv $? $@ 53 | 54 | sql/$(EXTENSION)--$(EXTVERSION).sql: sql/$(EXTENSION).sql 55 | cp $< $@ 56 | 57 | $(EXTENSION).control: 58 | sed "s/@EXTVERSION@/$(EXTVERSION)/g" $(EXTENSION).control.in > $@ 59 | 60 | PGXS := $(shell $(PG_CONFIG) --pgxs) 61 | include $(PGXS) 62 | 63 | .PHONY: test 64 | test: 65 | net-with-nginx python -m pytest -vv 66 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PG_NET 2 | *A PostgreSQL extension that enables asynchronous (non-blocking) HTTP/HTTPS requests with SQL*. 3 | 4 | Requires libcurl >= 7.83. Compatible with PostgreSQL >= 12. 5 | 6 | ![PostgreSQL version](https://img.shields.io/badge/postgresql-12+-blue.svg) 7 | [![License](https://img.shields.io/pypi/l/markdown-subtemplate.svg)](https://github.com/supabase/pg_net/blob/master/LICENSE) 8 | [![Coverage Status](https://coveralls.io/repos/github/supabase/pg_net/badge.svg)](https://coveralls.io/github/supabase/pg_net) 9 | [![Tests](https://github.com/supabase/pg_net/actions/workflows/main.yml/badge.svg)](https://github.com/supabase/pg_net/actions) 10 | 11 | **Documentation**: [https://supabase.github.io/pg_net](https://supabase.github.io/pg_net) 12 | 13 | --- 14 | 15 | # Contents 16 | - [Introduction](#introduction) 17 | - [Technical Explanation](#technical-explanation) 18 | - [Installation](#installation) 19 | - [Configuration](#extension-configuration) 20 | - [Requests API](#requests-api) 21 | - Monitoring requests 22 | - GET requests 23 | - POST requests 24 | - DELETE requests 25 | - [Practical Examples](#practical-examples) 26 | - Syncing data with an external data source using triggers 27 
- Calling a serverless function every minute with PG_CRON 28 | - Retrying failed requests 29 | - [Contributing](#contributing) 30 | 31 | --- 32 | 33 | # Introduction 34 | 35 | The PG_NET extension enables PostgreSQL to make asynchronous HTTP/HTTPS requests in SQL. It eliminates the need for servers to continuously poll for database changes and instead allows the database to proactively notify external resources about significant events. It seamlessly integrates with triggers, cron jobs (e.g., [PG_CRON](https://github.com/citusdata/pg_cron)), and procedures, unlocking numerous possibilities. Notably, PG_NET powers Supabase's Webhook functionality, highlighting its robustness and reliability. 36 | 37 | Common use cases for the PG_NET extension include: 38 | 39 | - Calling external APIs 40 | - Syncing data with outside resources 41 | - Calling a serverless function when an event, such as an insert, occurred 42 | 43 | However, it is important to note that the extension has a few limitations. Currently, it only supports three types of asynchronous requests: 44 | 45 | - async http GET requests 46 | - async http POST requests with a *JSON* payload 47 | - async http DELETE requests 48 | 49 | Ultimately, though, PG_NET offers developers more flexibility in how they monitor and connect their database with external resources. 50 | 51 | --- 52 | 53 | # Technical Explanation 54 | 55 | The extension introduces a new `net` schema, which contains two unlogged tables, a type of table in PostgreSQL that offers performance improvements at the expense of durability. You can read more about unlogged tables [here](https://pgpedia.info/u/unlogged-table.html). The two tables are: 56 | 57 | 1. **`http_request_queue`**: This table serves as a queue for requests waiting to be executed. Upon successful execution of a request, the corresponding data is removed from the queue. 
58 | 59 | The SQL statement to create this table is: 60 | 61 | ```sql 62 | CREATE UNLOGGED TABLE 63 | net.http_request_queue ( 64 | id bigint NOT NULL DEFAULT nextval('net.http_request_queue_id_seq'::regclass), 65 | method text NOT NULL, 66 | url text NOT NULL, 67 | headers jsonb NOT NULL, 68 | body bytea NULL, 69 | timeout_milliseconds integer NOT NULL 70 | ) 71 | ``` 72 | 73 | 2. **`_http_response`**: This table holds the responses of each executed request. 74 | 75 | The SQL statement to create this table is: 76 | 77 | ```sql 78 | CREATE UNLOGGED TABLE 79 | net._http_response ( 80 | id bigint NULL, 81 | status_code integer NULL, 82 | content_type text NULL, 83 | headers jsonb NULL, 84 | content text NULL, 85 | timed_out boolean NULL, 86 | error_msg text NULL, 87 | created timestamp with time zone NOT NULL DEFAULT now() 88 | ) 89 | ``` 90 | 91 | When any of the three request functions (`http_get`, `http_post`, `http_delete`) are invoked, they create an entry in the `net.http_request_queue` table. 92 | 93 | The extension employs C's [libCurl](https://curl.se/libcurl/c/) library within a PostgreSQL [background worker](https://www.postgresql.org/docs/current/bgworker.html) to manage HTTP requests. This background worker regularly checks the `http_request_queue` table and executes the requests it finds there. 94 | 95 | Once a response is received, it gets stored in the `_http_response` table. By monitoring this table, you can keep track of response statuses and messages. 96 | 97 | --- 98 | 99 | # Installation 100 | 101 | ## Enabling the Extension with Supabase 102 | 103 | You can activate the `pg_net` extension via Supabase's dashboard by following these steps: 104 | 105 | 1. Navigate to the 'Database' page. 106 | 2. Select 'Extensions' from the sidebar. 107 | 3. Search for "pg_net" and enable the extension. 
108 | 109 | ## Local Setup 110 | 111 | ### Configuring Your Device/Server 112 | 113 | Clone this repo and run 114 | 115 | ```bash 116 | make && make install 117 | ``` 118 | 119 | To make the extension available to the database add on `postgresql.conf`: 120 | 121 | ``` 122 | shared_preload_libraries = 'pg_net' 123 | ``` 124 | 125 | By default, pg_net is available on the `postgres` database. To use pg_net on a different database, you can add the following on `postgresql.conf`: 126 | 127 | ``` 128 | pg_net.database_name = ''; 129 | ``` 130 | 131 | Using pg_net on multiple databases in a cluster is not supported. 132 | 133 | ### Installing in PostgreSQL 134 | 135 | To activate the extension in PostgreSQL, run the create extension command. The extension creates its own schema named net to avoid naming conflicts. 136 | 137 | ```psql 138 | create extension pg_net; 139 | ``` 140 | 141 | --- 142 | 143 | # Extension Configuration 144 | 145 | The extension creates 4 configurable variables: 146 | 147 | 1. **pg_net.batch_size** _(default: 200)_: An integer that limits the max number of rows that the extension will process from _`net.http_request_queue`_ during each read 148 | 2. **pg_net.ttl** _(default: 6 hours)_: An interval that defines the max time a row in the _`net._http_response`_ will live before being deleted 149 | 3. **pg_net.database_name** _(default: 'postgres')_: A string that defines which database the extension is applied to 150 | 4. **pg_net.username** _(default: NULL)_: A string that defines which user the background worker will connect with. If not set (`NULL`), it will assume the bootstrap user. 
151 | 152 | All these variables can be viewed with the following commands: 153 | ```sql 154 | show pg_net.batch_size; 155 | show pg_net.ttl; 156 | show pg_net.database_name; 157 | show pg_net.username; 158 | ``` 159 | 160 | You can change these by editing the `postgresql.conf` file (find it with `SHOW config_file;`) or with `ALTER SYSTEM`: 161 | 162 | ``` 163 | alter system set pg_net.ttl to '1 hour' 164 | alter system set pg_net.batch_size to 500; 165 | ``` 166 | 167 | Then, reload the settings and restart the `pg_net` background worker with: 168 | 169 | ``` 170 | select net.worker_restart(); 171 | ``` 172 | 173 | Note that doing `ALTER SYSTEM` requires SUPERUSER but on PostgreSQL >= 15, you can do: 174 | 175 | ``` 176 | grant alter system on parameter pg_net.ttl to ; 177 | grant alter system on parameter pg_net.batch_size to ; 178 | ``` 179 | 180 | To allow regular users to update `pg_net` settings. 181 | 182 | # Requests API 183 | 184 | ## GET requests 185 | 186 | ### net.http_get function signature 187 | 188 | ```sql 189 | net.http_get( 190 | -- url for the request 191 | url text, 192 | -- key/value pairs to be url encoded and appended to the `url` 193 | params jsonb default '{}'::jsonb, 194 | -- key/values to be included in request headers 195 | headers jsonb default '{}'::jsonb, 196 | -- the maximum number of milliseconds the request may take before being cancelled 197 | timeout_milliseconds int default 1000 198 | ) 199 | -- request_id reference 200 | returns bigint 201 | 202 | strict 203 | volatile 204 | parallel safe 205 | language plpgsql 206 | ``` 207 | 208 | ### Examples: 209 | The following examples use the [Postman Echo API](https://learning.postman.com/docs/developer/echo-api/). 
210 | 211 | #### Calling an API 212 | 213 | ```sql 214 | SELECT net.http_get ( 215 | 'https://postman-echo.com/get?foo1=bar1&foo2=bar2' 216 | ) AS request_id; 217 | ``` 218 | 219 | > NOTE: You can view the response with the following query: 220 | > 221 | > ```sql 222 | > SELECT * 223 | > FROM net._http_response; 224 | > ``` 225 | 226 | #### Calling an API with URL encoded params 227 | 228 | ```sql 229 | SELECT net.http_get( 230 | 'https://postman-echo.com/get', 231 | -- Equivalent to calling https://postman-echo.com/get?foo1=bar1&foo2=bar2&encoded=%21 232 | -- The "!" is url-encoded as %21 233 | '{"foo1": "bar1", "foo2": "bar2", "encoded": "!"}'::JSONB 234 | ) AS request_id; 235 | ``` 236 | 237 | #### Calling an API with an API-KEY 238 | 239 | ```sql 240 | SELECT net.http_get( 241 | 'https://postman-echo.com/get?foo1=bar1&foo2=bar2', 242 | headers := '{"API-KEY-HEADER": ""}'::JSONB 243 | ) AS request_id; 244 | ``` 245 | 246 | ## POST requests 247 | ### net.http_post function signature 248 | 249 | ```sql 250 | net.http_post( 251 | -- url for the request 252 | url text, 253 | -- body of the POST request 254 | body jsonb default '{}'::jsonb, 255 | -- key/value pairs to be url encoded and appended to the `url` 256 | params jsonb default '{}'::jsonb, 257 | -- key/values to be included in request headers 258 | headers jsonb default '{"Content-Type": "application/json"}'::jsonb, 259 | -- the maximum number of milliseconds the request may take before being cancelled 260 | timeout_milliseconds int default 1000 261 | ) 262 | -- request_id reference 263 | returns bigint 264 | 265 | volatile 266 | parallel safe 267 | language plpgsql 268 | ``` 269 | ### Examples: 270 | The following examples post to the [Postman Echo API](https://learning.postman.com/docs/developer/echo-api/). 
271 | 272 | #### Sending data to an API 273 | 274 | ```sql 275 | SELECT net.http_post( 276 | 'https://postman-echo.com/post', 277 | '{"key1": "value", "key2": 5}'::JSONB, 278 | headers := '{"API-KEY-HEADER": ""}'::JSONB 279 | ) AS request_id; 280 | ``` 281 | 282 | #### Sending single table row as a payload 283 | 284 | > NOTE: If multiple rows are sent using this method, each row will be sent as a separate request. 285 | 286 | ```sql 287 | WITH selected_row AS ( 288 | SELECT 289 | * 290 | FROM target_table 291 | LIMIT 1 292 | ) 293 | SELECT 294 | net.http_post( 295 | 'https://postman-echo.com/post', 296 | to_jsonb(selected_row.*), 297 | headers := '{"API-KEY-HEADER": ""}'::JSONB 298 | ) AS request_id 299 | FROM selected_row; 300 | ``` 301 | 302 | #### Sending multiple table rows as a payload 303 | 304 | > WARNING: when sending multiple rows, be careful to limit your payload size. 305 | 306 | ```sql 307 | WITH selected_rows AS ( 308 | SELECT 309 | -- Converts all the rows into a JSONB array 310 | jsonb_agg(to_jsonb(target_table)) AS JSON_payload 311 | FROM target_table 312 | -- Generally good practice to LIMIT the max amount of rows 313 | ) 314 | SELECT 315 | net.http_post( 316 | 'https://postman-echo.com/post'::TEXT, 317 | JSON_payload, 318 | headers := '{"API-KEY-HEADER": ""}'::JSONB 319 | ) AS request_id 320 | FROM selected_rows; 321 | ``` 322 | 323 | ## DELETE requests 324 | ### net.http_delete function signature 325 | 326 | ```sql 327 | net.http_delete( 328 | -- url for the request 329 | url text, 330 | -- key/value pairs to be url encoded and appended to the `url` 331 | params jsonb default '{}'::jsonb, 332 | -- key/values to be included in request headers 333 | headers jsonb default '{}'::jsonb, 334 | -- the maximum number of milliseconds the request may take before being cancelled 335 | timeout_milliseconds int default 2000 336 | ) 337 | -- request_id reference 338 | returns bigint 339 | 340 | strict 341 | volatile 342 | parallel safe 343 | language plpgsql 344 
| security definer 345 | ``` 346 | 347 | ### Examples: 348 | The following examples use the [Dummy Rest API](https://dummy.restapiexample.com/employees). 349 | 350 | #### Sending a delete request to an API 351 | 352 | ```sql 353 | SELECT net.http_delete( 354 | 'https://dummy.restapiexample.com/api/v1/delete/2' 355 | ) AS request_id; 356 | ``` 357 | 358 | #### Sending a delete request with a row id as a query param 359 | 360 | ```sql 361 | WITH selected_id AS ( 362 | SELECT 363 | id 364 | FROM target_table 365 | LIMIT 1 -- if not limited, it will make a delete request for each returned row 366 | ) 367 | SELECT 368 | net.http_delete( 369 | 'https://dummy.restapiexample.com/api/v1/delete/'::TEXT, 370 | format('{"id": "%s"}', id)::JSONB 371 | ) AS request_id 372 | FROM selected_id; 373 | ``` 374 | 375 | #### Sending a delete request with a row id as a path param 376 | 377 | ```sql 378 | WITH selected_id AS ( 379 | SELECT 380 | id 381 | FROM target_table 382 | LIMIT 1 -- if not limited, it will make a delete request for each returned row 383 | ) 384 | SELECT 385 | net.http_delete( 386 | 'https://dummy.restapiexample.com/api/v1/delete/' || id 387 | ) AS request_id 388 | FROM selected_id; 389 | ``` 390 | 391 | --- 392 | 393 | # Practical Examples 394 | 395 | ## Syncing data with an external data source using triggers 396 | 397 | The following example comes from [Typesense's Supabase Sync guide](https://typesense.org/docs/guide/supabase-full-text-search.html#syncing-individual-deletes) 398 | 399 | ```sql 400 | -- Create the function to delete the record from Typesense 401 | CREATE OR REPLACE FUNCTION delete_record() 402 | RETURNS TRIGGER 403 | LANGUAGE plpgSQL 404 | AS $$ 405 | BEGIN 406 | PERFORM net.http_delete( 407 | url := format('/collections/products/documents/%s', OLD.id), 408 | headers := '{"X-Typesense-API-KEY": ""}' 409 | ); 410 | RETURN OLD; 411 | END $$; 412 | 413 | -- Create the trigger that calls the function when a record is deleted from the products table 414 
| CREATE TRIGGER delete_products_trigger 415 | AFTER DELETE ON public.products 416 | FOR EACH ROW 417 | EXECUTE FUNCTION delete_record(); 418 | ``` 419 | 420 | ## Calling a serverless function every minute with PG_CRON 421 | 422 | The [PG_CRON](https://github.com/citusdata/pg_cron) extension enables PostgreSQL to become its own cron server. With it you can schedule regular calls to activate serverless functions. 423 | 424 | > Useful links: 425 | > 426 | > * [Supabase PG_CRON Installation Guide](https://supabase.com/docs/guides/database/extensions/pgcron) 427 | > * [Cron Syntax Helper](https://crontab.guru/) 428 | 429 | ### Example Cron job to call serverless function 430 | 431 | ```sql 432 | SELECT cron.schedule( 433 | 'cron-job-name', 434 | '* * * * *', -- Executes every minute (cron syntax) 435 | $$ 436 | -- SQL query 437 | SELECT net.http_get( 438 | -- URL of Edge function 439 | url:='https://.functions.Supabase.co/example', 440 | headers:='{ 441 | "Content-Type": "application/json", 442 | "Authorization": "Bearer " 443 | }'::JSONB 444 | ) as request_id; 445 | $$ 446 | ); 447 | ``` 448 | 449 | ## Retrying failed requests 450 | 451 | Every request made is logged within the net._http_response table. To identify failed requests, you can execute a query on the table, filtering for requests where the status code is 500 or higher. 452 | 453 | ### Finding failed requests 454 | 455 | ```sql 456 | SELECT 457 | * 458 | FROM net._http_response 459 | WHERE status_code >= 500; 460 | ``` 461 | 462 | While the net.\_http_response table logs each request, it doesn't store all the necessary information to retry failed requests. To facilitate this, we need to create a request tracking table and a wrapper function around the PG_NET request functions. This will help us store the required details for each request. 
463 | 464 | ### Creating a Request Tracker Table 465 | 466 | ```sql 467 | CREATE TABLE request_tracker( 468 | method TEXT, 469 | url TEXT, 470 | params JSONB, 471 | body JSONB, 472 | headers JSONB, 473 | request_id BIGINT 474 | ) 475 | ``` 476 | 477 | Below is a function called request_wrapper, which wraps around the PG_NET request functions. This function records every request's details in the request_tracker table, facilitating future retries if needed. 478 | 479 | ### Creating a Request Wrapper Function 480 | 481 | ```sql 482 | CREATE OR REPLACE FUNCTION request_wrapper( 483 | method TEXT, 484 | url TEXT, 485 | params JSONB DEFAULT '{}'::JSONB, 486 | body JSONB DEFAULT '{}'::JSONB, 487 | headers JSONB DEFAULT '{}'::JSONB 488 | ) 489 | RETURNS BIGINT 490 | AS $$ 491 | DECLARE 492 | request_id BIGINT; 493 | BEGIN 494 | 495 | IF method = 'DELETE' THEN 496 | SELECT net.http_delete( 497 | url:=url, 498 | params:=params, 499 | headers:=headers 500 | ) INTO request_id; 501 | ELSIF method = 'POST' THEN 502 | SELECT net.http_post( 503 | url:=url, 504 | body:=body, 505 | params:=params, 506 | headers:=headers 507 | ) INTO request_id; 508 | ELSIF method = 'GET' THEN 509 | SELECT net.http_get( 510 | url:=url, 511 | params:=params, 512 | headers:=headers 513 | ) INTO request_id; 514 | ELSE 515 | RAISE EXCEPTION 'Method must be DELETE, POST, or GET'; 516 | END IF; 517 | 518 | INSERT INTO request_tracker (method, url, params, body, headers, request_id) 519 | VALUES (method, url, params, body, headers, request_id); 520 | 521 | RETURN request_id; 522 | END; 523 | $$ 524 | LANGUAGE plpgsql; 525 | ``` 526 | 527 | To retry a failed request recorded via the wrapper function, use the following query. This will select failed requests, retry them, and then remove the original request data from both the net.\_http_response and request_tracker tables. 
528 | 529 | ### Retrying failed requests 530 | 531 | ```sql 532 | WITH retry_request AS ( 533 | SELECT 534 | request_tracker.method, 535 | request_tracker.url, 536 | request_tracker.params, 537 | request_tracker.body, 538 | request_tracker.headers, 539 | request_tracker.request_id 540 | FROM request_tracker 541 | INNER JOIN net._http_response ON net._http_response.id = request_tracker.request_id 542 | WHERE net._http_response.status_code >= 500 543 | LIMIT 3 544 | ), 545 | retry AS ( 546 | SELECT 547 | request_wrapper(retry_request.method, retry_request.url, retry_request.params, retry_request.body, retry_request.headers) 548 | FROM retry_request 549 | ), 550 | delete_http_response AS ( 551 | DELETE FROM net._http_response 552 | WHERE id IN (SELECT request_id FROM retry_request) 553 | RETURNING * 554 | ) 555 | DELETE FROM request_tracker 556 | WHERE request_id IN (SELECT request_id FROM retry_request) 557 | RETURNING *; 558 | ``` 559 | 560 | The above function can be called using cron jobs or manually to retry failed requests. It may also be beneficial to clean the request_tracker table in the process. 561 | 562 | # Contributing 563 | 564 | Checkout the [Contributing](docs/contributing.md) page to learn more about adding to the project. 565 | -------------------------------------------------------------------------------- /docs/api.md: -------------------------------------------------------------------------------- 1 | ## HTTP 2 | 3 | ### net.http_get 4 | 5 | ##### description 6 | Create an HTTP GET request returning the request's id 7 | 8 | !!! note 9 | HTTP requests are not started until the transaction is committed 10 | 11 | !!! 
note 12 | this is a Postgres SECURITY DEFINER function 13 | 14 | ##### signature 15 | ```sql 16 | net.http_get( 17 | -- url for the request 18 | url text, 19 | -- key/value pairs to be url encoded and appended to the `url` 20 | params jsonb default '{}'::jsonb, 21 | -- key/values to be included in request headers 22 | headers jsonb default '{}'::jsonb, 23 | -- the maximum number of milliseconds the request may take before being cancelled 24 | timeout_milliseconds int default 1000 25 | ) 26 | -- request_id reference 27 | returns bigint 28 | 29 | strict 30 | volatile 31 | parallel safe 32 | language plpgsql 33 | ``` 34 | 35 | ##### usage 36 | ```sql 37 | select net.http_get('https://news.ycombinator.com') as request_id; 38 | request_id 39 | ---------- 40 | 1 41 | (1 row) 42 | ``` 43 | 44 | ### net.http_post 45 | 46 | ##### description 47 | Create an HTTP POST request with a JSON body, returning the request's id 48 | 49 | !!! note 50 | HTTP requests are not started until the transaction is committed 51 | 52 | !!! note 53 | the body's character set encoding matches the database's `server_encoding` setting 54 | 55 | !!! 
note 56 | this is a Postgres SECURITY DEFINER function 57 | 58 | ##### signature 59 | ```sql 60 | net.http_post( 61 | -- url for the request 62 | url text, 63 | -- body of the POST request 64 | body jsonb default '{}'::jsonb, 65 | -- key/value pairs to be url encoded and appended to the `url` 66 | params jsonb default '{}'::jsonb, 67 | -- key/values to be included in request headers 68 | headers jsonb default '{"Content-Type": "application/json"}'::jsonb, 69 | -- the maximum number of milliseconds the request may take before being cancelled 70 | timeout_milliseconds int default 1000 71 | ) 72 | -- request_id reference 73 | returns bigint 74 | 75 | volatile 76 | parallel safe 77 | language plpgsql 78 | ``` 79 | 80 | ##### usage 81 | ```sql 82 | select 83 | net.http_post( 84 | url:='https://httpbin.org/post', 85 | body:='{"hello": "world"}'::jsonb 86 | ) as request_id; 87 | request_id 88 | ---------- 89 | 1 90 | (1 row) 91 | ``` 92 | 93 | ### net.http_delete 94 | 95 | ##### description 96 | Create an HTTP DELETE request, returning the request's id 97 | 98 | !!! note 99 | HTTP requests are not started until the transaction is committed 100 | 101 | !!! note 102 | the body's character set encoding matches the database's `server_encoding` setting 103 | 104 | !!! 
note 105 | this is a Postgres SECURITY DEFINER function 106 | 107 | ##### signature 108 | 109 | ```sql 110 | net.http_delete( 111 | -- url for the request 112 | url text, 113 | -- key/value pairs to be url encoded and appended to the `url` 114 | params jsonb default '{}'::jsonb, 115 | -- key/values to be included in request headers 116 | headers jsonb default '{}'::jsonb, 117 | -- the maximum number of milliseconds the request may take before being cancelled 118 | timeout_milliseconds int default 2000 119 | ) 120 | -- request_id reference 121 | returns bigint 122 | 123 | strict 124 | volatile 125 | parallel safe 126 | language plpgsql 127 | security definer 128 | ``` 129 | 130 | ##### usage 131 | ```sql 132 | select 133 | net.http_delete( 134 | url:='https://dummy.restapiexample.com/api/v1/delete/2' 135 | ) as request_id; 136 | request_id 137 | ---------- 138 | 1 139 | (1 row) 140 | ``` 141 | -------------------------------------------------------------------------------- /docs/assets/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/supabase/pg_net/0207e647da951ab5b2977915ec7887de1bdaae38/docs/assets/favicon.ico -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | pg_net is OSS. PR and issues are welcome. 2 | 3 | 4 | ## Development 5 | 6 | [Nix](https://nixos.org/download.html) is required to set up the environment and [Cachix](https://docs.cachix.org/installation) for cache usage. 
7 | 8 | 9 | ### Testing 10 | 11 | For testing locally, execute: 12 | 13 | ```bash 14 | $ cachix use nxpg 15 | 16 | # might take a while in downloading all the dependencies 17 | $ nix-shell 18 | 19 | # test on pg 12 20 | $ xpg -v 12 test 21 | 22 | # test on pg 13 23 | $ xpg -v 13 test 24 | ``` 25 | 26 | ### Debugging 27 | 28 | You can turn on logging level to see curl traces with 29 | 30 | ``` 31 | $ export LOG_MIN_MESSAGES=debug2 32 | ``` 33 | 34 | ```sql 35 | select net.http_get('http://localhost:3000/projects'); 36 | 37 | -- * Trying ::1:3000... 38 | -- * connect to ::1 port 3000 failed: Connection refused 39 | -- * Trying 127.0.0.1:3000... 40 | -- * Connected to localhost (127.0.0.1) port 3000 (#0) 41 | -- > GET /projects HTTP/1.1 42 | -- Host: localhost:3000 43 | -- Accept: */* 44 | -- User-Agent: pg_net/0.2 45 | -- 46 | -- * Mark bundle as not supporting multiuse 47 | -- < HTTP/1.1 200 OK 48 | -- < Transfer-Encoding: chunked 49 | -- < Date: Fri, 27 Aug 2021 00:14:37 GMT 50 | -- < Server: postgrest/7.0.0 (UNKNOWN) 51 | -- < Content-Type: application/json; charset=utf-8 52 | -- < Content-Range: 0-58/* 53 | -- < Content-Location: /projects 54 | -- < 55 | -- * Connection #0 to host localhost left intact 56 | ``` 57 | 58 | ### GDB 59 | 60 | To debug the background worker, there's a script that wraps GDB. It automatically obtains the pid of the latest started worker: 61 | 62 | ``` 63 | $ nix-shell 64 | $ sudo net-with-gdb 65 | ``` 66 | 67 | ## Load Testing 68 | 69 | These are scripts that wrap NixOps to deploy an AWS cloud setup. You must have `default` setup in `.aws/credentials`. 70 | 71 | ```bash 72 | net-cloud-deploy 73 | ``` 74 | 75 | Then you can connect on the client instance and do requests to the server instance through `pg_net`. 
76 | 77 | ```bash 78 | net-cloud-ssh 79 | 80 | psql -U postgres 81 | 82 | select net.http_get('http://server'); 83 | # this the default welcome page of nginx on the server instance 84 | # "server" is already included to /etc/hosts, so `curl http://server` will give the same result 85 | 86 | # do some load testing 87 | select net.http_get('http://server') from generate_series(1,1000); 88 | # run `top` on another shell(another `nixops ssh -d pg_net client`) to check the worker behavior 89 | ``` 90 | 91 | To destroy the cloud setup: 92 | 93 | ```bash 94 | net-cloud-destroy 95 | ``` 96 | 97 | ## Documentation 98 | 99 | All public API must be documented. Building documentation requires python 3.6+ 100 | 101 | 102 | ### Install Dependencies 103 | 104 | Install mkdocs, themes, and extensions. 105 | 106 | ```shell 107 | pip install -r docs/requirements_docs.txt 108 | ``` 109 | 110 | ### Serving 111 | 112 | To serve the documentation locally run 113 | 114 | ```shell 115 | mkdocs serve 116 | ``` 117 | 118 | and visit the docs at [http://127.0.0.1:8000/pg_net/](http://127.0.0.1:8000/pg_net/) 119 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # pg_net 2 | 3 |

4 | 5 | PostgreSQL version 6 | License 7 | Tests 8 | 9 |

10 | 11 | --- 12 | 13 | **Documentation**: https://supabase.github.io/pg_net 14 | 15 | **Source Code**: https://github.com/supabase/pg_net 16 | 17 | --- 18 | 19 | pg_net is a PostgreSQL extension exposing a SQL interface for async networking with a focus on scalability and UX. 20 | 21 | Features: 22 | 23 | - async http GET requests 24 | - async http POST requests with a JSON payload 25 | -------------------------------------------------------------------------------- /docs/installation.md: -------------------------------------------------------------------------------- 1 | Tested with PostgreSQL 12 and 13. 2 | 3 | ## Setup 4 | 5 | ### Server 6 | Clone this repo and run 7 | 8 | ```bash 9 | make && make install 10 | ``` 11 | 12 | To make the extension available to the database add on `postgresql.conf`: 13 | 14 | ``` 15 | shared_preload_libraries = 'pg_net' 16 | ``` 17 | 18 | 19 | ### Database 20 | To enable the extension in PostgreSQL we must execute a `create extension` statement. The extension creates its own schema/namespace named `net` to avoid naming conflicts. 
21 | 22 | ```psql 23 | create extension pg_net; 24 | ``` 25 | -------------------------------------------------------------------------------- /docs/requirements_docs.txt: -------------------------------------------------------------------------------- 1 | mkdocs 2 | mkdocs-material 3 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: pg_net 2 | site_url: https://supabase.github.io/pg_net 3 | site_description: A PostgreSQL extension for async networking 4 | 5 | repo_name: supabase/pg_net 6 | repo_url: https://github.com/supabase/pg_net 7 | 8 | nav: 9 | - Welcome: 'index.md' 10 | - Installation: 'installation.md' 11 | - API Reference: 'api.md' 12 | - Contributing: 'contributing.md' 13 | 14 | theme: 15 | name: 'material' 16 | favicon: 'assets/favicon.ico' 17 | logo: 'assets/favicon.ico' 18 | homepage: https://supabase.github.io/pg_net 19 | palette: 20 | primary: black 21 | accent: light green 22 | 23 | markdown_extensions: 24 | - pymdownx.highlight: 25 | linenums: true 26 | - pymdownx.superfences 27 | - pymdownx.snippets 28 | - admonition 29 | -------------------------------------------------------------------------------- /nix/bench.sql: -------------------------------------------------------------------------------- 1 | create view pg_net_stats as 2 | select 3 | count(*) filter (where error_msg is null) as request_successes, 4 | count(*) filter (where error_msg is not null) as request_failures, 5 | (select error_msg from net._http_response where error_msg is not null order by id desc limit 1) as last_failure_error 6 | from net._http_response; 7 | 8 | create or replace procedure repro_timeouts(number_of_requests int default 10000, url text default 'http://server') as $$ 9 | declare 10 | last_id bigint; 11 | first_time timestamptz; 12 | second_time timestamptz; 13 | time_taken interval; 14 | begin 15 | delete from 
net._http_response; 16 | 17 | with do_requests as ( 18 | select 19 | net.http_get(url) as id 20 | from generate_series (1, number_of_requests) x 21 | ) 22 | select id, clock_timestamp() into last_id, first_time from do_requests offset number_of_requests - 1; 23 | 24 | commit; 25 | 26 | raise notice 'Waiting until % requests complete', number_of_requests; 27 | 28 | perform net._await_response(last_id); 29 | 30 | select clock_timestamp() into second_time; 31 | 32 | select age(second_time, first_time) into time_taken; 33 | 34 | raise notice 'Stats: %', (select to_json(x) from pg_net_stats x limit 1); 35 | 36 | raise notice 'Time taken: %', time_taken; 37 | end; 38 | $$ language plpgsql; 39 | -------------------------------------------------------------------------------- /nix/nginx/conf/custom.conf: -------------------------------------------------------------------------------- 1 | location / { 2 | echo 'Hello world'; 3 | } 4 | 5 | location /slow-reply { 6 | echo_sleep 2; 7 | echo 'this text will come in response body with HTTP 200 after 2 seconds'; 8 | } 9 | 10 | location /really-slow-reply { 11 | echo_sleep 30; 12 | echo 'this text will come in response body with HTTP 200 after 30 seconds'; 13 | } 14 | 15 | location /echo-method { 16 | echo $request_method; 17 | } 18 | 19 | location /anything { 20 | echo $is_args$query_string; 21 | } 22 | 23 | location /headers { 24 | echo_duplicate 1 $echo_client_request_headers; 25 | } 26 | 27 | location /post { 28 | if ($request_method != 'POST'){ 29 | return 405; 30 | } 31 | if ($http_content_type != "application/json") { 32 | return 406; 33 | } 34 | default_type application/json; 35 | echo_read_request_body; 36 | echo $request_body; 37 | } 38 | 39 | location /delete { 40 | if ($request_method != 'DELETE'){ 41 | return 405; 42 | } 43 | 44 | # Reject any request body, assumes they get sent with a content-type header (sufficient for our use cases) 45 | if ($http_content_type != "") { 46 | return 400; 47 | } 48 | 49 | 
echo_duplicate 1 $echo_client_request_headers$is_args$query_string; 50 | } 51 | 52 | location /delete_w_body { 53 | if ($request_method != 'DELETE'){ 54 | return 405; 55 | } 56 | 57 | echo_read_request_body; 58 | echo $request_body; 59 | } 60 | 61 | location /redirect_me { 62 | return 301 /to_here; 63 | } 64 | 65 | location /to_here { 66 | echo 'I got redirected'; 67 | } 68 | 69 | location /pathological { 70 | pathological; 71 | } 72 | -------------------------------------------------------------------------------- /nix/nginx/conf/nginx.conf: -------------------------------------------------------------------------------- 1 | daemon off; 2 | pid ./nginx.pid; 3 | 4 | worker_processes auto; 5 | 6 | events { 7 | worker_connections 1024; 8 | } 9 | 10 | http { 11 | 12 | access_log /dev/stdout; 13 | 14 | server { 15 | listen 8080; 16 | 17 | include custom.conf; 18 | } 19 | 20 | server { 21 | listen [::]:8888 ipv6only=on; 22 | 23 | location / { 24 | echo 'Hello ipv6 only'; 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /nix/nginx/logs/error.log: -------------------------------------------------------------------------------- 1 | Needed for nginx to start with a prefix directory 2 | -------------------------------------------------------------------------------- /nix/nginxCustom.nix: -------------------------------------------------------------------------------- 1 | { lib, fetchFromGitHub, nginx, nginxModules, writeShellScriptBin } : 2 | 3 | let 4 | ngx_pathological = rec { 5 | name = "ngx_pathological"; 6 | version = "0.1"; 7 | src = fetchFromGitHub { 8 | owner = "steve-chavez"; 9 | repo = name; 10 | rev = "668126a815daaf741433409a5afff5932e2fb2af"; 11 | sha256 = "sha256-tl7NoPlQCN9DDYQLRrHA3bP5csqbXUW9ozLKPbH2dfI="; 12 | }; 13 | meta = with lib; { 14 | license = with licenses; [ mit ]; 15 | }; 16 | }; 17 | customNginx = nginx.override { 18 | configureFlags = ["--with-cc='c99'"]; 19 | modules = [ 20 | 
nginxModules.echo 21 | ngx_pathological 22 | ]; 23 | }; 24 | script = '' 25 | set -euo pipefail 26 | 27 | export PATH=${customNginx}/bin:"$PATH" 28 | 29 | trap 'killall nginx' sigint sigterm exit 30 | 31 | nginx -p nix/nginx -e stderr & 32 | 33 | "$@" 34 | ''; 35 | in 36 | { 37 | customNginx = customNginx; 38 | nginxScript = writeShellScriptBin "net-with-nginx" script; 39 | } 40 | -------------------------------------------------------------------------------- /nix/nixops.nix: -------------------------------------------------------------------------------- 1 | let 2 | region = "us-east-2"; 3 | accessKeyId = "default"; 4 | in { 5 | network.storage.legacy = { 6 | databasefile = ".deployments.nixops"; 7 | }; 8 | 9 | network.description = "pg_net load testing setup"; 10 | 11 | resources = { 12 | ec2KeyPairs.netKP = { inherit region accessKeyId; }; 13 | vpc.netVpc = { 14 | inherit region accessKeyId; 15 | enableDnsSupport = true; 16 | enableDnsHostnames = true; 17 | cidrBlock = "10.0.0.0/24"; 18 | }; 19 | vpcSubnets.netSubnet = {resources, ...}: { 20 | inherit region accessKeyId; 21 | zone = "${region}a"; 22 | vpcId = resources.vpc.netVpc; 23 | cidrBlock = "10.0.0.0/24"; 24 | mapPublicIpOnLaunch = true; 25 | }; 26 | vpcInternetGateways.netIG = { resources, ... }: { 27 | inherit region accessKeyId; 28 | vpcId = resources.vpc.netVpc; 29 | }; 30 | vpcRouteTables.netRT = { resources, ... }: { 31 | inherit region accessKeyId; 32 | vpcId = resources.vpc.netVpc; 33 | }; 34 | vpcRoutes.netIGRoute = { resources, ... }: { 35 | inherit region accessKeyId; 36 | routeTableId = resources.vpcRouteTables.netRT; 37 | destinationCidrBlock = "0.0.0.0/0"; 38 | gatewayId = resources.vpcInternetGateways.netIG; 39 | }; 40 | vpcRouteTableAssociations.netTblAssoc = { resources, ... 
}: { 41 | inherit region accessKeyId; 42 | subnetId = resources.vpcSubnets.netSubnet; 43 | routeTableId = resources.vpcRouteTables.netRT; 44 | }; 45 | ec2SecurityGroups.netSecGroup = {resources, ...}: { 46 | inherit region accessKeyId; 47 | vpcId = resources.vpc.netVpc; 48 | rules = [ 49 | { fromPort = 80; toPort = 80; sourceIp = "0.0.0.0/0"; } 50 | { fromPort = 22; toPort = 22; sourceIp = "0.0.0.0/0"; } 51 | { fromPort = 0; toPort = 65535; sourceIp = resources.vpcSubnets.netSubnet.cidrBlock; } 52 | ]; 53 | }; 54 | }; 55 | 56 | server = { config, pkgs, resources, ... }: { 57 | deployment = { 58 | targetEnv = "ec2"; 59 | ec2 = { 60 | inherit region accessKeyId; 61 | instanceType = "t3a.micro"; 62 | associatePublicIpAddress = true; 63 | keyPair = resources.ec2KeyPairs.netKP; 64 | subnetId = resources.vpcSubnets.netSubnet; 65 | securityGroupIds = [resources.ec2SecurityGroups.netSecGroup.name]; 66 | }; 67 | }; 68 | 69 | services.nginx = { 70 | enable = true; 71 | package = (pkgs.callPackage ./nginxCustom.nix {}).customNginx; 72 | config = '' 73 | worker_processes auto; 74 | events { 75 | worker_connections 1024; 76 | } 77 | http { 78 | server { 79 | listen 0.0.0.0:80 ; 80 | listen [::]:80 ; 81 | server_name localhost; 82 | ${builtins.readFile nginx/conf/custom.conf} 83 | } 84 | } 85 | ''; 86 | }; 87 | networking.firewall.allowedTCPPorts = [ 80 ]; 88 | }; 89 | 90 | client = { config, pkgs, nodes, resources, ... 
}: { 91 | deployment = { 92 | targetEnv = "ec2"; 93 | ec2 = { 94 | inherit region accessKeyId; 95 | instanceType = "t3a.micro"; 96 | associatePublicIpAddress = true; 97 | ebsInitialRootDiskSize = 6; 98 | keyPair = resources.ec2KeyPairs.netKP; 99 | subnetId = resources.vpcSubnets.netSubnet; 100 | securityGroupIds = [resources.ec2SecurityGroups.netSecGroup.name]; 101 | }; 102 | }; 103 | 104 | services.postgresql = { 105 | enable = true; 106 | package = pkgs.postgresql_15.withPackages (p: [ 107 | (pkgs.callPackage ./pg_net.nix { postgresql = pkgs.postgresql_15;}) 108 | ]); 109 | authentication = pkgs.lib.mkOverride 10 '' 110 | local all all trust 111 | ''; 112 | settings = { 113 | shared_preload_libraries = "pg_net"; 114 | }; 115 | initialScript = pkgs.writeText "init-sql-script" '' 116 | create extension pg_net; 117 | ${builtins.readFile ./bench.sql} 118 | ''; 119 | }; 120 | 121 | services.journald.rateLimitBurst = 0; 122 | services.journald.rateLimitInterval = "0"; 123 | 124 | networking.hosts = { 125 | "${nodes.server.config.networking.privateIPv4}" = [ "server" ]; 126 | }; 127 | 128 | environment.systemPackages = [ 129 | pkgs.bcc 130 | pkgs.pgmetrics 131 | pkgs.pg_activity 132 | pkgs.htop 133 | pkgs.vegeta 134 | ( 135 | pkgs.writeShellScriptBin "vegeta-bench" '' 136 | set -euo pipefail 137 | 138 | # rate=0 means maximum rate subject to max-workers 139 | echo "GET http://server/pathological?status=200" | vegeta attack -rate=0 -duration=1s -max-workers=1 | tee results.bin | vegeta report 140 | '' 141 | ) 142 | ( 143 | pkgs.writeShellScriptBin "vegeta-bench-max-requests" '' 144 | set -euo pipefail 145 | 146 | # rate=0 means maximum rate subject to max-workers 147 | echo "GET http://server/pathological?status=200" | vegeta attack -rate=0 -duration=10s -max-workers=50 | tee results.bin | vegeta report 148 | '' 149 | ) 150 | ( 151 | pkgs.writeShellScriptBin "psql-net-bench" '' 152 | set -euo pipefail 153 | 154 | psql -U postgres -c "TRUNCATE net._http_response; TRUNCATE 
net.http_request_queue;" 155 | psql -U postgres -c "alter system set pg_net.batch_size to 32000;" # this just a high number 156 | psql -U postgres -c "select net.worker_restart();" 157 | psql -U postgres -c "truncate net._http_response;" 158 | psql -U postgres -c "select net.http_get('http://server/pathological?status=200') from generate_series(1, 400);" > /dev/null 159 | sleep 2 160 | psql -U postgres -c "select * from pg_net_stats;" 161 | psql -U postgres -c "alter system reset pg_net.batch_size;" 162 | psql -U postgres -c "select net.worker_restart();" 163 | '' 164 | ) 165 | ( 166 | pkgs.writeShellScriptBin "psql-reproduce-timeouts" '' 167 | set -euo pipefail 168 | 169 | psql -U postgres -c "call repro_timeouts();" 170 | '' 171 | ) 172 | ]; 173 | }; 174 | 175 | } 176 | -------------------------------------------------------------------------------- /nix/nixopsScripts.nix: -------------------------------------------------------------------------------- 1 | { nixops_unstable_minimal, writeShellScriptBin } : 2 | 3 | let 4 | nixops = nixops_unstable_minimal.withPlugins (ps: [ ps.nixops-aws ]); 5 | nixopsBin = "${nixops}/bin/nixops"; 6 | nixopsDeploy = 7 | writeShellScriptBin "net-cloud-deploy" 8 | '' 9 | set -euo pipefail 10 | 11 | cd nix 12 | 13 | set +e && ${nixopsBin} info -d pg_net > /dev/null 2> /dev/null 14 | info=$? && set -e 15 | 16 | if test $info -eq 1 17 | then 18 | echo "Creating deployment..." 
19 | ${nixopsBin} create -d pg_net 20 | fi 21 | 22 | ${nixopsBin} deploy -k -d pg_net --allow-reboot --confirm 23 | ''; 24 | nixopsSSH = 25 | writeShellScriptBin ("net-cloud-ssh") 26 | '' 27 | set -euo pipefail 28 | 29 | cd nix 30 | 31 | ${nixopsBin} ssh -d pg_net client 32 | ''; 33 | nixopsSSHServer = 34 | writeShellScriptBin ("net-cloud-ssh-server") 35 | '' 36 | set -euo pipefail 37 | 38 | cd nix 39 | 40 | ${nixopsBin} ssh -d pg_net server 41 | ''; 42 | nixopsReproTimeouts = 43 | writeShellScriptBin ("net-cloud-reproduce-timeouts") 44 | '' 45 | set -euo pipefail 46 | 47 | cd nix 48 | 49 | ${nixopsBin} ssh -d pg_net client psql-reproduce-timeouts 50 | ''; 51 | nixopsDestroy = 52 | writeShellScriptBin ("net-cloud-destroy") 53 | '' 54 | set -euo pipefail 55 | 56 | cd nix 57 | 58 | ${nixopsBin} destroy -d pg_net --confirm 59 | 60 | ${nixopsBin} delete -d pg_net 61 | ''; 62 | nixopsInfo = 63 | writeShellScriptBin ("net-cloud-info") 64 | '' 65 | set -euo pipefail 66 | 67 | cd nix 68 | 69 | ${nixopsBin} info 70 | ''; 71 | in 72 | [ 73 | nixopsDeploy 74 | nixopsSSH 75 | nixopsSSHServer 76 | nixopsReproTimeouts 77 | nixopsDestroy 78 | nixopsInfo 79 | ] 80 | -------------------------------------------------------------------------------- /nix/xpg.nix: -------------------------------------------------------------------------------- 1 | { fetchFromGitHub, lib } : 2 | let 3 | dep = fetchFromGitHub { 4 | owner = "steve-chavez"; 5 | repo = "xpg"; 6 | rev = "v1.3.2"; 7 | sha256 = "sha256-ooYqMOQD9y+/87wBd33Mvbpsx+FwEMdZoibGRM4gvBk="; 8 | }; 9 | xpg = import dep; 10 | in 11 | xpg 12 | -------------------------------------------------------------------------------- /pg_net.control.in: -------------------------------------------------------------------------------- 1 | comment = 'Async HTTP' 2 | default_version = '@EXTVERSION@' 3 | relocatable = false 4 | -------------------------------------------------------------------------------- /shell.nix: 
-------------------------------------------------------------------------------- 1 | with import (builtins.fetchTarball { 2 | name = "24.05"; # May 31 2024 3 | url = "https://github.com/NixOS/nixpkgs/archive/refs/tags/24.05.tar.gz"; 4 | sha256 = "sha256:1lr1h35prqkd1mkmzriwlpvxcb34kmhc9dnr48gkm8hh089hifmx"; 5 | }) {}; 6 | mkShell { 7 | buildInputs = 8 | let 9 | nginxCustom = callPackage ./nix/nginxCustom.nix {}; 10 | nixopsScripts = callPackage ./nix/nixopsScripts.nix {}; 11 | xpg = callPackage ./nix/xpg.nix {inherit fetchFromGitHub;}; 12 | pythonDeps = with python3Packages; [ 13 | pytest 14 | psycopg2 15 | sqlalchemy 16 | ]; 17 | in 18 | [ 19 | xpg.xpg 20 | pythonDeps 21 | nginxCustom.nginxScript 22 | curl 23 | ] ++ 24 | nixopsScripts; 25 | shellHook = '' 26 | export HISTFILE=.history 27 | ''; 28 | } 29 | -------------------------------------------------------------------------------- /sql/pg_net--0.1--0.2.sql: -------------------------------------------------------------------------------- 1 | create or replace function net.http_post( 2 | -- url for the request 3 | url text, 4 | -- body of the POST request 5 | body jsonb default '{}'::jsonb, 6 | -- key/value pairs to be url encoded and appended to the `url` 7 | params jsonb default '{}'::jsonb, 8 | -- key/values to be included in request headers 9 | headers jsonb default '{"Content-Type": "application/json"}'::jsonb, 10 | -- the maximum number of milliseconds the request may take before being cancelled 11 | timeout_milliseconds int DEFAULT 1000 12 | ) 13 | -- request_id reference 14 | returns bigint 15 | volatile 16 | parallel safe 17 | language plpgsql 18 | as $$ 19 | declare 20 | request_id bigint; 21 | params_array text[]; 22 | content_type text; 23 | begin 24 | 25 | -- Exctract the content_type from headers 26 | select 27 | header_value into content_type 28 | from 29 | jsonb_each_text(coalesce(headers, '{}'::jsonb)) r(header_name, header_value) 30 | where 31 | lower(header_name) = 'content-type' 32 | limit 33 
| 1; 34 | 35 | -- If the user provided new headers and omitted the content type 36 | -- add it back in automatically 37 | if content_type is null then 38 | select headers || '{"Content-Type": "application/json"}'::jsonb into headers; 39 | end if; 40 | 41 | -- Confirm that the content-type is set as "application/json" 42 | if content_type <> 'application/json' then 43 | raise exception 'Content-Type header must be "application/json"'; 44 | end if; 45 | 46 | -- Confirm body is set since http method switches on if body exists 47 | if body is null then 48 | raise exception 'body must not be null'; 49 | end if; 50 | 51 | select 52 | coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}') 53 | into 54 | params_array 55 | from 56 | jsonb_each_text(params); 57 | 58 | -- Add to the request queue 59 | insert into net.http_request_queue(method, url, headers, body, timeout_milliseconds) 60 | values ( 61 | 'POST', 62 | net._encode_url_with_params_array(url, params_array), 63 | headers, 64 | convert_to(body::text, 'UTF8'), 65 | timeout_milliseconds 66 | ) 67 | returning id 68 | into request_id; 69 | 70 | return request_id; 71 | end 72 | $$; 73 | -------------------------------------------------------------------------------- /sql/pg_net--0.10.0--0.11.0.sql: -------------------------------------------------------------------------------- 1 | -- no SQL changes in 0.11.0 2 | -------------------------------------------------------------------------------- /sql/pg_net--0.11.0--0.12.0.sql: -------------------------------------------------------------------------------- 1 | alter function net.http_get(text, jsonb, jsonb, integer) security invoker; 2 | 3 | alter function net.http_post(text, jsonb, jsonb, jsonb, integer) security invoker; 4 | 5 | alter function net.http_delete ( text, jsonb, jsonb, integer) security invoker; 6 | 7 | alter function net._http_collect_response ( bigint, boolean) security invoker; 8 | 9 | alter function 
net.http_collect_response ( bigint, boolean) security invoker;

create or replace function net.worker_restart()
returns bool
language 'c'
as 'pg_net';

grant usage on schema net to PUBLIC;
grant all on all sequences in schema net to PUBLIC;
grant all on all tables in schema net to PUBLIC;
--------------------------------------------------------------------------------
/sql/pg_net--0.12.0--0.13.0.sql:
--------------------------------------------------------------------------------
-- no SQL changes in 0.13.0
--------------------------------------------------------------------------------
/sql/pg_net--0.13.0--0.14.0.sql:
--------------------------------------------------------------------------------
-- no SQL changes in 0.14.0
--------------------------------------------------------------------------------
/sql/pg_net--0.14.0--0.15.0.sql:
--------------------------------------------------------------------------------
drop function net.http_delete (text, jsonb, jsonb, integer);

-- Re-create http_delete with an optional request body parameter.
create function net.http_delete(
    -- url for the request
    url text,
    -- key/value pairs to be url encoded and appended to the `url`
    params jsonb default '{}'::jsonb,
    -- key/values to be included in request headers
    headers jsonb default '{}'::jsonb,
    -- the maximum number of milliseconds the request may take before being cancelled
    timeout_milliseconds int default 5000,
    -- optional body of the request
    body jsonb default NULL
)
-- request_id reference
returns bigint
volatile
parallel safe
language plpgsql
as $$
declare
    request_id bigint;
    params_array text[];
begin
    select coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into params_array
    from jsonb_each_text(params);

    -- Add to the request queue
    insert into net.http_request_queue(method, url, headers, body, timeout_milliseconds)
    values (
        'DELETE',
        net._encode_url_with_params_array(url, params_array),
        headers,
        convert_to(body::text, 'UTF8'),
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;
--------------------------------------------------------------------------------
/sql/pg_net--0.15.0--new.sql:
--------------------------------------------------------------------------------
alter function net._await_response(bigint) parallel unsafe called on null input;

alter function net._urlencode_string(varchar) called on null input;

alter function net._encode_url_with_params_array(text, text[]) called on null input;

alter function net._await_response(bigint) parallel unsafe called on null input;

alter function net.http_get(text, jsonb , jsonb , int) parallel unsafe called on null input;

alter function net.http_post(text, jsonb , jsonb , jsonb, int) parallel unsafe;

alter function net.http_delete(text, jsonb , jsonb, int, jsonb) parallel unsafe;

alter function net._http_collect_response(bigint, bool) parallel unsafe called on null input;

alter function net.http_collect_response(bigint, bool) parallel unsafe called on null input;
--------------------------------------------------------------------------------
/sql/pg_net--0.2--0.3.sql:
--------------------------------------------------------------------------------
drop index if exists created_idx;
alter table net.http_request_queue drop created;

alter table net._http_response drop constraint if exists _http_response_id_fkey;
alter table net._http_response add created timestamptz not null default now();
create index on net._http_response (created);

create or replace function net.http_collect_response(
    -- request_id reference
    request_id bigint,
    -- when `true`, return immediately.
    -- when `false` wait for the request to complete before returning
    async bool default true
)
-- http response composite wrapped in a result type
returns net.http_response_result
strict
volatile
parallel safe
language plpgsql
as $$
declare
    rec net._http_response;
    req_exists boolean;
begin

    if not async then
        perform net._await_response(request_id);
    end if;

    select *
    into rec
    from net._http_response
    where id = request_id;

    if rec is null then
        -- The request is either still processing or the request_id provided does not exist

        -- TODO: request in progress is indistinguishable from request that doesn't exist

        -- No request matching request_id found
        return (
            'ERROR',
            'request matching request_id not found',
            null
        )::net.http_response_result;

    end if;

    -- Return a valid, populated http_response_result
    return (
        'SUCCESS',
        'ok',
        (
            rec.status_code,
            rec.headers,
            rec.content
        )::net.http_response
    )::net.http_response_result;
end;
$$;
--------------------------------------------------------------------------------
/sql/pg_net--0.3--0.4.sql:
--------------------------------------------------------------------------------
alter function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) security definer;
alter function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) security definer;
alter function net.http_collect_response(request_id bigint, async boolean) security definer;
--------------------------------------------------------------------------------
/sql/pg_net--0.4--0.5.sql:
--------------------------------------------------------------------------------
alter function net._encode_url_with_params_array ( text, text[]) strict;

alter table net.http_request_queue drop constraint if exists http_request_queue_pkey cascade;
alter table net._http_response drop constraint if exists _http_response_pkey cascade;

drop trigger if exists ensure_worker_is_up on net.http_request_queue;
drop function if exists net._check_worker_is_up();

create or replace function net.check_worker_is_up() returns void as $$
begin
  if not exists (select pid from pg_stat_activity where backend_type = 'pg_net worker') then
    raise exception using
      message = 'the pg_net background worker is not up'
      , detail = 'the pg_net background worker is down due to an internal error and cannot process requests'
      , hint = 'make sure that you didn''t modify any of pg_net internal tables';
  end if;
end
$$ language plpgsql;

drop index if exists net._http_response_created_idx;

alter table net.http_request_queue set unlogged;
alter table net._http_response set unlogged;
--------------------------------------------------------------------------------
/sql/pg_net--0.5--0.5.1.sql:
--------------------------------------------------------------------------------
-- noop
--------------------------------------------------------------------------------
/sql/pg_net--0.5.1--0.6.sql:
--------------------------------------------------------------------------------
drop index if exists _http_response_created_idx;
create index on net._http_response (created);

create or replace function net.http_post(
    -- url for the request
    url text,
    -- body of the POST request
    body jsonb default '{}'::jsonb,
    -- key/value pairs to be url encoded and appended to the `url`
    params jsonb default '{}'::jsonb,
    -- key/values to be included in request headers
    headers jsonb default '{"Content-Type": "application/json"}'::jsonb,
    -- the maximum number of milliseconds the request may take before being cancelled
    timeout_milliseconds int
DEFAULT 2000
)
-- request_id reference
returns bigint
volatile
parallel safe
language plpgsql
security definer
as $$
declare
    request_id bigint;
    params_array text[];
    content_type text;
begin

    -- Extract the content_type from headers
    select
        header_value into content_type
    from
        jsonb_each_text(coalesce(headers, '{}'::jsonb)) r(header_name, header_value)
    where
        lower(header_name) = 'content-type'
    limit
        1;

    -- If the user provided new headers and omitted the content type
    -- add it back in automatically
    if content_type is null then
        select headers || '{"Content-Type": "application/json"}'::jsonb into headers;
    end if;

    -- Confirm that the content-type is set as "application/json"
    if content_type <> 'application/json' then
        raise exception 'Content-Type header must be "application/json"';
    end if;

    select
        coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into
        params_array
    from
        jsonb_each_text(params);

    -- Add to the request queue
    insert into net.http_request_queue(method, url, headers, body, timeout_milliseconds)
    values (
        'POST',
        net._encode_url_with_params_array(url, params_array),
        headers,
        convert_to(body::text, 'UTF8'),
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;
--------------------------------------------------------------------------------
/sql/pg_net--0.6--0.7.sql:
--------------------------------------------------------------------------------
alter domain net.http_method drop constraint http_method_check;
alter domain net.http_method add constraint http_method_check
  check (
    value ilike 'get'
    or value ilike 'post'
    or value ilike 'delete'
  );

drop function net.http_collect_response(bigint, boolean);

create or replace function net.http_delete(
    -- url for the request
    url text,
    -- key/value pairs to be url encoded and appended to the `url`
    params jsonb default '{}'::jsonb,
    -- key/values to be included in request headers
    headers jsonb default '{}'::jsonb,
    -- the maximum number of milliseconds the request may take before being cancelled
    timeout_milliseconds int default 2000
)
-- request_id reference
returns bigint
strict
volatile
parallel safe
language plpgsql
security definer
as $$
declare
    request_id bigint;
    params_array text[];
begin
    select coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into params_array
    from jsonb_each_text(params);

    -- Add to the request queue
    insert into net.http_request_queue(method, url, headers, timeout_milliseconds)
    values (
        'DELETE',
        net._encode_url_with_params_array(url, params_array),
        headers,
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;

create or replace function net._http_collect_response(
    -- request_id reference
    request_id bigint,
    -- when `true`, return immediately.
    -- when `false` wait for the request to complete before returning
    async bool default true
)
-- http response composite wrapped in a result type
returns net.http_response_result
strict
volatile
parallel safe
language plpgsql
security definer
as $$
declare
    rec net._http_response;
    req_exists boolean;
begin

    if not async then
        perform net._await_response(request_id);
    end if;

    select *
    into rec
    from net._http_response
    where id = request_id;

    if rec is null then
        -- The request is either still processing or the request_id provided does not exist

        -- TODO: request in progress is indistinguishable from request that doesn't exist

        -- No request matching request_id found
        return (
            'ERROR',
            'request matching request_id not found',
            null
        )::net.http_response_result;

    end if;

    -- Return a valid, populated http_response_result
    return (
        'SUCCESS',
        'ok',
        (
            rec.status_code,
            rec.headers,
            rec.content
        )::net.http_response
    )::net.http_response_result;
end;
$$;
--------------------------------------------------------------------------------
/sql/pg_net--0.7--0.7.1.sql:
--------------------------------------------------------------------------------
-- Deprecated shim: delegates to net._http_collect_response.
create or replace function net.http_collect_response(
    -- request_id reference
    request_id bigint,
    -- when `true`, return immediately.
    -- when `false` wait for the request to complete before returning
    async bool default true
)
-- http response composite wrapped in a result type
returns net.http_response_result
strict
volatile
parallel safe
language plpgsql
security definer
as $$
-- FIX: the original body was bare statements, which is invalid plpgsql (a
-- plpgsql body requires a begin/end block), and the bare `select` had no
-- destination and never returned the delegated result.
begin
    raise notice 'The net.http_collect_response function is deprecated.';
    return net._http_collect_response(request_id, async);
end;
$$;
--------------------------------------------------------------------------------
/sql/pg_net--0.7.1--0.7.3.sql:
--------------------------------------------------------------------------------
create or replace function net.check_worker_is_up() returns void as $$
begin
  if not exists (select pid from pg_stat_activity where backend_type ilike '%pg_net%') then
    raise exception using
      message = 'the pg_net background worker is not up'
      , detail = 'the pg_net background worker is down due to an internal error and cannot process requests'
      , hint = 'make sure that you didn''t modify any of pg_net internal tables';
  end if;
end
$$ language plpgsql;
comment on function net.check_worker_is_up() is 'raises an exception if the pg_net background worker is not up, otherwise it doesn''t return anything';
--------------------------------------------------------------------------------
/sql/pg_net--0.7.3--0.8.0.sql:
--------------------------------------------------------------------------------
create or replace function net.worker_restart() returns bool as $$
  select pg_reload_conf();
  select pg_terminate_backend(pid)
  from pg_stat_activity
  where backend_type ilike '%pg_net%';
$$
security definer
language sql;

create or replace function net.http_get(
    url text,
    params jsonb default '{}'::jsonb,
    headers jsonb default '{}'::jsonb,
    timeout_milliseconds int default 5000
)
returns bigint
strict
volatile
parallel safe
language plpgsql
as $$
declare
    request_id bigint;
    params_array text[];
begin
    select coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into params_array
    from jsonb_each_text(params);

    insert into net.http_request_queue(method, url, headers, timeout_milliseconds)
    values (
        'GET',
        net._encode_url_with_params_array(url, params_array),
        headers,
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;

create or replace function net.http_post(
    url text,
    body jsonb default '{}'::jsonb,
    params jsonb default '{}'::jsonb,
    headers jsonb default '{"Content-Type": "application/json"}'::jsonb,
    timeout_milliseconds int DEFAULT 5000
)
returns bigint
volatile
parallel safe
language plpgsql
as $$
declare
    request_id bigint;
    params_array text[];
    content_type text;
begin

    select
        header_value into content_type
    from
        jsonb_each_text(coalesce(headers, '{}'::jsonb)) r(header_name, header_value)
    where
        lower(header_name) = 'content-type'
    limit
        1;

    if content_type is null then
        select headers || '{"Content-Type": "application/json"}'::jsonb into headers;
    end if;

    if content_type <> 'application/json' then
        raise exception 'Content-Type header must be "application/json"';
    end if;

    select
        coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into
        params_array
    from
        jsonb_each_text(params);

    insert into net.http_request_queue(method, url, headers, body, timeout_milliseconds)
    values (
        'POST',
        net._encode_url_with_params_array(url, params_array),
        headers,
        convert_to(body::text, 'UTF8'),
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;

create or replace function net.http_delete(
    url text,
    params jsonb default '{}'::jsonb,
    headers jsonb default '{}'::jsonb,
    timeout_milliseconds int default 5000
)
returns bigint
strict
volatile
parallel safe
language plpgsql
as $$
declare
    request_id bigint;
    params_array text[];
begin
    select coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into params_array
    from jsonb_each_text(params);

    insert into net.http_request_queue(method, url, headers, timeout_milliseconds)
    values (
        'DELETE',
        net._encode_url_with_params_array(url, params_array),
        headers,
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;
--------------------------------------------------------------------------------
/sql/pg_net--0.8.0--0.9.3.sql:
--------------------------------------------------------------------------------
-- no SQL changes in 0.9.3
--------------------------------------------------------------------------------
/sql/pg_net--0.9.3--0.10.0.sql:
--------------------------------------------------------------------------------
-- no SQL changes in 0.10.0
--------------------------------------------------------------------------------
/sql/pg_net.sql:
--------------------------------------------------------------------------------
create schema if not exists net;

-- Restrict queue entries to the HTTP methods the worker implements.
create domain net.http_method as text
check (
    value ilike 'get'
    or value ilike 'post'
    or value ilike 'delete'
);

-- Store pending requests.
-- The background worker reads from here
-- API: Private
create unlogged table net.http_request_queue(
    id bigserial,
    method net.http_method not null,
    url text not null,
    headers jsonb not null,
    body bytea,
    timeout_milliseconds int not null
);

create or replace function net.check_worker_is_up() returns void as $$
begin
  if not exists (select pid from pg_stat_activity where backend_type ilike '%pg_net%') then
    raise exception using
      message = 'the pg_net background worker is not up'
      , detail = 'the pg_net background worker is down due to an internal error and cannot process requests'
      , hint = 'make sure that you didn''t modify any of pg_net internal tables';
  end if;
end
$$ language plpgsql;
comment on function net.check_worker_is_up() is 'raises an exception if the pg_net background worker is not up, otherwise it doesn''t return anything';

-- Associates a response with a request
-- API: Private
create unlogged table net._http_response(
    id bigint,
    status_code integer,
    content_type text,
    headers jsonb,
    content text,
    timed_out bool,
    error_msg text,
    created timestamptz not null default now()
);

create index on net._http_response (created);

-- Blocks until an http_request is complete
-- API: Private
create or replace function net._await_response(
    request_id bigint
)
returns bool
language plpgsql
as $$
declare
    rec net._http_response;
begin
    while rec is null loop
        select *
        into rec
        from net._http_response
        where id = request_id;

        if rec is null then
            -- Wait 50 ms before checking again
            perform pg_sleep(0.05);
        end if;
    end loop;

    return true;
end;
$$;


-- url encode a string
-- API: Private
create or replace function net._urlencode_string(string varchar)
-- url encoded string
returns text
language 'c'
immutable
as 'pg_net';

-- API: Private
create or replace function net._encode_url_with_params_array(url text, params_array text[])
-- url encoded string
returns text
language 'c'
immutable
as 'pg_net';

create or replace function net.worker_restart()
returns bool
language 'c'
as 'pg_net';

-- Interface to make an async request
-- API: Public
create or replace function net.http_get(
    -- url for the request
    url text,
    -- key/value pairs to be url encoded and appended to the `url`
    params jsonb default '{}'::jsonb,
    -- key/values to be included in request headers
    headers jsonb default '{}'::jsonb,
    -- the maximum number of milliseconds the request may take before being cancelled
    timeout_milliseconds int default 5000
)
-- request_id reference
returns bigint
language plpgsql
as $$
declare
    request_id bigint;
    params_array text[];
begin
    select coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into params_array
    from jsonb_each_text(params);

    -- Add to the request queue
    insert into net.http_request_queue(method, url, headers, timeout_milliseconds)
    values (
        'GET',
        net._encode_url_with_params_array(url, params_array),
        headers,
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;

-- Interface to make an async request
-- API: Public
create or replace function net.http_post(
    -- url for the request
    url text,
    -- body of the POST request
    body jsonb default '{}'::jsonb,
    -- key/value pairs to be url encoded and appended to the `url`
    params jsonb default '{}'::jsonb,
    -- key/values to be included in
-- request headers
    headers jsonb default '{"Content-Type": "application/json"}'::jsonb,
    -- the maximum number of milliseconds the request may take before being cancelled
    timeout_milliseconds int DEFAULT 5000
)
-- request_id reference
returns bigint
language plpgsql
as $$
declare
    request_id bigint;
    params_array text[];
    content_type text;
begin

    -- Extract the content_type from headers
    select
        header_value into content_type
    from
        jsonb_each_text(coalesce(headers, '{}'::jsonb)) r(header_name, header_value)
    where
        lower(header_name) = 'content-type'
    limit
        1;

    -- If the user provided new headers and omitted the content type
    -- add it back in automatically
    if content_type is null then
        select headers || '{"Content-Type": "application/json"}'::jsonb into headers;
    end if;

    -- Confirm that the content-type is set as "application/json"
    if content_type <> 'application/json' then
        raise exception 'Content-Type header must be "application/json"';
    end if;

    select
        coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into
        params_array
    from
        jsonb_each_text(params);

    -- Add to the request queue
    insert into net.http_request_queue(method, url, headers, body, timeout_milliseconds)
    values (
        'POST',
        net._encode_url_with_params_array(url, params_array),
        headers,
        convert_to(body::text, 'UTF8'),
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;

-- Interface to make an async request
-- API: Public
create or replace function net.http_delete(
    -- url for the request
    url text,
    -- key/value pairs to be url encoded and appended to the `url`
    params jsonb default '{}'::jsonb,
    -- key/values to be included in request headers
    headers jsonb default '{}'::jsonb,
    -- the maximum number of milliseconds the request may take before being cancelled
    timeout_milliseconds int default 5000,
    -- optional body of the request
    body jsonb default NULL
)
-- request_id reference
returns bigint
language plpgsql
as $$
declare
    request_id bigint;
    params_array text[];
begin
    select coalesce(array_agg(net._urlencode_string(key) || '=' || net._urlencode_string(value)), '{}')
    into params_array
    from jsonb_each_text(params);

    -- Add to the request queue
    insert into net.http_request_queue(method, url, headers, body, timeout_milliseconds)
    values (
        'DELETE',
        net._encode_url_with_params_array(url, params_array),
        headers,
        convert_to(body::text, 'UTF8'),
        timeout_milliseconds
    )
    returning id
    into request_id;

    return request_id;
end
$$;

-- Lifecycle states of a request (all protocols)
-- API: Public
create type net.request_status as enum ('PENDING', 'SUCCESS', 'ERROR');


-- A response from an HTTP server
-- API: Public
create type net.http_response AS (
    status_code integer,
    headers jsonb,
    body text
);

-- State wrapper around responses
-- API: Public
create type net.http_response_result as (
    status net.request_status,
    message text,
    response net.http_response
);


-- Collect responses of an http request
-- API: Private
create or replace function net._http_collect_response(
    -- request_id reference
    request_id bigint,
    -- when `true`, return immediately.
    -- when `false` wait for the request to complete before returning
    async bool default true
)
-- http response composite wrapped in a result type
returns net.http_response_result
language plpgsql
as $$
declare
    rec net._http_response;
    req_exists boolean;
begin

    if not async then
        perform net._await_response(request_id);
    end if;

    select *
    into rec
    from net._http_response
    where id = request_id;

    if rec is null or rec.error_msg is not null then
        -- The request is either still processing or the request_id provided does not exist

        -- TODO: request in progress is indistinguishable from request that doesn't exist

        -- No request matching request_id found
        return (
            'ERROR',
            coalesce(rec.error_msg, 'request matching request_id not found'),
            null
        )::net.http_response_result;

    end if;

    -- Return a valid, populated http_response_result
    return (
        'SUCCESS',
        'ok',
        (
            rec.status_code,
            rec.headers,
            rec.content
        )::net.http_response
    )::net.http_response_result;
end;
$$;

-- Deprecated alias for net._http_collect_response
-- API: Public
create or replace function net.http_collect_response(
    -- request_id reference
    request_id bigint,
    -- when `true`, return immediately.
    -- when `false` wait for the request to complete before returning
    async bool default true
)
-- http response composite wrapped in a result type
returns net.http_response_result
language plpgsql
as $$
begin
    raise notice 'The net.http_collect_response function is deprecated.';
    -- FIX: the original used a bare `select`, which raises "query has no
    -- destination for result data" in plpgsql and never returned a value.
    -- Delegate and return the result instead.
    return net._http_collect_response(request_id, async);
end;
$$;

grant usage on schema net to PUBLIC;
grant all on all sequences in schema net to PUBLIC;
grant all on all tables in schema net to PUBLIC;
--------------------------------------------------------------------------------
/src/core.c:
--------------------------------------------------------------------------------
/* NOTE(review): the targets of the next four #include directives were lost in
 * extraction (angle-bracket contents were stripped) — recover them from the
 * upstream source before compiling. */
#include
#include
#include
#include

#include "pg_prelude.h"
#include "curl_prelude.h"
#include "core.h"
#include "event.h"
#include "errors.h"

/* Per-request state attached to each curl easy handle via CURLOPT_PRIVATE. */
typedef struct {
    int64 id;
    StringInfo body;
    struct curl_slist *request_headers;
    int32 timeout_milliseconds;
} CurlData;

/* libcurl write callback: append the received chunk to the response body. */
static size_t
body_cb(void *contents, size_t size, size_t nmemb, void *userp)
{
    CurlData *cdata = (CurlData*) userp;
    size_t realsize = size * nmemb;
    appendBinaryStringInfo(cdata->body, (const char*)contents, (int)realsize);
    return realsize;
}

/* Convert a postgres text[] of "Name: value" strings into a curl slist,
 * skipping null elements. */
static struct curl_slist *pg_text_array_to_slist(ArrayType *array,
                                                 struct curl_slist *headers) {
    ArrayIterator iterator;
    Datum value;
    bool isnull;
    char *hdr;

    iterator = array_create_iterator(array, 0, NULL);

    while (array_iterate(iterator, &value, &isnull)) {
        if (isnull) {
            continue;
        }

        hdr = TextDatumGetCString(value);
        EREPORT_CURL_SLIST_APPEND(headers, hdr);
        pfree(hdr);
    }
    array_free_iterator(iterator);

    return headers;
}

// We need a different memory context here, as the parent function will have an
SPI memory context, which has a shorter lifetime. 52 | static void init_curl_handle(CURLM *curl_mhandle, MemoryContext curl_memctx, int64 id, Datum urlBin, NullableDatum bodyBin, NullableDatum headersBin, Datum methodBin, int32 timeout_milliseconds){ 53 | MemoryContext old_ctx = MemoryContextSwitchTo(curl_memctx); 54 | 55 | CurlData *cdata = palloc(sizeof(CurlData)); 56 | cdata->id = id; 57 | cdata->body = makeStringInfo(); 58 | 59 | cdata->timeout_milliseconds = timeout_milliseconds; 60 | 61 | if (!headersBin.isnull) { 62 | ArrayType *pgHeaders = DatumGetArrayTypeP(headersBin.value); 63 | struct curl_slist *request_headers = NULL; 64 | 65 | request_headers = pg_text_array_to_slist(pgHeaders, request_headers); 66 | 67 | EREPORT_CURL_SLIST_APPEND(request_headers, "User-Agent: pg_net/" EXTVERSION); 68 | 69 | cdata->request_headers = request_headers; 70 | } 71 | 72 | char *url = TextDatumGetCString(urlBin); 73 | 74 | char *reqBody = !bodyBin.isnull ? TextDatumGetCString(bodyBin.value) : NULL; 75 | 76 | char *method = TextDatumGetCString(methodBin); 77 | if (strcasecmp(method, "GET") != 0 && strcasecmp(method, "POST") != 0 && strcasecmp(method, "DELETE") != 0) { 78 | ereport(ERROR, errmsg("Unsupported request method %s", method)); 79 | } 80 | 81 | CURL *curl_ez_handle = curl_easy_init(); 82 | if(!curl_ez_handle) 83 | ereport(ERROR, errmsg("curl_easy_init()")); 84 | 85 | if (strcasecmp(method, "GET") == 0) { 86 | if (reqBody) { 87 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_POSTFIELDS, reqBody); 88 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_CUSTOMREQUEST, "GET"); 89 | } 90 | } 91 | 92 | if (strcasecmp(method, "POST") == 0) { 93 | if (reqBody) { 94 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_POSTFIELDS, reqBody); 95 | } 96 | else { 97 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_POST, 1); 98 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_POSTFIELDSIZE, 0); 99 | } 100 | } 101 | 102 | if (strcasecmp(method, "DELETE") == 0) { 103 | EREPORT_CURL_SETOPT(curl_ez_handle, 
CURLOPT_CUSTOMREQUEST, "DELETE"); 104 | if (reqBody) { 105 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_POSTFIELDS, reqBody); 106 | } 107 | } 108 | 109 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_WRITEFUNCTION, body_cb); 110 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_WRITEDATA, cdata); 111 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_HEADER, 0L); 112 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_URL, url); 113 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_HTTPHEADER, cdata->request_headers); 114 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_TIMEOUT_MS, cdata->timeout_milliseconds); 115 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_PRIVATE, cdata); 116 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_FOLLOWLOCATION, true); 117 | if (log_min_messages <= DEBUG2) 118 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_VERBOSE, 1L); 119 | #if LIBCURL_VERSION_NUM >= 0x075500 /* libcurl 7.85.0 */ 120 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_PROTOCOLS_STR, "http,https"); 121 | #else 122 | EREPORT_CURL_SETOPT(curl_ez_handle, CURLOPT_PROTOCOLS, CURLPROTO_HTTP | CURLPROTO_HTTPS); 123 | #endif 124 | 125 | EREPORT_MULTI( 126 | curl_multi_add_handle(curl_mhandle, curl_ez_handle) 127 | ); 128 | 129 | MemoryContextSwitchTo(old_ctx); 130 | } 131 | 132 | void set_curl_mhandle(CURLM *curl_mhandle, LoopState *lstate){ 133 | EREPORT_CURL_MULTI_SETOPT(curl_mhandle, CURLMOPT_SOCKETFUNCTION, multi_socket_cb); 134 | EREPORT_CURL_MULTI_SETOPT(curl_mhandle, CURLMOPT_SOCKETDATA, lstate); 135 | EREPORT_CURL_MULTI_SETOPT(curl_mhandle, CURLMOPT_TIMERFUNCTION, multi_timer_cb); 136 | EREPORT_CURL_MULTI_SETOPT(curl_mhandle, CURLMOPT_TIMERDATA, lstate); 137 | } 138 | 139 | void delete_expired_responses(char *ttl, int batch_size){ 140 | SetCurrentStatementStartTimestamp(); 141 | StartTransactionCommand(); 142 | PushActiveSnapshot(GetTransactionSnapshot()); 143 | SPI_connect(); 144 | 145 | int ret_code = SPI_execute_with_args("\ 146 | WITH\ 147 | rows AS (\ 148 | SELECT ctid\ 149 | FROM 
net._http_response\ 150 | WHERE created < now() - $1\ 151 | ORDER BY created\ 152 | LIMIT $2\ 153 | )\ 154 | DELETE FROM net._http_response r\ 155 | USING rows WHERE r.ctid = rows.ctid", 156 | 2, 157 | (Oid[]){INTERVALOID, INT4OID}, 158 | (Datum[]){ 159 | DirectFunctionCall3(interval_in, CStringGetDatum(ttl), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1)) 160 | , Int32GetDatum(batch_size) 161 | }, NULL, false, 0); 162 | 163 | if (ret_code != SPI_OK_DELETE) 164 | { 165 | ereport(ERROR, errmsg("Error expiring response table rows: %s", SPI_result_code_string(ret_code))); 166 | } 167 | 168 | SPI_finish(); 169 | PopActiveSnapshot(); 170 | CommitTransactionCommand(); 171 | } 172 | 173 | static void insert_failure_response(CURL *ez_handle, CURLcode return_code, int64 id, int32 timeout_milliseconds){ 174 | StartTransactionCommand(); 175 | PushActiveSnapshot(GetTransactionSnapshot()); 176 | SPI_connect(); 177 | 178 | const char* error_msg; 179 | if (return_code == CURLE_OPERATION_TIMEDOUT){ 180 | error_msg = detailed_timeout_strerror(ez_handle, timeout_milliseconds).msg; 181 | } else { 182 | error_msg = curl_easy_strerror(return_code); 183 | } 184 | 185 | int ret_code = SPI_execute_with_args("\ 186 | insert into net._http_response(id, error_msg) values ($1, $2)", 187 | 2, 188 | (Oid[]){INT8OID, CSTRINGOID}, 189 | (Datum[]){Int64GetDatum(id), CStringGetDatum(error_msg)}, 190 | NULL, false, 1); 191 | 192 | if (ret_code != SPI_OK_INSERT) 193 | { 194 | ereport(ERROR, errmsg("Error when inserting failed response: %s", SPI_result_code_string(ret_code))); 195 | } 196 | 197 | SPI_finish(); 198 | PopActiveSnapshot(); 199 | CommitTransactionCommand(); 200 | } 201 | 202 | static void insert_success_response(CurlData *cdata, long http_status_code, char *contentType, Jsonb *jsonb_headers){ 203 | StartTransactionCommand(); 204 | PushActiveSnapshot(GetTransactionSnapshot()); 205 | SPI_connect(); 206 | 207 | int ret_code = SPI_execute_with_args("\ 208 | insert into net._http_response(id, 
status_code, content, headers, content_type, timed_out) values ($1, $2, $3, $4, $5, $6)", 209 | 6, 210 | (Oid[]){INT8OID, INT4OID, CSTRINGOID, JSONBOID, CSTRINGOID, BOOLOID}, 211 | (Datum[]){ 212 | Int64GetDatum(cdata->id) 213 | , Int32GetDatum(http_status_code) 214 | , CStringGetDatum(cdata->body->data) 215 | , JsonbPGetDatum(jsonb_headers) 216 | , CStringGetDatum(contentType) 217 | , BoolGetDatum(false) // timed_out is false here as it's a success 218 | }, 219 | (char[6]){ 220 | ' ' 221 | , [2] = cdata->body->data[0] == '\0'? 'n' : ' ' 222 | , [4] = !contentType? 'n' :' ' 223 | }, 224 | false, 1); 225 | 226 | if (ret_code != SPI_OK_INSERT) 227 | { 228 | ereport(ERROR, errmsg("Error when inserting successful response: %s", SPI_result_code_string(ret_code))); 229 | } 230 | 231 | SPI_finish(); 232 | PopActiveSnapshot(); 233 | CommitTransactionCommand(); 234 | } 235 | 236 | void consume_request_queue(CURLM *curl_mhandle, int batch_size, MemoryContext curl_memctx){ 237 | StartTransactionCommand(); 238 | PushActiveSnapshot(GetTransactionSnapshot()); 239 | SPI_connect(); 240 | 241 | int ret_code = SPI_execute_with_args("\ 242 | WITH\ 243 | rows AS (\ 244 | SELECT id\ 245 | FROM net.http_request_queue\ 246 | ORDER BY id\ 247 | LIMIT $1\ 248 | )\ 249 | DELETE FROM net.http_request_queue q\ 250 | USING rows WHERE q.id = rows.id\ 251 | RETURNING q.id, q.method, q.url, timeout_milliseconds, array(select key || ': ' || value from jsonb_each_text(q.headers)), q.body", 252 | 1, 253 | (Oid[]){INT4OID}, 254 | (Datum[]){Int32GetDatum(batch_size)}, 255 | NULL, false, 0); 256 | 257 | if (ret_code != SPI_OK_DELETE_RETURNING) 258 | ereport(ERROR, errmsg("Error getting http request queue: %s", SPI_result_code_string(ret_code))); 259 | 260 | 261 | for (size_t j = 0; j < SPI_processed; j++) { 262 | bool tupIsNull = false; 263 | 264 | int64 id = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[j], SPI_tuptable->tupdesc, 1, &tupIsNull)); 265 | EREPORT_NULL_ATTR(tupIsNull, id); 266 | 267 | 
int32 timeout_milliseconds = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[j], SPI_tuptable->tupdesc, 4, &tupIsNull)); 268 | EREPORT_NULL_ATTR(tupIsNull, timeout_milliseconds); 269 | 270 | Datum method = SPI_getbinval(SPI_tuptable->vals[j], SPI_tuptable->tupdesc, 2, &tupIsNull); 271 | EREPORT_NULL_ATTR(tupIsNull, method); 272 | 273 | Datum url = SPI_getbinval(SPI_tuptable->vals[j], SPI_tuptable->tupdesc, 3, &tupIsNull); 274 | EREPORT_NULL_ATTR(tupIsNull, url); 275 | 276 | NullableDatum headersBin = { 277 | .value = SPI_getbinval(SPI_tuptable->vals[j], SPI_tuptable->tupdesc, 5, &tupIsNull), 278 | .isnull = tupIsNull 279 | }; 280 | 281 | NullableDatum bodyBin = { 282 | .value = SPI_getbinval(SPI_tuptable->vals[j], SPI_tuptable->tupdesc, 6, &tupIsNull), 283 | .isnull = tupIsNull 284 | }; 285 | 286 | init_curl_handle(curl_mhandle, curl_memctx, id, url, bodyBin, headersBin, method, timeout_milliseconds); 287 | } 288 | 289 | SPI_finish(); 290 | PopActiveSnapshot(); 291 | CommitTransactionCommand(); 292 | } 293 | 294 | static void pfree_curl_data(CurlData *cdata){ 295 | pfree(cdata->body->data); 296 | pfree(cdata->body); 297 | if(cdata->request_headers) //curl_slist_free_all already handles the NULL case, but be explicit about it 298 | curl_slist_free_all(cdata->request_headers); 299 | } 300 | 301 | static Jsonb *jsonb_headers_from_curl_handle(CURL *ez_handle){ 302 | struct curl_header *header, *prev = NULL; 303 | 304 | JsonbParseState *headers = NULL; 305 | (void)pushJsonbValue(&headers, WJB_BEGIN_OBJECT, NULL); 306 | 307 | while((header = curl_easy_nextheader(ez_handle, CURLH_HEADER, 0, prev))) { 308 | JsonbValue key = {.type = jbvString, .val = {.string = {.val = header->name, .len = strlen(header->name)}}}; 309 | JsonbValue value = {.type = jbvString, .val = {.string = {.val = header->value, .len = strlen(header->value)}}}; 310 | (void)pushJsonbValue(&headers, WJB_KEY, &key); 311 | (void)pushJsonbValue(&headers, WJB_VALUE, &value); 312 | prev = header; 313 | } 314 | 
315 | Jsonb *jsonb_headers = JsonbValueToJsonb(pushJsonbValue(&headers, WJB_END_OBJECT, NULL)); 316 | 317 | return jsonb_headers; 318 | } 319 | 320 | // Switch back to the curl memory context, which has the curl handles stored 321 | void insert_curl_responses(LoopState *lstate, MemoryContext curl_memctx){ 322 | MemoryContext old_ctx = MemoryContextSwitchTo(curl_memctx); 323 | int msgs_left=0; 324 | CURLMsg *msg = NULL; 325 | CURLM *curl_mhandle = lstate->curl_mhandle; 326 | 327 | while ((msg = curl_multi_info_read(curl_mhandle, &msgs_left))) { 328 | if (msg->msg == CURLMSG_DONE) { 329 | CURLcode return_code = msg->data.result; 330 | CURL *ez_handle= msg->easy_handle; 331 | CurlData *cdata = NULL; 332 | EREPORT_CURL_GETINFO(ez_handle, CURLINFO_PRIVATE, &cdata); 333 | 334 | if (return_code != CURLE_OK) { 335 | insert_failure_response(ez_handle, return_code, cdata->id, cdata->timeout_milliseconds); 336 | } else { 337 | char *contentType; 338 | EREPORT_CURL_GETINFO(ez_handle, CURLINFO_CONTENT_TYPE, &contentType); 339 | 340 | long http_status_code; 341 | EREPORT_CURL_GETINFO(ez_handle, CURLINFO_RESPONSE_CODE, &http_status_code); 342 | 343 | Jsonb *jsonb_headers = jsonb_headers_from_curl_handle(ez_handle); 344 | 345 | insert_success_response(cdata, http_status_code, contentType, jsonb_headers); 346 | 347 | pfree_curl_data(cdata); 348 | } 349 | 350 | int res = curl_multi_remove_handle(curl_mhandle, ez_handle); 351 | if(res != CURLM_OK) 352 | ereport(ERROR, errmsg("curl_multi_remove_handle: %s", curl_multi_strerror(res))); 353 | 354 | curl_easy_cleanup(ez_handle); 355 | } else { 356 | ereport(ERROR, errmsg("curl_multi_info_read(), CURLMsg=%d\n", msg->msg)); 357 | } 358 | } 359 | 360 | MemoryContextSwitchTo(old_ctx); 361 | } 362 | -------------------------------------------------------------------------------- /src/core.h: -------------------------------------------------------------------------------- 1 | #ifndef CORE_H 2 | #define CORE_H 3 | 4 | typedef struct { 5 | int 
epfd; 6 | CURLM *curl_mhandle; 7 | } LoopState; 8 | 9 | void delete_expired_responses(char *ttl, int batch_size); 10 | 11 | void consume_request_queue(CURLM *curl_mhandle, int batch_size, MemoryContext curl_memctx); 12 | 13 | void insert_curl_responses(LoopState *lstate, MemoryContext curl_memctx); 14 | 15 | void set_curl_mhandle(CURLM *curl_mhandle, LoopState *lstate); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /src/curl_prelude.h: -------------------------------------------------------------------------------- 1 | // pragmas needed to pass compiling with -Wextra 2 | #pragma GCC diagnostic push 3 | #pragma GCC diagnostic ignored "-Wunused-parameter" 4 | 5 | #include 6 | #include 7 | 8 | #pragma GCC diagnostic pop 9 | -------------------------------------------------------------------------------- /src/errors.c: -------------------------------------------------------------------------------- 1 | #include "pg_prelude.h" 2 | #include "curl_prelude.h" 3 | #include "errors.h" 4 | 5 | /* 6 | * Show a more detailed error message when a timeout happens, which includes the DNS, TCP/SSL handshake and HTTP request/response time. An example message is like: 7 | * 8 | * "Timeout of 800 ms reached. Total time: 801.159000 ms (DNS time: 73.407000 ms, TCP/SSL handshake time: 677.256000 ms, HTTP Request/Respose time: 50.103000 ms)" 9 | * 10 | * Curl allows to calculate the above by applying substractions on some internal timings. Refer to https://blog.cloudflare.com/a-question-of-timing/ for an explanation of these timings. 11 | * 12 | * There are extra considerations: 13 | * 14 | * - If a step (e.g. TCP handshake [CURLINFO_CONNECT_TIME]) surpasses the request timeout, its given timing is 0. 15 | * However the step duration can still be determined by using the total time (CURLINFO_TOTAL_TIME). 16 | * We want to show at which step the timeout occurred. 17 | * 18 | * - If a step is omitted its given timing is 0. 
This can happen on non-HTTPS requests with the SSL handshake time (CURLINFO_APPCONNECT_TIME). 19 | * 20 | * - The pretransfer time (CURLINFO_PRETRANSFER_TIME) is greater than 0 when the HTTP request step starts. 21 | */ 22 | curl_timeout_msg detailed_timeout_strerror(CURL *ez_handle, int32 timeout_milliseconds){ 23 | double namelookup; EREPORT_CURL_GETINFO(ez_handle, CURLINFO_NAMELOOKUP_TIME, &namelookup); 24 | double appconnect; EREPORT_CURL_GETINFO(ez_handle, CURLINFO_APPCONNECT_TIME, &appconnect); 25 | double connect; EREPORT_CURL_GETINFO(ez_handle, CURLINFO_CONNECT_TIME, &connect); 26 | double pretransfer; EREPORT_CURL_GETINFO(ez_handle, CURLINFO_PRETRANSFER_TIME, &pretransfer); 27 | double starttransfer; EREPORT_CURL_GETINFO(ez_handle, CURLINFO_STARTTRANSFER_TIME, &starttransfer); 28 | double total; EREPORT_CURL_GETINFO(ez_handle, CURLINFO_TOTAL_TIME, &total); 29 | 30 | elog(DEBUG2, "The curl timings are time_namelookup: %f, time_connect: %f, time_appconnect: %f, time_pretransfer: %f, time_starttransfer: %f, time_total: %f", 31 | namelookup, connect, appconnect, pretransfer, starttransfer, total); 32 | 33 | // Steps at which the request timed out 34 | bool timedout_at_dns = namelookup == 0 && connect == 0; // if DNS time is 0 and no TCP occurred, it timed out at the DNS step 35 | bool timedout_at_handshake = pretransfer == 0; // pretransfer determines if the HTTP step started, if 0 no HTTP ocurred and thus the timeout occurred at TCP or SSL handshake step 36 | bool timedout_at_http = pretransfer > 0; // The HTTP step did start and the timeout occurred here 37 | 38 | // Calculate the steps times 39 | double _dns_time = 40 | timedout_at_dns ? 41 | total: // get the total since namelookup will be 0 because of the timeout 42 | timedout_at_handshake ? 43 | namelookup: 44 | timedout_at_http ? 45 | namelookup: 46 | 0; 47 | 48 | double _handshake_time = 49 | timedout_at_dns ? 50 | 0: 51 | timedout_at_handshake ? 
52 | total - namelookup: // connect or appconnect will be 0 because of the timeout, get the total - DNS step time 53 | timedout_at_http ? 54 | (connect - namelookup) + // TCP handshake time 55 | (appconnect > 0 ? (appconnect - connect): 0): // SSL handshake time. Prevent a negative here which can happen when no SSL is involved (plain HTTP request) and appconnect is 0 56 | 0; 57 | 58 | double _http_time = 59 | timedout_at_dns ? 60 | 0: 61 | timedout_at_handshake ? 62 | 0: 63 | timedout_at_http ? 64 | total - pretransfer: 65 | 0; 66 | 67 | // convert seconds to milliseconds 68 | double dns_time_ms = _dns_time * 1000; 69 | double handshake_time_ms = _handshake_time * 1000; 70 | double http_time_ms = _http_time * 1000; 71 | double total_time_ms = total * 1000; 72 | 73 | // build the error message 74 | curl_timeout_msg result = {.msg = {}}; 75 | snprintf(result.msg, CURL_TIMEOUT_MSG_SIZE, 76 | "Timeout of %d ms reached. Total time: %f ms (DNS time: %f ms, TCP/SSL handshake time: %f ms, HTTP Request/Response time: %f ms)", 77 | timeout_milliseconds, total_time_ms, dns_time_ms, handshake_time_ms, http_time_ms 78 | ); 79 | return result; 80 | } 81 | -------------------------------------------------------------------------------- /src/errors.h: -------------------------------------------------------------------------------- 1 | #ifndef ERRORS_H 2 | #define ERRORS_H 3 | 4 | #define EREPORT_CURL_SETOPT(hdl, opt, prm) \ 5 | do { \ 6 | if (curl_easy_setopt(hdl, opt, prm) != CURLE_OK) \ 7 | ereport(ERROR, errmsg("Could not curl_easy_setopt(%s)", #opt)); \ 8 | } while (0) 9 | 10 | #define EREPORT_CURL_GETINFO(hdl, opt, prm) \ 11 | do { \ 12 | if (curl_easy_getinfo(hdl, opt, prm) != CURLE_OK) \ 13 | ereport(ERROR, errmsg("Could not curl_easy_getinfo(%s)", #opt)); \ 14 | } while (0) 15 | 16 | #define EREPORT_CURL_MULTI_SETOPT(hdl, opt, prm) \ 17 | do { \ 18 | if (curl_multi_setopt(hdl, opt, prm) != CURLM_OK) \ 19 | ereport(ERROR, errmsg("Could not curl_multi_setopt(%s)", #opt)); \ 
20 | } while (0) 21 | 22 | #define EREPORT_CURL_SLIST_APPEND(list, str) \ 23 | do { \ 24 | struct curl_slist *new_list = curl_slist_append(list, str); \ 25 | if (new_list == NULL) \ 26 | ereport(ERROR, errmsg("curl_slist_append returned NULL")); \ 27 | list = new_list; \ 28 | } while (0) 29 | 30 | #define EREPORT_NULL_ATTR(tupIsNull, attr) \ 31 | do { \ 32 | if (tupIsNull) \ 33 | ereport(ERROR, errmsg("%s cannot be null", #attr)); \ 34 | } while (0) 35 | 36 | #define EREPORT_MULTI(multi_call) \ 37 | do { \ 38 | CURLMcode code = multi_call; \ 39 | if (code != CURLM_OK) \ 40 | ereport(ERROR, errmsg("%s failed with %s", #multi_call, curl_multi_strerror(code))); \ 41 | } while (0) 42 | 43 | #define CURL_TIMEOUT_MSG_SIZE 256 44 | 45 | typedef struct { 46 | char msg[CURL_TIMEOUT_MSG_SIZE]; 47 | } curl_timeout_msg; 48 | 49 | curl_timeout_msg detailed_timeout_strerror(CURL *ez_handle, int32 timeout_milliseconds); 50 | 51 | #endif 52 | -------------------------------------------------------------------------------- /src/event.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "pg_prelude.h" 6 | #include "event.h" 7 | 8 | #ifdef WAIT_USE_EPOLL 9 | 10 | static int timerfd = 0; 11 | static bool timer_created = false; 12 | 13 | typedef struct epoll_event epoll_event; 14 | typedef struct itimerspec itimerspec; 15 | 16 | inline int wait_event(int fd, event *events, size_t maxevents, int wait_milliseconds){ 17 | return epoll_wait(fd, events, maxevents, /*timeout=*/wait_milliseconds); 18 | } 19 | 20 | inline int event_monitor(){ 21 | return epoll_create1(0); 22 | } 23 | 24 | void ev_monitor_close(LoopState *lstate){ 25 | close(lstate->epfd); 26 | close(timerfd); 27 | } 28 | 29 | int multi_timer_cb(__attribute__ ((unused)) CURLM *multi, long timeout_ms, LoopState *lstate) { 30 | elog(DEBUG2, "multi_timer_cb: Setting timeout to %ld ms\n", timeout_ms); 31 | 32 | if (!timer_created){ 33 | timerfd = 
timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC); 34 | if (timerfd < 0) { 35 | ereport(ERROR, errmsg("Failed to create timerfd")); 36 | } 37 | timerfd_settime(timerfd, 0, &(itimerspec){}, NULL); 38 | epoll_ctl(lstate->epfd, EPOLL_CTL_ADD, timerfd, &(epoll_event){.events = EPOLLIN, .data.fd = timerfd}); 39 | 40 | timer_created = true; 41 | } 42 | 43 | itimerspec its = 44 | timeout_ms > 0 ? 45 | // assign the timeout normally 46 | (itimerspec){ 47 | .it_value.tv_sec = timeout_ms / 1000, 48 | .it_value.tv_nsec = (timeout_ms % 1000) * 1000 * 1000, 49 | }: 50 | timeout_ms == 0 ? 51 | /* libcurl wants us to timeout now, however setting both fields of 52 | * new_value.it_value to zero disarms the timer. The closest we can 53 | * do is to schedule the timer to fire in 1 ns. */ 54 | (itimerspec){ 55 | .it_value.tv_sec = 0, 56 | .it_value.tv_nsec = 1, 57 | }: 58 | // libcurl passes a -1 to indicate the timer should be deleted 59 | (itimerspec){}; 60 | 61 | int no_flags = 0; 62 | if (timerfd_settime(timerfd, no_flags, &its, NULL) < 0) { 63 | ereport(ERROR, errmsg("timerfd_settime failed")); 64 | } 65 | 66 | return 0; 67 | } 68 | 69 | int multi_socket_cb(__attribute__ ((unused)) CURL *easy, curl_socket_t sockfd, int what, LoopState *lstate, void *socketp) { 70 | static char *whatstrs[] = { "NONE", "CURL_POLL_IN", "CURL_POLL_OUT", "CURL_POLL_INOUT", "CURL_POLL_REMOVE" }; 71 | elog(DEBUG2, "multi_socket_cb: sockfd %d received %s", sockfd, whatstrs[what]); 72 | 73 | int epoll_op; 74 | if(!socketp){ 75 | epoll_op = EPOLL_CTL_ADD; 76 | bool *socket_exists = palloc(sizeof(bool)); 77 | curl_multi_assign(lstate->curl_mhandle, sockfd, socket_exists); 78 | } else if (what == CURL_POLL_REMOVE){ 79 | epoll_op = EPOLL_CTL_DEL; 80 | pfree(socketp); 81 | curl_multi_assign(lstate->curl_mhandle, sockfd, NULL); 82 | } else { 83 | epoll_op = EPOLL_CTL_MOD; 84 | } 85 | 86 | epoll_event ev = { 87 | .data.fd = sockfd, 88 | .events = 89 | (what & CURL_POLL_IN) ? 
90 | EPOLLIN: 91 | (what & CURL_POLL_OUT) ? 92 | EPOLLOUT: 93 | 0, // no event is assigned since here we get CURL_POLL_REMOVE and the sockfd will be removed 94 | }; 95 | 96 | // epoll_ctl will copy ev, so there's no need to do palloc for the epoll_event 97 | // https://github.com/torvalds/linux/blob/e32cde8d2bd7d251a8f9b434143977ddf13dcec6/fs/eventpoll.c#L2408-L2418 98 | if (epoll_ctl(lstate->epfd, epoll_op, sockfd, &ev) < 0) { 99 | int e = errno; 100 | static char *opstrs[] = { "NONE", "EPOLL_CTL_ADD", "EPOLL_CTL_DEL", "EPOLL_CTL_MOD" }; 101 | ereport(ERROR, errmsg("epoll_ctl with %s failed when receiving %s for sockfd %d: %s", whatstrs[what], opstrs[epoll_op], sockfd, strerror(e))); 102 | } 103 | 104 | return 0; 105 | } 106 | 107 | bool is_timer(event ev){ 108 | return ev.data.fd == timerfd; 109 | } 110 | 111 | int get_curl_event(event ev){ 112 | int ev_bitmask = 113 | ev.events & EPOLLIN ? CURL_CSELECT_IN: 114 | ev.events & EPOLLOUT ? CURL_CSELECT_OUT: 115 | CURL_CSELECT_ERR; 116 | return ev_bitmask; 117 | } 118 | 119 | int get_socket_fd(event ev){ 120 | return ev.data.fd; 121 | } 122 | 123 | #else 124 | 125 | typedef struct { 126 | curl_socket_t sockfd; 127 | int action; 128 | } SocketInfo ; 129 | 130 | int inline wait_event(int fd, event *events, size_t maxevents, int wait_milliseconds){ 131 | return kevent(fd, NULL, 0, events, maxevents, &(struct timespec){.tv_sec = wait_milliseconds/1000}); 132 | } 133 | 134 | int inline event_monitor(){ 135 | return kqueue(); 136 | } 137 | 138 | void ev_monitor_close(LoopState *lstate){ 139 | close(lstate->epfd); 140 | } 141 | 142 | int multi_timer_cb(__attribute__ ((unused)) CURLM *multi, long timeout_ms, LoopState *lstate) { 143 | elog(DEBUG2, "multi_timer_cb: Setting timeout to %ld ms\n", timeout_ms); 144 | event timer_event; 145 | int id = 1; 146 | 147 | if (timeout_ms > 0) { 148 | EV_SET(&timer_event, id, EVFILT_TIMER, EV_ADD, 0, timeout_ms, NULL); //0 means milliseconds (the default) 149 | } else if (timeout_ms == 0){ 
150 | /* libcurl wants us to timeout now, however setting both fields of 151 | * new_value.it_value to zero disarms the timer. The closest we can 152 | * do is to schedule the timer to fire in 1 ns. */ 153 | EV_SET(&timer_event, id, EVFILT_TIMER, EV_ADD, NOTE_NSECONDS, 1, NULL); 154 | } else { 155 | // libcurl passes a -1 to indicate the timer should be deleted 156 | EV_SET(&timer_event, id, EVFILT_TIMER, EV_DELETE, 0, 0, NULL); 157 | } 158 | 159 | if (kevent(lstate->epfd, &timer_event, 1, NULL, 0, NULL) < 0) { 160 | int save_errno = errno; 161 | ereport(ERROR, errmsg("kevent with EVFILT_TIMER failed: %s", strerror(save_errno))); 162 | } 163 | 164 | return 0; 165 | } 166 | 167 | int multi_socket_cb(__attribute__ ((unused)) CURL *easy, curl_socket_t sockfd, int what, LoopState *lstate, void *socketp) { 168 | static char *whatstrs[] = { "NONE", "CURL_POLL_IN", "CURL_POLL_OUT", "CURL_POLL_INOUT", "CURL_POLL_REMOVE" }; 169 | elog(DEBUG2, "multi_socket_cb: sockfd %d received %s", sockfd, whatstrs[what]); 170 | 171 | SocketInfo *sock_info = (SocketInfo *)socketp; 172 | struct kevent ev[2]; 173 | int count = 0; 174 | 175 | if (what == CURL_POLL_REMOVE) { 176 | if (sock_info->action & CURL_POLL_IN) 177 | EV_SET(&ev[count++], sockfd, EVFILT_READ, EV_DELETE, 0, 0, sock_info); 178 | 179 | if (sock_info->action & CURL_POLL_OUT) 180 | EV_SET(&ev[count++], sockfd, EVFILT_WRITE, EV_DELETE, 0, 0, sock_info); 181 | 182 | curl_multi_assign(lstate->curl_mhandle, sockfd, NULL); 183 | pfree(sock_info); 184 | } else { 185 | if (!sock_info) { 186 | sock_info = palloc(sizeof(SocketInfo)); 187 | sock_info->sockfd = sockfd; 188 | sock_info->action = what; 189 | curl_multi_assign(lstate->curl_mhandle, sockfd, sock_info); 190 | } 191 | 192 | if (what & CURL_POLL_IN) 193 | EV_SET(&ev[count++], sockfd, EVFILT_READ, EV_ADD, 0, 0, sock_info); 194 | 195 | if (what & CURL_POLL_OUT) 196 | EV_SET(&ev[count++], sockfd, EVFILT_WRITE, EV_ADD, 0, 0, sock_info); 197 | } 198 | 199 | Assert(count <= 2); 200 
| 201 | if (kevent(lstate->epfd, &ev[0], count, NULL, 0, NULL) < 0) { 202 | int save_errno = errno; 203 | ereport(ERROR, errmsg("kevent with %s failed for sockfd %d: %s", whatstrs[what], sockfd, strerror(save_errno))); 204 | } 205 | 206 | return 0; 207 | } 208 | 209 | bool is_timer(event ev){ 210 | return ev.filter == EVFILT_TIMER; 211 | } 212 | 213 | int get_curl_event(event ev){ 214 | int ev_bitmask = 0; 215 | if (ev.filter == EVFILT_READ) 216 | ev_bitmask |= CURL_CSELECT_IN; 217 | else if (ev.filter == EVFILT_WRITE) 218 | ev_bitmask |= CURL_CSELECT_OUT; 219 | else 220 | ev_bitmask = CURL_CSELECT_ERR; 221 | 222 | return ev_bitmask; 223 | } 224 | 225 | int get_socket_fd(event ev){ 226 | SocketInfo *sock_info = (SocketInfo *) ev.udata; 227 | 228 | return sock_info->sockfd; 229 | } 230 | 231 | #endif 232 | -------------------------------------------------------------------------------- /src/event.h: -------------------------------------------------------------------------------- 1 | #ifndef EVENT_H 2 | #define EVENT_H 3 | 4 | #include 5 | 6 | #include "core.h" 7 | 8 | #ifdef __linux__ 9 | #define WAIT_USE_EPOLL 10 | #elif defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) 11 | #define WAIT_USE_KQUEUE 12 | #else 13 | #error "no event loop implementation available" 14 | #endif 15 | 16 | #ifdef WAIT_USE_EPOLL 17 | 18 | #include 19 | #include 20 | typedef struct epoll_event event; 21 | 22 | #else 23 | 24 | #include 25 | typedef struct kevent event; 26 | 27 | #endif 28 | 29 | int wait_event(int fd, event *events, size_t maxevents, int wait_milliseconds); 30 | int event_monitor(void); 31 | void ev_monitor_close(LoopState *lstate); 32 | int multi_timer_cb(CURLM *multi, long timeout_ms, LoopState *lstate); 33 | int multi_socket_cb(CURL *easy, curl_socket_t sockfd, int what, LoopState *lstate, void *socketp); 34 | bool is_timer(event ev); 35 | int get_curl_event(event ev); 36 | int get_socket_fd(event ev); 37 | 38 | #endif 39 | 
-------------------------------------------------------------------------------- /src/pg_prelude.h: -------------------------------------------------------------------------------- 1 | // pragmas needed to pass compiling with -Wextra 2 | #pragma GCC diagnostic push 3 | #pragma GCC diagnostic ignored "-Wunused-parameter" 4 | #pragma GCC diagnostic ignored "-Wsign-compare" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | 42 | #pragma GCC diagnostic pop 43 | -------------------------------------------------------------------------------- /src/util.c: -------------------------------------------------------------------------------- 1 | #include "pg_prelude.h" 2 | #include "curl_prelude.h" 3 | #include "util.h" 4 | 5 | PG_FUNCTION_INFO_V1(_urlencode_string); 6 | PG_FUNCTION_INFO_V1(_encode_url_with_params_array); 7 | 8 | Datum _urlencode_string(PG_FUNCTION_ARGS) { 9 | if(PG_GETARG_POINTER(0) == NULL) 10 | PG_RETURN_NULL(); 11 | 12 | char *str = text_to_cstring(PG_GETARG_TEXT_P(0)); 13 | char *urlencoded_str = NULL; 14 | 15 | urlencoded_str = curl_escape(str, strlen(str)); 16 | 17 | pfree(str); 18 | 19 | PG_RETURN_TEXT_P(cstring_to_text(urlencoded_str)); 20 | } 21 | 22 | Datum _encode_url_with_params_array(PG_FUNCTION_ARGS) { 23 | if(PG_GETARG_POINTER(0) == NULL || PG_GETARG_POINTER(1) == NULL) 24 | PG_RETURN_NULL(); 25 | 26 | char *url = text_to_cstring(PG_GETARG_TEXT_P(0)); 27 | ArrayType *params = PG_GETARG_ARRAYTYPE_P(1); 28 | 29 | char *full_url = NULL; 30 | 31 | ArrayIterator iterator; 32 | Datum value; 33 | bool isnull; 34 | 
char *param; 35 | 36 | CURLU *h = curl_url(); 37 | CURLUcode rc = curl_url_set(h, CURLUPART_URL, url, 0); 38 | if (rc != CURLUE_OK) { 39 | // TODO: Use curl_url_strerror once released. 40 | elog(ERROR, "%s", curl_easy_strerror((CURLcode)rc)); 41 | } 42 | 43 | iterator = array_create_iterator(params, 0, NULL); 44 | while (array_iterate(iterator, &value, &isnull)) { 45 | if (isnull) 46 | continue; 47 | 48 | param = TextDatumGetCString(value); 49 | rc = curl_url_set(h, CURLUPART_QUERY, param, CURLU_APPENDQUERY); 50 | if (rc != CURLUE_OK) { 51 | elog(ERROR, "curl_url returned: %d", rc); 52 | } 53 | pfree(param); 54 | } 55 | array_free_iterator(iterator); 56 | 57 | rc = curl_url_get(h, CURLUPART_URL, &full_url, 0); 58 | if (rc != CURLUE_OK) { 59 | elog(ERROR, "curl_url returned: %d", rc); 60 | } 61 | 62 | pfree(url); 63 | curl_url_cleanup(h); 64 | 65 | PG_RETURN_TEXT_P(cstring_to_text(full_url)); 66 | } 67 | 68 | -------------------------------------------------------------------------------- /src/util.h: -------------------------------------------------------------------------------- 1 | #ifndef UTIL_H 2 | #define UTIL_H 3 | 4 | #endif 5 | -------------------------------------------------------------------------------- /src/worker.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "pg_prelude.h" 7 | #include "curl_prelude.h" 8 | #include "util.h" 9 | #include "errors.h" 10 | #include "core.h" 11 | #include "event.h" 12 | 13 | #define MIN_LIBCURL_VERSION_NUM 0x075300 // This is the 7.83.0 version in hex as defined in curl/curlver.h 14 | #define REQUIRED_LIBCURL_ERR_MSG "libcurl >= 7.83.0 is required, we use the curl_easy_nextheader() function added in this version" 15 | _Static_assert(LIBCURL_VERSION_NUM, REQUIRED_LIBCURL_ERR_MSG); // test for older libcurl versions that don't even have LIBCURL_VERSION_NUM defined (e.g. libcurl 6.5). 
16 | _Static_assert(LIBCURL_VERSION_NUM >= MIN_LIBCURL_VERSION_NUM, REQUIRED_LIBCURL_ERR_MSG); 17 | 18 | PG_MODULE_MAGIC; 19 | 20 | static char* guc_ttl; 21 | static int guc_batch_size; 22 | static char* guc_database_name; 23 | static char* guc_username; 24 | static MemoryContext CurlMemContext = NULL; 25 | static shmem_startup_hook_type prev_shmem_startup_hook = NULL; 26 | static long latch_timeout = 1000; 27 | static volatile sig_atomic_t got_sigterm = false; 28 | static volatile sig_atomic_t got_sighup = false; 29 | static bool* restart_worker = NULL; 30 | 31 | void _PG_init(void); 32 | PGDLLEXPORT void pg_net_worker(Datum main_arg) pg_attribute_noreturn(); 33 | 34 | PG_FUNCTION_INFO_V1(worker_restart); 35 | Datum worker_restart(__attribute__ ((unused)) PG_FUNCTION_ARGS) { 36 | bool result = DatumGetBool(DirectFunctionCall1(pg_reload_conf, (Datum) NULL)); // reload the config 37 | *restart_worker = true; 38 | PG_RETURN_BOOL(result && *restart_worker); // TODO is not necessary to return a bool here, but we do it to maintain backward compatibility 39 | } 40 | 41 | static void 42 | handle_sigterm(__attribute__ ((unused)) SIGNAL_ARGS) 43 | { 44 | int save_errno = errno; 45 | got_sigterm = true; 46 | if (MyProc) 47 | SetLatch(&MyProc->procLatch); 48 | errno = save_errno; 49 | } 50 | 51 | static void 52 | handle_sighup(__attribute__ ((unused)) SIGNAL_ARGS) 53 | { 54 | int save_errno = errno; 55 | got_sighup = true; 56 | if (MyProc) 57 | SetLatch(&MyProc->procLatch); 58 | errno = save_errno; 59 | } 60 | 61 | static bool is_extension_loaded(){ 62 | Oid extensionOid; 63 | 64 | StartTransactionCommand(); 65 | 66 | extensionOid = get_extension_oid("pg_net", true); 67 | 68 | CommitTransactionCommand(); 69 | 70 | return OidIsValid(extensionOid); 71 | } 72 | 73 | void pg_net_worker(__attribute__ ((unused)) Datum main_arg) { 74 | pqsignal(SIGTERM, handle_sigterm); 75 | pqsignal(SIGHUP, handle_sighup); 76 | pqsignal(SIGUSR1, procsignal_sigusr1_handler); 77 | 78 | 
BackgroundWorkerUnblockSignals(); 79 | 80 | BackgroundWorkerInitializeConnection(guc_database_name, guc_username, 0); 81 | pgstat_report_appname("pg_net " EXTVERSION); // set appname for pg_stat_activity 82 | 83 | elog(INFO, "pg_net_worker started with a config of: pg_net.ttl=%s, pg_net.batch_size=%d, pg_net.username=%s, pg_net.database_name=%s", guc_ttl, guc_batch_size, guc_username, guc_database_name); 84 | 85 | int curl_ret = curl_global_init(CURL_GLOBAL_ALL); 86 | if(curl_ret != CURLE_OK) 87 | ereport(ERROR, errmsg("curl_global_init() returned %s\n", curl_easy_strerror(curl_ret))); 88 | 89 | LoopState lstate = { 90 | .epfd = event_monitor(), 91 | .curl_mhandle = curl_multi_init(), 92 | }; 93 | 94 | if (lstate.epfd < 0) { 95 | ereport(ERROR, errmsg("Failed to create event monitor file descriptor")); 96 | } 97 | 98 | if(!lstate.curl_mhandle) 99 | ereport(ERROR, errmsg("curl_multi_init()")); 100 | 101 | set_curl_mhandle(lstate.curl_mhandle, &lstate); 102 | 103 | while (!got_sigterm) { 104 | WaitLatch(&MyProc->procLatch, 105 | WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH, 106 | latch_timeout, 107 | PG_WAIT_EXTENSION); 108 | ResetLatch(&MyProc->procLatch); 109 | 110 | CHECK_FOR_INTERRUPTS(); 111 | 112 | if(!is_extension_loaded()){ 113 | elog(DEBUG2, "pg_net_worker: extension not yet loaded"); 114 | continue; 115 | } 116 | 117 | if (got_sighup) { 118 | got_sighup = false; 119 | ProcessConfigFile(PGC_SIGHUP); 120 | } 121 | 122 | if (restart_worker && *restart_worker) { 123 | *restart_worker = false; 124 | elog(INFO, "Restarting pg_net worker"); 125 | break; 126 | } 127 | 128 | delete_expired_responses(guc_ttl, guc_batch_size); 129 | 130 | consume_request_queue(lstate.curl_mhandle, guc_batch_size, CurlMemContext); 131 | 132 | int running_handles = 0; 133 | int maxevents = guc_batch_size + 1; // 1 extra for the timer 134 | event *events = palloc0(sizeof(event) * maxevents); 135 | 136 | do { 137 | int nfds = wait_event(lstate.epfd, events, maxevents, 1000); 138 | if 
(nfds < 0) { 139 | int save_errno = errno; 140 | if(save_errno == EINTR) { // can happen when the wait is interrupted, for example when running under GDB. Just continue in this case. 141 | continue; 142 | } 143 | else { 144 | ereport(ERROR, errmsg("wait_event() failed: %s", strerror(save_errno))); 145 | break; 146 | } 147 | } 148 | 149 | for (int i = 0; i < nfds; i++) { 150 | if (is_timer(events[i])) { 151 | EREPORT_MULTI( 152 | curl_multi_socket_action(lstate.curl_mhandle, CURL_SOCKET_TIMEOUT, 0, &running_handles) 153 | ); 154 | } else { 155 | int curl_event = get_curl_event(events[i]); 156 | int sockfd = get_socket_fd(events[i]); 157 | 158 | EREPORT_MULTI( 159 | curl_multi_socket_action( 160 | lstate.curl_mhandle, 161 | sockfd, 162 | curl_event, 163 | &running_handles) 164 | ); 165 | } 166 | 167 | insert_curl_responses(&lstate, CurlMemContext); 168 | } 169 | 170 | } while (running_handles > 0); // run again while there are curl handles, this will prevent waiting for the latch_timeout (which will cause the cause the curl timeouts to be wrong) 171 | 172 | pfree(events); 173 | 174 | MemoryContextReset(CurlMemContext); 175 | } 176 | 177 | ev_monitor_close(&lstate); 178 | 179 | curl_multi_cleanup(lstate.curl_mhandle); 180 | curl_global_cleanup(); 181 | 182 | // causing a failure on exit will make the postmaster process restart the bg worker 183 | proc_exit(EXIT_FAILURE); 184 | } 185 | 186 | static void net_shmem_startup(void) { 187 | if (prev_shmem_startup_hook) 188 | prev_shmem_startup_hook(); 189 | 190 | restart_worker = ShmemAlloc(sizeof(bool)); 191 | *restart_worker = false; 192 | } 193 | 194 | void _PG_init(void) { 195 | if (IsBinaryUpgrade) { 196 | return; 197 | } 198 | 199 | if (!process_shared_preload_libraries_in_progress) { 200 | ereport(ERROR, errmsg("pg_net is not in shared_preload_libraries"), 201 | errhint("Add pg_net to the shared_preload_libraries " 202 | "configuration variable in postgresql.conf.")); 203 | } 204 | 205 | 
RegisterBackgroundWorker(&(BackgroundWorker){ 206 | .bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION, 207 | .bgw_start_time = BgWorkerStart_RecoveryFinished, 208 | .bgw_library_name = "pg_net", 209 | .bgw_function_name = "pg_net_worker", 210 | .bgw_name = "pg_net " EXTVERSION " worker", 211 | .bgw_restart_time = 1, 212 | }); 213 | 214 | prev_shmem_startup_hook = shmem_startup_hook; 215 | shmem_startup_hook = net_shmem_startup; 216 | 217 | CurlMemContext = AllocSetContextCreate(TopMemoryContext, 218 | "pg_net curl context", 219 | ALLOCSET_DEFAULT_MINSIZE, 220 | ALLOCSET_DEFAULT_INITSIZE, 221 | ALLOCSET_DEFAULT_MAXSIZE); 222 | 223 | DefineCustomStringVariable("pg_net.ttl", 224 | "time to live for request/response rows", 225 | "should be a valid interval type", 226 | &guc_ttl, 227 | "6 hours", 228 | PGC_SUSET, 0, 229 | NULL, NULL, NULL); 230 | 231 | DefineCustomIntVariable("pg_net.batch_size", 232 | "number of requests executed in one iteration of the background worker", 233 | NULL, 234 | &guc_batch_size, 235 | 200, 236 | 0, PG_INT16_MAX, 237 | PGC_SUSET, 0, 238 | NULL, NULL, NULL); 239 | 240 | DefineCustomStringVariable("pg_net.database_name", 241 | "Database where the worker will connect to", 242 | NULL, 243 | &guc_database_name, 244 | "postgres", 245 | PGC_SIGHUP, 0, 246 | NULL, NULL, NULL); 247 | 248 | DefineCustomStringVariable("pg_net.username", 249 | "Connection user for the worker", 250 | NULL, 251 | &guc_username, 252 | NULL, 253 | PGC_SIGHUP, 0, 254 | NULL, NULL, NULL); 255 | } 256 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from sqlalchemy import create_engine 3 | from sqlalchemy.orm import Session 4 | from sqlalchemy import text 5 | 6 | @pytest.fixture(scope="function") 7 | def engine(): 8 | engine = create_engine("postgresql:///postgres") 9 | yield engine 10 | 
engine.dispose() 11 | 12 | 13 | @pytest.fixture(scope="function") 14 | def sess(engine): 15 | 16 | session = Session(engine) 17 | 18 | # Reset sequences and tables between tests 19 | session.execute(text( 20 | """ 21 | create extension if not exists pg_net; 22 | """ 23 | )) 24 | session.commit() 25 | 26 | yield session 27 | 28 | session.rollback() 29 | 30 | session.execute(text( 31 | """ 32 | drop extension pg_net cascade; 33 | create extension if not exists pg_net; 34 | """ 35 | )) 36 | session.commit() 37 | -------------------------------------------------------------------------------- /test/init.conf: -------------------------------------------------------------------------------- 1 | shared_preload_libraries='pg_net' 2 | log_min_messages=INFO 3 | -------------------------------------------------------------------------------- /test/init.sql: -------------------------------------------------------------------------------- 1 | create database pre_existing; 2 | create role pre_existing nosuperuser login; 3 | create extension pg_net; 4 | \ir ../nix/bench.sql 5 | -------------------------------------------------------------------------------- /test/test_engine.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import text 2 | 3 | def test_connect(sess): 4 | (x,) = sess.execute(text("select 1")).fetchone() 5 | assert x == 1 6 | -------------------------------------------------------------------------------- /test/test_http_delete.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import text 2 | 3 | def test_http_delete_returns_id(sess): 4 | """net.http_delete returns a bigint id""" 5 | 6 | (request_id,) = sess.execute(text( 7 | """ 8 | select net.http_get( 9 | url:='http://localhost:8080/delete' 10 | ); 11 | """ 12 | )).fetchone() 13 | 14 | assert request_id == 1 15 | 16 | 17 | def test_http_delete_collect_sync_success(sess): 18 | """test net.http_delete 
works""" 19 | 20 | # Create a request 21 | (request_id,) = sess.execute(text( 22 | """ 23 | select net.http_delete( 24 | url:='http://localhost:8080/delete' 25 | , params:= '{"param-foo": "bar"}' 26 | , headers:= '{"X-Baz": "foo"}' 27 | ); 28 | """ 29 | )).fetchone() 30 | 31 | # Commit so background worker can start 32 | sess.commit() 33 | 34 | # Collect the response, waiting as needed 35 | response = sess.execute( 36 | text( 37 | """ 38 | select * from net._http_collect_response(:request_id, async:=false); 39 | """ 40 | ), 41 | {"request_id": request_id}, 42 | ).fetchone() 43 | 44 | assert response is not None 45 | assert response[0] == "SUCCESS" 46 | assert response[1] == "ok" 47 | assert response[2] is not None 48 | assert "X-Baz" in response[2] 49 | assert "param-foo" in response[2] 50 | 51 | 52 | def test_http_delete_positional_args(sess): 53 | """test net.http_delete works with positional arguments. This to ensure backwards compat when a new parameter is added to the function.""" 54 | 55 | (request_id,) = sess.execute(text( 56 | """ 57 | select net.http_delete( 58 | 'http://localhost:8080/delete' 59 | ); 60 | """ 61 | )).fetchone() 62 | 63 | # Commit so background worker can start 64 | sess.commit() 65 | 66 | # Collect the response, waiting as needed 67 | response = sess.execute( 68 | text( 69 | """ 70 | select * from net._http_collect_response(:request_id, async:=false); 71 | """ 72 | ), 73 | {"request_id": request_id}, 74 | ).fetchone() 75 | 76 | assert response is not None 77 | assert response[0] == "SUCCESS" 78 | assert response[1] == "ok" 79 | 80 | 81 | (request_id,) = sess.execute(text( 82 | """ 83 | select net.http_delete( 84 | 'http://localhost:8080/delete', 85 | '{"param-foo": "bar"}' 86 | ); 87 | """ 88 | )).fetchone() 89 | 90 | # Commit so background worker can start 91 | sess.commit() 92 | 93 | # Collect the response, waiting as needed 94 | response = sess.execute( 95 | text( 96 | """ 97 | select * from net._http_collect_response(:request_id, 
async:=false); 98 | """ 99 | ), 100 | {"request_id": request_id}, 101 | ).fetchone() 102 | 103 | assert response is not None 104 | assert response[0] == "SUCCESS" 105 | assert response[1] == "ok" 106 | 107 | 108 | (request_id,) = sess.execute(text( 109 | """ 110 | select net.http_delete( 111 | 'http://localhost:8080/delete', 112 | '{"param-foo": "bar"}', 113 | '{"X-Baz": "foo"}' 114 | ); 115 | """ 116 | )).fetchone() 117 | 118 | # Commit so background worker can start 119 | sess.commit() 120 | 121 | # Collect the response, waiting as needed 122 | response = sess.execute( 123 | text( 124 | """ 125 | select * from net._http_collect_response(:request_id, async:=false); 126 | """ 127 | ), 128 | {"request_id": request_id}, 129 | ).fetchone() 130 | 131 | assert response is not None 132 | assert response[0] == "SUCCESS" 133 | assert response[1] == "ok" 134 | 135 | 136 | (request_id,) = sess.execute(text( 137 | """ 138 | select net.http_delete( 139 | 'http://localhost:8080/delete', 140 | '{"param-foo": "bar"}', 141 | '{"X-Baz": "foo"}', 142 | 5000 143 | ); 144 | """ 145 | )).fetchone() 146 | 147 | # Commit so background worker can start 148 | sess.commit() 149 | 150 | # Collect the response, waiting as needed 151 | response = sess.execute( 152 | text( 153 | """ 154 | select * from net._http_collect_response(:request_id, async:=false); 155 | """ 156 | ), 157 | {"request_id": request_id}, 158 | ).fetchone() 159 | 160 | assert response is not None 161 | assert response[0] == "SUCCESS" 162 | assert response[1] == "ok" 163 | 164 | 165 | def test_http_delete_with_body(sess): 166 | """delete with request body works""" 167 | 168 | # Create a request 169 | (request_id,) = sess.execute(text( 170 | """ 171 | select net.http_delete( 172 | url :='http://localhost:8080/delete_w_body' 173 | , body := '{"key": "val"}' 174 | ); 175 | """ 176 | )).fetchone() 177 | 178 | # Commit so background worker can start 179 | sess.commit() 180 | 181 | # Collect the response, waiting as needed 182 | 
(response_json,) = sess.execute( 183 | text( 184 | """ 185 | select 186 | ((x.response).body)::jsonb body_json 187 | from 188 | net._http_collect_response(:request_id, async:=false) x; 189 | """ 190 | ), 191 | {"request_id": request_id}, 192 | ).fetchone() 193 | 194 | assert response_json["key"] == "val" 195 | -------------------------------------------------------------------------------- /test/test_http_errors.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pytest 4 | from sqlalchemy import text 5 | 6 | wrong_port = 6666 7 | 8 | def test_get_bad_url(sess): 9 | """net.http_get returns a descriptive errors for bad urls""" 10 | 11 | with pytest.raises(Exception) as execinfo: 12 | res = sess.execute(text( 13 | f""" 14 | select net.http_get('localhost:{wrong_port}'); 15 | """ 16 | )) 17 | assert r"Couldn\'t resolve proxy name" in str(execinfo) 18 | 19 | 20 | def test_bad_post(sess): 21 | """net.http_post with an empty url + body returns an error""" 22 | 23 | with pytest.raises(Exception) as execinfo: 24 | res = sess.execute(text( 25 | """ 26 | select net.http_post(null, '{"hello": "world"}'); 27 | """ 28 | )) 29 | assert 'null value in column "url"' in str(execinfo) 30 | 31 | 32 | def test_bad_get(sess): 33 | """net.http_get with an empty url + body returns an error""" 34 | 35 | with pytest.raises(Exception) as execinfo: 36 | res = sess.execute(text( 37 | """ 38 | select net.http_get(null); 39 | """ 40 | )) 41 | assert 'null value in column "url"' in str(execinfo) 42 | 43 | 44 | def test_bad_delete(sess): 45 | """net.http_delete with an empty url + body returns an error""" 46 | 47 | with pytest.raises(Exception) as execinfo: 48 | res = sess.execute(text( 49 | """ 50 | select net.http_delete(null); 51 | """ 52 | )) 53 | assert 'null value in column "url"' in str(execinfo) 54 | 55 | 56 | def test_bad_utils(sess): 57 | """util functions of pg_net return null""" 58 | 59 | res = sess.execute(text( 60 | """ 
61 | select net._encode_url_with_params_array(null, null); 62 | """ 63 | )).scalar_one() 64 | 65 | assert res is None 66 | 67 | res = sess.execute(text( 68 | """ 69 | select net._urlencode_string(null); 70 | """ 71 | )).scalar_one() 72 | 73 | assert res is None 74 | 75 | 76 | def test_it_keeps_working_after_many_connection_refused(sess): 77 | """the worker doesn't crash on many failed responses with connection refused""" 78 | 79 | res = sess.execute(text( 80 | f""" 81 | select net.http_get('http://localhost:{wrong_port}') from generate_series(1,10); 82 | """ 83 | )) 84 | sess.commit() 85 | 86 | time.sleep(2) 87 | 88 | (error_msg,count) = sess.execute(text( 89 | """ 90 | select error_msg, count(*) from net._http_response where status_code is null group by error_msg; 91 | """ 92 | )).fetchone() 93 | 94 | assert error_msg == "Couldn't connect to server" 95 | assert count == 10 96 | 97 | (request_id,) = sess.execute(text( 98 | """ 99 | select net.http_get('http://localhost:8080/pathological?status=200'); 100 | """ 101 | )).fetchone() 102 | 103 | sess.commit() 104 | 105 | response = sess.execute( 106 | text( 107 | """ 108 | select * from net._http_collect_response(:request_id, async:=false); 109 | """ 110 | ), 111 | {"request_id": request_id}, 112 | ).fetchone() 113 | 114 | assert response[0] == "SUCCESS" 115 | assert response[1] == "ok" 116 | assert response[2].startswith("(200") 117 | 118 | def test_it_keeps_working_after_server_returns_nothing(sess): 119 | """the worker doesn't crash on many failed responses with server returned nothing""" 120 | 121 | sess.execute(text( 122 | """ 123 | select net.http_get('http://localhost:8080/pathological?disconnect=true') from generate_series(1,10); 124 | """ 125 | )) 126 | sess.commit() 127 | 128 | time.sleep(1.5) 129 | 130 | (error_msg,count) = sess.execute(text( 131 | """ 132 | select error_msg, count(*) from net._http_response where status_code is null group by error_msg; 133 | """ 134 | )).fetchone() 135 | 136 | assert 
error_msg == "Server returned nothing (no headers, no data)" 137 | assert count == 10 138 | 139 | sess.execute(text( 140 | """ 141 | select net.http_get('http://localhost:8080/pathological?status=200') from generate_series(1,10); 142 | """ 143 | )).fetchone() 144 | 145 | sess.commit() 146 | 147 | time.sleep(1.5) 148 | 149 | (status_code,count) = sess.execute(text( 150 | """ 151 | select status_code, count(*) from net._http_response where status_code = 200 group by status_code; 152 | """ 153 | )).fetchone() 154 | 155 | assert status_code == 200 156 | assert count == 10 157 | -------------------------------------------------------------------------------- /test/test_http_get_collect.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import text 2 | import time 3 | 4 | 5 | def test_http_get_returns_id(sess): 6 | """net.http_get returns a bigint id""" 7 | 8 | (request_id,) = sess.execute(text( 9 | """ 10 | select net.http_get('http://localhost:8080'); 11 | """ 12 | )).fetchone() 13 | 14 | assert request_id == 1 15 | 16 | 17 | def test_http_get_collect_sync_success(sess): 18 | """Collect a response, waiting if it has not completed yet""" 19 | 20 | # Create a request 21 | (request_id,) = sess.execute(text( 22 | """ 23 | select net.http_get('http://localhost:8080'); 24 | """ 25 | )).fetchone() 26 | 27 | # Commit so background worker can start 28 | sess.commit() 29 | 30 | # Collect the response, waiting as needed 31 | response = sess.execute(text( 32 | """ 33 | select * from net._http_collect_response(:request_id, async:=false); 34 | """ 35 | ), 36 | {"request_id": request_id}, 37 | ).fetchone() 38 | 39 | assert response is not None 40 | assert response[0] == "SUCCESS" 41 | assert response[1] == "ok" 42 | assert response[2] is not None 43 | # psycopg2 does not deserialize nested composites 44 | assert response[2].startswith("(200") 45 | 46 | 47 | # def test_http_get_collect_async_pending(sess): 48 | # """Collect a 
response async before completed""" 49 | 50 | # # Create a request 51 | # (request_id,) = sess.execute( 52 | # """ 53 | # select net.http_get('https://news.ycombinator.com'); 54 | # """ 55 | # ).fetchone() 56 | 57 | # # Commit so background worker can start 58 | # sess.commit() 59 | 60 | # # Collect the response, waiting as needed 61 | # response = sess.execute( 62 | # text( 63 | # """ 64 | # select * from net._http_collect_response(:request_id, async:=true); 65 | # """ 66 | # ), 67 | # {"request_id": request_id}, 68 | # ).fetchone() 69 | 70 | # assert response is not None 71 | # assert response[0] == "PENDING" 72 | # assert "pending" in response[1] 73 | # assert response[2] is None 74 | 75 | 76 | def test_http_collect_response_async_does_not_exist(sess): 77 | """Collect a non-existent response with async true""" 78 | 79 | # Collect the response, waiting as needed 80 | response = sess.execute(text( 81 | """ 82 | select * from net._http_collect_response(1, async:=true); 83 | """ 84 | )).fetchone() 85 | 86 | assert response[0] == "ERROR" 87 | assert "not found" in response[1] 88 | assert response[2] is None 89 | 90 | def test_http_get_responses_have_different_created_times(sess): 91 | """Ensure the rows in net._http_response have different created times""" 92 | 93 | sess.execute(text( 94 | """ 95 | select net.http_get('http://localhost:8080/echo-method') 96 | """ 97 | )) 98 | sess.commit() 99 | 100 | time.sleep(1) 101 | 102 | sess.execute(text( 103 | """ 104 | select net.http_get('http://localhost:8080/echo-method') 105 | """ 106 | )) 107 | sess.commit() 108 | 109 | time.sleep(1) 110 | 111 | sess.execute(text( 112 | """ 113 | select net.http_get('http://localhost:8080/echo-method') 114 | """ 115 | )) 116 | sess.commit() 117 | 118 | time.sleep(1) 119 | 120 | count = sess.execute(text( 121 | """ 122 | select count(distinct created) from net._http_response; 123 | """ 124 | )).scalar() 125 | 126 | assert count == 3 127 | 128 | def 
test_http_get_collect_with_redirect(sess): 129 | """Follows a redirect and collects a response""" 130 | 131 | # Create a request 132 | (request_id,) = sess.execute(text( 133 | """ 134 | select net.http_get('http://localhost:8080/redirect_me'); 135 | """ 136 | )).fetchone() 137 | 138 | # Commit so background worker can start 139 | sess.commit() 140 | 141 | # Collect the response, waiting as needed 142 | response = sess.execute(text( 143 | """ 144 | select * from net._http_collect_response(:request_id, async:=false); 145 | """ 146 | ), 147 | {"request_id": request_id}, 148 | ).fetchone() 149 | 150 | assert response is not None 151 | assert "I got redirected" in response[2] 152 | 153 | def test_http_get_ipv6(sess): 154 | """Can resolve an ipv6 only server""" 155 | 156 | # Create a request 157 | (request_id,) = sess.execute(text( 158 | """ 159 | select net.http_get('http://localhost:8888/'); 160 | """ 161 | )).fetchone() 162 | 163 | # Commit so background worker can start 164 | sess.commit() 165 | 166 | # Collect the response, waiting as needed 167 | response = sess.execute(text( 168 | """ 169 | select * from net._http_collect_response(:request_id, async:=false); 170 | """ 171 | ), 172 | {"request_id": request_id}, 173 | ).fetchone() 174 | 175 | assert response is not None 176 | assert "Hello ipv6 only" in response[2] 177 | -------------------------------------------------------------------------------- /test/test_http_headers.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import text 2 | 3 | 4 | def test_http_headers_set(sess): 5 | """Check that headers are being set""" 6 | # Create a request 7 | (request_id,) = sess.execute(text( 8 | """ 9 | select net.http_get( 10 | url:='http://localhost:8080/headers', 11 | headers:='{"pytest-header": "pytest-header", "accept": "application/json"}' 12 | ); 13 | """ 14 | )).fetchone() 15 | 16 | # Commit so background worker can start 17 | sess.commit() 18 | 19 | # Collect the 
response, waiting as needed 20 | response = sess.execute( 21 | text( 22 | """ 23 | select * from net._http_collect_response(:request_id, async:=false); 24 | """ 25 | ), 26 | {"request_id": request_id}, 27 | ).fetchone() 28 | print(response) 29 | assert response is not None 30 | assert response[0] == "SUCCESS" 31 | assert "pytest-header" in response[2] 32 | -------------------------------------------------------------------------------- /test/test_http_malformed_headers.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import text 2 | 3 | def test_http_header_missing_value(sess): 4 | """Check that a `MissingValue: ` header is processed correctly""" 5 | 6 | (request_id,) = sess.execute(text( 7 | """ 8 | select net.http_get( 9 | url:='http://localhost:8080/pathological?malformed-header=missing-value' 10 | ); 11 | """ 12 | )).fetchone() 13 | 14 | # Commit so background worker can start 15 | sess.commit() 16 | 17 | # Collect the response, waiting as needed 18 | response = sess.execute( 19 | text( 20 | """ 21 | select * from net._http_collect_response(:request_id, async:=false); 22 | """ 23 | ), 24 | {"request_id": request_id}, 25 | ).fetchone() 26 | assert response is not None 27 | assert response[0] == "SUCCESS" 28 | assert "MissingValue" in response[2] 29 | 30 | 31 | def test_http_header_injection(sess): 32 | """Check that a `HeaderInjection Injected-Header: This header contains an injection` header fails without crashing""" 33 | 34 | (request_id,) = sess.execute(text( 35 | """ 36 | select net.http_get( 37 | url:='http://localhost:8080/pathological?malformed-header=header-injection' 38 | ); 39 | """ 40 | )).fetchone() 41 | 42 | # Commit so background worker can start 43 | sess.commit() 44 | 45 | # Collect the response, waiting as needed 46 | response = sess.execute( 47 | text( 48 | """ 49 | select * from net._http_collect_response(:request_id, async:=false); 50 | """ 51 | ), 52 | {"request_id": request_id}, 53 | 
).fetchone() 54 | assert response is not None 55 | assert response[0] == "ERROR" 56 | assert "Weird server reply" in response[1] 57 | 58 | 59 | def test_http_header_spaces(sess): 60 | """Check that a `Spaces In Header Name: This header name contains spaces` header is processed correctly""" 61 | 62 | (request_id,) = sess.execute(text( 63 | """ 64 | select net.http_get( 65 | url:='http://localhost:8080/pathological?malformed-header=spaces-in-header-name' 66 | ); 67 | """ 68 | )).fetchone() 69 | 70 | # Commit so background worker can start 71 | sess.commit() 72 | 73 | # Collect the response, waiting as needed 74 | response = sess.execute( 75 | text( 76 | """ 77 | select * from net._http_collect_response(:request_id, async:=false); 78 | """ 79 | ), 80 | {"request_id": request_id}, 81 | ).fetchone() 82 | assert response is not None 83 | assert response[0] == "SUCCESS" 84 | assert "Spaces In Header Name" in response[2] 85 | 86 | 87 | def test_http_header_non_printable_chars(sess): 88 | """Check that a `NonPrintableChars: NonPrintableChars\\u0001\\u0002` header is processed correctly""" 89 | 90 | (request_id,) = sess.execute(text( 91 | """ 92 | select net.http_get( 93 | url:='http://localhost:8080/pathological?malformed-header=non-printable-chars' 94 | ); 95 | """ 96 | )).fetchone() 97 | 98 | # Commit so background worker can start 99 | sess.commit() 100 | 101 | # Collect the response, waiting as needed 102 | response = sess.execute( 103 | text( 104 | """ 105 | select * from net._http_collect_response(:request_id, async:=false); 106 | """ 107 | ), 108 | {"request_id": request_id}, 109 | ).fetchone() 110 | assert response is not None 111 | assert response[0] == "SUCCESS" 112 | assert r"NonPrintableChars\\u0001\\u0002" in response[2] 113 | -------------------------------------------------------------------------------- /test/test_http_params.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import text 2 | 3 | 4 | def 
test_http_get_url_params_set(sess): 5 | """Check that params are being set on GET 6 | """ 7 | # Create a request 8 | (request_id,) = sess.execute(text( 9 | """ 10 | select net.http_get( 11 | url:='http://localhost:8080/anything', 12 | params:='{"hello": "world"}'::jsonb 13 | ); 14 | """ 15 | )).fetchone() 16 | 17 | # Commit so background worker can start 18 | sess.commit() 19 | 20 | # Collect the response, waiting as needed 21 | response = sess.execute( 22 | text( 23 | """ 24 | select * from net._http_collect_response(:request_id, async:=false); 25 | """ 26 | ), 27 | {"request_id": request_id}, 28 | ).fetchone() 29 | print(response) 30 | assert response is not None 31 | assert response[0] == "SUCCESS" 32 | assert "?hello=world" in response[2] 33 | 34 | 35 | def test_http_post_url_params_set(sess): 36 | """Check that params are being set on POST 37 | """ 38 | # Create a request 39 | (request_id,) = sess.execute(text( 40 | """ 41 | select net.http_post( 42 | url:='http://localhost:8080/anything', 43 | params:='{"hello": "world"}'::jsonb 44 | ); 45 | """ 46 | )).fetchone() 47 | 48 | # Commit so background worker can start 49 | sess.commit() 50 | 51 | # Collect the response, waiting as needed 52 | response = sess.execute( 53 | text( 54 | """ 55 | select * from net._http_collect_response(:request_id, async:=false); 56 | """ 57 | ), 58 | {"request_id": request_id}, 59 | ).fetchone() 60 | print(response) 61 | assert response is not None 62 | assert response[0] == "SUCCESS" 63 | assert "?hello=world" in response[2] 64 | -------------------------------------------------------------------------------- /test/test_http_post_collect.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import text 2 | 3 | 4 | def test_http_post_returns_id(sess): 5 | """net.http_post returns a bigint id""" 6 | 7 | (request_id,) = sess.execute(text( 8 | """ 9 | select net.http_post( 10 | url:='http://localhost:8080/post', 11 | body:='{}'::jsonb 
12 | ); 13 | """ 14 | )).fetchone() 15 | 16 | assert request_id == 1 17 | 18 | 19 | def test_http_post_special_chars_body(sess): 20 | """net.http_post returns a bigint id""" 21 | 22 | (request_id,) = sess.execute(text( 23 | """ 24 | select net.http_post( 25 | url:='http://localhost:8080/post', 26 | body:=json_build_object('foo', 'ba"r')::jsonb 27 | ); 28 | """ 29 | )).fetchone() 30 | 31 | assert request_id == 1 32 | 33 | 34 | def test_http_post_collect_sync_success(sess): 35 | """Collect a response, waiting if it has not completed yet""" 36 | 37 | # Create a request 38 | (request_id,) = sess.execute(text( 39 | """ 40 | select net.http_post( 41 | url:='http://localhost:8080/post' 42 | ); 43 | """ 44 | )).fetchone() 45 | 46 | # Commit so background worker can start 47 | sess.commit() 48 | 49 | # Collect the response, waiting as needed 50 | response = sess.execute( 51 | text( 52 | """ 53 | select * from net._http_collect_response(:request_id, async:=false); 54 | """ 55 | ), 56 | {"request_id": request_id}, 57 | ).fetchone() 58 | 59 | assert response is not None 60 | assert response[0] == "SUCCESS" 61 | assert response[1] == "ok" 62 | assert response[2] is not None 63 | 64 | 65 | # def test_http_post_collect_async_pending(sess): 66 | # """Collect a response async before completed""" 67 | 68 | # # Create a request 69 | # (request_id,) = sess.execute( 70 | # """ 71 | # select net.http_post( 72 | # url:='http://localhost:8080/post', 73 | # body:='{}'::jsonb 74 | # ); 75 | # """ 76 | # ).fetchone() 77 | 78 | # # Commit so background worker can start 79 | # sess.commit() 80 | 81 | # # Collect the response, waiting as needed 82 | # response = sess.execute( 83 | # text( 84 | # """ 85 | # select * from net._http_collect_response(:request_id, async:=true); 86 | # """ 87 | # ), 88 | # {"request_id": request_id}, 89 | # ).fetchone() 90 | 91 | # assert response is not None 92 | # assert response[0] == "PENDING" 93 | # assert "pending" in response[1] 94 | # assert response[2] is 
None 95 | 96 | 97 | def test_http_post_collect_non_empty_body(sess): 98 | """Collect a response async before completed""" 99 | 100 | # Create a request 101 | (request_id,) = sess.execute(text( 102 | """ 103 | select net.http_post( 104 | url:='http://localhost:8080/post', 105 | body:='{"hello": "world"}'::jsonb, 106 | headers:='{"Content-Type": "application/json", "accept": "application/json"}'::jsonb 107 | ); 108 | """ 109 | )).fetchone() 110 | 111 | # Commit so background worker can start 112 | sess.commit() 113 | 114 | # Collect the response, waiting as needed 115 | response = sess.execute( 116 | text( 117 | """ 118 | select * from net._http_collect_response(:request_id, async:=false); 119 | """ 120 | ), 121 | {"request_id": request_id}, 122 | ).fetchone() 123 | assert response is not None 124 | assert response[0] == "SUCCESS" 125 | assert "ok" in response[1] 126 | assert "hello" in response[2] 127 | assert "world" in response[2] 128 | 129 | # Make sure response is json 130 | (response_json,) = sess.execute( 131 | text( 132 | """ 133 | select 134 | ((x.response).body)::jsonb body_json 135 | from 136 | net._http_collect_response(:request_id, async:=false) x; 137 | """ 138 | ), 139 | {"request_id": request_id}, 140 | ).fetchone() 141 | 142 | assert response_json["hello"] == "world" 143 | 144 | 145 | def test_http_post_wrong_header_exception(sess): 146 | """Confirm that non application/json raises exception""" 147 | 148 | did_raise = False 149 | 150 | try: 151 | sess.execute(text( 152 | """ 153 | select net.http_post( 154 | url:='http://localhost:8080/post', 155 | headers:='{"Content-Type": "application/text"}'::jsonb 156 | ); 157 | """ 158 | )).fetchone() 159 | except: 160 | sess.rollback() 161 | did_raise = True 162 | 163 | assert did_raise 164 | 165 | 166 | def test_http_post_no_content_type_coerce(sess): 167 | """Confirm that a missing content type coerces to application/json""" 168 | 169 | # Create a request 170 | request_id, = sess.execute(text( 171 | """ 172 
| select net.http_post( 173 | url:='http://localhost:8080/post', 174 | headers:='{"other": "val"}'::jsonb 175 | ); 176 | """ 177 | )).fetchone() 178 | 179 | 180 | headers, = sess.execute(text( 181 | """ 182 | select 183 | headers 184 | from 185 | net.http_request_queue 186 | where 187 | id = :request_id 188 | """), {"request_id": request_id} 189 | ).fetchone() 190 | 191 | assert headers["Content-Type"] == "application/json" 192 | assert headers["other"] == "val" 193 | 194 | 195 | def test_http_post_empty_body(sess): 196 | """net.http_post can post a null body""" 197 | 198 | (request_id,) = sess.execute(text( 199 | """ 200 | select net.http_post( 201 | url:='http://localhost:8080/echo-method', 202 | body:=null 203 | ); 204 | """ 205 | )).fetchone() 206 | 207 | sess.commit() 208 | 209 | (body) = sess.execute( 210 | text( 211 | """ 212 | select 213 | (x.response).body as body 214 | from net._http_collect_response(:request_id, async:=false) x; 215 | """ 216 | ), 217 | {"request_id": request_id}, 218 | ).fetchone() 219 | 220 | assert 'POST' in str(body) 221 | -------------------------------------------------------------------------------- /test/test_http_requests_deleted_after_ttl.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pytest 4 | from sqlalchemy import text 5 | 6 | def test_http_requests_deleted_after_ttl(sess): 7 | """Check that http requests are deleted within a few seconds of their ttl""" 8 | 9 | # commit to avoid "cannot run inside a transaction block" error, see https://stackoverflow.com/a/75757326/4692662 10 | sess.execute(text("COMMIT")) 11 | sess.execute(text("alter system set pg_net.ttl to '4 seconds'")) 12 | sess.execute(text("select net.worker_restart()")) 13 | 14 | # bg worker restarts after 1 second 15 | time.sleep(1) 16 | 17 | # Create a request 18 | (request_id,) = sess.execute(text( 19 | """ 20 | select net.http_get( 21 | 'http://localhost:8080/anything' 22 | ); 23 | """ 24 | 
)).fetchone() 25 | 26 | # Commit so background worker can start 27 | sess.commit() 28 | 29 | # Confirm that the request was retrievable 30 | response = sess.execute( 31 | text( 32 | """ 33 | select * from net._http_collect_response(:request_id, async:=false); 34 | """ 35 | ), 36 | {"request_id": request_id}, 37 | ).fetchone() 38 | assert response[0] == "SUCCESS" 39 | 40 | # Sleep until after request should have been deleted 41 | time.sleep(5) 42 | 43 | # Ensure collecting the response now results in an error 44 | response = sess.execute( 45 | text( 46 | """ 47 | select * from net._http_collect_response(:request_id); 48 | """ 49 | ), 50 | {"request_id": request_id}, 51 | ).fetchone() 52 | # TODO an ERROR status doesn't seem correct here 53 | assert response[0] == "ERROR" 54 | 55 | sess.execute(text("COMMIT")) 56 | sess.execute(text("alter system reset pg_net.ttl")) 57 | sess.execute(text("select net.worker_restart()")) 58 | 59 | # wait until the worker has restarted to not affect other tests 60 | time.sleep(1) 61 | -------------------------------------------------------------------------------- /test/test_http_timeout.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pytest 4 | import re 5 | from sqlalchemy import text 6 | 7 | def test_http_get_timeout_reached(sess): 8 | """net.http_get with timeout errs on a slow reply""" 9 | 10 | (request_id,) = sess.execute(text( 11 | """ 12 | select net.http_get(url := 'http://localhost:8080/pathological?status=200&delay=6'); 13 | """ 14 | )).fetchone() 15 | 16 | sess.commit() 17 | 18 | # wait for timeout 19 | time.sleep(7) 20 | 21 | (response,) = sess.execute( 22 | text( 23 | """ 24 | select error_msg from net._http_response where id = :request_id; 25 | """ 26 | ), 27 | {"request_id": request_id}, 28 | ).fetchone() 29 | 30 | assert response.startswith("Timeout of 5000 ms reached") 31 | 32 | 33 | def test_http_detailed_timeout(sess): 34 | """the timeout shows a 
detailed error msg""" 35 | 36 | pattern = r""" 37 | Total\stime:\s* # Match 'Total time:' with optional spaces 38 | (?P[0-9]*\.?[0-9]+)\s*ms # Capture A (Total time) 39 | \s*\(DNS\stime:\s* # Match '(DNS time:' with optional spaces 40 | (?P[0-9]*\.?[0-9]+)\s*ms,\s* # Capture B (DNS time) 41 | TCP/SSL\shandshake\stime:\s* # Match 'TCP/SSL handshake time:' with spaces 42 | (?P[0-9]*\.?[0-9]+)\s*ms,\s* # Capture C (TCP/SSL handshake time) 43 | HTTP\sRequest/Response\stime:\s* # Match 'HTTP Request/Response time:' with spaces 44 | (?P[0-9]*\.?[0-9]+)\s*ms\) # Capture D (HTTP Request/Response time) 45 | """ 46 | 47 | regex = re.compile(pattern, re.VERBOSE) 48 | 49 | # TODO Timeout at the DNS step. 50 | # TODO make this work locally. A slow DNS cannot be ensured on an external network. 51 | # This can be done manually with `select net.http_get('https://news.ycombinator.com/', timeout_milliseconds := 10);` 52 | 53 | # TODO add a TCP/SSL handshake timeout test 54 | # This can be done locally on Linux with `sudo tc qdisc add dev lo root netem delay 500ms` and 55 | # select net.http_get(url := 'http://localhost:8080/pathological', timeout_milliseconds := 1000); 56 | 57 | # Timeout at the HTTP step 58 | (request_id,) = sess.execute(text( 59 | """ 60 | select net.http_get(url := 'http://localhost:8080/pathological?delay=1', timeout_milliseconds := 1000) 61 | """ 62 | )).fetchone() 63 | 64 | sess.commit() 65 | 66 | # wait for timeout 67 | time.sleep(2.1) 68 | 69 | (response,) = sess.execute( 70 | text( 71 | """ 72 | select error_msg from net._http_response where id = :request_id; 73 | """ 74 | ), 75 | {"request_id": request_id}, 76 | ).fetchone() 77 | 78 | match = regex.search(response) 79 | 80 | total_time = float(match.group('A')) 81 | dns_time = float(match.group('B')) 82 | tcp_ssl_time = float(match.group('C')) 83 | http_time = float(match.group('D')) 84 | 85 | assert total_time > 0 86 | assert dns_time > 0 87 | assert tcp_ssl_time > 0 88 | assert http_time > 0 89 | 90 | 
def test_http_get_succeed_with_gt_timeout(sess): 91 | """net.http_get with timeout succeeds when the timeout is greater than the slow reply response time""" 92 | 93 | (request_id,) = sess.execute(text( 94 | """ 95 | select net.http_get(url := 'http://localhost:8080?status=200&delay=3', timeout_milliseconds := 3500); 96 | """ 97 | )).fetchone() 98 | 99 | sess.commit() 100 | 101 | time.sleep(4.5) 102 | 103 | (status_code,) = sess.execute( 104 | text( 105 | """ 106 | select status_code from net._http_response where id = :request_id; 107 | """ 108 | ), 109 | {"request_id": request_id}, 110 | ).fetchone() 111 | 112 | assert status_code == 200 113 | 114 | def test_many_slow_mixed_with_fast(sess): 115 | """many fast responses finish despite being mixed with slow responses, the fast responses will wait the timeout duration""" 116 | 117 | sess.execute(text( 118 | """ 119 | select 120 | net.http_get(url := 'http://localhost:8080/pathological?status=200') 121 | , net.http_get(url := 'http://localhost:8080/pathological?status=200&delay=2', timeout_milliseconds := 1000) 122 | , net.http_get(url := 'http://localhost:8080/pathological?status=200') 123 | , net.http_get(url := 'http://localhost:8080/pathological?status=200&delay=2', timeout_milliseconds := 1000) 124 | from generate_series(1,25) _; 125 | """ 126 | )) 127 | 128 | sess.commit() 129 | 130 | # wait for timeouts 131 | time.sleep(3) 132 | 133 | (request_successes, request_timeouts) = sess.execute(text( 134 | """ 135 | select 136 | count(*) filter (where error_msg is null and status_code = 200) as request_successes, 137 | count(*) filter (where error_msg is not null and error_msg like 'Timeout of 1000 ms reached%') as request_timeouts 138 | from net._http_response; 139 | """ 140 | )).fetchone() 141 | 142 | assert request_successes == 50 143 | assert request_timeouts == 50 144 | -------------------------------------------------------------------------------- /test/test_privileges.py: 
# ---------------------------------------------------------------------------
import pytest
from sqlalchemy import text

def test_net_on_postgres_role(sess):
    """Check that the postgres role can use the net schema by default"""

    role = sess.execute(text("select current_user;")).fetchone()

    assert role[0] == "postgres"

    # Create a request
    (request_id,) = sess.execute(text(
        """
        select net.http_get(
            'http://localhost:8080/anything'
        );
        """
    )).fetchone()

    # Commit so background worker can start
    sess.commit()

    # Confirm that the request was retrievable
    response = sess.execute(
        text(
            """
            select * from net._http_collect_response(:request_id, async:=false);
            """
        ),
        {"request_id": request_id},
    ).fetchone()
    assert response[0] == "SUCCESS"


def test_net_on_pre_existing_role(sess):
    """Check that a pre existing role can use the net schema"""

    # Create a request
    (request_id,) = sess.execute(text(
        """
        set local role to pre_existing;
        select net.http_get(
            'http://localhost:8080/anything'
        );
        """
    )).fetchone()

    # Commit so background worker can start
    sess.commit()

    # Confirm that the request was retrievable
    response = sess.execute(
        text(
            """
            set local role to pre_existing;
            select * from net._http_collect_response(:request_id, async:=false);
            """
        ),
        {"request_id": request_id},
    ).fetchone()
    assert response[0] == "SUCCESS"

    sess.execute(text("""
        set local role postgres;
    """))

def test_net_on_new_role(sess):
    """Check that a newly created role can use the net schema"""

    sess.execute(text("""
        create role another;
    """))

    # Create a request
    (request_id,) = sess.execute(text(
        """
        set local role to another;
        select net.http_get(
            'http://localhost:8080/anything'
        );
        """
    )).fetchone()

    # Commit so background worker can start
    sess.commit()

    # Confirm that the request was retrievable
    response = sess.execute(
        text(
            """
            set local role to another;
            select * from net._http_collect_response(:request_id, async:=false);
            """
        ),
        {"request_id": request_id},
    ).fetchone()
    assert response[0] == "SUCCESS"

    ## can use the net.worker_restart function
    response = sess.execute(
        text(
            """
            set local role to another;
            select net.worker_restart();
            """
        )
    ).fetchone()
    assert response[0] == True

    sess.execute(text("""
        set local role postgres;
        drop role another;
    """))
# ---------------------------------------------------------------------------
# test/test_user_db.py
# ---------------------------------------------------------------------------
import time

import pytest
from sqlalchemy import text

def test_net_with_different_username_dbname(sess):
    """Check that the background worker can run under a configured username and database"""

    # commit to avoid "cannot run inside a transaction block" error, see https://stackoverflow.com/a/75757326/4692662
    sess.execute(text("COMMIT"))
    sess.execute(text("alter system set pg_net.username to 'pre_existing'"))
    sess.execute(text("alter system set pg_net.database_name to 'pre_existing'"))
    sess.execute(text("select net.worker_restart()"))

    # bg worker restarts after 1 second
    time.sleep(1.1)

    # the worker backend should now be connected as the configured user/database
    (username, datname) = sess.execute(
        text(
            """
            select usename, datname from pg_stat_activity where backend_type ilike '%pg_net%';
            """
        )
    ).fetchone()
    assert username == 'pre_existing'
    assert datname == 'pre_existing'

    sess.execute(text("COMMIT"))
    # Reset BOTH settings: previously only pg_net.username was reset, which left
    # the worker connected to the 'pre_existing' database for subsequent tests.
    sess.execute(text("alter system reset pg_net.username"))
    sess.execute(text("alter system reset pg_net.database_name"))
    sess.execute(text("select net.worker_restart()"))

    # bg worker restarts after 1 second
    time.sleep(1.1)


def test_net_appname(sess):
    """Check that pg_stat_activity has appname set"""

    # exactly one backend should advertise pg_net in application_name
    (count,) = sess.execute(
        text(
            """
            select count(1) from pg_stat_activity where application_name like '%pg_net%';
            """
        )
    ).fetchone()
    assert count == 1
# ---------------------------------------------------------------------------
# test/test_worker_error.py
# ---------------------------------------------------------------------------
from sqlalchemy import text
import pytest
import time

def test_success_when_worker_is_up(sess):
    """net.check_worker_is_up should not return anything when the worker is running"""

    time.sleep(1)  # wait if another test did a net.worker_restart()

    (result,) = sess.execute(text("""
        select net.check_worker_is_up();
    """)).fetchone()
    assert result is not None
    assert result == ''


def test_http_get_error_when_worker_down(sess):
    """net.http_get returns an error when pg background worker is down"""

    # kill the pg_net background worker
    (restarted,) = sess.execute(text("""
        select pg_terminate_backend(pid) from pg_stat_activity where backend_type ilike '%pg_net%';
    """)).fetchone()
    assert restarted is not None
    assert restarted == True

    time.sleep(0.1)

    with pytest.raises(Exception) as execinfo:
        res = sess.execute(text(
            """
            select net.check_worker_is_up();
            """
        ))
    assert "the pg_net background worker is not up" in str(execinfo)
# ---------------------------------------------------------------------------