├── .cargo └── config ├── .github ├── dependabot.yml └── workflows │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── Makefile.toml ├── README.md ├── examples ├── adding │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── bin.rs │ │ └── lib.rs ├── fdw-rw │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── fdw │ ├── Cargo.toml │ └── src │ │ ├── bin.rs │ │ └── lib.rs ├── logging │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── bin.rs │ │ └── lib.rs ├── memory_context │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── bin.rs │ │ └── lib.rs ├── nullable │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── bin.rs │ │ └── lib.rs ├── panicking │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── bin.rs │ │ └── lib.rs └── strings │ ├── Cargo.toml │ ├── README.md │ └── src │ ├── bin.rs │ └── lib.rs ├── integration-tests ├── Cargo.toml ├── Makefile.toml ├── postgresql.conf ├── src │ └── lib.rs └── tests │ ├── adding_tests.rs │ ├── fdw.rs │ ├── logging.rs │ ├── memory_context.rs │ ├── nullable_tests.rs │ ├── panicking_tests.rs │ └── strings.rs ├── pg-extend ├── Cargo.toml ├── Makefile.toml ├── build.rs ├── pg_majorversion.h ├── src │ ├── lib.rs │ ├── log.rs │ ├── native │ │ ├── mod.rs │ │ ├── text.rs │ │ └── varlena.rs │ ├── pg_alloc.rs │ ├── pg_bool.rs │ ├── pg_datum.rs │ ├── pg_error.rs │ ├── pg_fdw.rs │ ├── pg_sys.rs │ └── pg_type.rs └── wrapper.h └── pg-extern-attr ├── Cargo.toml ├── Makefile.toml ├── README.md └── src ├── lib.rs └── lifetime.rs /.cargo/config: -------------------------------------------------------------------------------- 1 | # Informs the linker that some of the symbols for Postgres won't be available 2 | # until runtime on the dynamic library load. Flags differ based on target family 3 | 4 | [target.'cfg(unix)'] 5 | rustflags = "-C link-arg=-undefineddynamic_lookup" 6 | 7 | [target.'cfg(windows)'] 8 | rustflags = "-C link-arg=/FORCE" -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "13:00" 8 | open-pull-requests-limit: 10 9 | ignore: 10 | - dependency-name: cargo 11 | versions: 12 | - 0.50.0 13 | - 0.50.1 14 | - 0.51.0 15 | - dependency-name: quote 16 | versions: 17 | - 1.0.8 18 | - dependency-name: proc-macro2 19 | versions: 20 | - 1.0.24 21 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - release/** 8 | pull_request: 9 | branches: 10 | - master 11 | - release/** 12 | schedule: 13 | - cron: '0 3 * * 4' 14 | 15 | jobs: 16 | postgres: 17 | name: postgres 18 | runs-on: ${{ matrix.os }} 19 | strategy: 20 | matrix: 21 | #os: [ubuntu-latest, macos-latest, windows-latest] 22 | os: [ubuntu-latest, macos-latest] 23 | version: ["v10", "v11", "v12"] 24 | steps: 25 | - uses: actions/checkout@v1 26 | 27 | - uses: actions-rs/toolchain@v1 28 | with: 29 | profile: minimal 30 | toolchain: stable 31 | override: true 32 | - uses: davidB/rust-cargo-make@v1 33 | with: 34 | version: '0.30.8' 35 | 36 | - name: target/postgres cache 37 | uses: actions/cache@v1 38 | with: 39 | path: target/postgres 40 | key: ${{ runner.os }}-postgres-${{ matrix.version 
}}-${{ hashFiles('**/Makefile.toml') }} 41 | restore-keys: | 42 | ${{ runner.os }}-postgres-${{ matrix.version }}-${{ hashFiles('**/Makefile.toml') }} 43 | 44 | - name: cargo make matrix.version 45 | run: cargo make -p ${{ matrix.version }} install-postgres 46 | 47 | 48 | ## Run all default oriented feature sets across all unix platforms. 49 | unix-matrix: 50 | name: platform 51 | runs-on: ${{ matrix.os }} 52 | needs: postgres 53 | strategy: 54 | fail-fast: false 55 | matrix: 56 | os: [ubuntu-latest, macos-latest] 57 | feature: [no-default-features, default-features, all-features] 58 | version: ["v10", "v11", "v12"] 59 | steps: 60 | - uses: actions/checkout@v1 61 | 62 | - uses: actions-rs/toolchain@v1 63 | with: 64 | profile: minimal 65 | toolchain: stable 66 | override: true 67 | - uses: davidB/rust-cargo-make@v1 68 | with: 69 | version: '0.30.8' 70 | 71 | - name: target/postgres cache 72 | uses: actions/cache@v1 73 | with: 74 | path: target/postgres 75 | key: ${{ runner.os }}-postgres-${{ matrix.version }}-${{ hashFiles('**/Makefile.toml') }} 76 | restore-keys: | 77 | ${{ runner.os }}-postgres-${{ matrix.version }}-${{ hashFiles('**/Makefile.toml') }} 78 | 79 | - name: cargo make matrix.feature 80 | run: cargo make -p ${{ matrix.version }} ${{ matrix.feature }} 81 | 82 | ## Run all default oriented feature sets across all unix platforms. 83 | windows-matrix: 84 | name: platform 85 | runs-on: ${{ matrix.os }} 86 | needs: postgres 87 | strategy: 88 | fail-fast: true 89 | matrix: 90 | os: [windows-latest] 91 | feature: [no-default-features, default-features, all-features] 92 | version: ["v10", "v11", "v12"] 93 | continue-on-error: true 94 | steps: 95 | - uses: actions/checkout@v1 96 | 97 | - uses: actions-rs/toolchain@v1 98 | with: 99 | profile: minimal 100 | toolchain: stable 101 | override: true 102 | - uses: davidB/rust-cargo-make@v1 103 | with: 104 | version: '0.30.8' 105 | 106 | - name: target/postgres cache 107 | uses: actions/cache@v1 108 | with: 109 | path: target/postgres 110 | key: ${{ runner.os }}-postgres-${{ matrix.version }}-${{ hashFiles('**/Makefile.toml') }} 111 | restore-keys: | 112 | ${{ runner.os }}-postgres-${{ matrix.version }}-${{ hashFiles('**/Makefile.toml') }} 113 | 114 | - name: cargo make matrix.feature 115 | run: cargo make -p ${{ matrix.version }} ${{ matrix.feature }} 116 | 117 | ## Execute the clippy checks 118 | cleanliness: 119 | name: cleanliness 120 | runs-on: ubuntu-latest 121 | needs: postgres 122 | env: 123 | version: v11 124 | steps: 125 | - uses: actions/checkout@v1 126 | 127 | # not using the cargo cache here, since this differs significantly 128 | - name: cargo-all cache 129 | uses: actions/cache@v1 130 | with: 131 | path: ~/.cargo 132 | key: ${{ runner.os }}-cargo-all-${{ hashFiles('**/Cargo.toml') }}-${{ hashFiles('**/Cargo.lock') }} 133 | restore-keys: | 134 | ${{ runner.os }}-cargo-all-${{ hashFiles('**/Cargo.toml') }} 135 | ${{ runner.os }}-cargo-all 136 | ${{ runner.os }}-cargo 137 | 138 | - uses: actions-rs/toolchain@v1 139 | with: 140 | profile: minimal 141 | toolchain: stable 142 | components: rustfmt, clippy 143 | override: true 144 | - uses: davidB/rust-cargo-make@v1 145 | with: 146 | version: '0.30.8' 147 | 148 | - name: target/postgres cache 149 | uses: actions/cache@v1 150 | with: 151 | path: target/postgres 152 | key: ${{ runner.os }}-postgres-${{ env.version }}-${{ hashFiles('**/Makefile.toml') }} 153 | restore-keys: | 154 | ${{ runner.os }}-postgres-${{ env.version }}-${{ hashFiles('**/Makefile.toml') }} 155 | 156 | # Clippy 157 | 
- name: cargo make clippy -p ${{ env.version }} 158 | run: cargo make clippy -p ${{ env.version }} 159 | # Rustfmt 160 | - name: cargo make fmt -p ${{ env.version }} 161 | run: cargo make fmt -p ${{ env.version }} 162 | # Audit 163 | - name: cargo audit -p ${{ env.version }} 164 | run: cargo make audit -p ${{ env.version }} 165 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/** 2 | .idea/** 3 | **/target 4 | /target 5 | **/*.rs.bk 6 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log: pg-extend-rs 2 | 3 | All notable changes to this project will be documented in this file. 4 | This project adheres to [Semantic Versioning](http://semver.org/). 5 | 6 | ## 0.2.0 7 | 8 | ### Added 9 | 10 | - `PgAllocator` for allocating/deallocating through the Postgres `palloc` and `pfree` method. 11 | 12 | ## 0.1.0 13 | 14 | ### Added 15 | 16 | - created `pg-extend` and `pg-extern-attr` 17 | - `pg_extend::pg_sys` for postgres bindings 18 | - `pg_extend::pg_datum` for conversions between Rust types and Postgres Datums 19 | - `#[pg_extern]` that externalizes Rust functions for Postgres extensions 20 | - panic handler to map panics to `FATAL` errors in Postgres 21 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at benjaminfry@me.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [profile.release] 2 | debug = true 3 | 4 | [workspace] 5 | members = [ 6 | "pg-extend", 7 | "pg-extern-attr", 8 | "examples/adding", 9 | # Examples disabled because FDW support broken with PostgreSQL 11+. 10 | # See https://github.com/bluejekyll/pg-extend-rs/issues/49 11 | "examples/fdw", 12 | # "examples/fdw-rw", 13 | "examples/logging", 14 | "examples/memory_context", 15 | "examples/nullable", 16 | "examples/panicking", 17 | "examples/strings", 18 | "integration-tests", 19 | ] 20 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 The pg-extend-rs developers. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /Makefile.toml: -------------------------------------------------------------------------------- 1 | # Copyright 2015-2019 Benjamin Fry 2 | # 3 | # Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | # copied, modified, or distributed except according to those terms. 7 | 8 | # This is a Makefile for `cargo make`, to use it first install cargo-make with `cargo install cargo-make` 9 | 10 | [config] 11 | skip_core_tasks = true 12 | on_error_task = "on_error" 13 | 14 | [config.modify_core_tasks] 15 | # if true, all core tasks are set to private (default false) 16 | private = true 17 | 18 | ## General environment configuration 19 | [env] 20 | TARGET_DIR = "${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}/target" 21 | CARGO_MAKE_WORKSPACE_TARGET_DIRECTORY = "${TARGET_DIR}" 22 | CARGO_MAKE_WORKSPACE_SKIP_MEMBERS = "examples/*" 23 | CARGO_MAKE_EXTEND_WORKSPACE_MAKEFILE = "true" 24 | CARGO_MAKE_KCOV_INSTALLATION_DIRECTORY = "${TARGET_DIR}/kcov" 25 | CARGO_MAKE_KCOV_DOWNLOAD_DIRECTORY = "${TARGET_DIR}/kcov-dl" 26 | CARGO_MAKE_KCOV_VERSION = "37" 27 | 28 | # This can be overriden (e.g. 
in pg-extend crate) to specify a more limited set of features 29 | # because we use features for enabling different versions of pg, this should not be --all-features in general 30 | #ALL_FEATURES = "" 31 | 32 | # Defaults are all for postgres version 12 33 | PG_VERSION = { source = "${CARGO_MAKE_PROFILE}", default_value = "12.1", mapping = { v10 = "10.11", v11 = "11.6" }} 34 | PGPORT = { source = "${CARGO_MAKE_PROFILE}", default_value = "5444", mapping = { v10 = "5442", v11 = "5443" }} 35 | PGSERVICE = { unset = true } 36 | PGHOST = "/tmp/" 37 | PGDATABASE = "postgres" 38 | PSQLRC = "/dev/null" # This ensures that every psql invocation will skip the user's .psqlrc file" 39 | ALL_FEATURES = { source = "${CARGO_MAKE_PROFILE}", default_value = "", mapping = { v10 = "--features=fdw", v11 = "--features=fdw" }} 40 | 41 | PG_DIR = "${TARGET_DIR}/postgres" 42 | PG_DL_DIR = "${PG_DIR}" 43 | PG_BUILD_DIR = "${PG_DIR}/postgres_build_${PG_VERSION}" 44 | PG_INSTALL_DIR = "${PG_DIR}/postgres_${PG_VERSION}" 45 | PG_BIN_DIR = "${PG_INSTALL_DIR}/bin" 46 | PG_DB_DIR = "${TARGET_DIR}/postgres_db_${PG_VERSION}" 47 | POSTGRES_TEST_DB = "pg_extend_rs_test_db" 48 | PG_LOGPATH = "${TARGET_DIR}/postgres-${PG_VERSION}.log" 49 | 50 | PATH = "${PG_BIN_DIR}:${PATH}" 51 | PG_CONFIG = "${PG_BIN_DIR}/pg_config" 52 | 53 | ## 54 | ## Installation tasks 55 | ## 56 | 57 | [tasks.install-openssl] 58 | description = "Installs OpenSSL on Windows" 59 | workspace = false 60 | env = { OPENSSL_VERSION = "1_1_1d", OPENSSL_DIR = "${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}\\target\\OpenSSL" } 61 | condition = { platforms = ["windows"], files_not_exist = ["${OPENSSL_DIR}"] } 62 | script_runner = "powershell" 63 | script_extension = "ps1" 64 | script = [ 65 | ''' 66 | mkdir ${env:CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}\\target 67 | mkdir ${env:CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}\\target\OpenSSL 68 | Invoke-WebRequest -URI "http://slproweb.com/download/Win64OpenSSL-${env:OPENSSL_VERSION}.exe" -OutFile "${env:CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}\target\OpenSSL.exe" 69 | Start-Process -FilePath "${env:CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY}\target\OpenSSL.exe" -ArgumentList "/SILENT /VERYSILENT /SP- /DIR=${env:OPENSSL_DIR}" 70 | Invoke-WebRequest "https://curl.haxx.se/ca/cacert.pem" -O "${env:OPENSSL_DIR}\cacert.pem" 71 | ''' 72 | ] 73 | 74 | [tasks.install-postgres] 75 | description = "Installs Postgres" 76 | workspace = false 77 | windows_alias = "empty" 78 | script_runner = "@shell" 79 | script = [ 80 | ''' 81 | set -e 82 | 83 | if ${PG_BIN_DIR}/postgres --version ; then exit 0 ; fi 84 | 85 | echo "Installing Postgres ${PG_VERSION:?} into ${PG_INSTALL_DIR:?}" 86 | 87 | # download 88 | PG_DL_PATH="${PG_DL_DIR:?}/postgres_${PG_VERSION}.tar.bz2" 89 | PG_URL=https://ftp.postgresql.org/pub/source/v${PG_VERSION:?}/postgresql-${PG_VERSION:?}.tar.bz2 90 | 91 | if ! 
[ -f "${PG_DL_PATH:?}" ]; then 92 | echo "Retrieving ${PG_URL:?}" 93 | mkdir -p ${PG_DL_DIR:?} 94 | curl -s -o ${PG_DL_PATH:?} ${PG_URL:?} 95 | fi 96 | 97 | # build 98 | echo "Building ${PG_BUILD_DIR:?}" 99 | mkdir -p ${PG_BUILD_DIR:?} 100 | 101 | current_dir=${PWD} 102 | cd ${PG_BUILD_DIR:?} 103 | 104 | bunzip2 -k -d -f ${PG_DL_PATH:?} 105 | PG_TAR_PATH="${PG_DL_DIR}/postgres_${PG_VERSION}.tar" 106 | tar -xf ${PG_TAR_PATH:?} 107 | 108 | cd postgresql-${PG_VERSION} 109 | ./configure --prefix=${PG_INSTALL_DIR} --with-pgport=${PGPORT} --enable-cassert --without-readline 110 | make install 111 | 112 | cd ${current_dir:?} 113 | ${PG_BIN_DIR}/postgres --version 114 | 115 | rm -r ${PG_BUILD_DIR:?} 116 | rm ${PG_TAR_PATH:?} 117 | rm ${PG_DL_PATH:?} 118 | ''' 119 | ] 120 | 121 | [tasks.install-audit] 122 | description = "Installs cargo-audit" 123 | workspace = false 124 | condition_script = ["if cargo audit --version ; then exit 1 ; else exit 0 ; fi"] 125 | command = "cargo" 126 | args = ["install", "cargo-audit"] 127 | 128 | [tasks.install-with] 129 | description = "Installs cargo-with" 130 | workspace = false 131 | condition_script = ["if cargo with --version ; then exit 1 ; else exit 0 ; fi"] 132 | command = "cargo" 133 | args = ["install", "cargo-with", "--git=https://github.com/bluejekyll/cargo-with.git", "--branch=master"] 134 | 135 | ## 136 | ## Postgres operations 137 | 138 | [tasks.pg-init-db-dir] 139 | description = "Creates the test DB used by the integration tests" 140 | workspace = false 141 | condition = { files_not_exist = ["${PG_DB_DIR}/pg_wal"] } 142 | dependencies = ["install-postgres"] 143 | script_runner = "@shell" 144 | script = [ 145 | ''' 146 | set -e 147 | 148 | echo "Intializing postgres db ${PG_DB_DIR:?}" 149 | rm -rf ${PG_DB_DIR:?} 150 | mkdir -p ${PG_DB_DIR:?} 151 | ${PG_BIN_DIR}/pg_ctl init -D ${PG_DB_DIR:?} -l ${PG_LOGPATH} 152 | ''' 153 | ] 154 | 155 | [tasks.pg-start] 156 | description = "Starts Postgres" 157 | workspace = false 158 | dependencies = ["pg-init-db-dir"] 159 | script_runner = "@shell" 160 | script = [ 161 | ''' 162 | set -e 163 | 164 | if ${PG_BIN_DIR}/pg_isready ; then exit 0 ; fi 165 | 166 | echo "Starting postgres ${PG_DB_DIR:?}" 167 | mkdir -p ${PG_DB_DIR:?} 168 | 169 | cp ${CARGO_MAKE_WORKSPACE_WORKING_DIRECTORY:?}/integration-tests/postgresql.conf ${PG_DB_DIR:?}/ 170 | mv ${PG_LOGPATH:?} ${PG_LOGPATH}.bak || true 171 | 172 | ${PG_BIN_DIR}/pg_ctl start -D ${PG_DB_DIR:?} -l ${PG_LOGPATH:?} 173 | ${PG_BIN_DIR}/pg_isready -t 5 174 | ${PG_BIN_DIR}/psql postgres -o /dev/null -c "SELECT 1" # check the connection works 175 | ''' 176 | ] 177 | 178 | [tasks.pg-create-db] 179 | description = "Creates the test DB used by the integration tests" 180 | workspace = false 181 | dependencies = ["pg-start"] 182 | script_runner = "@shell" 183 | script = [ 184 | ''' 185 | set -e 186 | 187 | echo "Creating DB ${POSTGRES_TEST_DB:?}" 188 | ${PG_BIN_DIR}/psql postgres -o /dev/null -c "SELECT 1" # check the connection works 189 | ${PG_BIN_DIR}/psql postgres -c "CREATE DATABASE ${POSTGRES_TEST_DB:?};" || true 190 | ''' 191 | ] 192 | 193 | [tasks.pg-drop-db] 194 | description = "Creates the test DB used by the integration tests" 195 | workspace = false 196 | dependencies = ["pg-start"] 197 | script_runner = "@shell" 198 | script = [ 199 | ''' 200 | set -e 201 | 202 | echo "Dropping DB ${POSTGRES_TEST_DB:?}" 203 | ${PG_BIN_DIR}/psql postgres -o /dev/null -c "SELECT 1" # check the connection works 204 | ${PG_BIN_DIR}/psql postgres -c "DROP DATABASE ${POSTGRES_TEST_DB:?};" || 
true 205 | ''' 206 | ] 207 | 208 | [tasks.pg-stop] 209 | description = "Starts Postgres" 210 | workspace = false 211 | dependencies = ["pg-init-db-dir"] 212 | script_runner = "@shell" 213 | script = [ 214 | ''' 215 | set -e 216 | 217 | echo "Stopping postgres ${PG_DB_DIR:?}" 218 | ${PG_BIN_DIR}/pg_ctl stop -D ${PG_DB_DIR:?} -l ${PG_LOGPATH} || true 219 | ''' 220 | ] 221 | 222 | [tasks.psql] 223 | description = "Connects psql to" 224 | dependencies = ["pg-create-db"] 225 | workspace = false 226 | script_runner = "@shell" 227 | script = [ 228 | ''' 229 | set -e 230 | 231 | echo "Connection to postgres ${PG_DB_DIR:?} ${POSTGRES_TEST_DB:?}" 232 | ${PG_BIN_DIR}/psql ${POSTGRES_TEST_DB:?} 233 | ''' 234 | ] 235 | 236 | ## 237 | ## Standard tasks for testing, building, etc. 238 | ## 239 | 240 | # TODO: actually make this await rather than sleep 241 | [tasks.await-update] 242 | description = "awaits the package to show up in crates.io" 243 | script_runner = "@shell" 244 | script = [ 245 | ''' 246 | sleep 10 247 | ''' 248 | ] 249 | 250 | [tasks.clean-kcov] 251 | description = "Remove the kcov installation" 252 | workspace = false 253 | script_runner = "@shell" 254 | script = [ 255 | ''' 256 | rm -rf ${CARGO_MAKE_KCOV_INSTALLATION_DIRECTORY:?} 257 | ''' 258 | ] 259 | 260 | [tasks.clean] 261 | description = "Remove only the current workspace member" 262 | command = "cargo" 263 | args = ["clean", "-p", "${CARGO_MAKE_CRATE_NAME}"] 264 | 265 | [tasks.clean-build] 266 | description = "Remove only the current workspace member" 267 | workspace = false 268 | script_runner = "@shell" 269 | script = [ 270 | ''' 271 | rm -rf ${TARGET_DIR:?}/debug 272 | ''' 273 | ] 274 | 275 | [tasks.clean-db] 276 | description = "Removes the DB directory" 277 | workspace = false 278 | script_runner = "@shell" 279 | script = [ 280 | ''' 281 | rm -rf ${PG_DB_DIR:?} 282 | ''' 283 | ] 284 | 285 | [tasks.clean-all] 286 | description = "Remove only the current workspace member" 287 | workspace = false 288 | script_runner = "@shell" 289 | script = [ 290 | ''' 291 | rm -rf ${TARGET_DIR:?} 292 | rm -rf target 293 | ''' 294 | ] 295 | 296 | [tasks.update] 297 | description = "Update dependencies" 298 | command = "cargo" 299 | args = ["update", "-p", "${CARGO_MAKE_CRATE_NAME}"] 300 | 301 | [tasks.fmt] 302 | description = "Check formatting with rustfmt" 303 | command = "cargo" 304 | args = ["fmt", "--", "--check"] 305 | 306 | [tasks.check] 307 | description = "Run a quick check on all the crates" 308 | dependencies = ["install-postgres"] 309 | command = "cargo" 310 | args = ["check", "--all-targets", "@@remove-empty(FEATURES)"] 311 | 312 | [tasks.doc] 313 | description = "Run a quick check on all the crates" 314 | dependencies = ["install-postgres"] 315 | command = "cargo" 316 | args = ["doc", "@@remove-empty(FEATURES)"] 317 | 318 | [tasks.build] 319 | description = "Build all the crates" 320 | dependencies = ["install-postgres"] 321 | command = "cargo" 322 | args = ["build", "--all-targets", "@@remove-empty(FEATURES)"] 323 | 324 | [tasks.test] 325 | description = "Run tests on all the crates" 326 | command = "cargo" 327 | args = ["test", "--all-targets", "@@remove-empty(FEATURES)"] 328 | 329 | [tasks.clippy] 330 | description = "Run the clippy linter on all crates" 331 | #dependencies = ["clean", "install-postgres"] 332 | dependencies = ["clean-build", "install-postgres"] 333 | command = "cargo" 334 | #args = ["clippy", "--all-targets", "${ALL_FEATURES}", "--", "-D", "warnings"] 335 | # FIXME: the cbove command is correct, but seems to hit 
an issue with clippy and library paths, not clear why... 336 | workspace = false 337 | args = ["clippy", "--all", "--all-targets", "--", "-D", "warnings"] 338 | 339 | [tasks.build-bench] 340 | description = "Check that all benchmarks compile" 341 | dependencies = ["clean", "install-postgres"] 342 | command = "cargo" 343 | toolchain = "nightly" 344 | args = ["bench", "--no-run"] 345 | 346 | [tasks.audit] 347 | description = "Run cargo audit on all crates" 348 | workspace = false 349 | dependencies = ["check", "install-audit"] 350 | command = "cargo" 351 | args = ["audit", "--deny-warnings", "--ignore=RUSTSEC-2020-0016"] 352 | 353 | [tasks.all] 354 | description = "Run check, build, and test on all crates" 355 | dependencies = ["check", "build", "test"] 356 | 357 | [tasks.default] 358 | description = "Run the all task" 359 | run_task = "all" 360 | 361 | [tasks.on_error] 362 | description = "Dumps addition information to the CLI on failure" 363 | workspace = false 364 | script_runner = "@shell" 365 | script = [ 366 | ''' 367 | echo "!!!!Dumping PG log after failure!!!!" 368 | cat ${PG_LOGPATH} || true 369 | ''' 370 | ] 371 | 372 | ## 373 | ## All feature testing builds 374 | ## 375 | 376 | [tasks.default-features] 377 | description = "Run all with default features" 378 | dependencies = ["install-openssl"] 379 | env = { FEATURES = "" } 380 | run_task = { name = "all", fork = true } 381 | 382 | [tasks.no-default-features] 383 | description = "Run all with --no-default-features" 384 | dependencies = ["install-openssl"] 385 | env = { FEATURES = "--no-default-features" } 386 | run_task = { name = "all", fork = true } 387 | 388 | [tasks.all-features] 389 | description = "Run all with --all-features" 390 | dependencies = ["install-openssl"] 391 | env = { FEATURES = "${ALL_FEATURES}" } 392 | run_task = { name = "all", fork = true } 393 | 394 | ## 395 | ## publishing 396 | ## 397 | 398 | [tasks.package] 399 | description = "package artifacts for each crate" 400 | command = "cargo" 401 | args = ["package", "--locked"] 402 | 403 | [tasks.inner_publish] 404 | description = "publish next release" 405 | dependencies = ["await-update", "update", "check", "package"] 406 | private = true 407 | command = "cargo" 408 | args = ["publish", "--verbose", "--locked"] 409 | 410 | [tasks.publish] 411 | description = "publish next release" 412 | workspace = false 413 | env = { CARGO_MAKE_WORKSPACE_SKIP_MEMBERS = "integration-tests/*;examples/*"} 414 | run_task = { name = "inner_publish", fork = true } 415 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [![Build Status](https://github.com/bluejekyll/pg-extend-rs/workflows/test/badge.svg?branch=master)](https://github.com/bluejekyll/pg-extend-rs/actions?query=workflow%3Atest) 3 | [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE-MIT) 4 | [![License: Apache 2.0](https://img.shields.io/badge/license-Apache_2.0-blue.svg)](LICENSE-APACHE) 5 | [![Dependabot Status](https://api.dependabot.com/badges/status?host=github&repo=bluejekyll/pg-extend-rs)](https://dependabot.com) 6 | [![](http://meritbadge.herokuapp.com/pg-extend)](https://crates.io/crates/pg-extend) 7 | [![Discord](https://img.shields.io/discord/589988605322199149.svg)](https://discord.gg/y7ZvY5p) 8 | 9 | # Rust based Postgres extension 10 | 11 | The main things provided by this crate are some macros that help with writing Postgres extensions in Rust. 
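For a quick sense of what that looks like, here is a minimal sketch taken from the `examples/adding` crate in this repository: a `cdylib` crate declares itself as an extension with `pg_magic!` and exposes a plain Rust function to Postgres with `#[pg_extern]`.

```rust
extern crate pg_extend;
extern crate pg_extern_attr;

use pg_extend::pg_magic;
use pg_extern_attr::pg_extern;

// This tells Postgres this library is a Postgres extension
pg_magic!(version: pg_sys::PG_VERSION_NUM);

/// The pg_extern attribute wraps the function in the proper function syntax for C extensions
#[pg_extern]
fn add_one(value: i32) -> i32 {
    value + 1
}
```

Once built, the function can be loaded with a statement such as `CREATE FUNCTION add_one(integer) RETURNS integer AS 'path/to/libadding.dylib', 'pg_add_one' LANGUAGE C STRICT;` (see `examples/adding/README.md`).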
12 |  13 | The objectives (not all of these are implemented yet): 14 |  15 | - Automatic type conversions between Rust types and Postgres Datums; see `PgDatum`, `TryFromPgDatum`, and `Into` 16 | - `pg_magic` macro for declaring libraries as Postgres extensions 17 | - `pg_extern` attribute for wrapping Rust functions in Postgres C-style definitions 18 | - panic handlers for conversion into Postgres errors 19 | - an allocator that uses the Postgres `palloc` allocator and `pfree` 20 | - *tbd* integrate Postgres error logs with `log` 21 | - *tbd* support all Datum types 22 | - *tbd* support table-like returns and manipulation 23 | - *tbd* generators for the psql scripts to load functions 24 |  25 | ## Getting started 26 |  27 | This project uses `cargo-make` for automation. While not strictly necessary, it helps with a lot of the build tasks, so it is recommended. It can be installed with `cargo install cargo-make`. 28 |  29 | Once installed, the `cargo make` tasks will install Postgres into the `target` directory for testing. There are profiles for each supported Postgres version: `v10`, `v11`, and `v12`. The specific minor version used by each profile is defined in the top-level `Makefile.toml`. 30 |  31 | To run all tests with all features, for example, run: 32 |  33 | ```shell 34 | > cargo make all-features -p v12 # if -p is left off, then the default is v12 35 | ``` 36 |  37 | ## Building 38 |  39 | If using `cargo-make`, the environment variable `PG_DIR` can be used to specify the location of the Postgres install. 40 |  41 | First install Postgres. The build should be able to find the directory for the Postgres server headers on its own; it uses `pg_config --includedir-server` to attempt to find the directory. If it is unsuccessful, then this environment variable is required: 42 |  43 | `PG_INCLUDE_PATH=[/path/to/postgres]/include/server # e.g. /usr/local/pgsql/include/server` 44 |  45 | For the dynamic library to compile, your project should also have a `.cargo/config` file with this content: 46 |  47 | ```toml 48 | [target.'cfg(unix)'] 49 | rustflags = "-C link-arg=-undefineddynamic_lookup" 50 |  51 | [target.'cfg(windows)'] 52 | rustflags = "-C link-arg=/FORCE" 53 | ``` 54 |  55 | This informs the linker that some of the symbols for Postgres won't be available until runtime, when the dynamic library is loaded. 56 |  57 | ## Running the integration tests 58 |  59 | Standard tests can be run with the normal `cargo test`, but the integration tests are a little more involved: they require a connection to an actual Postgres DB. These instructions were performed on macOS. Create a DB in Postgres to be used. In this example a DB was created in the `/usr/local/var/postgres` path, with the name `postgres`. 
When using `cargo-make`, all of the automation of starting, installing, and setting up the DB is handled for you: 60 |  61 | Test all features: 62 |  63 | ```shell 64 | > cargo make all-features 65 | ``` 66 |  67 | Test default features: 68 |  69 | ```shell 70 | > cargo make default-features 71 | ``` 72 |  73 | Test no-default-features: 74 |  75 | ```shell 76 | > cargo make no-default-features 77 | ``` 78 |  79 | Testing against different versions (`v10`, `v11`, and `v12` are valid): 80 |  81 | ```shell 82 | > cargo make all-features -p v10 83 | ``` 84 |  85 | ### Without cargo-make 86 |  87 | To run the tests manually, you must know the DB name to use and the DB must be running; then the tests can be run: 88 |  89 | ```shell 90 | export POSTGRES_TEST_DB=postgres 91 |  92 | pg_ctl -D /usr/local/var/postgres start 93 | cargo test 94 | ``` 95 |  96 | ## Examples 97 |  98 | - [adding](https://github.com/bluejekyll/pg-extend-rs/tree/master/examples/adding) 99 | - [panicking](https://github.com/bluejekyll/pg-extend-rs/tree/master/examples/panicking) 100 |  101 | ## Features 102 |  103 | TBD 104 |  105 | ## Community 106 |  107 | For live discussions beyond this repository, please see this [Discord](https://discord.gg/y7ZvY5p). 108 | -------------------------------------------------------------------------------- /examples/adding/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "adding" 3 | version = "0.1.0" 4 | authors = ["Benjamin Fry "] 5 | edition = "2018" 6 |  7 | [lib] 8 | crate-type = ["cdylib"] 9 |  10 | [[bin]] 11 | name = "adding-stmt" 12 | path = "src/bin.rs" 13 |  14 | [dependencies] 15 | pg-extern-attr = { version = "*", path = "../../pg-extern-attr" } 16 | pg-extend = { version = "*", path = "../../pg-extend" } 17 | -------------------------------------------------------------------------------- /examples/adding/README.md: -------------------------------------------------------------------------------- 1 | # Example Postgres extension using integers 2 |  3 | An example of adding 1 to another number and returning the result. 4 |  5 | To build, get Rust, then: 6 |  7 | ```console 8 | $> cargo build --release 9 | ... 10 | ``` 11 |  12 | then load into Postgres: 13 |  14 | ```console 15 | $> psql $CONN_STR 16 | postgres=# CREATE FUNCTION add_one(integer) RETURNS integer AS 'path/to/crate/target/release/libadding.dylib', 'pg_add_one' LANGUAGE C STRICT; 17 | ``` -------------------------------------------------------------------------------- /examples/adding/src/bin.rs: -------------------------------------------------------------------------------- 1 | extern crate pg_extend; 2 |  3 | use pg_extend::pg_create_stmt_bin; 4 |  5 | pg_create_stmt_bin!( 6 | add_one_pg_create_stmt, 7 | add_big_one_pg_create_stmt, 8 | add_small_one_pg_create_stmt, 9 | add_together_pg_create_stmt, 10 | sum_array_pg_create_stmt, 11 | sum_small_array_pg_create_stmt, 12 | sum_big_array_pg_create_stmt, 13 | sum_float_array_pg_create_stmt, 14 | sum_double_array_pg_create_stmt 15 | ); 16 | -------------------------------------------------------------------------------- /examples/adding/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
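// This example exposes scalar functions (`add_one`, `add_small_one`, `add_big_one`,
// `add_together`) and array-summing functions (`sum_array` and friends) to Postgres
// through the `#[pg_extern]` attribute below.
//
// The companion `adding-stmt` binary (`src/bin.rs`) bundles the generated
// `*_pg_create_stmt` items, presumably for emitting the matching `CREATE FUNCTION`
// statements; the SQL can also be written by hand as shown in this example's README.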
7 | 8 | extern crate pg_extend; 9 | extern crate pg_extern_attr; 10 | 11 | use pg_extend::pg_magic; 12 | use pg_extern_attr::pg_extern; 13 | 14 | // This tells Postgres this library is a Postgres extension 15 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 16 | 17 | /// The pg_extern attribute wraps the function in the proper functions syntax for C extensions 18 | #[pg_extern] 19 | fn add_one(value: i32) -> i32 { 20 | value + 1 21 | } 22 | 23 | /// Test the i16 value 24 | #[pg_extern] 25 | fn add_small_one(value: i16) -> i16 { 26 | value + 1 27 | } 28 | 29 | /// Test the i64 value 30 | #[pg_extern] 31 | fn add_big_one(value: i64) -> i64 { 32 | value + 1 33 | } 34 | 35 | /// Test all 3 values at a time 36 | #[pg_extern] 37 | fn add_together(v1: i64, v2: i32, v3: i16) -> i64 { 38 | v1 + i64::from(v2) + i64::from(v3) 39 | } 40 | 41 | // Test array of i32 42 | #[pg_extern] 43 | fn sum_array(arr: &[i32]) -> i32 { 44 | arr.iter().sum() 45 | } 46 | 47 | // Test array of i16 48 | #[pg_extern] 49 | fn sum_small_array(arr: &[i16]) -> i16 { 50 | arr.iter().sum() 51 | } 52 | 53 | // Test array of i64 54 | #[pg_extern] 55 | fn sum_big_array(arr: &[i64]) -> i64 { 56 | arr.iter().sum() 57 | } 58 | 59 | // Test array of f32 60 | #[pg_extern] 61 | fn sum_float_array(arr: &[f32]) -> f32 { 62 | arr.iter().sum() 63 | } 64 | 65 | // Test array of f32 66 | #[pg_extern] 67 | fn sum_double_array(arr: &[f64]) -> f64 { 68 | arr.iter().sum() 69 | } 70 | 71 | #[cfg(test)] 72 | mod tests { 73 | use super::*; 74 | 75 | #[test] 76 | fn test_add_one() { 77 | assert_eq!(add_one(1), 2); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /examples/fdw-rw/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fdw-rw" 3 | version = "0.1.0" 4 | authors = ["Ellie Frost "] 5 | edition = "2018" 6 | 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [dependencies] 11 | pg-extern-attr = { version = "*", path = "../../pg-extern-attr" } 12 | pg-extend = { version = "*", path = "../../pg-extend", features = ["fdw"] } 13 | -------------------------------------------------------------------------------- /examples/fdw-rw/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Liz Frost 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
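// A read-write foreign data wrapper (FDW) example: a process-wide, in-memory
// key/value cache (`_CACHE`) is exposed to Postgres as a foreign table with `key`
// and `value` text columns. `ForeignData::begin` snapshots the cache for scans,
// while `insert`, `update`, and `delete` mutate it through the write callbacks.
//
// Note: this example is currently commented out of the workspace `Cargo.toml`
// because FDW support is broken with PostgreSQL 11+ (see issue #49).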
7 | extern crate pg_extend; 8 | extern crate pg_extern_attr; 9 | 10 | use pg_extend::pg_alloc::PgAllocator; 11 | use pg_extend::pg_datum::TryFromPgDatum; 12 | use pg_extend::pg_fdw::{ForeignData, ForeignRow, ForeignTableMetadata, OptionMap, Tuple}; 13 | use pg_extend::{info, pg_datum, pg_magic, pg_type}; 14 | use pg_extern_attr::pg_foreignwrapper; 15 | 16 | use std::collections::HashMap; 17 | use std::sync::RwLock; 18 | 19 | // Needs feature(staticmutex) 20 | // use std::sync::{StaticMutex, MUTEX_INIT}; 21 | // static LOCK: StaticMutex = MUTEX_INIT; 22 | static mut _CACHE: Option>> = None; 23 | 24 | fn get_cache() -> &'static RwLock> { 25 | // let _g = LOCK.lock().unwrap(); 26 | unsafe { 27 | if _CACHE.is_none() { 28 | let rw = RwLock::new(HashMap::new()); 29 | _CACHE = Some(rw) 30 | } 31 | &_CACHE.as_ref().unwrap() 32 | } 33 | } 34 | 35 | // This tells Postges this library is a Postgres extension 36 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 37 | 38 | #[pg_foreignwrapper] 39 | struct CacheFDW { 40 | inner: Vec<(String, String)>, 41 | } 42 | 43 | struct MyRow { 44 | key: String, 45 | value: String, 46 | } 47 | 48 | impl ForeignRow for MyRow { 49 | fn get_field( 50 | &self, 51 | name: &str, 52 | _typ: pg_type::PgType, 53 | _opts: OptionMap, 54 | ) -> Result, &str> { 55 | match name { 56 | "key" => Ok(Some(self.key.clone().into())), 57 | "value" => Ok(Some(self.value.clone().into())), 58 | _ => Err("unknown field"), 59 | } 60 | } 61 | } 62 | 63 | impl Iterator for CacheFDW { 64 | type Item = Box; 65 | fn next(&mut self) -> Option { 66 | match self.inner.pop() { 67 | None => None, 68 | Some((k, v)) => Some(Box::new(MyRow { 69 | key: k.to_string(), 70 | value: v.to_string(), 71 | })), 72 | } 73 | } 74 | } 75 | 76 | impl ForeignData for CacheFDW { 77 | fn begin(_table_metadata: &ForeignTableMetadata) -> Self { 78 | let c = get_cache().read().unwrap(); 79 | let vecs = c.iter().map(|(k, v)| (k.clone(), v.clone())).collect(); 80 | 81 | CacheFDW { inner: vecs } 82 | } 83 | 84 | fn schema( 85 | _sopts: OptionMap, 86 | server_name: String, 87 | _rschema: String, 88 | lschema: String, 89 | ) -> Option> { 90 | Some(vec![format!( 91 | " 92 | CREATE FOREIGN TABLE {schema}.mytable ( 93 | key text, 94 | value text) SERVER {server} 95 | ", 96 | server = server_name, 97 | schema = lschema 98 | )]) 99 | } 100 | 101 | fn index_columns(_table_metadata: &ForeignTableMetadata) -> Option> { 102 | Some(vec!["key".into()]) 103 | } 104 | 105 | fn update(&self, new_row: &Tuple, indices: &Tuple) -> Option> { 106 | let mut c = get_cache().write().unwrap(); 107 | let key = indices.get("key"); 108 | let value = new_row.get("value"); 109 | match (key, value) { 110 | (Some(key), Some(value)) => { 111 | // TODO: handle errors 112 | 113 | // TODO: switch to currect memory context 114 | let memory_context = PgAllocator::current_context(); 115 | 116 | let key = String::try_from(&memory_context, (*key).clone()).unwrap(); 117 | let value = String::try_from(&memory_context, (*value).clone()).unwrap(); 118 | c.insert(key.clone(), value.clone()); 119 | Some(Box::new(MyRow { key, value })) 120 | } 121 | _ => { 122 | info!("Missing key ({:?}) or value ({:?})", key, value); 123 | None 124 | } 125 | } 126 | } 127 | 128 | fn insert(&self, new_row: &Tuple) -> Option> { 129 | // Since we only use one field from each, these methods are equivalent 130 | self.update(new_row, new_row) 131 | } 132 | 133 | fn delete(&self, indices: &Tuple) -> Option> { 134 | // TODO: switch to correct memory context 135 | let memory_context = 
PgAllocator::current_context(); 136 | 137 | let mut c = get_cache().write().unwrap(); 138 | let key = indices.get("key"); 139 | 140 | match key { 141 | Some(key) => { 142 | let key = String::try_from(&memory_context, (*key).clone()).unwrap(); 143 | match c.remove(&key) { 144 | Some(value) => Some(Box::new(MyRow { key, value })), 145 | None => None, 146 | } 147 | } 148 | _ => { 149 | info!("Delete called without Key"); 150 | None 151 | } 152 | } 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /examples/fdw/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fdw" 3 | version = "0.1.0" 4 | authors = ["Ellie Frost "] 5 | edition = "2018" 6 | 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [[bin]] 11 | name = "fdw-stmt" 12 | path = "src/bin.rs" 13 | 14 | [dependencies] 15 | pg-extern-attr = { version = "*", path = "../../pg-extern-attr" } 16 | pg-extend = { version = "*", path = "../../pg-extend", features = ["fdw"] } 17 | -------------------------------------------------------------------------------- /examples/fdw/src/bin.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate pg_extend; 9 | 10 | use pg_extend::pg_create_stmt_bin; 11 | 12 | pg_create_stmt_bin!(DefaultFDW_pg_create_stmt); 13 | -------------------------------------------------------------------------------- /examples/fdw/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Liz Frost 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
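// A minimal read-only foreign data wrapper: each scan of the foreign table yields
// the numbers 1 through 5 as single-column rows, and `schema()` returns the
// `CREATE FOREIGN TABLE` statement used when the remote schema is imported.
//
// A rough usage sketch (the server and remote-schema names here are illustrative
// only; the actual wrapper/handler objects are generated by `#[pg_foreignwrapper]`
// and loaded via the statements produced for the `fdw-stmt` binary):
//
//   IMPORT FOREIGN SCHEMA remote FROM SERVER my_server INTO public;
//   SELECT * FROM public.mytable;  -- returns the numbers 1..5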
7 | extern crate pg_extend; 8 | extern crate pg_extern_attr; 9 | 10 | use pg_extend::pg_fdw::{ForeignData, ForeignRow, ForeignTableMetadata, OptionMap}; 11 | use pg_extend::{pg_datum, pg_magic, pg_type}; 12 | use pg_extern_attr::pg_foreignwrapper; 13 | 14 | #[cfg(test)] 15 | mod tests { 16 | #[test] 17 | fn it_works() { 18 | assert_eq!(2 + 2, 4); 19 | } 20 | } 21 | 22 | // This tells Postges this library is a Postgres extension 23 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 24 | 25 | #[pg_foreignwrapper] 26 | struct DefaultFDW { 27 | i: i32, 28 | } 29 | 30 | struct MyRow { 31 | i: i32, 32 | } 33 | 34 | impl ForeignRow for MyRow { 35 | fn get_field( 36 | &self, 37 | _name: &str, 38 | _typ: pg_type::PgType, 39 | _opts: OptionMap, 40 | ) -> Result, &str> { 41 | Ok(Some(self.i.into())) 42 | } 43 | } 44 | 45 | impl Iterator for DefaultFDW { 46 | type Item = Box; 47 | fn next(&mut self) -> Option { 48 | self.i += 1; 49 | if self.i > 5 { 50 | None 51 | } else { 52 | Some(Box::new(MyRow { i: self.i })) 53 | } 54 | } 55 | } 56 | 57 | impl ForeignData for DefaultFDW { 58 | fn begin(_table_metadata: &ForeignTableMetadata) -> Self { 59 | DefaultFDW { i: 0 } 60 | } 61 | 62 | fn schema( 63 | _server_opts: OptionMap, 64 | server_name: String, 65 | _remote_schema: String, 66 | local_schema: String, 67 | ) -> Option> { 68 | Some(vec![format!( 69 | "CREATE FOREIGN TABLE {schema}.mytable (number Integer) SERVER {server}", 70 | server = server_name, 71 | schema = local_schema 72 | )]) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /examples/logging/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "logging" 3 | version = "0.1.0" 4 | authors = ["Marti Raudsepp cargo build --release 7 | ... 8 | ``` 9 | 10 | then load into Postgres: 11 | 12 | ```console 13 | $> psql $CONN_STR 14 | postgres=# CREATE FUNCTION rs_nullif(text,text) RETURNS text AS 'path/to/libnullable.dylib', 'pg_rs_nullif' LANGUAGE C; 15 | ``` 16 | -------------------------------------------------------------------------------- /examples/logging/src/bin.rs: -------------------------------------------------------------------------------- 1 | extern crate pg_extend; 2 | 3 | use pg_extend::pg_create_stmt_bin; 4 | 5 | pg_create_stmt_bin!(rs_error_pg_create_stmt, rs_log_all_pg_create_stmt); 6 | -------------------------------------------------------------------------------- /examples/logging/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Marti Raudsepp 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate pg_extend; 9 | extern crate pg_extern_attr; 10 | 11 | use pg_extend::pg_magic; 12 | use pg_extend::{debug, error, info, log, notice, trace, warn}; 13 | use pg_extern_attr::pg_extern; 14 | 15 | // This tells Postgres this library is a Postgres extension 16 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 17 | 18 | /// An error in PostgreSQL aborts the current statement and (sub)transaction. 19 | #[pg_extern] 20 | fn rs_error(msg: String) { 21 | error!("{}", msg); 22 | } 23 | 24 | /// Log messages in all non-error log levels. 
25 | #[pg_extern] 26 | fn rs_log_all() { 27 | warn!("TEST: This is a warning"); 28 | notice!("TEST: Notice this!"); 29 | info!("TEST: This is an info message"); 30 | log!("TEST: This is a LOG-level message"); 31 | debug!("TEST: This is a debug message"); 32 | trace!("TEST: This is a trace-level message") 33 | } 34 | 35 | #[cfg(test)] 36 | mod tests { 37 | /* Cannot test this module without a PostgreSQL runtime. */ 38 | } 39 | -------------------------------------------------------------------------------- /examples/memory_context/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "memory_context" 3 | version = "0.1.0" 4 | authors = ["Benjamin Fry "] 5 | edition = "2018" 6 | 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [[bin]] 11 | name = "memory_context-stmt" 12 | path = "src/bin.rs" 13 | 14 | [dependencies] 15 | pg-extern-attr = { version = "*", path = "../../pg-extern-attr" } 16 | pg-extend = { version = "*", path = "../../pg-extend" } 17 | -------------------------------------------------------------------------------- /examples/memory_context/README.md: -------------------------------------------------------------------------------- 1 | # Example Postgres extension using the Postgres memory context 2 | 3 | To build, get Rust, then: 4 | 5 | ```console 6 | $> cargo build --release 7 | ... 8 | ``` 9 | 10 | then load into Postgres: 11 | 12 | ```console 13 | $> psql $CONN_STR 14 | postgres=# CREATE FUNCTION allocate() RETURNS void AS 'path/to/crate/target/release/libmemory_context.dylib', 'pg_allocate' LANGUAGE C; 15 | ``` 16 | -------------------------------------------------------------------------------- /examples/memory_context/src/bin.rs: -------------------------------------------------------------------------------- 1 | extern crate pg_extend; 2 | 3 | use pg_extend::pg_create_stmt_bin; 4 | 5 | pg_create_stmt_bin!(allocate_pg_create_stmt); 6 | -------------------------------------------------------------------------------- /examples/memory_context/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate pg_extend; 9 | extern crate pg_extern_attr; 10 | 11 | use pg_extend::pg_alloc::PgAllocator; 12 | use pg_extend::pg_magic; 13 | use pg_extern_attr::pg_extern; 14 | // This tells Postgres this library is a Postgres extension 15 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 16 | 17 | /// The pg_extern attribute wraps the function in the proper functions syntax for C extensions 18 | #[pg_extern] 19 | fn allocate(_memory_context: &PgAllocator) { 20 | // the _memory_context can be used anywhere from here on. 21 | } 22 | -------------------------------------------------------------------------------- /examples/nullable/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nullable" 3 | version = "0.1.0" 4 | authors = ["Marti Raudsepp ) arguments and return type. 5 | 6 | To build, get Rust, then: 7 | 8 | ```console 9 | $> cargo build --release 10 | ...
11 | ``` 12 | 13 | then load into Postgres: 14 | 15 | ```console 16 | $> psql $CONN_STR 17 | postgres=# CREATE FUNCTION rs_nullif(text,text) RETURNS text AS 'path/to/libnullable.dylib', 'pg_rs_nullif' LANGUAGE C; 18 | ``` 19 | -------------------------------------------------------------------------------- /examples/nullable/src/bin.rs: -------------------------------------------------------------------------------- 1 | extern crate pg_extend; 2 | 3 | use pg_extend::pg_create_stmt_bin; 4 | 5 | pg_create_stmt_bin!(get_null_pg_create_stmt, rs_nullif_pg_create_stmt); 6 | -------------------------------------------------------------------------------- /examples/nullable/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Marti Raudsepp 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate pg_extend; 9 | extern crate pg_extern_attr; 10 | 11 | use pg_extend::pg_magic; 12 | use pg_extern_attr::pg_extern; 13 | 14 | // This tells Postgres this library is a Postgres extension 15 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 16 | 17 | /// Simply returns NULL. For testing a function with no arguments. 18 | #[pg_extern] 19 | fn get_null() -> Option<String> { 20 | None 21 | } 22 | 23 | /// The NULLIF function returns a null value if value1 equals value2; otherwise it returns value1 24 | /// https://www.postgresql.org/docs/current/functions-conditional.html#FUNCTIONS-NULLIF 25 | #[pg_extern] 26 | fn rs_nullif(value1: Option<String>, value2: Option<String>) -> Option<String> { 27 | if value1 == value2 { 28 | None 29 | } else { 30 | value1 31 | } 32 | } 33 | 34 | #[cfg(test)] 35 | mod tests { 36 | use super::*; 37 | 38 | #[test] 39 | fn test_get_null() { 40 | assert_eq!(get_null(), None); 41 | } 42 | 43 | #[test] 44 | fn test_rs_nullif() { 45 | assert_eq!( 46 | rs_nullif(Some("a".to_string()), Some("-".to_string())), 47 | Some("a".to_string()) 48 | ); 49 | assert_eq!( 50 | rs_nullif(Some("a".to_string()), None), 51 | Some("a".to_string()) 52 | ); 53 | assert_eq!( 54 | rs_nullif(Some("-".to_string()), Some("-".to_string())), 55 | None 56 | ); 57 | assert_eq!(rs_nullif(None, Some("-".to_string())), None); 58 | assert_eq!(rs_nullif(None, None), None); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /examples/panicking/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "panicking" 3 | version = "0.1.0" 4 | authors = ["Benjamin Fry "] 5 | edition = "2018" 6 | 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [[bin]] 11 | name = "panicking-stmt" 12 | path = "src/bin.rs" 13 | 14 | [dependencies] 15 | pg-extern-attr = { version = "*", path = "../../pg-extern-attr" } 16 | pg-extend = { version = "*", path = "../../pg-extend" } 17 | -------------------------------------------------------------------------------- /examples/panicking/README.md: -------------------------------------------------------------------------------- 1 | # Example Postgres extension using panic 2 | 3 | Demonstrating how panic is caught by pg-extend-rs. 4 | 5 | To build, get Rust, then: 6 | 7 | ```console 8 | $> cargo build --release 9 | ...
10 | ``` 11 | 12 | then load into Postgres: 13 | 14 | ```console 15 | $> psql $CONN_STR 16 | postgres=# CREATE FUNCTION panicking(integer) RETURNS integer AS 'path/to/crate/target/release/libpanicking.dylib', 'panicking' LANGUAGE C STRICT; 17 | ``` -------------------------------------------------------------------------------- /examples/panicking/src/bin.rs: -------------------------------------------------------------------------------- 1 | extern crate pg_extend; 2 | 3 | use pg_extend::pg_create_stmt_bin; 4 | 5 | pg_create_stmt_bin!(panicking_pg_create_stmt, longjmping_pg_create_stmt); 6 | -------------------------------------------------------------------------------- /examples/panicking/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate pg_extend; 9 | extern crate pg_extern_attr; 10 | 11 | use pg_extend::pg_magic; 12 | use pg_extern_attr::pg_extern; 13 | // This tells Postges this library is a Postgres extension 14 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 15 | 16 | /// The pg_extern attribute wraps the function in the proper functions syntax for C extensions 17 | #[pg_extern] 18 | fn panicking(value: i32) -> i32 { 19 | panic!("forced panic in Rust example, value: {}", value); 20 | } 21 | 22 | /// Tests a longjmp 23 | /// 24 | /// Don't actually do this, it's a test for Postgres' usage of longjmp 25 | #[pg_extern] 26 | fn longjmping(value: i32) -> i32 { 27 | use pg_extend::error; 28 | 29 | error!("this error will longjmp: {}", value); 30 | 31 | unreachable!("IF YOU'RE SEEING THIS, LONGJMP FAILED"); 32 | } 33 | 34 | #[cfg(test)] 35 | mod tests { 36 | use super::*; 37 | 38 | #[test] 39 | #[should_panic] 40 | fn test_panicking() { 41 | assert_eq!(panicking(1), 2); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /examples/strings/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "strings" 3 | version = "0.1.0" 4 | authors = ["Benjamin Fry "] 5 | edition = "2018" 6 | 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [[bin]] 11 | name = "strings-stmt" 12 | path = "src/bin.rs" 13 | 14 | [dependencies] 15 | pg-extern-attr = { version = "*", path = "../../pg-extern-attr" } 16 | pg-extend = { version = "*", path = "../../pg-extend" } 17 | -------------------------------------------------------------------------------- /examples/strings/README.md: -------------------------------------------------------------------------------- 1 | # Example Postgres extension using strings 2 | 3 | To build, get Rust, then: 4 | 5 | ```console 6 | $> cargo build --release 7 | ... 
8 | ``` 9 | 10 | then load into Postgres: 11 | 12 | ```console 13 | $> psql $CONN_STR 14 | postgres=# CREATE FUNCTION concat_rs(text, text) RETURNS text AS 'path/to/crate/target/release/libstrings.dylib', 'pg_concat_rs' LANGUAGE C STRICT; 15 | ``` 16 | -------------------------------------------------------------------------------- /examples/strings/src/bin.rs: -------------------------------------------------------------------------------- 1 | extern crate pg_extend; 2 | 3 | use pg_extend::pg_create_stmt_bin; 4 | 5 | pg_create_stmt_bin!(concat_rs_pg_create_stmt, text_rs_pg_create_stmt); 6 | -------------------------------------------------------------------------------- /examples/strings/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate pg_extend; 9 | extern crate pg_extern_attr; 10 | 11 | use pg_extend::info; 12 | use pg_extend::native::Text; 13 | use pg_extend::pg_alloc::PgAllocator; 14 | use pg_extend::pg_magic; 15 | use pg_extern_attr::pg_extern; 16 | 17 | // This tells Postges this library is a Postgres extension 18 | pg_magic!(version: pg_sys::PG_VERSION_NUM); 19 | 20 | /// The pg_extern attribute wraps the function in the proper functions syntax for C extensions 21 | #[pg_extern] 22 | fn concat_rs(mut a: String, b: String) -> String { 23 | a.push_str(&b); 24 | 25 | a 26 | } 27 | 28 | /// Zero overhead Text types directly from PG, this requires the PgAllocator for the associated lifetime. 29 | #[pg_extern] 30 | fn text_rs<'mc>(_alloc: &'mc PgAllocator, text: Text<'mc>) -> Text<'mc> { 31 | info!("Length of text: {}", text.len()); 32 | 33 | // deref to a &str 34 | let rust_str: &str = &text; 35 | 36 | info!("Length of rust_str: {}", rust_str.len()); 37 | info!("Text as str from: {}", rust_str); 38 | text 39 | } 40 | 41 | #[cfg(test)] 42 | mod tests { 43 | use super::*; 44 | 45 | #[test] 46 | fn test_concat_rs() { 47 | assert_eq!(&concat_rs("a".to_string(), "b".to_string()), "ab"); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /integration-tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "integration-tests" 3 | version = "0.0.0" 4 | authors = ["Benjamin Fry "] 5 | edition = "2018" 6 | 7 | description = """ 8 | Integration tests for pg-extend 9 | """ 10 | 11 | documentation = "https://docs.rs/pg-extend" 12 | repository = "https://github.com/bluejekyll/pg-extend-rs" 13 | 14 | readme = "README.md" 15 | license = "MIT/Apache-2.0" 16 | 17 | [features] 18 | default = [] 19 | 20 | fdw = [] 21 | 22 | [dependencies] 23 | cargo = "0.44" 24 | postgres = "0.17" 25 | tempfile = "3.1" -------------------------------------------------------------------------------- /integration-tests/Makefile.toml: -------------------------------------------------------------------------------- 1 | [config] 2 | skip_core_tasks = true 3 | 4 | [config.modify_core_tasks] 5 | # if true, all core tasks are set to private (default false) 6 | private = true 7 | 8 | ## Feature profiles 9 | [env] 10 | CARGO_MAKE_EXTEND_WORKSPACE_MAKEFILE = "true" 11 | 12 | [tasks.test-inner] 13 | description = "Run tests on all the crates, without restarting PG" 14 | dependencies = ["pg-create-db"] 15 | env = { POSTGRES_PORT = 
"${PGPORT}" } 16 | command = "cargo" 17 | args = ["test", "--all-targets", "@@remove-empty(FEATURES)"] 18 | 19 | [tasks.test] 20 | description = "Run tests on all the crates, restarts and stops PG" 21 | dependencies = ["pg-stop", "pg-start", "test-inner", "pg-stop"] 22 | -------------------------------------------------------------------------------- /integration-tests/postgresql.conf: -------------------------------------------------------------------------------- 1 | # ----------------------------- 2 | # PostgreSQL configuration file 3 | # ----------------------------- 4 | # 5 | # This file consists of lines of the form: 6 | # 7 | # name = value 8 | # 9 | # (The "=" is optional.) Whitespace may be used. Comments are introduced with 10 | # "#" anywhere on a line. The complete list of parameter names and allowed 11 | # values can be found in the PostgreSQL documentation. 12 | # 13 | # The commented-out settings shown in this file represent the default values. 14 | # Re-commenting a setting is NOT sufficient to revert it to the default value; 15 | # you need to reload the server. 16 | # 17 | # This file is read on server startup and when the server receives a SIGHUP 18 | # signal. If you edit the file on a running system, you have to SIGHUP the 19 | # server for the changes to take effect, run "pg_ctl reload", or execute 20 | # "SELECT pg_reload_conf()". Some parameters, which are marked below, 21 | # require a server shutdown and restart to take effect. 22 | # 23 | # Any parameter can also be given as a command-line option to the server, e.g., 24 | # "postgres -c log_connections=on". Some parameters can be changed at run time 25 | # with the "SET" SQL command. 26 | # 27 | # Memory units: kB = kilobytes Time units: ms = milliseconds 28 | # MB = megabytes s = seconds 29 | # GB = gigabytes min = minutes 30 | # TB = terabytes h = hours 31 | # d = days 32 | 33 | 34 | #------------------------------------------------------------------------------ 35 | # FILE LOCATIONS 36 | #------------------------------------------------------------------------------ 37 | 38 | # The default values of these variables are driven from the -D command-line 39 | # option or PGDATA environment variable, represented here as ConfigDir. 40 | 41 | #data_directory = 'ConfigDir' # use data in another directory 42 | # (change requires restart) 43 | #hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file 44 | # (change requires restart) 45 | #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file 46 | # (change requires restart) 47 | 48 | # If external_pid_file is not explicitly set, no extra PID file is written. 
49 | #external_pid_file = '' # write an extra PID file 50 | # (change requires restart) 51 | 52 | 53 | #------------------------------------------------------------------------------ 54 | # CONNECTIONS AND AUTHENTICATION 55 | #------------------------------------------------------------------------------ 56 | 57 | # - Connection Settings - 58 | 59 | #listen_addresses = 'localhost' # what IP address(es) to listen on; 60 | # comma-separated list of addresses; 61 | # defaults to 'localhost'; use '*' for all 62 | # (change requires restart) 63 | #port = 5442 # (change requires restart) 64 | max_connections = 100 # (change requires restart) 65 | #superuser_reserved_connections = 3 # (change requires restart) 66 | unix_socket_directories = '/tmp' # comma-separated list of directories 67 | # (change requires restart) 68 | #unix_socket_group = '' # (change requires restart) 69 | #unix_socket_permissions = 0777 # begin with 0 to use octal notation 70 | # (change requires restart) 71 | #bonjour = off # advertise server via Bonjour 72 | # (change requires restart) 73 | #bonjour_name = '' # defaults to the computer name 74 | # (change requires restart) 75 | 76 | # - Security and Authentication - 77 | 78 | #authentication_timeout = 1min # 1s-600s 79 | #ssl = off 80 | #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers 81 | #ssl_prefer_server_ciphers = on 82 | #ssl_ecdh_curve = 'prime256v1' 83 | #ssl_dh_params_file = '' 84 | #ssl_cert_file = 'server.crt' 85 | #ssl_key_file = 'server.key' 86 | #ssl_ca_file = '' 87 | #ssl_crl_file = '' 88 | #password_encryption = md5 # md5 or scram-sha-256 89 | #db_user_namespace = off 90 | #row_security = on 91 | 92 | # GSSAPI using Kerberos 93 | #krb_server_keyfile = '' 94 | #krb_caseins_users = off 95 | 96 | # - TCP Keepalives - 97 | # see "man 7 tcp" for details 98 | 99 | #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; 100 | # 0 selects the system default 101 | #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; 102 | # 0 selects the system default 103 | #tcp_keepalives_count = 0 # TCP_KEEPCNT; 104 | # 0 selects the system default 105 | 106 | 107 | #------------------------------------------------------------------------------ 108 | # RESOURCE USAGE (except WAL) 109 | #------------------------------------------------------------------------------ 110 | 111 | # - Memory - 112 | 113 | shared_buffers = 128MB # min 128kB 114 | # (change requires restart) 115 | #huge_pages = try # on, off, or try 116 | # (change requires restart) 117 | #temp_buffers = 8MB # min 800kB 118 | #max_prepared_transactions = 0 # zero disables the feature 119 | # (change requires restart) 120 | # Caution: it is not advisable to set max_prepared_transactions nonzero unless 121 | # you actively intend to use prepared transactions. 
122 | #work_mem = 4MB # min 64kB 123 | #maintenance_work_mem = 64MB # min 1MB 124 | #replacement_sort_tuples = 150000 # limits use of replacement selection sort 125 | #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem 126 | #max_stack_depth = 2MB # min 100kB 127 | dynamic_shared_memory_type = posix # the default is the first option 128 | # supported by the operating system: 129 | # posix 130 | # sysv 131 | # windows 132 | # mmap 133 | # use none to disable dynamic shared memory 134 | # (change requires restart) 135 | 136 | # - Disk - 137 | 138 | #temp_file_limit = -1 # limits per-process temp file space 139 | # in kB, or -1 for no limit 140 | 141 | # - Kernel Resource Usage - 142 | 143 | #max_files_per_process = 1000 # min 25 144 | # (change requires restart) 145 | #shared_preload_libraries = '' # (change requires restart) 146 | 147 | # - Cost-Based Vacuum Delay - 148 | 149 | #vacuum_cost_delay = 0 # 0-100 milliseconds 150 | #vacuum_cost_page_hit = 1 # 0-10000 credits 151 | #vacuum_cost_page_miss = 10 # 0-10000 credits 152 | #vacuum_cost_page_dirty = 20 # 0-10000 credits 153 | #vacuum_cost_limit = 200 # 1-10000 credits 154 | 155 | # - Background Writer - 156 | 157 | #bgwriter_delay = 200ms # 10-10000ms between rounds 158 | #bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round 159 | #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round 160 | #bgwriter_flush_after = 0 # measured in pages, 0 disables 161 | 162 | # - Asynchronous Behavior - 163 | 164 | #effective_io_concurrency = 0 # 1-1000; 0 disables prefetching 165 | #max_worker_processes = 8 # (change requires restart) 166 | #max_parallel_workers_per_gather = 2 # taken from max_parallel_workers 167 | #max_parallel_workers = 8 # maximum number of max_worker_processes that 168 | # can be used in parallel queries 169 | #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate 170 | # (change requires restart) 171 | #backend_flush_after = 0 # measured in pages, 0 disables 172 | 173 | 174 | #------------------------------------------------------------------------------ 175 | # WRITE AHEAD LOG 176 | #------------------------------------------------------------------------------ 177 | 178 | # - Settings - 179 | 180 | #wal_level = replica # minimal, replica, or logical 181 | # (change requires restart) 182 | #fsync = on # flush data to disk for crash safety 183 | # (turning this off can cause 184 | # unrecoverable data corruption) 185 | #synchronous_commit = on # synchronization level; 186 | # off, local, remote_write, remote_apply, or on 187 | #wal_sync_method = fsync # the default is the first option 188 | # supported by the operating system: 189 | # open_datasync 190 | # fdatasync (default on Linux) 191 | # fsync 192 | # fsync_writethrough 193 | # open_sync 194 | #full_page_writes = on # recover from partial page writes 195 | #wal_compression = off # enable compression of full-page writes 196 | #wal_log_hints = off # also do full page writes of non-critical updates 197 | # (change requires restart) 198 | #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers 199 | # (change requires restart) 200 | #wal_writer_delay = 200ms # 1-10000 milliseconds 201 | #wal_writer_flush_after = 1MB # measured in pages, 0 disables 202 | 203 | #commit_delay = 0 # range 0-100000, in microseconds 204 | #commit_siblings = 5 # range 1-1000 205 | 206 | # - Checkpoints - 207 | 208 | #checkpoint_timeout = 5min # range 30s-1d 209 | #max_wal_size = 1GB 210 | #min_wal_size = 80MB 211 | 
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 212 | #checkpoint_flush_after = 0 # measured in pages, 0 disables 213 | #checkpoint_warning = 30s # 0 disables 214 | 215 | # - Archiving - 216 | 217 | #archive_mode = off # enables archiving; off, on, or always 218 | # (change requires restart) 219 | #archive_command = '' # command to use to archive a logfile segment 220 | # placeholders: %p = path of file to archive 221 | # %f = file name only 222 | # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' 223 | #archive_timeout = 0 # force a logfile segment switch after this 224 | # number of seconds; 0 disables 225 | 226 | 227 | #------------------------------------------------------------------------------ 228 | # REPLICATION 229 | #------------------------------------------------------------------------------ 230 | 231 | # - Sending Server(s) - 232 | 233 | # Set these on the master and on any standby that will send replication data. 234 | 235 | #max_wal_senders = 10 # max number of walsender processes 236 | # (change requires restart) 237 | #wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables 238 | #wal_sender_timeout = 60s # in milliseconds; 0 disables 239 | 240 | #max_replication_slots = 10 # max number of replication slots 241 | # (change requires restart) 242 | #track_commit_timestamp = off # collect timestamp of transaction commit 243 | # (change requires restart) 244 | 245 | # - Master Server - 246 | 247 | # These settings are ignored on a standby server. 248 | 249 | #synchronous_standby_names = '' # standby servers that provide sync rep 250 | # method to choose sync standbys, number of sync standbys, 251 | # and comma-separated list of application_name 252 | # from standby(s); '*' = all 253 | #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed 254 | 255 | # - Standby Servers - 256 | 257 | # These settings are ignored on a master server. 258 | 259 | #hot_standby = on # "off" disallows queries during recovery 260 | # (change requires restart) 261 | #max_standby_archive_delay = 30s # max delay before canceling queries 262 | # when reading WAL from archive; 263 | # -1 allows indefinite delay 264 | #max_standby_streaming_delay = 30s # max delay before canceling queries 265 | # when reading streaming WAL; 266 | # -1 allows indefinite delay 267 | #wal_receiver_status_interval = 10s # send replies at least this often 268 | # 0 disables 269 | #hot_standby_feedback = off # send info from standby to prevent 270 | # query conflicts 271 | #wal_receiver_timeout = 60s # time that receiver waits for 272 | # communication from master 273 | # in milliseconds; 0 disables 274 | #wal_retrieve_retry_interval = 5s # time to wait before retrying to 275 | # retrieve WAL after a failed attempt 276 | 277 | # - Subscribers - 278 | 279 | # These settings are ignored on a publisher. 
280 | 281 | #max_logical_replication_workers = 4 # taken from max_worker_processes 282 | # (change requires restart) 283 | #max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers 284 | 285 | 286 | #------------------------------------------------------------------------------ 287 | # QUERY TUNING 288 | #------------------------------------------------------------------------------ 289 | 290 | # - Planner Method Configuration - 291 | 292 | #enable_bitmapscan = on 293 | #enable_hashagg = on 294 | #enable_hashjoin = on 295 | #enable_indexscan = on 296 | #enable_indexonlyscan = on 297 | #enable_material = on 298 | #enable_mergejoin = on 299 | #enable_nestloop = on 300 | #enable_seqscan = on 301 | #enable_sort = on 302 | #enable_tidscan = on 303 | 304 | # - Planner Cost Constants - 305 | 306 | #seq_page_cost = 1.0 # measured on an arbitrary scale 307 | #random_page_cost = 4.0 # same scale as above 308 | #cpu_tuple_cost = 0.01 # same scale as above 309 | #cpu_index_tuple_cost = 0.005 # same scale as above 310 | #cpu_operator_cost = 0.0025 # same scale as above 311 | #parallel_tuple_cost = 0.1 # same scale as above 312 | #parallel_setup_cost = 1000.0 # same scale as above 313 | #min_parallel_table_scan_size = 8MB 314 | #min_parallel_index_scan_size = 512kB 315 | #effective_cache_size = 4GB 316 | 317 | # - Genetic Query Optimizer - 318 | 319 | #geqo = on 320 | #geqo_threshold = 12 321 | #geqo_effort = 5 # range 1-10 322 | #geqo_pool_size = 0 # selects default based on effort 323 | #geqo_generations = 0 # selects default based on effort 324 | #geqo_selection_bias = 2.0 # range 1.5-2.0 325 | #geqo_seed = 0.0 # range 0.0-1.0 326 | 327 | # - Other Planner Options - 328 | 329 | #default_statistics_target = 100 # range 1-10000 330 | #constraint_exclusion = partition # on, off, or partition 331 | #cursor_tuple_fraction = 0.1 # range 0.0-1.0 332 | #from_collapse_limit = 8 333 | #join_collapse_limit = 8 # 1 disables collapsing of explicit 334 | # JOIN clauses 335 | #force_parallel_mode = off 336 | 337 | 338 | #------------------------------------------------------------------------------ 339 | # ERROR REPORTING AND LOGGING 340 | #------------------------------------------------------------------------------ 341 | 342 | # - Where to Log - 343 | 344 | #log_destination = 'stderr' # Valid values are combinations of 345 | # stderr, csvlog, syslog, and eventlog, 346 | # depending on platform. csvlog 347 | # requires logging_collector to be on. 348 | 349 | # This is used when logging to stderr: 350 | #logging_collector = off # Enable capturing of stderr and csvlog 351 | # into log files. Required to be on for 352 | # csvlogs. 353 | # (change requires restart) 354 | 355 | # These are only used if logging_collector is on: 356 | #log_directory = 'log' # directory where log files are written, 357 | # can be absolute or relative to PGDATA 358 | #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, 359 | # can include strftime() escapes 360 | #log_file_mode = 0600 # creation mode for log files, 361 | # begin with 0 to use octal notation 362 | #log_truncate_on_rotation = off # If on, an existing log file with the 363 | # same name as the new log file will be 364 | # truncated rather than appended to. 365 | # But such truncation only occurs on 366 | # time-driven rotation, not on restarts 367 | # or size-driven rotation. Default is 368 | # off, meaning append to existing files 369 | # in all cases. 
370 | #log_rotation_age = 1d # Automatic rotation of logfiles will 371 | # happen after that time. 0 disables. 372 | #log_rotation_size = 10MB # Automatic rotation of logfiles will 373 | # happen after that much log output. 374 | # 0 disables. 375 | 376 | # These are relevant when logging to syslog: 377 | #syslog_facility = 'LOCAL0' 378 | #syslog_ident = 'postgres' 379 | #syslog_sequence_numbers = on 380 | #syslog_split_messages = on 381 | 382 | # This is only relevant when logging to eventlog (win32): 383 | # (change requires restart) 384 | #event_source = 'PostgreSQL' 385 | 386 | # - When to Log - 387 | 388 | log_min_messages = debug2 # values in order of decreasing detail: 389 | # debug5 390 | # debug4 391 | # debug3 392 | # debug2 393 | # debug1 394 | # info 395 | # notice 396 | # warning 397 | # error 398 | # log 399 | # fatal 400 | # panic 401 | 402 | log_min_error_statement = debug2 # values in order of decreasing detail: 403 | # debug5 404 | # debug4 405 | # debug3 406 | # debug2 407 | # debug1 408 | # info 409 | # notice 410 | # warning 411 | # error 412 | # log 413 | # fatal 414 | # panic (effectively off) 415 | 416 | log_min_duration_statement = 0 # -1 is disabled, 0 logs all statements 417 | # and their durations, > 0 logs only 418 | # statements running at least this number 419 | # of milliseconds 420 | 421 | 422 | # - What to Log - 423 | 424 | #debug_print_parse = off 425 | #debug_print_rewritten = off 426 | #debug_print_plan = off 427 | #debug_pretty_print = on 428 | #log_checkpoints = off 429 | #log_connections = off 430 | #log_disconnections = off 431 | #log_duration = off 432 | #log_error_verbosity = default # terse, default, or verbose messages 433 | #log_hostname = off 434 | #log_line_prefix = '%m [%p] ' # special values: 435 | # %a = application name 436 | # %u = user name 437 | # %d = database name 438 | # %r = remote host and port 439 | # %h = remote host 440 | # %p = process ID 441 | # %t = timestamp without milliseconds 442 | # %m = timestamp with milliseconds 443 | # %n = timestamp with milliseconds (as a Unix epoch) 444 | # %i = command tag 445 | # %e = SQL state 446 | # %c = session ID 447 | # %l = session line number 448 | # %s = session start timestamp 449 | # %v = virtual transaction ID 450 | # %x = transaction ID (0 if none) 451 | # %q = stop here in non-session 452 | # processes 453 | # %% = '%' 454 | # e.g. 
'<%u%%%d> ' 455 | #log_lock_waits = off # log lock waits >= deadlock_timeout 456 | #log_statement = 'none' # none, ddl, mod, all 457 | #log_replication_commands = off 458 | #log_temp_files = -1 # log temporary files equal or larger 459 | # than the specified size in kilobytes; 460 | # -1 disables, 0 logs all temp files 461 | log_timezone = 'UTC' 462 | 463 | 464 | # - Process Title - 465 | 466 | #cluster_name = '' # added to process titles if nonempty 467 | # (change requires restart) 468 | #update_process_title = on 469 | 470 | 471 | #------------------------------------------------------------------------------ 472 | # RUNTIME STATISTICS 473 | #------------------------------------------------------------------------------ 474 | 475 | # - Query/Index Statistics Collector - 476 | 477 | #track_activities = on 478 | #track_counts = on 479 | #track_io_timing = off 480 | #track_functions = none # none, pl, all 481 | #track_activity_query_size = 1024 # (change requires restart) 482 | #stats_temp_directory = 'pg_stat_tmp' 483 | 484 | 485 | # - Statistics Monitoring - 486 | 487 | #log_parser_stats = off 488 | #log_planner_stats = off 489 | #log_executor_stats = off 490 | #log_statement_stats = off 491 | 492 | 493 | #------------------------------------------------------------------------------ 494 | # AUTOVACUUM PARAMETERS 495 | #------------------------------------------------------------------------------ 496 | 497 | #autovacuum = on # Enable autovacuum subprocess? 'on' 498 | # requires track_counts to also be on. 499 | #log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and 500 | # their durations, > 0 logs only 501 | # actions running at least this number 502 | # of milliseconds. 503 | #autovacuum_max_workers = 3 # max number of autovacuum subprocesses 504 | # (change requires restart) 505 | #autovacuum_naptime = 1min # time between autovacuum runs 506 | #autovacuum_vacuum_threshold = 50 # min number of row updates before 507 | # vacuum 508 | #autovacuum_analyze_threshold = 50 # min number of row updates before 509 | # analyze 510 | #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum 511 | #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze 512 | #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum 513 | # (change requires restart) 514 | #autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age 515 | # before forced vacuum 516 | # (change requires restart) 517 | #autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for 518 | # autovacuum, in milliseconds; 519 | # -1 means use vacuum_cost_delay 520 | #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for 521 | # autovacuum, -1 means use 522 | # vacuum_cost_limit 523 | 524 | 525 | #------------------------------------------------------------------------------ 526 | # CLIENT CONNECTION DEFAULTS 527 | #------------------------------------------------------------------------------ 528 | 529 | # - Statement Behavior - 530 | 531 | #client_min_messages = notice # values in order of decreasing detail: 532 | # debug5 533 | # debug4 534 | # debug3 535 | # debug2 536 | # debug1 537 | # log 538 | # notice 539 | # warning 540 | # error 541 | #search_path = '"$user", public' # schema names 542 | #default_tablespace = '' # a tablespace name, '' uses the default 543 | #temp_tablespaces = '' # a list of tablespace names, '' uses 544 | # only default tablespace 545 | #check_function_bodies = on 546 | #default_transaction_isolation = 
'read committed' 547 | #default_transaction_read_only = off 548 | #default_transaction_deferrable = off 549 | #session_replication_role = 'origin' 550 | statement_timeout = 10000 # in milliseconds, 0 is disabled 551 | lock_timeout = 10000 # in milliseconds, 0 is disabled 552 | #idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled 553 | #vacuum_freeze_min_age = 50000000 554 | #vacuum_freeze_table_age = 150000000 555 | #vacuum_multixact_freeze_min_age = 5000000 556 | #vacuum_multixact_freeze_table_age = 150000000 557 | #bytea_output = 'hex' # hex, escape 558 | #xmlbinary = 'base64' 559 | #xmloption = 'content' 560 | #gin_fuzzy_search_limit = 0 561 | #gin_pending_list_limit = 4MB 562 | 563 | # - Locale and Formatting - 564 | 565 | datestyle = 'iso, mdy' 566 | #intervalstyle = 'postgres' 567 | timezone = 'UTC' 568 | #timezone_abbreviations = 'Default' # Select the set of available time zone 569 | # abbreviations. Currently, there are 570 | # Default 571 | # Australia (historical usage) 572 | # India 573 | # You can create your own file in 574 | # share/timezonesets/. 575 | #extra_float_digits = 0 # min -15, max 3 576 | #client_encoding = sql_ascii # actually, defaults to database 577 | # encoding 578 | 579 | # These settings are initialized by initdb, but they can be changed. 580 | lc_messages = 'en_US.UTF-8' # locale for system error message 581 | # strings 582 | lc_monetary = 'en_US.UTF-8' # locale for monetary formatting 583 | lc_numeric = 'en_US.UTF-8' # locale for number formatting 584 | lc_time = 'en_US.UTF-8' # locale for time formatting 585 | 586 | # default configuration for text search 587 | default_text_search_config = 'pg_catalog.english' 588 | 589 | # - Other Defaults - 590 | 591 | #dynamic_library_path = '$libdir' 592 | #local_preload_libraries = '' 593 | #session_preload_libraries = '' 594 | 595 | 596 | #------------------------------------------------------------------------------ 597 | # LOCK MANAGEMENT 598 | #------------------------------------------------------------------------------ 599 | 600 | #deadlock_timeout = 1s 601 | #max_locks_per_transaction = 64 # min 10 602 | # (change requires restart) 603 | #max_pred_locks_per_transaction = 64 # min 10 604 | # (change requires restart) 605 | #max_pred_locks_per_relation = -2 # negative values mean 606 | # (max_pred_locks_per_transaction 607 | # / -max_pred_locks_per_relation) - 1 608 | #max_pred_locks_per_page = 2 # min 0 609 | 610 | 611 | #------------------------------------------------------------------------------ 612 | # VERSION/PLATFORM COMPATIBILITY 613 | #------------------------------------------------------------------------------ 614 | 615 | # - Previous PostgreSQL Versions - 616 | 617 | #array_nulls = on 618 | #backslash_quote = safe_encoding # on, off, or safe_encoding 619 | #default_with_oids = off 620 | #escape_string_warning = on 621 | #lo_compat_privileges = off 622 | #operator_precedence_warning = off 623 | #quote_all_identifiers = off 624 | #standard_conforming_strings = on 625 | #synchronize_seqscans = on 626 | 627 | # - Other Platforms and Clients - 628 | 629 | #transform_null_equals = off 630 | 631 | 632 | #------------------------------------------------------------------------------ 633 | # ERROR HANDLING 634 | #------------------------------------------------------------------------------ 635 | 636 | #exit_on_error = off # terminate session on any error? 637 | #restart_after_crash = on # reinitialize after backend crash? 
638 | #data_sync_retry = off # retry or panic on failure to fsync 639 | # data? 640 | # (change requires restart) 641 | 642 | 643 | #------------------------------------------------------------------------------ 644 | # CONFIG FILE INCLUDES 645 | #------------------------------------------------------------------------------ 646 | 647 | # These options allow settings to be loaded from files other than the 648 | # default postgresql.conf. Note that these are directives, not variable 649 | # assignments, so they can usefully be given more than once. 650 | 651 | #include_dir = '...' # include files ending in '.conf' from 652 | # a directory, e.g., 'conf.d' 653 | #include_if_exists = '...' # include file only if it exists 654 | #include = '...' # include file 655 | 656 | 657 | #------------------------------------------------------------------------------ 658 | # CUSTOMIZED OPTIONS 659 | #------------------------------------------------------------------------------ 660 | 661 | # Add settings for extensions here 662 | -------------------------------------------------------------------------------- /integration-tests/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate cargo; 2 | extern crate postgres; 3 | extern crate tempfile; 4 | 5 | use std::env; 6 | use std::panic::{self, UnwindSafe}; 7 | use std::path::{Path, PathBuf}; 8 | use std::process; 9 | 10 | use cargo::core::compiler::{Compilation, CompileMode}; 11 | use cargo::util::errors::CargoResult; 12 | use postgres::{Client, NoTls}; 13 | 14 | #[allow(unused)] 15 | fn get_features() -> Vec { 16 | let mut features = vec![]; 17 | 18 | #[cfg(feature = "fdw")] 19 | { 20 | features.push("fdw".to_string()); 21 | } 22 | 23 | features 24 | } 25 | 26 | pub fn build_lib(name: &str) -> CargoResult { 27 | println!("building library: {}", name); 28 | let cfg = cargo::util::config::Config::default()?; 29 | 30 | let mut opts = cargo::ops::CompileOptions::new(&cfg, CompileMode::Build) 31 | .expect("failed to get compile options"); 32 | 33 | //opts.features = dbg!(get_features()); 34 | opts.spec = cargo::ops::Packages::Packages(vec![name.into()]); 35 | opts.filter = cargo::ops::CompileFilter::from_raw_arguments( 36 | true, 37 | vec![], 38 | false, 39 | vec![], 40 | false, 41 | vec![], 42 | false, 43 | vec![], 44 | false, 45 | false, 46 | ); 47 | 48 | let search_path = if cfg.cwd().ends_with("integration-tests") { 49 | // if it's in the integration-tests, this is being run in the pg-extend-rs project 50 | cfg.cwd().parent().unwrap() 51 | } else { 52 | // otherwise we're in a different project 53 | cfg.cwd() 54 | }; 55 | 56 | let path = cargo::util::important_paths::find_root_manifest_for_wd(search_path)?; 57 | println!("Cargo.toml: {}", path.display()); 58 | let ws = cargo::core::Workspace::new(&path, &cfg)?; 59 | let result = cargo::ops::compile(&ws, &opts)?; 60 | Ok(get_lib_path(&result, name)) 61 | } 62 | 63 | pub fn build_bin(name: &str) -> CargoResult { 64 | println!("building binary: {}", name); 65 | let cfg = cargo::util::config::Config::default()?; 66 | 67 | let mut opts = cargo::ops::CompileOptions::new(&cfg, CompileMode::Build) 68 | .expect("failed to get compile options"); 69 | 70 | //opts.features = dbg!(get_features()); 71 | opts.spec = cargo::ops::Packages::Packages(vec![name.into()]); 72 | opts.filter = cargo::ops::CompileFilter::from_raw_arguments( 73 | false, 74 | vec![], 75 | true, 76 | vec![], 77 | false, 78 | vec![], 79 | false, 80 | vec![], 81 | false, 82 | false, 83 | ); 84 | 85 | 
let search_path = if cfg.cwd().ends_with("integration-tests") { 86 | // if it's in the integration-tests, this is being run in the pg-extend-rs project 87 | cfg.cwd().parent().unwrap() 88 | } else { 89 | // otherwise we're in a different project 90 | cfg.cwd() 91 | }; 92 | 93 | let path = cargo::util::important_paths::find_root_manifest_for_wd(search_path)?; 94 | println!("Cargo.toml: {}", path.display()); 95 | let ws = cargo::core::Workspace::new(&path, &cfg)?; 96 | let result = cargo::ops::compile(&ws, &opts)?; 97 | Ok(get_stmt_bin_path(&result)) 98 | } 99 | 100 | fn get_lib_path(result: &Compilation, name: &str) -> PathBuf { 101 | let mut path = result.root_output.clone(); 102 | path.push("debug"); 103 | path.set_file_name(format!("lib{}", name)); 104 | path.set_extension(if cfg!(target_os = "windows") { 105 | "dll" 106 | } else if cfg!(target_os = "macos") { 107 | "dylib" 108 | } else { 109 | "so" 110 | }); 111 | path 112 | } 113 | 114 | fn get_stmt_bin_path(result: &Compilation) -> PathBuf { 115 | assert_eq!(1, result.binaries.len()); 116 | result.binaries[0].clone() 117 | } 118 | 119 | pub fn db_conn() -> Client { 120 | if let Ok(url) = env::var("POSTGRES_URL") { 121 | println!("executing on connection: {}", url); 122 | return Client::connect(&url, postgres::NoTls).expect("could not connect"); 123 | } 124 | 125 | let db_name = env::var("POSTGRES_TEST_DB").expect( 126 | "As a precaution, POSTGRES_TEST_DB must be set to ensure that other DBs are not damaged", 127 | ); 128 | 129 | let host = env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string()); 130 | let port = env::var("POSTGRES_PORT").unwrap_or_else(|_| "5432".to_string()); 131 | let user = 132 | env::var("POSTGRES_USER").unwrap_or_else(|_| env::var("USER").expect("USER is unset")); 133 | let conn_str = format!("postgres://{}@{}:{}/{}", user, host, port, db_name); 134 | 135 | println!("executing on connection: {}", conn_str); 136 | Client::connect(&conn_str as &str, NoTls).expect("could not connect") 137 | } 138 | 139 | pub fn run_create_stmts(bin_path: &PathBuf, lib_path: &PathBuf) { 140 | let sql = process::Command::new(bin_path) 141 | .arg(lib_path) 142 | .output() 143 | .expect("failed to run get stmts"); 144 | 145 | if !sql.status.success() { 146 | panic!( 147 | "get sql stmts failed: {}", 148 | String::from_utf8_lossy(&sql.stderr) 149 | ); 150 | } 151 | 152 | let attempts = 3; 153 | let sql = String::from_utf8_lossy(&sql.stdout); 154 | println!("executing stmts: {}", sql); 155 | 156 | // Try three times 157 | let mut error = None; 158 | for _ in 0..attempts { 159 | let mut conn = db_conn(); 160 | let result = conn.batch_execute(&sql); 161 | 162 | if let Err(err) = result { 163 | error = Some(err); 164 | } else { 165 | return; 166 | } 167 | } 168 | 169 | panic!( 170 | "Error creating function(s) after {} attempts: {}", 171 | attempts, 172 | error.expect("error should have been set in above loop") 173 | ); 174 | } 175 | 176 | pub fn copy_to_tempdir(path: &Path, lib_path: PathBuf) -> PathBuf { 177 | let tmplib = path.with_file_name(lib_path.file_name().unwrap()); 178 | assert!( 179 | path.exists(), 180 | format!("path does not exist: {}", path.display()) 181 | ); 182 | 183 | std::fs::copy(&lib_path, &tmplib) 184 | .map_err(|e| { 185 | format!( 186 | "failed to copy file from {} to {}: {}", 187 | lib_path.display(), 188 | tmplib.display(), 189 | e 190 | ) 191 | }) 192 | .unwrap(); 193 | 194 | tmplib 195 | } 196 | 197 | pub fn test_in_db(lib_name: &str, test: F) { 198 | println!("test_in_db: {}", lib_name); 199 | let 
bin_path = build_bin(lib_name).expect("failed to build stmt binary"); 200 | assert!(bin_path.exists()); 201 | 202 | let lib_path = build_lib(lib_name).expect("failed to build extension"); 203 | assert!(lib_path.exists()); 204 | //let tmpdir = tempfile::tempdir().expect("failed to make tempdir"); 205 | //let lib_path = copy_to_tempdir(tmpdir.path(), lib_path); 206 | 207 | println!("creating statements with bin: {}", bin_path.display()); 208 | run_create_stmts(&bin_path, &lib_path); 209 | 210 | let panic_result = panic::catch_unwind(|| { 211 | let conn = db_conn(); 212 | test(conn) 213 | }); 214 | 215 | // TODO: cleanup 216 | 217 | if let Err(e) = panic_result { 218 | panic::resume_unwind(e); 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /integration-tests/tests/adding_tests.rs: -------------------------------------------------------------------------------- 1 | extern crate integration_tests; 2 | 3 | use integration_tests::*; 4 | 5 | #[test] 6 | fn test_add_one() { 7 | test_in_db("adding", |mut conn| { 8 | let result = conn.query("SELECT add_one(1)", &[]).expect("query failed"); 9 | assert_eq!(result.len(), 1); 10 | 11 | let row = result.get(0).expect("no rows returned"); 12 | let col: i32 = row.get(0); 13 | 14 | assert_eq!(col, 2); 15 | 16 | // Calling the function with NULL argument returns NULL because it's declared STRICT 17 | let result = conn 18 | .query("SELECT add_one(NULL)", &[]) 19 | .expect("query failed"); 20 | assert_eq!(result.len(), 1); 21 | 22 | let row = result.get(0).expect("no rows returned"); 23 | let col: Option = row.get(0); 24 | 25 | assert_eq!(col, None); 26 | }); 27 | } 28 | 29 | #[test] 30 | fn test_add_one_null() { 31 | test_in_db("adding", |mut conn| { 32 | // Rust add_big_one function should not be called because we declare it STRICT. 
33 | let result = conn 34 | .query("SELECT add_big_one(CAST(NULL as int8))", &[]) 35 | .expect("query failed"); 36 | assert_eq!(result.len(), 1); 37 | 38 | let row = result.get(0).expect("no rows returned"); 39 | let col: Option = row.get(0); 40 | 41 | assert_eq!(col, None); 42 | }); 43 | } 44 | 45 | #[test] 46 | fn test_add_small_one() { 47 | test_in_db("adding", |mut conn| { 48 | let result = conn 49 | .query("SELECT add_small_one(CAST(1 as int2))", &[]) 50 | .expect("query failed"); 51 | assert_eq!(result.len(), 1); 52 | 53 | let row = result.get(0).expect("no rows returned"); 54 | let col: i16 = row.get(0); 55 | 56 | assert_eq!(col, 2); 57 | }); 58 | } 59 | 60 | #[test] 61 | fn test_add_big_one() { 62 | test_in_db("adding", |mut conn| { 63 | let result = conn 64 | .query("SELECT add_big_one(CAST(1 as int8))", &[]) 65 | .expect("query failed"); 66 | assert_eq!(result.len(), 1); 67 | 68 | let row = result.get(0).expect("no rows returned"); 69 | let col: i64 = row.get(0); 70 | 71 | assert_eq!(col, 2); 72 | }); 73 | } 74 | 75 | #[test] 76 | fn test_add_together() { 77 | test_in_db("adding", |mut conn| { 78 | let result = conn 79 | .query( 80 | "SELECT add_together(CAST(1 as int8), CAST(2 as int4), CAST(3 as int2))", 81 | &[], 82 | ) 83 | .expect("query failed"); 84 | assert_eq!(result.len(), 1); 85 | 86 | let row = result.get(0).expect("no rows returned"); 87 | let col: i64 = row.get(0); 88 | 89 | assert_eq!(col, 6); 90 | }); 91 | } 92 | 93 | #[test] 94 | fn test_sum_array() { 95 | test_in_db("adding", |mut conn| { 96 | let result = conn 97 | .query("SELECT sum_array(ARRAY[1, 2, 3])", &[]) 98 | .expect("query failed"); 99 | assert_eq!(result.len(), 1); 100 | 101 | let row = result.get(0).expect("no rows returned"); 102 | let col: i32 = row.get(0); 103 | 104 | assert_eq!(col, 6); 105 | }); 106 | } 107 | 108 | #[test] 109 | fn test_sum_small_array() { 110 | test_in_db("adding", |mut conn| { 111 | let result = conn 112 | .query("SELECT sum_small_array(ARRAY[1, 2, 3]::int2[])", &[]) 113 | .expect("query failed"); 114 | assert_eq!(result.len(), 1); 115 | 116 | let row = result.get(0).expect("no rows returned"); 117 | let col: i16 = row.get(0); 118 | 119 | assert_eq!(col, 6); 120 | }); 121 | } 122 | 123 | #[test] 124 | fn test_sum_big_array() { 125 | test_in_db("adding", |mut conn| { 126 | let result = conn 127 | .query("SELECT sum_big_array(ARRAY[1, 2, 3]::int8[])", &[]) 128 | .expect("query failed"); 129 | assert_eq!(result.len(), 1); 130 | 131 | let row = result.get(0).expect("no rows returned"); 132 | let col: i64 = row.get(0); 133 | 134 | assert_eq!(col, 6); 135 | }); 136 | } 137 | 138 | #[test] 139 | fn test_sum_float_array() { 140 | test_in_db("adding", |mut conn| { 141 | let result = conn 142 | .query("SELECT sum_float_array(ARRAY[1.1, 2.2, 3.3])", &[]) 143 | .expect("query failed"); 144 | assert_eq!(result.len(), 1); 145 | 146 | let row = result.get(0).expect("no rows returned"); 147 | let col: f32 = row.get(0); 148 | 149 | assert_eq!(format!("{:.1}", col), "6.6".to_owned()); 150 | }); 151 | } 152 | 153 | #[test] 154 | fn test_sum_double_array() { 155 | test_in_db("adding", |mut conn| { 156 | let result = conn 157 | .query("SELECT sum_double_array(ARRAY[1.1, 2.2, 3.3])", &[]) 158 | .expect("query failed"); 159 | assert_eq!(result.len(), 1); 160 | 161 | let row = result.get(0).expect("no rows returned"); 162 | let col: f64 = row.get(0); 163 | 164 | assert_eq!(format!("{:.1}", col), "6.6".to_owned()); 165 | }); 166 | } 167 | 
-------------------------------------------------------------------------------- /integration-tests/tests/fdw.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | #![cfg(not(postgres12))] 8 | #![cfg(feature = "fdw")] 9 | 10 | extern crate integration_tests; 11 | 12 | use integration_tests::*; 13 | 14 | // FDW tests disabled because it's broken with PostgreSQL 11+. 15 | // See https://github.com/bluejekyll/pg-extend-rs/issues/49 16 | #[test] 17 | fn test_fdw() { 18 | test_in_db("fdw", |mut conn| { 19 | conn.batch_execute( 20 | " 21 | DROP SERVER IF EXISTS df CASCADE; 22 | CREATE SERVER df FOREIGN DATA WRAPPER defaultfdw; 23 | 24 | DROP SCHEMA IF EXISTS fdw_test_schema CASCADE; 25 | CREATE SCHEMA fdw_test_schema; 26 | 27 | IMPORT FOREIGN SCHEMA test 28 | FROM SERVER df 29 | INTO fdw_test_schema; 30 | ", 31 | ) 32 | .expect("Failed to import foreign schema"); 33 | 34 | let rows = conn 35 | .query("SELECT * FROM fdw_test_schema.mytable;", &[]) 36 | .expect("Failed to query FDW"); 37 | assert_eq!(rows.len(), 5); 38 | for (i, row) in rows.iter().enumerate() { 39 | assert_eq!(row.len(), 1); 40 | assert_eq!((i + 1) as i32, row.get::<_, i32>(0)) 41 | } 42 | conn.batch_execute("DROP FOREIGN DATA WRAPPER defaultfdw CASCADE") 43 | .expect("failed to drop defaultfdw"); 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /integration-tests/tests/logging.rs: -------------------------------------------------------------------------------- 1 | extern crate integration_tests; 2 | 3 | use core::mem; 4 | use std::sync::{Arc, Mutex}; 5 | 6 | use postgres::error::DbError; 7 | use postgres::Client; 8 | 9 | use integration_tests::*; 10 | 11 | #[test] 12 | fn test_rs_error() { 13 | test_in_db("logging", |mut conn| { 14 | let result = conn.query("SELECT rs_error('No you dont!')", &[]); 15 | assert!(result.is_err()); 16 | 17 | if let Err(err) = result { 18 | assert_eq!(format!("{}", err), "db error: ERROR: No you dont!"); 19 | } else { 20 | panic!("should have been an error"); 21 | } 22 | }); 23 | } 24 | 25 | struct MsgCapture { 26 | msgs: Arc<Mutex<Vec<DbError>>>, 27 | } 28 | 29 | /// Allows capturing log messages from a PostgreSQL connection. 30 | impl MsgCapture { 31 | fn new() -> MsgCapture { 32 | MsgCapture { 33 | msgs: Arc::new(Mutex::new(Vec::new())), 34 | } 35 | } 36 | /// Returns the current message buffer and flushes it.
37 | fn drain(&self) -> Vec<DbError> { 38 | let mut msgs = self.msgs.lock().unwrap(); 39 | mem::replace(&mut *msgs, Vec::new()) 40 | } 41 | // /// Returns a callback for Connection::set_notice_handler() 42 | // fn get_handler(&self) -> Box<dyn HandleNotice> { 43 | // let msgs = self.msgs.clone(); 44 | // // HandleNotice trait is implemented for FnMut(DbError) 45 | // return Box::new(move |e: DbError| { 46 | // msgs.lock().unwrap().push(e); 47 | // }); 48 | // } 49 | } 50 | 51 | #[test] 52 | #[ignore] // the new Postgres connection impl made it impossible to capture the logging output 53 | fn test_rs_log_all() { 54 | test_in_db("logging", |mut conn: Client| { 55 | let capture = MsgCapture::new(); 56 | 57 | // Test with log level ERROR 58 | // INFO messages are sent to the client even at log level ERROR 59 | // https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-CLIENT-MIN-MESSAGES 60 | conn.query("SET client_min_messages=error", &[]) 61 | .expect("query failed"); 62 | // let old_handler = conn.set_notice_handler(capture.get_handler()); 63 | 64 | conn.query("SELECT rs_log_all()", &[]) 65 | .expect("query failed"); 66 | 67 | let msgs = capture.drain(); 68 | assert_eq!(msgs[0].severity(), "INFO"); 69 | assert_eq!(msgs[0].message(), "TEST: This is an info message"); 70 | assert_eq!(msgs.len(), 1); 71 | 72 | // Test with log level DEBUG5 73 | conn.query("SET client_min_messages=debug5", &[]) 74 | .expect("query failed"); 75 | conn.query("SELECT rs_log_all()", &[]) 76 | .expect("query failed"); 77 | 78 | // Filter out PostgreSQL's own debug messages e.g.: "DEBUG: StartTransaction(1) ..." 79 | // Our test messages all start with "TEST: " 80 | let msgs: Vec<String> = capture 81 | .drain() 82 | .iter() 83 | .filter_map(|m: &DbError| { 84 | if m.severity() != "DEBUG" || m.message().starts_with("TEST: ") { 85 | Some(format!("{}: {}", m.severity(), m.message())) 86 | } else { 87 | None 88 | } 89 | }) 90 | .collect(); 91 | 92 | assert_eq!( 93 | msgs, 94 | vec![ 95 | "WARNING: TEST: This is a warning", 96 | "NOTICE: TEST: Notice this!", 97 | "INFO: TEST: This is an info message", 98 | "LOG: TEST: This is a LOG-level message", 99 | // PostgreSQL clients don't distinguish between DEBUG1...DEBUG5 levels. 100 | "DEBUG: TEST: This is a debug message", 101 | "DEBUG: TEST: This is a trace-level message" 102 | ] 103 | ); 104 | 105 | // Clean up, restore old notice handler.
106 | conn.query("RESET client_min_messages", &[]) 107 | .expect("query failed"); 108 | // conn.set_notice_handler(old_handler); 109 | }); 110 | } 111 | -------------------------------------------------------------------------------- /integration-tests/tests/memory_context.rs: -------------------------------------------------------------------------------- 1 | extern crate integration_tests; 2 | 3 | use integration_tests::*; 4 | 5 | #[test] 6 | fn test_memory_context() { 7 | test_in_db("memory_context", |mut conn| { 8 | let result = conn.query("SELECT allocate()", &[]).expect("query failed"); 9 | assert_eq!(result.len(), 1); 10 | }); 11 | } 12 | -------------------------------------------------------------------------------- /integration-tests/tests/nullable_tests.rs: -------------------------------------------------------------------------------- 1 | extern crate integration_tests; 2 | 3 | use integration_tests::*; 4 | 5 | #[test] 6 | fn test_get_null() { 7 | test_in_db("nullable", |mut conn| { 8 | let result = conn.query("SELECT get_null()", &[]).expect("query failed"); 9 | assert_eq!(result.len(), 1); 10 | 11 | let row = result.get(0).expect("no row found"); 12 | let col: Option<String> = row.get(0); 13 | 14 | assert_eq!(col, None); 15 | }); 16 | } 17 | 18 | #[test] 19 | fn test_rs_nullif() { 20 | test_in_db("nullable", |mut conn| { 21 | // 'a', 'b' => 'a' 22 | let result = conn 23 | .query("SELECT rs_nullif('a', 'b')", &[]) 24 | .expect("query failed"); 25 | assert_eq!(result.len(), 1); 26 | 27 | let row = result.get(0).expect("no rows returned"); 28 | let col: Option<String> = row.get(0); 29 | 30 | assert_eq!(col, Some("a".to_string())); 31 | 32 | // '-', '-' => NULL 33 | let result = conn 34 | .query("SELECT rs_nullif('-', '-')", &[]) 35 | .expect("query failed"); 36 | assert_eq!(result.len(), 1); 37 | 38 | let row = result.get(0).expect("no rows returned"); 39 | let col: Option<String> = row.get(0); 40 | 41 | assert_eq!(col, None); 42 | 43 | // 'a', NULL => 'a' 44 | let result = conn 45 | .query("SELECT rs_nullif('a', NULL)", &[]) 46 | .expect("query failed"); 47 | assert_eq!(result.len(), 1); 48 | 49 | let row = result.get(0).expect("no rows returned"); 50 | let col: Option<String> = row.get(0); 51 | 52 | assert_eq!(col, Some("a".to_string())); 53 | 54 | // NULL, '-' => NULL 55 | let result = conn 56 | .query("SELECT rs_nullif(NULL, '-')", &[]) 57 | .expect("query failed"); 58 | assert_eq!(result.len(), 1); 59 | 60 | let row = result.get(0).expect("no rows returned"); 61 | let col: Option<String> = row.get(0); 62 | 63 | assert_eq!(col, None); 64 | }); 65 | } 66 | -------------------------------------------------------------------------------- /integration-tests/tests/panicking_tests.rs: -------------------------------------------------------------------------------- 1 | extern crate integration_tests; 2 | 3 | use integration_tests::*; 4 | 5 | #[test] 6 | fn test_panicking() { 7 | test_in_db("panicking", |mut conn| { 8 | let result = conn.query("SELECT panicking(1)", &[]); 9 | assert!(result.is_err()); 10 | }); 11 | } 12 | 13 | #[test] 14 | fn test_longjmping() { 15 | test_in_db("panicking", |mut conn| { 16 | let result = conn.query("SELECT longjmping(3)", &[]); 17 | assert!(result.is_err()); 18 | }); 19 | } 20 | -------------------------------------------------------------------------------- /integration-tests/tests/strings.rs: -------------------------------------------------------------------------------- 1 | extern crate integration_tests; 2 | 3 | use integration_tests::*; 4 | 5 | #[test] 6 | fn test_concat_rs() { 7 |
test_in_db("strings", |mut conn| { 8 | let result = conn 9 | .query("SELECT concat_rs('a','b')", &[]) 10 | .expect("query failed"); 11 | assert_eq!(result.len(), 1); 12 | 13 | let row = result.get(0).expect("no rows returned"); 14 | let col: String = row.get(0); 15 | 16 | assert_eq!(&col, "ab"); 17 | }); 18 | } 19 | 20 | #[test] 21 | fn test_text_rs() { 22 | test_in_db("strings", |mut conn| { 23 | let result = conn 24 | .query("SELECT text_rs('hello world!')", &[]) 25 | .expect("query failed"); 26 | assert_eq!(result.len(), 1); 27 | 28 | let row = result.get(0).expect("no rows returned"); 29 | let col: String = row.get(0); 30 | 31 | assert_eq!(&col, "hello world!"); 32 | }); 33 | } 34 | -------------------------------------------------------------------------------- /pg-extend/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pg-extend" 3 | version = "0.2.1" 4 | authors = ["Benjamin Fry "] 5 | edition = "2018" 6 | 7 | description = """ 8 | A library for extending Postgres with Rust. 9 | """ 10 | 11 | documentation = "https://docs.rs/pg-extend" 12 | repository = "https://github.com/bluejekyll/pg-extend" 13 | 14 | readme = "../README.md" 15 | license = "MIT/Apache-2.0" 16 | 17 | build = "build.rs" 18 | 19 | [features] 20 | default = [] 21 | 22 | # Enable Foreign Data wrappers support 23 | fdw = [] 24 | # We use feature flags to dictate which sets of PG features we support. 25 | # The build.rs script will set a feature flag rustc, but this does not enable the dependencies. 26 | # For that, build scripts that want to explcitly enable supported features in each 27 | # version, should pass the feature flag explicity. 28 | # Each of these should list all features supported by that version of PG. 29 | postgres-9 = ["fdw"] 30 | postgres-10 = ["fdw"] 31 | postgres-11 = ["fdw"] 32 | postgres-12 = [] 33 | 34 | [dependencies] 35 | cfg-if = "0.1.10" 36 | 37 | [build-dependencies] 38 | bindgen = "0.53" 39 | clang = "0.23" 40 | -------------------------------------------------------------------------------- /pg-extend/Makefile.toml: -------------------------------------------------------------------------------- 1 | [config] 2 | skip_core_tasks = true 3 | 4 | [config.modify_core_tasks] 5 | # if true, all core tasks are set to private (default false) 6 | private = true 7 | 8 | ## Feature profiles 9 | [env] 10 | CARGO_MAKE_EXTEND_WORKSPACE_MAKEFILE = "true" 11 | #ALL_FEATURES = "${VER_FEATURES}" 12 | -------------------------------------------------------------------------------- /pg-extend/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
7 | 8 | extern crate bindgen; 9 | extern crate clang; 10 | 11 | use std::collections::HashSet; 12 | use std::env; 13 | use std::path::PathBuf; 14 | use std::process::Command; 15 | 16 | fn main() { 17 | let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()).join("postgres.rs"); 18 | 19 | let pg_config = env::var("PG_CONFIG").unwrap_or_else(|_| "pg_config".to_string()); 20 | 21 | // Re-run this if wrapper.h changes 22 | println!("cargo:rerun-if-changed=wrapper.h"); 23 | println!("cargo:rerun-if-changed=pg_majorversion.h"); 24 | 25 | let pg_include = include_dir(&pg_config) 26 | .map_err(|e| println!("WARNING: failed to run pg_config: {}", e)) 27 | .expect("set environment variable PG_INCLUDE_PATH to the Postgres install include dir, e.g. /var/lib/pgsql/include/server"); 28 | 29 | // these cause duplicate definition problems on linux 30 | // see: https://github.com/rust-lang/rust-bindgen/issues/687 31 | let ignored_macros = IgnoreMacros( 32 | vec![ 33 | "FP_INFINITE".into(), 34 | "FP_NAN".into(), 35 | "FP_NORMAL".into(), 36 | "FP_SUBNORMAL".into(), 37 | "FP_ZERO".into(), 38 | "IPPORT_RESERVED".into(), 39 | ] 40 | .into_iter() 41 | .collect(), 42 | ); 43 | 44 | let bindings = get_bindings(&pg_include) // Gets initial bindings that are OS-dependant 45 | // The input header we would like to generate 46 | // bindings for. 47 | .header("wrapper.h") 48 | .parse_callbacks(Box::new(ignored_macros)) 49 | .rustfmt_bindings(true) 50 | // FIXME: add this back 51 | .layout_tests(false); 52 | 53 | // Finish the builder and generate the bindings. 54 | let bindings = bindings 55 | .generate() 56 | // Unwrap the Result and panic on failure. 57 | .expect("Unable to generate bindings"); 58 | 59 | // Write the bindings to the $OUT_DIR/postgres.rs file. 60 | bindings 61 | .write_to_file(out_path) 62 | .expect("Couldn't write bindings!"); 63 | 64 | let feature_version = get_postgres_feature_version(pg_include); 65 | println!("cargo:rustc-cfg={}", feature_version); 66 | } 67 | 68 | #[cfg(windows)] 69 | fn get_bindings(pg_include: &str) -> bindgen::Builder { 70 | // Compilation in windows requires these extra inclde paths 71 | let pg_include_win32_msvc = format!("{}\\port\\win32_msvc", pg_include); 72 | let pg_include_win32 = format!("{}\\port\\win32", pg_include); 73 | // The `pg_include` path comes in the format og "includes/server", but we also need 74 | // the parent folder, so we remove the "/server" part at the end 75 | let pg_include_parent = pg_include[..(pg_include.len() - 7)].to_owned(); 76 | 77 | bindgen::Builder::default() 78 | .clang_arg(format!("-I{}", pg_include_win32_msvc)) 79 | .clang_arg(format!("-I{}", pg_include_win32)) 80 | .clang_arg(format!("-I{}", pg_include)) 81 | .clang_arg(format!("-I{}", pg_include_parent)) 82 | // Whitelist all PG-related functions 83 | .whitelist_function("pg.*") 84 | // Whitelist used functions 85 | .whitelist_function("longjmp") 86 | .whitelist_function("_setjmp") 87 | .whitelist_function("cstring_to_text") 88 | .whitelist_function("text_to_cstring") 89 | .whitelist_function("errmsg") 90 | .whitelist_function("errstart") 91 | .whitelist_function("errfinish") 92 | .whitelist_function("pfree") 93 | .whitelist_function("list_.*") 94 | .whitelist_function("palloc") 95 | .whitelist_function(".*array.*") 96 | .whitelist_function("get_typlenbyvalalign") 97 | // Whitelist all PG-related types 98 | .whitelist_type("PG.*") 99 | // Whitelist used types 100 | .whitelist_type("jmp_buf") 101 | .whitelist_type("text") 102 | .whitelist_type("varattrib_1b") 103 | 
.whitelist_type("varattrib_4b") 104 | .whitelist_type(".*Array.*") 105 | // Whitelist PG-related values 106 | .whitelist_var("PG.*") 107 | // Whitelist log-level values 108 | .whitelist_var("DEBUG.*") 109 | .whitelist_var("LOG.*") 110 | .whitelist_var("INFO") 111 | .whitelist_var("NOTICE") 112 | .whitelist_var("WARNING") 113 | .whitelist_var("ERROR") 114 | .whitelist_var("FATAL") 115 | .whitelist_var("PANIC") 116 | // Whitelist misc values 117 | .whitelist_var("CurrentMemoryContext") 118 | .whitelist_var("FUNC_MAX_ARGS") 119 | .whitelist_var("INDEX_MAX_KEYS") 120 | .whitelist_var("NAMEDATALEN") 121 | .whitelist_var("USE_FLOAT.*") 122 | // FDW whitelisting 123 | .whitelist_function("pstrdup") 124 | .whitelist_function("lappend") 125 | .whitelist_function("makeTargetEntry") 126 | .whitelist_function("makeVar") 127 | .whitelist_function("ExecStoreTuple") 128 | .whitelist_function("heap_form_tuple") 129 | .whitelist_function("ExecClearTuple") 130 | .whitelist_function("slot_getallattrs") 131 | .whitelist_function("get_rel_name") 132 | .whitelist_function("GetForeignTable") 133 | .whitelist_function("GetForeignServer") 134 | .whitelist_function("make_foreignscan") 135 | .whitelist_function("extract_actual_clauses") 136 | .whitelist_function("add_path") 137 | .whitelist_function("create_foreignscan_path") 138 | .whitelist_type("ImportForeignSchemaStmt") 139 | .whitelist_type("ResultRelInfo") 140 | .whitelist_type("EState") 141 | .whitelist_type("ModifyTableState") 142 | .whitelist_type("Relation") 143 | .whitelist_type("RangeTblEntry") 144 | .whitelist_type("Query") 145 | .whitelist_type("ForeignScanState") 146 | .whitelist_type("InvalidBuffer") 147 | .whitelist_type("RelationData") 148 | .whitelist_type("ForeignScan") 149 | .whitelist_type("Plan") 150 | .whitelist_type("ForeignPath") 151 | .whitelist_type("RelOptInfo") 152 | .whitelist_type("Form_pg_attribute") 153 | .whitelist_type("DefElem") 154 | .whitelist_type("Value") 155 | .whitelist_var("InvalidBuffer") 156 | } 157 | 158 | #[cfg(unix)] 159 | fn get_bindings(pg_include: &str) -> bindgen::Builder { 160 | bindgen::Builder::default().clang_arg(format!("-I{}", pg_include)) 161 | } 162 | 163 | fn include_dir(pg_config: &str) -> Result { 164 | env::var("PG_INCLUDE_PATH").or_else(|err| { 165 | match Command::new(pg_config).arg("--includedir-server").output() { 166 | Ok(out) => Ok(String::from_utf8(out.stdout).unwrap().trim().to_string()), 167 | Err(..) 
=> Err(err), 168 | } 169 | }) 170 | } 171 | 172 | #[derive(Debug)] 173 | struct IgnoreMacros(HashSet); 174 | 175 | impl bindgen::callbacks::ParseCallbacks for IgnoreMacros { 176 | fn will_parse_macro(&self, name: &str) -> bindgen::callbacks::MacroParsingBehavior { 177 | if self.0.contains(name) { 178 | bindgen::callbacks::MacroParsingBehavior::Ignore 179 | } else { 180 | bindgen::callbacks::MacroParsingBehavior::Default 181 | } 182 | } 183 | } 184 | 185 | fn get_postgres_feature_version(pg_include: String) -> &'static str { 186 | let clang = clang::Clang::new().unwrap(); 187 | let index = clang::Index::new(&clang, false, false); 188 | let repr = index 189 | .parser("pg_majorversion.h") 190 | .arguments(&[format!("-I{}", pg_include)]) 191 | .parse() 192 | .expect("failed to parse pg_config.h"); 193 | 194 | // Find the variable declaration 195 | let major_version = repr 196 | .get_entity() 197 | .get_children() 198 | .into_iter() 199 | .find(|e| { 200 | e.get_kind() == clang::EntityKind::VarDecl 201 | && e.get_name() == Some("pg_majorversion".into()) 202 | }) 203 | .expect("Couldn't find major version"); 204 | 205 | // Find the string literal within the declaration 206 | let string_literal = major_version 207 | .get_children() 208 | .into_iter() 209 | .find(|e| e.get_kind() == clang::EntityKind::StringLiteral) 210 | .expect("couldn't find string literal for major version"); 211 | 212 | let version = string_literal.get_display_name().unwrap().replace("\"", ""); 213 | let version = version.split('.').collect::>(); 214 | 215 | match &version[..] { 216 | ["9", _] => "postgres9", 217 | ["10"] => "postgres10", 218 | ["11"] => "postgres11", 219 | ["12"] => "postgres12", 220 | val => panic!("unknown Postgres version {:?}", val), 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /pg-extend/pg_majorversion.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | char pg_majorversion[2] = PG_MAJORVERSION; 4 | -------------------------------------------------------------------------------- /pg-extend/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! Postgres extension library for Rust. 9 | #![warn(missing_docs)] 10 | 11 | use std::mem; 12 | use std::os::raw::c_int; 13 | use std::sync::atomic::compiler_fence; 14 | use std::sync::atomic::Ordering; 15 | 16 | pub mod pg_alloc; 17 | pub mod pg_sys; 18 | #[macro_use] 19 | pub mod pg_bool; 20 | pub mod pg_datum; 21 | pub mod pg_error; 22 | pub mod pg_fdw; 23 | pub mod pg_type; 24 | 25 | pub mod log; 26 | pub mod native; 27 | 28 | /// A macro for marking a library compatible with the Postgres extension framework. 29 | /// 30 | /// This macro was initially inspired from the `pg_module` macro in https://github.com/thehydroimpulse/postgres-extension.rs 31 | #[macro_export] 32 | macro_rules! 
pg_magic { 33 | (version: $vers:expr) => { 34 | #[no_mangle] 35 | #[allow(non_snake_case)] 36 | #[allow(unused)] 37 | #[link_name = "Pg_magic_func"] 38 | pub extern "C" fn Pg_magic_func() -> &'static pg_extend::pg_sys::Pg_magic_struct { 39 | use pg_extend::{pg_sys, register_panic_handler}; 40 | use std::mem::size_of; 41 | use std::os::raw::c_int; 42 | 43 | const my_magic: pg_extend::pg_sys::Pg_magic_struct = pg_sys::Pg_magic_struct { 44 | len: size_of::() as c_int, 45 | version: $vers as std::os::raw::c_int / 100, 46 | funcmaxargs: pg_sys::FUNC_MAX_ARGS as c_int, 47 | indexmaxkeys: pg_sys::INDEX_MAX_KEYS as c_int, 48 | namedatalen: pg_sys::NAMEDATALEN as c_int, 49 | float4byval: pg_sys::USE_FLOAT4_BYVAL as c_int, 50 | float8byval: pg_sys::USE_FLOAT8_BYVAL as c_int, 51 | }; 52 | 53 | // TODO: is this a good idea here? 54 | // register panic_handler 55 | register_panic_handler(); 56 | 57 | // return the magic 58 | &my_magic 59 | } 60 | }; 61 | } 62 | 63 | #[cfg(postgres12)] 64 | type FunctionCallInfoData = pg_sys::FunctionCallInfoBaseData; 65 | #[cfg(not(postgres12))] 66 | type FunctionCallInfoData = pg_sys::FunctionCallInfoData; 67 | 68 | /// Returns an iterator of argument Datums 69 | pub fn get_args<'a>( 70 | func_call_info: &'a FunctionCallInfoData, 71 | ) -> impl 'a + Iterator> { 72 | let num_args = func_call_info.nargs as usize; 73 | 74 | // PostgreSQL 12+: Convert from pg_sys::NullableDatum 75 | #[cfg(postgres12)] 76 | return unsafe { func_call_info.args.as_slice(num_args) } 77 | .iter() 78 | .map(|nullable| { 79 | if nullable.isnull { 80 | None 81 | } else { 82 | Some(nullable.value) 83 | } 84 | }); 85 | 86 | // Older versions store two separate arrays for 'isnull' and datums 87 | #[cfg(not(postgres12))] 88 | return { 89 | let args = &func_call_info.arg[..num_args]; 90 | let args_null = &func_call_info.argnull[..num_args]; 91 | 92 | args.iter().zip(args_null.iter()).map(|(value, isnull)| { 93 | if pg_bool::Bool::from(*isnull).into() { 94 | None 95 | } else { 96 | Some(*value) 97 | } 98 | }) 99 | }; 100 | } 101 | 102 | /// Information for a longjmp 103 | struct JumpContext { 104 | jump_value: c_int, 105 | } 106 | 107 | /// This will replace the current panic_handler 108 | pub fn register_panic_handler() { 109 | use std::panic; 110 | 111 | // set (and replace the existing) panic handler, this will tell Postgres that the call failed 112 | // a level of Fatal will force the DB connection to be killed. 113 | panic::set_hook(Box::new(|info| { 114 | // downcast info, check if it's the value we need. 115 | // this must check if the panic was due to a longjmp 116 | // the fence is to make sure the longjmp is not reodered. 117 | compiler_fence(Ordering::SeqCst); 118 | if let Some(panic_context) = info.payload().downcast_ref::() { 119 | // WARNING: do not set this level above Notice (ERROR, FATAL, PANIC), as it will cause 120 | // the following longjmp to execute. 121 | notice!("continuing longjmp: {}", info); 122 | 123 | // the panic came from a pg longjmp... so unwrap it and rethrow 124 | unsafe { 125 | pg_sys_longjmp( 126 | pg_sys::PG_exception_stack as *mut _, 127 | panic_context.jump_value, 128 | ); 129 | } 130 | } else { 131 | // error level will cause a longjmp in Postgres 132 | error!("panic in Rust extension: {}", info); 133 | } 134 | 135 | unreachable!("all above statements should have cause a longjmp to Postgres"); 136 | })); 137 | } 138 | 139 | cfg_if::cfg_if! 
{ 140 | if #[cfg(windows)] { 141 | unsafe fn pg_sys_longjmp(_buf: *mut pg_sys::_JBTYPE, _value: ::std::os::raw::c_int) { 142 | pg_sys::longjmp(_buf, _value); 143 | } 144 | } else if #[cfg(target_os = "macos")] { 145 | unsafe fn pg_sys_longjmp(_buf: *mut c_int, _value: ::std::os::raw::c_int) { 146 | pg_sys::siglongjmp(_buf, _value); 147 | } 148 | } else if #[cfg(unix)] { 149 | unsafe fn pg_sys_longjmp(_buf: *mut pg_sys::__jmp_buf_tag, _value: ::std::os::raw::c_int) { 150 | pg_sys::siglongjmp(_buf, _value); 151 | } 152 | } 153 | } 154 | 155 | /// Provides a barrier between Rust and Postgres' usage of the C set/longjmp 156 | /// 157 | /// In the case of a longjmp being caught, this will convert that to a panic. For this to work 158 | /// properly, there must be a Rust panic handler (see crate::register_panic_handler).PanicContext 159 | /// If the `pg_exern` attribute macro is used for exposing Rust functions to Postgres, then 160 | /// this is already handled. 161 | /// 162 | /// See the man pages for info on setjmp http://man7.org/linux/man-pages/man3/setjmp.3.html 163 | #[cfg(unix)] 164 | #[inline(never)] 165 | pub(crate) unsafe fn guard_pg R>(f: F) -> R { 166 | // setup the check protection 167 | let original_exception_stack: *mut pg_sys::sigjmp_buf = pg_sys::PG_exception_stack; 168 | let mut local_exception_stack: mem::MaybeUninit = 169 | mem::MaybeUninit::uninit(); 170 | let jumped = pg_sys::sigsetjmp( 171 | // grab a mutable reference, cast to a mutabl pointr, then case to the expected erased pointer type 172 | local_exception_stack.as_mut_ptr() as *mut pg_sys::sigjmp_buf as *mut _, 173 | 1, 174 | ); 175 | // now that we have the local_exception_stack, we set that for any PG longjmps... 176 | 177 | if jumped != 0 { 178 | notice!("PG longjmped: {}", jumped); 179 | pg_sys::PG_exception_stack = original_exception_stack; 180 | 181 | // The C Panicked!, handling control to Rust Panic handler 182 | compiler_fence(Ordering::SeqCst); 183 | panic!(JumpContext { jump_value: jumped }); 184 | } 185 | 186 | // replace the exception stack with ours to jump to the above point 187 | pg_sys::PG_exception_stack = local_exception_stack.as_mut_ptr() as *mut _; 188 | 189 | // enforce that the setjmp is not reordered, though that's probably unlikely... 190 | compiler_fence(Ordering::SeqCst); 191 | let result = f(); 192 | 193 | compiler_fence(Ordering::SeqCst); 194 | pg_sys::PG_exception_stack = original_exception_stack; 195 | 196 | result 197 | } 198 | 199 | /// Provides a barrier between Rust and Postgres' usage of the C set/longjmp 200 | /// 201 | /// In the case of a longjmp being caught, this will convert that to a panic. For this to work 202 | /// properly, there must be a Rust panic handler (see crate::register_panic_handler).PanicContext 203 | /// If the `pg_exern` attribute macro is used for exposing Rust functions to Postgres, then 204 | /// this is already handled. 
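/// # Example
///
/// (Editor's sketch, illustrative only: this mirrors how `log.rs` wraps a raw
/// `pg_sys` call so that a Postgres `longjmp` re-surfaces as a Rust panic.)
///
/// ```rust,ignore
/// let do_log = unsafe {
///     guard_pg(|| pg_sys::errstart(errlevel, file, line, module_path, LOG_DOMAIN))
/// };
/// ```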
205 | /// 206 | /// See the man pages for info on setjmp http://man7.org/linux/man-pages/man3/setjmp.3.html 207 | #[cfg(windows)] 208 | #[inline(never)] 209 | pub(crate) unsafe fn guard_pg R>(f: F) -> R { 210 | // setup the check protection 211 | let original_exception_stack: *mut pg_sys::jmp_buf = pg_sys::PG_exception_stack; 212 | let mut local_exception_stack: mem::MaybeUninit = mem::MaybeUninit::uninit(); 213 | let jumped = pg_sys::_setjmp( 214 | // grab a mutable reference, cast to a mutabl pointr, then case to the expected erased pointer type 215 | local_exception_stack.as_mut_ptr() as *mut pg_sys::jmp_buf as *mut _, 216 | ); 217 | // now that we have the local_exception_stack, we set that for any PG longjmps... 218 | 219 | if jumped != 0 { 220 | notice!("PG longjmped: {}", jumped); 221 | pg_sys::PG_exception_stack = original_exception_stack; 222 | 223 | // The C Panicked!, handling control to Rust Panic handler 224 | compiler_fence(Ordering::SeqCst); 225 | panic!(JumpContext { jump_value: jumped }); 226 | } 227 | 228 | // replace the exception stack with ours to jump to the above point 229 | pg_sys::PG_exception_stack = local_exception_stack.as_mut_ptr() as *mut _; 230 | 231 | // enforce that the setjmp is not reordered, though that's probably unlikely... 232 | compiler_fence(Ordering::SeqCst); 233 | let result = f(); 234 | 235 | compiler_fence(Ordering::SeqCst); 236 | pg_sys::PG_exception_stack = original_exception_stack; 237 | 238 | result 239 | } 240 | 241 | /// auto generate function to output a SQL create statement for the function 242 | /// 243 | /// Until concat_ident! stabilizes, this requires the name to passed with the appended sctring 244 | /// `_pg_create_stmt` 245 | /// 246 | /// # Example 247 | /// 248 | /// create a binary for the library, like bin.rs, and this will generate a `main()` function in it 249 | /// 250 | /// ```text 251 | /// extern crate pg_extend; 252 | /// 253 | /// use pg_extend::pg_create_stmt_bin; 254 | /// 255 | /// pg_create_stmt_bin!( 256 | /// add_one_pg_create_stmt, 257 | /// add_big_one_pg_create_stmt, 258 | /// add_small_one_pg_create_stmt, 259 | /// add_together_pg_create_stmt 260 | /// ); 261 | /// ``` 262 | #[macro_export] 263 | macro_rules! pg_create_stmt_bin { 264 | ( $( $func:ident ),* ) => { 265 | use std::env; 266 | 267 | // becuase the lib is a cdylib... maybe there's a better way? 268 | mod lib; 269 | 270 | #[cfg(target_os = "linux")] 271 | const DYLIB_EXT: &str = "so"; 272 | 273 | #[cfg(target_os = "macos")] 274 | const DYLIB_EXT: &str = "dylib"; 275 | 276 | #[cfg(target_os = "windows")] 277 | const DYLIB_EXT: &str = "dll"; 278 | 279 | fn main() { 280 | const LIB_NAME: &str = env!("CARGO_PKG_NAME"); 281 | 282 | let lib_path = env::args().nth(1).unwrap_or_else(|| format!("target/release/lib{}.{}", LIB_NAME, DYLIB_EXT)); 283 | 284 | $( println!("{}", lib::$func(&lib_path)); )* 285 | } 286 | }; 287 | } 288 | -------------------------------------------------------------------------------- /pg-extend/src/log.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Marti Raudsepp 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! Implements macros for the PostgreSQL logging system. 9 | //! 10 | //! For common log levels, convenient macros are implemented: [`trace!`], [`debug!`], [`log!`], 11 | //! 
[`info!`], [`notice!`], [`warn!`], [`error!`], [`fatal!`]. 12 | //! 13 | //! Other log levels are supported with the generic macro [`pg_log!`]. See the [`Level` enum] for 14 | //! all available log levels. 15 | //! 16 | //! # Note 17 | //! 18 | //! Beware, log levels `ERROR` and higher also abort the current transaction. The PostgreSQL 19 | //! implementation uses exception handling with `longjmp`, which currently has unsafe side-effects. 20 | //! 21 | //! # Example 22 | //! 23 | //! ```rust,no_run 24 | //! use pg_extend::{info, pg_log}; 25 | //! use pg_extend::log::Level; 26 | //! 27 | //! info!("{} widgets frobnicated", 10); 28 | //! pg_log!(Level::LogServerOnly, "Big brother is watching {}!", "you"); 29 | //! ```` 30 | //! 31 | //! # Rust `log` crate 32 | //! 33 | //! The macro names make this mostly a drop-in replacement for the Rust `log` crate. However, there 34 | //! are differences: 35 | //! * Due to PostgreSQL behavior, log levels `ERROR` and higher log levels abort the current 36 | //! statement and transaction. 37 | //! * `pg_extend` macros do not support the optional `target:` argument. 38 | //! * In the `log` crate, the generic logging macro is called `log!`. However, we use that name as a 39 | //! specialized macro since PostgreSQL has a `LOG` log level. 40 | //! * `Level` enum contains Postgres-specific log levels; there is no `Level::Trace` for instance. 41 | //! 42 | //! [`trace!`]: ../macro.trace.html 43 | //! [`debug!`]: ../macro.debug.html 44 | //! [`log!`]: ../macro.log.html 45 | //! [`info!`]: ../macro.info.html 46 | //! [`notice!`]: ../macro.notice.html 47 | //! [`warn!`]: ../macro.warn.html 48 | //! [`error!`]: ../macro.error.html 49 | //! [`fatal!`]: ../macro.fatal.html 50 | //! [`pg_log!`]: ../macro.pg_log.html 51 | //! [`Level` enum]: enum.Level.html 52 | 53 | use std::ffi::CString; 54 | use std::fmt; 55 | use std::os::raw::{c_char, c_int}; 56 | 57 | use crate::pg_sys; 58 | 59 | /// Postgres logging Levels 60 | /// 61 | /// # Note 62 | /// 63 | /// Some of these levels effect the status of the connection and transaction in Postgres. 64 | /// Specifically, >= `Error` will cause the connection and transaction to fail and be reset. 65 | #[derive(Clone, Copy)] 66 | pub enum Level { 67 | /// Debugging messages, in categories of 5 decreasing detail. 68 | Debug5 = pg_sys::DEBUG5 as isize, 69 | /// Debugging messages, in categories of 4 decreasing detail. 70 | Debug4 = pg_sys::DEBUG4 as isize, 71 | /// Debugging messages, in categories of 3 decreasing detail. 72 | Debug3 = pg_sys::DEBUG3 as isize, 73 | /// Debugging messages, in categories of 2 decreasing detail. 74 | Debug2 = pg_sys::DEBUG2 as isize, 75 | /// Debugging messages, in categories of 1 decreasing detail. 76 | Debug1 = pg_sys::DEBUG1 as isize, 77 | /// Server operational messages; sent only to server log by default. 78 | Log = pg_sys::LOG as isize, 79 | /// Same as LOG for server reporting, but never sent to client. 80 | /// `CommError` is an alias for this 81 | #[cfg(not(postgres9))] 82 | LogServerOnly = pg_sys::LOG_SERVER_ONLY as isize, 83 | /// Messages specifically requested by user (eg VACUUM VERBOSE output); always sent to client 84 | /// regardless of client_min_messages, but by default not sent to server log. 85 | Info = pg_sys::INFO as isize, 86 | /// Helpful messages to users about query operation; sent to client and not to server log by 87 | /// default. 88 | Notice = pg_sys::NOTICE as isize, 89 | /// Warnings. NOTICE is for expected messages like implicit sequence creation by SERIAL. 
90 | /// WARNING is for unexpected messages. 91 | Warning = pg_sys::WARNING as isize, 92 | /// user error - abort transaction; return to known state 93 | Error = pg_sys::ERROR as isize, 94 | /// fatal error - abort process 95 | Fatal = pg_sys::FATAL as isize, 96 | /// take down the other backends with me 97 | Panic = pg_sys::PANIC as isize, 98 | } 99 | 100 | impl From for c_int { 101 | fn from(level: Level) -> Self { 102 | level as isize as c_int 103 | } 104 | } 105 | 106 | /// Log a `DEBUG5` level message. This macro is included for easy replacement with Rust "log" crate 107 | /// macros. 108 | #[macro_export] 109 | macro_rules! trace { 110 | ($($arg:tt)*) => ( 111 | $crate::pg_log!($crate::log::Level::Debug5, $($arg)*); 112 | ) 113 | } 114 | 115 | /// Log a `DEBUG1` level message. These are hidden by default 116 | #[macro_export] 117 | macro_rules! debug { 118 | ($($arg:tt)*) => ( 119 | $crate::pg_log!($crate::log::Level::Debug1, $($arg)*); 120 | ) 121 | } 122 | 123 | /// Logs a `LOG` message. These messages have a high precedence for writing to PostgreSQL server 124 | /// logs but low precedence for sending to the client. 125 | #[macro_export] 126 | macro_rules! log { 127 | ($($arg:tt)*) => ( 128 | $crate::pg_log!($crate::log::Level::Log, $($arg)*); 129 | ) 130 | } 131 | 132 | /// Log an `INFO` message. Used for information specifically requested by user (eg VACUUM VERBOSE 133 | /// output). These messages are always sent to the client regardless of 134 | /// `client_min_messages` setting, and not to server logs by default. 135 | #[macro_export] 136 | macro_rules! info { 137 | ($($arg:tt)*) => ( 138 | $crate::pg_log!($crate::log::Level::Info, $($arg)*); 139 | ) 140 | } 141 | 142 | /// Log at `NOTICE` level. Use for helpful messages to users about query operation; expected 143 | /// messages like implicit sequence creation by SERIAL. 144 | #[macro_export] 145 | macro_rules! notice { 146 | ($($arg:tt)*) => ( 147 | $crate::pg_log!($crate::log::Level::Notice, $($arg)*); 148 | ) 149 | } 150 | 151 | /// Log at `WARNING` level. Use for messages that are unexpected for the user. 152 | #[macro_export] 153 | macro_rules! warn { 154 | ($($arg:tt)*) => ( 155 | $crate::pg_log!($crate::log::Level::Warning, $($arg)*); 156 | ) 157 | } 158 | 159 | /// Log at `ERROR` level and abort the current query and transaction. 160 | /// Beware! The PostgreSQL implementation uses exception handling with `longjmp`, which currently 161 | /// has unsafe side-effects. 162 | #[macro_export] 163 | macro_rules! error { 164 | ($($arg:tt)*) => ( 165 | $crate::pg_log!($crate::log::Level::Error, $($arg)*); 166 | ) 167 | } 168 | 169 | /// Log a `FATAL` error and exit the current backend process, also closing the database connection. 170 | #[macro_export] 171 | macro_rules! fatal { 172 | ($($arg:tt)*) => ( 173 | $crate::pg_log!($crate::log::Level::Fatal, $($arg)*); 174 | ) 175 | } 176 | 177 | /// Generic logging macro. See the [`Level` enum] for all available log levels. 178 | /// 179 | /// Usually one wouldn't call this directly but the more convenient specialized macros. 180 | /// 181 | /// # Example 182 | /// 183 | /// ```rust,no_run 184 | /// use pg_extend::pg_log; 185 | /// use pg_extend::log::Level; 186 | /// 187 | /// pg_log!(Level::LogServerOnly, "Big brother is watching {}!", "you"); 188 | /// ```` 189 | /// 190 | /// [`Level` enum]: enum.Level.html 191 | #[macro_export] 192 | macro_rules! 
pg_log { 193 | ($lvl:expr, $($arg:tt)+) => ({ 194 | $crate::log::__private_api_log( 195 | format_args!($($arg)+), 196 | $lvl, 197 | // Construct a tuple; the whole tuple is a compile-time constant. 198 | &( 199 | // Construct zero-terminated strings at compile time. 200 | concat!(module_path!(), "\0") as *const str as *const ::std::os::raw::c_char, 201 | concat!(file!(), "\0") as *const str as *const ::std::os::raw::c_char, 202 | line!(), 203 | ), 204 | ); 205 | }); 206 | } 207 | 208 | // WARNING: this is not part of the crate's public API and is subject to change at any time 209 | #[doc(hidden)] 210 | pub fn __private_api_log( 211 | args: fmt::Arguments, 212 | level: Level, 213 | &(module_path, file, line): &(*const c_char, *const c_char, u32), 214 | ) { 215 | use std::sync::atomic::{compiler_fence, Ordering}; 216 | 217 | let errlevel: c_int = c_int::from(level); 218 | let line = line as c_int; 219 | const LOG_DOMAIN: *const c_char = "RUST\0" as *const str as *const c_char; 220 | 221 | // Rust has no "function name" macro, for now we use module path instead. 222 | // See: https://github.com/rust-lang/rfcs/issues/1743 223 | let do_log = unsafe { 224 | crate::guard_pg(|| pg_sys::errstart(errlevel, file, line, module_path, LOG_DOMAIN)) 225 | }; 226 | 227 | // If errstart returned false, the message won't be seen by anyone; logging will be skipped 228 | if pgbool!(do_log) { 229 | // At this point we format the passed format string `args`; if the log level is suppressed, 230 | // no string processing needs to take place. 231 | let msg = format!("{}", args); 232 | let c_msg = CString::new(msg).or_else( 233 | |_| CString::new("failed to convert msg to a CString, check extension code for incompatible `CString` messages") 234 | ).expect("this should not fail: msg"); 235 | 236 | unsafe { 237 | crate::guard_pg(|| { 238 | compiler_fence(Ordering::SeqCst); 239 | let msg_result = pg_sys::errmsg(c_msg.as_ptr()); 240 | pg_sys::errfinish(msg_result); 241 | }); 242 | } 243 | } 244 | } 245 | -------------------------------------------------------------------------------- /pg-extend/src/native/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! Module for native Postgres types. 9 | //! 10 | //! These shoudl be near zero overhead types, exposed from Postgres and able to be directly used. 11 | 12 | mod text; 13 | mod varlena; 14 | 15 | pub use text::Text; 16 | pub(crate) use varlena::VarLenA; 17 | -------------------------------------------------------------------------------- /pg-extend/src/native/text.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
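// ---------------------------------------------------------------------------
// Editor's note: a minimal usage sketch for the logging macros defined in
// log.rs above; it is not part of the original text.rs, and the function name
// `report_progress` is hypothetical. It assumes the code runs inside a loaded
// Postgres extension so that the elog machinery is available.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn report_progress(rows: usize) {
    // DEBUG1: hidden unless the *_min_messages settings are lowered.
    crate::debug!("starting scan");
    // INFO: always sent to the client, regardless of client_min_messages.
    crate::info!("{} rows processed", rows);
    // NOTICE: helpful user-facing message; not written to the server log by default.
    crate::notice!("implicit work was performed");
    // Generic form for the remaining levels; LOG goes to the server log by default.
    crate::pg_log!(crate::log::Level::Log, "server-side detail: {} rows", rows);
}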
7 | 8 | use std::ffi::CString; 9 | use std::ops::Deref; 10 | use std::ptr::NonNull; 11 | use std::str; 12 | 13 | use crate::native::VarLenA; 14 | use crate::pg_alloc::{PgAllocated, PgAllocator, RawPtr}; 15 | use crate::pg_sys; 16 | 17 | /// A zero-overhead view of `text` data from Postgres 18 | pub struct Text<'mc>(PgAllocated<'mc, NonNull>); 19 | 20 | impl<'mc> Text<'mc> { 21 | /// Create from the raw pointer to the Postgres data 22 | #[allow(clippy::missing_safety_doc)] 23 | pub unsafe fn from_raw(alloc: &'mc PgAllocator, text_ptr: *mut pg_sys::text) -> Self { 24 | Text(PgAllocated::from_raw(alloc, text_ptr)) 25 | } 26 | 27 | /// Convert into the underlying pointer 28 | #[allow(clippy::missing_safety_doc)] 29 | pub unsafe fn into_ptr(mut self) -> *mut pg_sys::text { 30 | self.0.take_ptr() 31 | } 32 | 33 | /// Allocate a new Text data from the CString using the PgAllocator for the Postgres MemoryContext 34 | pub fn from_cstring(alloc: &'mc PgAllocator, s: CString) -> Self { 35 | unsafe { 36 | let text_ptr = { alloc.exec_with_guard(|| pg_sys::cstring_to_text(s.as_ptr())) }; 37 | 38 | Text::from_raw(alloc, text_ptr) 39 | } 40 | } 41 | 42 | /// Return true if this is empty 43 | pub fn is_empty(&self) -> bool { 44 | self.len() == 0 45 | } 46 | 47 | /// Return the length of the text data 48 | pub fn len(&self) -> usize { 49 | let varlena = unsafe { VarLenA::from_varlena(self.0.as_ref()) }; 50 | varlena.len() 51 | } 52 | 53 | /// Allocate a new CString, using the PgAllocator for the MemoryContext 54 | pub fn into_cstring(self, alloc: &'mc PgAllocator) -> PgAllocated<'mc, CString> { 55 | use std::os::raw::c_char; 56 | 57 | unsafe { 58 | alloc.exec_with_guard(|| { 59 | let text_ptr = self.0.as_ptr(); 60 | 61 | // from varlena.c 62 | /* 63 | * text_to_cstring 64 | * 65 | * Create a palloc'd, null-terminated C string from a text value. 66 | * 67 | * We support being passed a compressed or toasted text value. 68 | * This is a bit bogus since such values shouldn't really be referred to as 69 | * "text *", but it seems useful for robustness. If we didn't handle that 70 | * case here, we'd need another routine that did, anyway. 71 | */ 72 | let cstr = pg_sys::text_to_cstring(text_ptr) as *mut c_char; 73 | 74 | // this is dangerous! it's owned by CString, which is why PgAllocated will 75 | // block the dealloc 76 | PgAllocated::from_raw(alloc, cstr) 77 | }) 78 | } 79 | } 80 | } 81 | 82 | /// *WARNING* This requires the database to be a UTF-8 locale. 83 | impl<'mc> Deref for Text<'mc> { 84 | type Target = str; 85 | 86 | fn deref(&self) -> &str { 87 | // FIXME: this should panic if the DB is not UTF-8. 88 | unsafe { 89 | let varlena = VarLenA::from_varlena(self.0.as_ref()); 90 | str::from_utf8_unchecked(&*(varlena.as_slice() as *const [i8] as *const [u8])) 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /pg-extend/src/native/varlena.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
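// ---------------------------------------------------------------------------
// Editor's note: an illustrative round-trip through the `Text` API defined in
// text.rs above; it is not part of the original varlena.rs. The function name
// `text_round_trip` is hypothetical, and the code assumes it runs inside a
// Postgres backend so that a current MemoryContext exists.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn text_round_trip() {
    use std::ffi::CString;

    use crate::native::Text;
    use crate::pg_alloc::PgAllocator;

    // All allocations below are owned by the current Postgres MemoryContext.
    let alloc = PgAllocator::current_context();

    // CString -> palloc'd `text` value.
    let text = Text::from_cstring(&alloc, CString::new("hello").expect("no interior NUL"));
    assert!(!text.is_empty());
    assert_eq!(text.len(), 5);

    // `text` value -> palloc'd, NUL-terminated C string.
    let cstr = text.into_cstring(&alloc);
    assert_eq!(cstr.to_bytes(), &b"hello"[..]);
}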
7 | 8 | use std::mem; 9 | 10 | use crate::pg_sys; 11 | 12 | #[allow(clippy::enum_variant_names)] 13 | #[derive(Debug)] 14 | pub(crate) enum VarLenA<'a> { 15 | VarAtt4b(&'a pg_sys::varattrib_4b__bindgen_ty_1), 16 | VarAtt4bU, 17 | VarAtt4bC, 18 | VarAtt1b(&'a pg_sys::varattrib_1b), 19 | VarAtt1bE, 20 | VarAttNotPadByte, 21 | } 22 | 23 | #[allow(clippy::verbose_bit_mask)] 24 | #[allow(clippy::cast_ptr_alignment)] 25 | impl<'a> VarLenA<'a> { 26 | /// See postgres.h 27 | pub(crate) unsafe fn from_varlena(varlena: &'a pg_sys::varlena) -> VarLenA<'a> { 28 | let varattrib_1b = &*(varlena as *const pg_sys::varlena as *const pg_sys::varattrib_1b); 29 | 30 | if (varattrib_1b.va_header & 0x01) == 0x00 { 31 | // #define VARATT_IS_4B(PTR) \ 32 | // ((((varattrib_1b *) (PTR))->va_header & 0x01) == 0x00) 33 | VarLenA::VarAtt4b( 34 | &*(varlena as *const pg_sys::varlena as *const pg_sys::varattrib_4b__bindgen_ty_1), 35 | ) 36 | } else if (varattrib_1b.va_header & 0x03) == 0x00 { 37 | // #define VARATT_IS_4B_U(PTR) \ 38 | // ((((varattrib_1b *) (PTR))->va_header & 0x03) == 0x00) 39 | 40 | VarLenA::VarAtt4bU 41 | } else if (varattrib_1b.va_header & 0x03) == 0x02 { 42 | // #define VARATT_IS_4B_C(PTR) \ 43 | // ((((varattrib_1b *) (PTR))->va_header & 0x03) == 0x02) 44 | VarLenA::VarAtt4bC 45 | } else if (varattrib_1b.va_header & 0x01) == 0x01 { 46 | // #define VARATT_IS_1B(PTR) \ 47 | // ((((varattrib_1b *) (PTR))->va_header & 0x01) == 0x01) 48 | VarLenA::VarAtt1b(&*(varlena as *const pg_sys::varlena as *const pg_sys::varattrib_1b)) 49 | } else if varattrib_1b.va_header == 0x01 { 50 | // #define VARATT_IS_1B_E(PTR) \ 51 | // ((((varattrib_1b *) (PTR))->va_header) == 0x01) 52 | VarLenA::VarAtt1bE 53 | } else { 54 | /*if *mem::transmute::<&pg_sys::varlena, &u8>(self.as_text()) != 0*/ 55 | // #define VarAttNotPadByte(PTR) \ 56 | 57 | VarLenA::VarAttNotPadByte 58 | } 59 | } 60 | 61 | pub(crate) fn len(&self) -> usize { 62 | use VarLenA::*; 63 | 64 | match self { 65 | // define VARSIZE_4B(PTR) \ 66 | // ((((varattrib_4b *) (PTR))->va_4byte.va_header >> 2) & 0x3FFFFFFF) 67 | VarAtt4b(varlena) => { 68 | ((varlena.va_header >> 2) & 0x3FFF_FFFF) as usize 69 | - Self::size_of(&varlena.va_header) 70 | } 71 | // #define VARSIZE_1B(PTR) \ 72 | // ((((varattrib_1b *) (PTR))->va_header >> 1) & 0x7F) 73 | VarAtt1b(varlena) => { 74 | ((varlena.va_header >> 1) & 0x7F) as usize - Self::size_of(&varlena.va_header) 75 | } 76 | // #define VARTAG_1B_E(PTR) \ 77 | // (((varattrib_1b_e *) (PTR))->va_tag) 78 | _ => unimplemented!("this VarLenA type not yet supported: {:?}", self), 79 | } 80 | } 81 | 82 | pub(crate) fn as_slice(&self) -> &[std::os::raw::c_char] { 83 | use VarLenA::*; 84 | 85 | let len = self.len(); 86 | 87 | unsafe { 88 | match self { 89 | VarAtt4b(varlena) => varlena.va_data.as_slice(len), 90 | VarAtt1b(varlena) => varlena.va_data.as_slice(len), 91 | _ => unimplemented!("this VarLenA type not yet supported: {:?}", self), 92 | } 93 | } 94 | } 95 | 96 | fn size_of(_ty: &T) -> usize { 97 | mem::size_of::() 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /pg-extend/src/pg_alloc.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! 
A Postgres Allocator 9 | 10 | use std::ffi::c_void; 11 | use std::marker::{PhantomData, PhantomPinned}; 12 | use std::mem::ManuallyDrop; 13 | use std::ops::{Deref, DerefMut}; 14 | use std::ptr::NonNull; 15 | 16 | use crate::pg_sys; 17 | 18 | /// An allocattor which uses the palloc and pfree functions available from Postgres. 19 | /// 20 | /// This is managed by Postgres and guarantees that all memory is freed after a transaction completes. 21 | pub struct PgAllocator(NonNull); 22 | 23 | impl PgAllocator { 24 | /// Instantiate a PgAllocator from the raw pointer. 25 | unsafe fn from_raw(context: *mut pg_sys::MemoryContextData) -> Self { 26 | Self(NonNull::new_unchecked(context)) 27 | } 28 | 29 | /// Establishes a PgAllocator from the current default context. 30 | pub fn current_context() -> Self { 31 | unsafe { Self::from_raw(pg_sys::CurrentMemoryContext) } 32 | } 33 | 34 | /// Sets this PgAllocator as the current memory context, and then resets it to the previous 35 | /// after executing the function. 36 | pub fn exec R>(&self, f: F) -> R { 37 | let previous_context; 38 | unsafe { 39 | // save the previous context 40 | previous_context = pg_sys::CurrentMemoryContext; 41 | 42 | // set this context as the current 43 | pg_sys::CurrentMemoryContext = self.0.as_ref() as *const _ as *mut _; 44 | } 45 | 46 | // TODO: should we catch panics here to guarantee the context is reset? 47 | let result = f(); 48 | 49 | // reset the previous context 50 | unsafe { 51 | pg_sys::CurrentMemoryContext = previous_context; 52 | } 53 | 54 | result 55 | } 56 | 57 | /// Same as exec, but additionally wraps in with pg_guard 58 | /// 59 | /// # Safety 60 | /// 61 | /// This has the same safety requirements as `guard_pg` 62 | pub unsafe fn exec_with_guard R>(&self, f: F) -> R { 63 | use crate::guard_pg; 64 | 65 | self.exec(|| guard_pg(f)) 66 | } 67 | 68 | unsafe fn dealloc(&self, pg_data: *mut T) { 69 | // TODO: see mctx.c in Postgres' source this probably needs more validation 70 | let ptr = pg_data as *mut c_void; 71 | // pg_sys::pfree(pg_data as *mut c_void) 72 | let methods = *self.0.as_ref().methods; 73 | crate::guard_pg(|| { 74 | methods.free_p.expect("free_p is none")(self.0.as_ref() as *const _ as *mut _, ptr); 75 | }); 76 | } 77 | } 78 | 79 | /// Types that were allocated by Postgres 80 | /// 81 | /// Any data allocated by Postgres or being returned to Postgres for management must be stored in this value. 82 | pub struct PgAllocated<'mc, T: 'mc + RawPtr> { 83 | inner: Option>, 84 | allocator: &'mc PgAllocator, 85 | _disable_send_sync: PhantomData>, 86 | _not_unpin: PhantomPinned, 87 | } 88 | 89 | impl<'mc, T: RawPtr> PgAllocated<'mc, T> 90 | where 91 | T: 'mc + RawPtr, 92 | { 93 | /// Creates a new Allocated type from Postgres. 94 | /// 95 | /// This does not allocate, it associates the lifetime of the Allocator to this type. 96 | /// it protects the wrapped type from being dropped by Rust, and uses the 97 | /// associated Postgres Allocator for freeing the backing memory. 98 | /// 99 | /// # Safety 100 | /// 101 | /// The memory referenced by `ptr` must have been allocated withinn the associated `memory_context`. 
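    /// # Example
    ///
    /// (Editor's sketch, illustrative only; `alloc` and `text_ptr` are assumed
    /// to come from Postgres, so this is not run as a doc test. It mirrors how
    /// `native::Text::from_raw` wraps a Postgres-allocated pointer.)
    ///
    /// ```rust,ignore
    /// let owned: PgAllocated<'_, NonNull<pg_sys::text>> =
    ///     unsafe { PgAllocated::from_raw(&alloc, text_ptr) };
    /// ```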
102 | pub unsafe fn from_raw( 103 | memory_context: &'mc PgAllocator, 104 | ptr: *mut ::Target, 105 | ) -> Self { 106 | PgAllocated { 107 | inner: Some(ManuallyDrop::new(T::from_raw(ptr))), 108 | allocator: memory_context, 109 | _disable_send_sync: PhantomData, 110 | _not_unpin: PhantomPinned, 111 | } 112 | } 113 | 114 | /// This consumes the inner pointer 115 | #[allow(clippy::missing_safety_doc)] 116 | pub unsafe fn take_ptr(&mut self) -> *mut ::Target { 117 | let inner = self 118 | .inner 119 | .take() 120 | .expect("invalid None while PgAllocated is live"); 121 | ManuallyDrop::into_inner(inner).into_raw() 122 | } 123 | 124 | /// Returns a pointer to the inner type 125 | pub fn as_ptr(&self) -> *const ::Target { 126 | self.inner 127 | .as_ref() 128 | .expect("invalid None while PgAllocated is live") 129 | .as_ptr() 130 | } 131 | } 132 | 133 | impl<'mc, T: 'mc + RawPtr> Deref for PgAllocated<'mc, T> { 134 | type Target = T; 135 | 136 | fn deref(&self) -> &Self::Target { 137 | self.inner 138 | .as_ref() 139 | .expect("invalid None while PgAllocated is live") 140 | .deref() 141 | } 142 | } 143 | 144 | impl<'mc, T: 'mc + RawPtr> DerefMut for PgAllocated<'mc, T> { 145 | fn deref_mut(&mut self) -> &mut Self::Target { 146 | // TODO: instead of requiring Option here, swap the pointer with 0, and allow free on 0, which is safe. 147 | self.inner 148 | .as_mut() 149 | .expect("invalid None while PgAllocated is live") 150 | .deref_mut() 151 | } 152 | } 153 | 154 | impl<'mc, T: RawPtr> Drop for PgAllocated<'mc, T> { 155 | fn drop(&mut self) { 156 | if let Some(inner) = self.inner.take() { 157 | unsafe { 158 | // TODO: do we need to run the drop on the inner type? 159 | // let ptr: *mut T = mem::transmute(inner.deref_mut().deref_mut()); 160 | let ptr: *mut _ = ManuallyDrop::into_inner(inner).into_raw(); 161 | self.allocator.dealloc(ptr); 162 | } 163 | } 164 | } 165 | } 166 | 167 | /// Types which implement this can be converted from pointers to their Rust type and vice versa. 168 | pub trait RawPtr { 169 | /// Type to which the pointer is associated. 170 | type Target; 171 | 172 | /// Instantiate the type from the pointer 173 | /// 174 | /// # Safety 175 | /// 176 | /// Implementors should validate that all conversions into Rust wrapper type are within MemoryContexts 177 | unsafe fn from_raw(ptr: *mut Self::Target) -> Self; 178 | 179 | /// Consume this and return the pointer. 180 | /// 181 | /// # Safety 182 | /// 183 | /// After calling `into_raw` there should be no other pointers to the data behind the pointer. 
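    /// # Example
    ///
    /// (Editor's sketch, illustrative only, using the `CString` impl below.)
    ///
    /// ```rust,ignore
    /// let raw = unsafe { RawPtr::into_raw(cstring) }; // the raw pointer now owns the data
    /// let back = unsafe { CString::from_raw(raw) };   // ...until ownership is re-taken
    /// ```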
184 | unsafe fn into_raw(self) -> *mut Self::Target; 185 | 186 | /// Returns a pointer to this 187 | fn as_ptr(&self) -> *const Self::Target; 188 | } 189 | 190 | impl RawPtr for std::ffi::CString { 191 | type Target = std::os::raw::c_char; 192 | 193 | unsafe fn from_raw(ptr: *mut Self::Target) -> Self { 194 | std::ffi::CString::from_raw(ptr) 195 | } 196 | 197 | unsafe fn into_raw(self) -> *mut Self::Target { 198 | std::ffi::CString::into_raw(self) 199 | } 200 | 201 | fn as_ptr(&self) -> *const Self::Target { 202 | self.as_c_str().as_ptr() 203 | } 204 | } 205 | 206 | impl RawPtr for NonNull { 207 | type Target = pg_sys::text; 208 | 209 | unsafe fn from_raw(ptr: *mut Self::Target) -> Self { 210 | NonNull::new_unchecked(ptr) 211 | } 212 | 213 | unsafe fn into_raw(self) -> *mut Self::Target { 214 | NonNull::as_ptr(self) 215 | } 216 | 217 | fn as_ptr(&self) -> *const Self::Target { 218 | unsafe { self.as_ref() } 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /pg-extend/src/pg_bool.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! Support for Postgres boolean values 9 | 10 | const TRUE_U8: u8 = 1; 11 | const FALSE_U8: u8 = 0; 12 | 13 | const TRUE_I8: i8 = 1; 14 | const FALSE_I8: i8 = 0; 15 | 16 | const TRUE_CH: char = 1 as char; 17 | const FALSE_CH: char = 0 as char; 18 | 19 | /// A macro to convert booleans between Postgres and Rust 20 | #[macro_export] 21 | macro_rules! pgbool { 22 | ($x:expr) => { 23 | $crate::pg_bool::Bool::from($x).into() 24 | }; 25 | } 26 | 27 | /// This type provides conversions for all the possible types that Postgres might use internally for 28 | /// boolean values. 29 | #[derive(Clone, Copy)] 30 | pub struct Bool(bool); 31 | 32 | impl From for bool { 33 | fn from(b: Bool) -> Self { 34 | b.0 35 | } 36 | } 37 | 38 | impl From for Bool { 39 | fn from(b: bool) -> Self { 40 | Bool(b) 41 | } 42 | } 43 | 44 | impl From for u8 { 45 | fn from(b: Bool) -> Self { 46 | if b.0 { 47 | TRUE_U8 48 | } else { 49 | FALSE_U8 50 | } 51 | } 52 | } 53 | 54 | impl From for Bool { 55 | /// Parse a Bool from a integer. 56 | /// 57 | /// Required in the case where bindgen turns a C bool into u8 (i.e. 
linux) 58 | /// 59 | /// ``` 60 | /// extern crate pg_extend; 61 | /// use pg_extend::pg_bool::Bool; 62 | /// 63 | /// assert_eq!(u8::from(Bool::from(1_u8)), u8::from(Bool::from(true))); 64 | /// assert_eq!(u8::from(Bool::from(0_u8)), u8::from(Bool::from(false))); 65 | /// ``` 66 | fn from(i: u8) -> Self { 67 | Bool(i == TRUE_U8) 68 | } 69 | } 70 | 71 | impl From for i8 { 72 | fn from(b: Bool) -> Self { 73 | if b.0 { 74 | TRUE_I8 75 | } else { 76 | FALSE_I8 77 | } 78 | } 79 | } 80 | 81 | impl From for Bool { 82 | fn from(i: i8) -> Self { 83 | Bool(i == TRUE_I8) 84 | } 85 | } 86 | 87 | impl From for char { 88 | fn from(b: Bool) -> Self { 89 | if b.0 { 90 | TRUE_CH 91 | } else { 92 | FALSE_CH 93 | } 94 | } 95 | } 96 | 97 | impl From for Bool { 98 | fn from(i: char) -> Self { 99 | Bool(i == TRUE_CH) 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /pg-extend/src/pg_datum.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! Postgres Datum conversions for Rust types 9 | 10 | use std::ffi::{CStr, CString}; 11 | use std::marker::PhantomData; 12 | use std::os::raw::c_char; 13 | use std::ptr::NonNull; 14 | 15 | use crate::native::Text; 16 | use crate::pg_alloc::{PgAllocated, PgAllocator}; 17 | use crate::pg_bool; 18 | use crate::pg_sys::{self, Datum}; 19 | 20 | /// A wrapper type for Postgres Datum's. 21 | /// 22 | /// This simplifies the semantics around Nullability of the Datum value, and provides conversion tools 23 | /// between Datum and Rust types 24 | #[derive(Clone, Debug)] 25 | pub struct PgDatum<'mc>(Option, PhantomData>); 26 | 27 | impl<'mc> PgDatum<'mc> { 28 | /// Returns a new PgDatum wrapper for Datatypes used by Postgres. 29 | #[allow(clippy::missing_safety_doc)] 30 | pub unsafe fn from_raw>( 31 | _memory_context: &'mc PgAllocator, 32 | datum: Datum, 33 | is_null: B, 34 | ) -> PgDatum<'mc> { 35 | let is_null: pg_bool::Bool = is_null.into(); 36 | let datum = if is_null.into() { None } else { Some(datum) }; 37 | PgDatum(datum, PhantomData) 38 | } 39 | 40 | /// Returns a new PgDatum wrapper if you already have Option 41 | #[allow(clippy::missing_safety_doc)] 42 | pub unsafe fn from_option( 43 | _memory_context: &'mc PgAllocator, 44 | datum: Option, 45 | ) -> PgDatum<'mc> { 46 | PgDatum(datum, PhantomData) 47 | } 48 | 49 | /// Return true if this Datum is None 50 | /// 51 | /// # Notes 52 | /// 53 | /// This must not panic, this is called directly at the FFI boundary with Postgres, if it panics it will cause 54 | /// the full Postgres DB to restart and enter recovery mode. 55 | pub fn is_null(&self) -> bool { 56 | self.0.is_none() 57 | } 58 | 59 | /// Do a direct converstion to the Postgres datum type. 60 | /// 61 | /// # Notes 62 | /// 63 | /// This must not panic, this is called directly at the FFI boundary with Postgres, if it panics it will cause 64 | /// the full Postgres DB to restart and enter recovery mode. 65 | #[allow(clippy::missing_safety_doc)] 66 | pub unsafe fn into_datum(self) -> Datum { 67 | match self.0 { 68 | Some(datum) => datum, 69 | None => 0 as Datum, 70 | } 71 | } 72 | } 73 | 74 | /// A trait that allows for conversions between Postgres Datum types and Rust types. 
75 | /// 76 | /// Only Sized types, that fit in a single Datum, bool, u8 - u64 e.g. Nothing else is 77 | /// safe here. 78 | pub trait TryFromPgDatum<'s>: Sized { 79 | /// Attempt a conversion to from the Postgres data type into the Rust type 80 | fn try_from<'mc>( 81 | memory_context: &'mc PgAllocator, 82 | datum: PgDatum<'mc>, 83 | ) -> Result 84 | where 85 | Self: 's, 86 | 'mc: 's; 87 | } 88 | 89 | impl<'s> TryFromPgDatum<'s> for i16 { 90 | fn try_from<'mc>(_: &'mc PgAllocator, datum: PgDatum<'mc>) -> Result 91 | where 92 | Self: 's, 93 | 'mc: 's, 94 | { 95 | if let Some(datum) = datum.0 { 96 | Ok(datum as i16) 97 | } else { 98 | Err("datum was NULL") 99 | } 100 | } 101 | } 102 | 103 | impl From for PgDatum<'_> { 104 | fn from(value: i16) -> Self { 105 | PgDatum(Some(value as Datum), PhantomData) 106 | } 107 | } 108 | 109 | impl<'s> TryFromPgDatum<'s> for f32 { 110 | fn try_from<'mc>(_: &'mc PgAllocator, datum: PgDatum<'mc>) -> Result 111 | where 112 | Self: 's, 113 | 'mc: 's, 114 | { 115 | if let Some(datum) = datum.0 { 116 | Ok(f32::from_bits(datum as u32)) 117 | } else { 118 | Err("datum was NULL") 119 | } 120 | } 121 | } 122 | 123 | impl From for PgDatum<'_> { 124 | fn from(value: f32) -> Self { 125 | PgDatum(Some(f32::to_bits(value) as Datum), PhantomData) 126 | } 127 | } 128 | 129 | impl<'s> TryFromPgDatum<'s> for f64 { 130 | fn try_from<'mc>(_: &'mc PgAllocator, datum: PgDatum<'mc>) -> Result 131 | where 132 | Self: 's, 133 | 'mc: 's, 134 | { 135 | if let Some(datum) = datum.0 { 136 | Ok(f64::from_bits(datum as u64)) 137 | } else { 138 | Err("datum was NULL") 139 | } 140 | } 141 | } 142 | 143 | impl From for PgDatum<'_> { 144 | fn from(value: f64) -> Self { 145 | PgDatum(Some(f64::to_bits(value) as Datum), PhantomData) 146 | } 147 | } 148 | 149 | impl<'s> TryFromPgDatum<'s> for i32 { 150 | fn try_from<'mc>(_: &'mc PgAllocator, datum: PgDatum<'mc>) -> Result 151 | where 152 | Self: 's, 153 | 'mc: 's, 154 | { 155 | if let Some(datum) = datum.0 { 156 | Ok(datum as i32) 157 | } else { 158 | Err("datum was NULL") 159 | } 160 | } 161 | } 162 | 163 | impl From for PgDatum<'_> { 164 | fn from(value: i32) -> Self { 165 | PgDatum(Some(value as Datum), PhantomData) 166 | } 167 | } 168 | 169 | impl<'s> TryFromPgDatum<'s> for i64 { 170 | fn try_from<'mc>(_: &'mc PgAllocator, datum: PgDatum<'mc>) -> Result 171 | where 172 | Self: 's, 173 | 'mc: 's, 174 | { 175 | assert!( 176 | std::mem::size_of::() >= std::mem::size_of::(), 177 | "Datum not large enough for i64 values" 178 | ); 179 | if let Some(datum) = datum.0 { 180 | Ok(datum as i64) 181 | } else { 182 | Err("datum was NULL") 183 | } 184 | } 185 | } 186 | 187 | impl From for PgDatum<'_> { 188 | fn from(value: i64) -> Self { 189 | assert!( 190 | std::mem::size_of::() >= std::mem::size_of::(), 191 | "Datum not large enough for i64 values" 192 | ); 193 | PgDatum(Some(value as Datum), PhantomData) 194 | } 195 | } 196 | 197 | #[deprecated(note = "String is not Zero cost, please use the CString variant")] 198 | impl<'s> TryFromPgDatum<'s> for String { 199 | fn try_from<'mc>( 200 | memory_context: &'mc PgAllocator, 201 | datum: PgDatum<'mc>, 202 | ) -> Result 203 | where 204 | Self: 's, 205 | 'mc: 's, 206 | { 207 | let cstr = CString::try_from(memory_context, datum)?; 208 | 209 | cstr.into_string() 210 | .map_err(|_| "String contained non-utf8 data") 211 | } 212 | } 213 | 214 | // FIXME: this lifetime is wrong 215 | impl From for PgDatum<'_> { 216 | fn from(value: String) -> Self { 217 | let cstr = CString::new(value).expect("This shouldn't 
fail"); 218 | let ptr: *const c_char = cstr.as_ptr(); 219 | 220 | let text = unsafe { crate::guard_pg(|| pg_sys::cstring_to_text(ptr)) }; 221 | 222 | PgDatum(Some(text as Datum), PhantomData) 223 | } 224 | } 225 | 226 | #[deprecated(note = "String is not Zero cost, please use the CString variant")] 227 | impl<'s> TryFromPgDatum<'s> for CString { 228 | fn try_from<'mc>(_: &'mc PgAllocator, datum: PgDatum<'mc>) -> Result 229 | where 230 | Self: 's, 231 | 'mc: 's, 232 | { 233 | if let Some(datum) = datum.0 { 234 | let text_val = datum as *const pg_sys::text; 235 | 236 | unsafe { 237 | crate::guard_pg(|| { 238 | let val: *mut c_char = pg_sys::text_to_cstring(text_val); 239 | let cstr = CStr::from_ptr(val).to_owned(); 240 | 241 | pg_sys::pfree(val as *mut _); 242 | 243 | Ok(cstr) 244 | }) 245 | } 246 | } else { 247 | Err("datum was NULL") 248 | } 249 | } 250 | } 251 | 252 | // FIXME: this lifetime is wrong 253 | impl From for PgDatum<'_> { 254 | fn from(value: CString) -> Self { 255 | let ptr: *const c_char = value.as_ptr(); 256 | let text = unsafe { crate::guard_pg(|| pg_sys::cstring_to_text(ptr)) }; 257 | 258 | PgDatum(Some(text as Datum), PhantomData) 259 | } 260 | } 261 | 262 | impl<'s> TryFromPgDatum<'s> for PgAllocated<'s, CString> { 263 | fn try_from<'mc>( 264 | memory_context: &'mc PgAllocator, 265 | datum: PgDatum<'mc>, 266 | ) -> Result 267 | where 268 | Self: 's, 269 | 'mc: 's, 270 | { 271 | if let Some(datum) = datum.0 { 272 | let text_val = datum as *const pg_sys::text; 273 | 274 | unsafe { 275 | crate::guard_pg(|| { 276 | // from varlena.c 277 | /* 278 | * text_to_cstring 279 | * 280 | * Create a palloc'd, null-terminated C string from a text value. 281 | * 282 | * We support being passed a compressed or toasted text value. 283 | * This is a bit bogus since such values shouldn't really be referred to as 284 | * "text *", but it seems useful for robustness. If we didn't handle that 285 | * case here, we'd need another routine that did, anyway. 286 | */ 287 | let cstr = pg_sys::text_to_cstring(text_val) as *mut c_char; 288 | 289 | // this is dangerous! 
it's owned by CString, which is why PgAllocated will 290 | // block the dealloc 291 | //let cstr = CString::from_raw(val); 292 | let allocated = PgAllocated::from_raw(memory_context, cstr); 293 | 294 | Ok(allocated) 295 | }) 296 | } 297 | } else { 298 | Err("datum was NULL") 299 | } 300 | } 301 | } 302 | 303 | impl<'s> From> for PgDatum<'s> { 304 | fn from(value: Text<'s>) -> Self { 305 | let ptr = unsafe { value.into_ptr() }; 306 | PgDatum(Some(ptr as Datum), PhantomData) 307 | } 308 | } 309 | 310 | impl<'s> TryFromPgDatum<'s> for Text<'s> { 311 | fn try_from<'mc>( 312 | memory_context: &'mc PgAllocator, 313 | datum: PgDatum<'mc>, 314 | ) -> Result 315 | where 316 | Self: 's, 317 | 'mc: 's, 318 | { 319 | if let Some(datum) = datum.0 { 320 | let text_ptr = datum as *const pg_sys::text; 321 | 322 | unsafe { Ok(Text::from_raw(memory_context, text_ptr as *mut _)) } 323 | } else { 324 | Err("datum was NULL") 325 | } 326 | } 327 | } 328 | 329 | impl<'s, T> TryFromPgDatum<'s> for Option 330 | where 331 | T: 's + TryFromPgDatum<'s>, 332 | { 333 | fn try_from<'mc>( 334 | memory_context: &'mc PgAllocator, 335 | datum: PgDatum<'mc>, 336 | ) -> Result 337 | where 338 | Self: 's, 339 | 'mc: 's, 340 | { 341 | if datum.is_null() { 342 | return Ok(None); 343 | } 344 | 345 | // Value is not NULL: Call try_from(_: &PgAllocator, ) of type T without Optional<> 346 | let result: Result = TryFromPgDatum::try_from(memory_context, datum); 347 | 348 | Ok(Some(result?)) 349 | } 350 | } 351 | 352 | impl<'mc, 's, T> From> for PgDatum<'mc> 353 | where 354 | 'mc: 's, 355 | T: 's, 356 | PgDatum<'mc>: From, 357 | { 358 | fn from(value: Option) -> Self { 359 | match value { 360 | Some(value) => PgDatum::from(value), 361 | None => PgDatum(None, PhantomData), 362 | } 363 | } 364 | } 365 | 366 | struct DetoastedArrayWrapper { 367 | original_datum: *mut pg_sys::ArrayType, 368 | arr_type: *mut pg_sys::ArrayType, 369 | elements: *mut Datum, 370 | nulls: *mut bool, 371 | } 372 | 373 | impl DetoastedArrayWrapper { 374 | unsafe fn detoasted(datum: Datum) -> Result { 375 | let datum = datum as *mut pg_sys::varlena; 376 | if datum.is_null() { 377 | return Err("datum was NULL"); 378 | } 379 | 380 | #[allow(clippy::cast_ptr_alignment)] 381 | let arr_type = pg_sys::pg_detoast_datum(datum) as *mut pg_sys::ArrayType; 382 | 383 | #[allow(clippy::cast_ptr_alignment)] 384 | let original_datum = datum as *mut pg_sys::ArrayType; 385 | 386 | Ok(DetoastedArrayWrapper { 387 | original_datum, 388 | arr_type, 389 | elements: std::ptr::null_mut::(), 390 | nulls: std::ptr::null_mut::(), 391 | }) 392 | } 393 | } 394 | 395 | impl Drop for DetoastedArrayWrapper { 396 | fn drop(&mut self) { 397 | if self.arr_type != self.original_datum { 398 | unsafe { 399 | if !self.arr_type.is_null() { 400 | pg_sys::pfree(self.arr_type as *mut _); 401 | } 402 | if !self.elements.is_null() { 403 | pg_sys::pfree(self.elements as *mut _); 404 | } 405 | if !self.nulls.is_null() { 406 | pg_sys::pfree(self.nulls as *mut _); 407 | } 408 | } 409 | } 410 | } 411 | } 412 | 413 | /// Inner trait used to limit which types can be used for direct casting 414 | #[doc(hidden)] 415 | pub trait PgPrimitiveDatum {} 416 | 417 | impl PgPrimitiveDatum for i16 {} 418 | impl PgPrimitiveDatum for i32 {} 419 | impl PgPrimitiveDatum for i64 {} 420 | impl PgPrimitiveDatum for f32 {} 421 | impl PgPrimitiveDatum for f64 {} 422 | 423 | impl<'s, T> TryFromPgDatum<'s> for &'s [T] 424 | where 425 | T: 's + TryFromPgDatum<'s> + PgPrimitiveDatum, 426 | { 427 | fn try_from<'mc>(_: &'mc PgAllocator, 
datum: PgDatum<'mc>) -> Result 428 | where 429 | Self: 's, 430 | 'mc: 's, 431 | { 432 | if let Some(datum) = datum.0 { 433 | unsafe { 434 | let mut detoasted_wrapper = DetoastedArrayWrapper::detoasted(datum)?; 435 | 436 | if (*(detoasted_wrapper.arr_type)).ndim > 1 { 437 | return Err("argument must be empty or one-dimensional array"); 438 | } 439 | 440 | let mut elmlen: pg_sys::int16 = 0; 441 | let mut elmbyval = pgbool!(false); 442 | let mut elmalign: ::std::os::raw::c_char = 0; 443 | 444 | pg_sys::get_typlenbyvalalign( 445 | (*(detoasted_wrapper.arr_type)).elemtype, 446 | &mut elmlen, 447 | &mut elmbyval, 448 | &mut elmalign, 449 | ); 450 | 451 | let mut nelems: i32 = 0; 452 | 453 | pg_sys::deconstruct_array( 454 | detoasted_wrapper.arr_type, 455 | (*(detoasted_wrapper.arr_type)).elemtype, 456 | elmlen as i32, 457 | elmbyval, 458 | elmalign, 459 | &mut detoasted_wrapper.elements, 460 | &mut detoasted_wrapper.nulls, 461 | &mut nelems, 462 | ); 463 | 464 | let datums = std::slice::from_raw_parts( 465 | detoasted_wrapper.elements as *const Datum, 466 | nelems as usize, 467 | ); 468 | 469 | // This is where the conversion from `&[Datum]` is done to `&[T]` by a simple type casting, 470 | // however, we should use `T::try_cast(&'mc PgAllocator, Datum)` to ignore nulls 471 | let mem_size_datums = std::mem::size_of_val(datums); 472 | let datums = if mem_size_datums == 0 { 473 | std::slice::from_raw_parts(datums.as_ptr() as *const T, 0) 474 | } else { 475 | let mem_size_type = std::mem::size_of::(); 476 | assert_eq!(mem_size_datums % mem_size_type, 0); 477 | std::slice::from_raw_parts( 478 | datums.as_ptr() as *const T, 479 | mem_size_datums / mem_size_type, 480 | ) 481 | }; 482 | 483 | Ok(datums) 484 | } 485 | } else { 486 | Err("datum was NULL") 487 | } 488 | } 489 | } 490 | 491 | impl From<()> for PgDatum<'static> { 492 | fn from(_value: ()) -> Self { 493 | PgDatum(None, PhantomData) 494 | } 495 | } 496 | 497 | // FIXME: wrong lifetime 498 | impl From for PgDatum<'static> { 499 | fn from(datum: Datum) -> PgDatum<'static> { 500 | PgDatum(Some(datum), PhantomData) 501 | } 502 | } 503 | -------------------------------------------------------------------------------- /pg-extend/src/pg_error.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | //! Error reporting support for Postgres. 9 | //! This module is deprecated, use the logging macros in the [`pg_extend::log` module]. 10 | //! 11 | //! [`pg_extend::log` module]: ../log/index.html 12 | #![allow(deprecated)] 13 | 14 | use std::os::raw::{c_char, c_int}; 15 | 16 | use crate::{pg_bool, pg_sys}; 17 | 18 | const ERR_DOMAIN: &[u8] = b"RUST\0"; 19 | 20 | /// Postgres logging Levels 21 | /// 22 | /// # Note 23 | /// 24 | /// Some of these levels effect the status of the connection and transaction in Postgres. Specifically, >= Error will cause 25 | /// the connection and transaction to fail and be reset. 26 | #[derive(Clone, Copy)] 27 | #[deprecated = "use pg_extend::log::Level"] 28 | pub enum Level { 29 | /// Debugging messages, in categories of 5 decreasing detail. 30 | Debug5 = pg_sys::DEBUG5 as isize, 31 | /// Debugging messages, in categories of 4 decreasing detail. 32 | Debug4 = pg_sys::DEBUG4 as isize, 33 | /// Debugging messages, in categories of 3 decreasing detail. 
34 | Debug3 = pg_sys::DEBUG3 as isize,
35 | /// Debugging messages, in categories of 2 decreasing detail.
36 | Debug2 = pg_sys::DEBUG2 as isize,
37 | /// Debugging messages, in categories of 1 decreasing detail.
38 | Debug1 = pg_sys::DEBUG1 as isize,
39 | /// Server operational messages; sent only to server log by default.
40 | Log = pg_sys::LOG as isize,
41 | /// Same as LOG for server reporting, but never sent to client.
42 | /// `CommError` is an alias for this
43 | #[cfg(not(postgres9))]
44 | LogServerOnly = pg_sys::LOG_SERVER_ONLY as isize,
45 | /// Messages specifically requested by user (eg VACUUM VERBOSE output); always sent to client regardless of client_min_messages, but by default not sent to server log.
46 | Info = pg_sys::INFO as isize,
47 | /// Helpful messages to users about query operation; sent to client and not to server log by default.
48 | Notice = pg_sys::NOTICE as isize,
49 | /// Warnings. NOTICE is for expected messages like implicit sequence creation by SERIAL. WARNING is for unexpected messages.
50 | Warning = pg_sys::WARNING as isize,
51 | /// user error - abort transaction; return to known state
52 | Error = pg_sys::ERROR as isize,
53 | /// fatal error - abort process
54 | Fatal = pg_sys::FATAL as isize,
55 | /// take down the other backends with me
56 | Panic = pg_sys::PANIC as isize,
57 | }
58 |
59 | impl From<Level> for c_int {
60 | fn from(level: Level) -> Self {
61 | level as isize as c_int
62 | }
63 | }
64 |
65 | // TODO: offer a similar interface to that of postgres for multi-log lines?
66 | // TODO: is there a better interface for CStr?
67 | /// log an error to Postgres
68 | #[deprecated = "use the new logging macros in pg_extend::log"]
69 | pub fn log<T1, T2, T3>(level: Level, file: T1, line: u32, func_name: T2, msg: T3)
70 | where
71 | T1: Into<Vec<u8>>,
72 | T2: Into<Vec<u8>>,
73 | T3: Into<Vec<u8>>,
74 | {
75 | use std::ffi::CString;
76 |
77 | // convert to C ffi
78 | let file = CString::new(file.into()).expect("this should not fail: file");
79 | let line = line as c_int; // TODO: check bounds?
80 | let func_name = CString::new(func_name.into()).expect("this should not fail: func_name");
81 | let msg = CString::new(msg.into()).or_else(|_| CString::new("failed to convert msg to a CString, check extension code for incompatible `CString` messages")).expect("this should not fail: msg");
82 |
83 | // these are owned by us
84 | let file: *const c_char = file.as_ptr();
85 | let func_name: *const c_char = func_name.as_ptr();
86 | let msg: *const c_char = msg.as_ptr();
87 |
88 | let errlevel: c_int = c_int::from(level);
89 |
90 | // log the data:
91 | unsafe {
92 | crate::guard_pg(|| {
93 | let res = pg_sys::errstart(
94 | errlevel,
95 | file,
96 | line,
97 | func_name,
98 | ERR_DOMAIN.as_ptr() as *const c_char,
99 | );
100 | if pg_bool::Bool::from(res).into() {
101 | let msg_result = pg_sys::errmsg(msg);
102 | pg_sys::errfinish(msg_result);
103 | }
104 | })
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/pg-extend/src/pg_sys.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Benjamin Fry
2 | //
3 | // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4 | // http://www.apache.org/licenses/LICENSE-2.0>, or the MIT license <LICENSE-MIT or
5 | // http://opensource.org/licenses/MIT>, at your option. This file may not be
6 | // copied, modified, or distributed except according to those terms.
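(An illustrative aside on the deprecated `pg_error` module above, not part of the repository files: `log` accepts any message types convertible to bytes, so `file!()` and `line!()` pair naturally with it. A minimal usage sketch, assuming the calling code runs inside a Postgres backend with `pg_extend` linked in; the function name and message below are invented.)

```rust
#[allow(deprecated)]
fn report_notice() {
    use pg_extend::pg_error::{log, Level};

    // file!() -> &'static str and line!() -> u32 satisfy log's Into<Vec<u8>> / u32 parameters
    log(
        Level::Notice,
        file!(),
        line!(),
        "report_notice",
        "example notice emitted from a Rust extension",
    );
}
```

(New extension code would normally prefer the `error!`-style macros in `pg_extend::log`, which is what the deprecation notes above point to.)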
7 | 8 | #![allow(missing_docs)] 9 | #![allow(non_camel_case_types)] 10 | #![allow(non_snake_case)] 11 | #![allow(non_upper_case_globals)] 12 | #![allow(safe_packed_borrows)] 13 | #![allow(clippy::approx_constant)] 14 | #![allow(clippy::cast_lossless)] 15 | #![allow(clippy::redundant_static_lifetimes)] 16 | #![allow(clippy::missing_safety_doc)] 17 | #![allow(clippy::new_without_default)] 18 | #![allow(clippy::should_implement_trait)] 19 | #![allow(clippy::too_many_arguments)] 20 | #![allow(clippy::trivially_copy_pass_by_ref)] 21 | #![allow(clippy::transmute_ptr_to_ptr)] 22 | #![allow(clippy::unreadable_literal)] 23 | #![allow(clippy::useless_transmute)] 24 | #![allow(improper_ctypes)] 25 | 26 | //! All exported C FFI symbols from Postgres 27 | //! 28 | //! For all function calls into Postgres, they should generally be wrapped in `pg_extend::guard_pg`. 29 | 30 | include!(concat!(env!("OUT_DIR"), "/postgres.rs")); 31 | 32 | #[cfg(target_os = "linux")] 33 | use std::os::raw::c_int; 34 | 35 | #[cfg(target_os = "linux")] 36 | extern "C" { 37 | #[link_name = "__sigsetjmp"] 38 | pub fn sigsetjmp(env: *mut sigjmp_buf, savemask: c_int) -> c_int; 39 | } 40 | -------------------------------------------------------------------------------- /pg-extend/src/pg_type.rs: -------------------------------------------------------------------------------- 1 | //! Postgres type definitions 2 | 3 | use crate::native::Text; 4 | 5 | /// See https://www.postgresql.org/docs/11/xfunc-c.html#XFUNC-C-TYPE-TABLE 6 | /// 7 | /// TODO: it would be cool to share code with the sfackler/rust-postgres project 8 | /// though, that is converting from NetworkByte order, and this is all NativeByte order? 9 | #[derive(Clone, Copy)] 10 | pub enum PgType { 11 | /// abstime AbsoluteTime utils/nabstime.h 12 | AbsoluteTime, 13 | /// bigint (int8) int64 postgres.h 14 | BigInt, 15 | /// bigint (int8) int64 postgres.h 16 | Int8, 17 | /// boolean bool postgres.h (maybe compiler built-in) 18 | Boolean, 19 | /// box BOX* utils/geo_decls.h 20 | GeoBox, 21 | /// bytea bytea* postgres.h 22 | ByteA, 23 | /// "char" char (compiler built-in) 24 | Char, 25 | /// character BpChar* postgres.h 26 | Character, 27 | /// cid CommandId postgres.h 28 | CommandId, 29 | /// date DateADT utils/date.h 30 | Date, 31 | /// smallint (int2) int16 postgres.h 32 | SmallInt, 33 | /// smallint (int2) int16 postgres.h 34 | Int2, 35 | /// int2vector int2vector* postgres.h 36 | Int2Vector, 37 | /// integer (int4) int32 postgres.h 38 | Integer, 39 | /// integer (int4) int32 postgres.h 40 | Int4, 41 | /// real (float4) float4* postgres.h 42 | Real, 43 | /// real (float4) float4* postgres.h 44 | Float4, 45 | /// double precision (float8) float8* postgres.h 46 | DoublePrecision, 47 | /// double precision (float8) float8* postgres.h 48 | Float8, 49 | /// interval Interval* datatype/timestamp.h 50 | Interval, 51 | /// lseg LSEG* utils/geo_decls.h 52 | Lseg, 53 | /// name Name postgres.h 54 | Name, 55 | /// oid Oid postgres.h 56 | Oid, 57 | /// oidvector oidvector* postgres.h 58 | OidVector, 59 | /// path PATH* utils/geo_decls.h 60 | Path, 61 | /// point POINT* utils/geo_decls.h 62 | Point, 63 | /// regproc regproc postgres.h 64 | RegProc, 65 | /// reltime RelativeTime utils/nabstime.h 66 | RelativeTime, 67 | /// text text* postgres.h 68 | Text, 69 | /// tid ItemPointer storage/itemptr.h 70 | ItemPointer, 71 | /// time TimeADT utils/date.h 72 | Time, 73 | /// time with time zone TimeTzADT utils/date.h 74 | TimeWithTimeZone, 75 | /// timestamp Timestamp* datatype/timestamp.h 76 | 
Timestamp, 77 | /// tinterval TimeInterval utils/nabstime.h 78 | TimeInterval, 79 | /// varchar VarChar* postgres.h 80 | VarChar, 81 | /// void 82 | Void, 83 | /// xid TransactionId postgres.h 84 | TransactionId, 85 | } 86 | 87 | impl PgType { 88 | /// Return the PgType of the parameter's type 89 | pub fn from_rust() -> PgType { 90 | T::pg_type() 91 | } 92 | 93 | /// Return the string representation of this type 94 | pub fn as_str(self, as_array: bool) -> &'static str { 95 | match self { 96 | // abstime AbsoluteTime utils/nabstime.h 97 | PgType::AbsoluteTime => "abstime", 98 | // bigint (int8) int64 postgres.h 99 | PgType::BigInt => "bigint", 100 | PgType::Int8 if as_array => "int8[]", 101 | PgType::Int8 => "int8", 102 | // boolean bool postgres.h (maybe compiler built-in) 103 | PgType::Boolean => "boolean", 104 | // box BOX* utils/geo_decls.h 105 | PgType::GeoBox => "box", 106 | // bytea bytea* postgres.h 107 | PgType::ByteA => "bytea", 108 | // "char" char (compiler built-in) 109 | PgType::Char => "char", 110 | // character BpChar* postgres.h 111 | PgType::Character => "character", 112 | // cid CommandId postgres.h 113 | PgType::CommandId => "cid", 114 | // date DateADT utils/date.h 115 | PgType::Date => "date", 116 | // smallint (int2) int16 postgres.h 117 | PgType::SmallInt => "smallint", 118 | PgType::Int2 if as_array => "int2[]", 119 | PgType::Int2 => "int2", 120 | // int2vector int2vector* postgres.h 121 | PgType::Int2Vector => "int2vector", 122 | // integer (int4) int32 postgres.h 123 | PgType::Integer => "integer", 124 | PgType::Int4 if as_array => "int4[]", 125 | PgType::Int4 => "int4", 126 | // real (float4) float4* postgres.h 127 | PgType::Real => "real", 128 | PgType::Float4 if as_array => "float4[]", 129 | PgType::Float4 => "float4", 130 | // double precision (float8) float8* postgres.h 131 | PgType::DoublePrecision => "double precision", 132 | PgType::Float8 if as_array => "float8[]", 133 | PgType::Float8 => "float8", 134 | // interval Interval* datatype/timestamp.h 135 | PgType::Interval => "interval", 136 | // lseg LSEG* utils/geo_decls.h 137 | PgType::Lseg => "lseg", 138 | // name Name postgres.h 139 | PgType::Name => "name", 140 | // oid Oid postgres.h 141 | PgType::Oid => "oid", 142 | // oidvector oidvector* postgres.h 143 | PgType::OidVector => "oidvector", 144 | // path PATH* utils/geo_decls.h 145 | PgType::Path => "path", 146 | // point POINT* utils/geo_decls.h 147 | PgType::Point => "point", 148 | // regproc regproc postgres.h 149 | PgType::RegProc => "regproc", 150 | // reltime RelativeTime utils/nabstime.h 151 | PgType::RelativeTime => "reltime", 152 | // text text* postgres.h 153 | PgType::Text => "text", 154 | // tid ItemPointer storage/itemptr.h 155 | PgType::ItemPointer => "tid", 156 | // time TimeADT utils/date.h 157 | PgType::Time => "time", 158 | // time with time zone TimeTzADT utils/date.h 159 | PgType::TimeWithTimeZone => "time with time zone", 160 | // timestamp Timestamp* datatype/timestamp.h 161 | PgType::Timestamp => "timestamp", 162 | // tinterval TimeInterval utils/nabstime.h 163 | PgType::TimeInterval => "tinterval", 164 | // varchar VarChar* postgres.h 165 | PgType::VarChar => "varchar", 166 | // void 167 | PgType::Void => "void", 168 | // xid TransactionId postgres.h 169 | PgType::TransactionId => "xid", 170 | } 171 | } 172 | 173 | /// Return the String to be used for the RETURNS statement in SQL 174 | pub fn return_stmt(self, as_array: bool) -> String { 175 | format!("RETURNS {}", self.as_str(as_array)) 176 | } 177 | } 178 | 179 | /// Get the 
Postgres info for a type 180 | pub trait PgTypeInfo { 181 | /// return the Postgres type 182 | fn pg_type() -> PgType; 183 | /// for distinguishing optional and non-optional arguments 184 | fn is_option() -> bool { 185 | false 186 | } 187 | /// for distinguishing array argsuments 188 | fn is_array() -> bool { 189 | false 190 | } 191 | } 192 | 193 | impl PgTypeInfo for f32 { 194 | fn pg_type() -> PgType { 195 | PgType::Float4 196 | } 197 | } 198 | 199 | impl PgTypeInfo for f64 { 200 | fn pg_type() -> PgType { 201 | PgType::Float8 202 | } 203 | } 204 | 205 | impl PgTypeInfo for i16 { 206 | fn pg_type() -> PgType { 207 | PgType::Int2 208 | } 209 | } 210 | 211 | impl PgTypeInfo for i32 { 212 | fn pg_type() -> PgType { 213 | PgType::Int4 214 | } 215 | } 216 | 217 | impl PgTypeInfo for i64 { 218 | fn pg_type() -> PgType { 219 | PgType::Int8 220 | } 221 | } 222 | 223 | impl PgTypeInfo for String { 224 | fn pg_type() -> PgType { 225 | PgType::Text 226 | } 227 | } 228 | 229 | impl PgTypeInfo for std::ffi::CString { 230 | fn pg_type() -> PgType { 231 | PgType::Text 232 | } 233 | } 234 | 235 | impl PgTypeInfo for () { 236 | fn pg_type() -> PgType { 237 | PgType::Void 238 | } 239 | } 240 | 241 | impl PgTypeInfo for Option 242 | where 243 | T: PgTypeInfo, 244 | { 245 | fn pg_type() -> PgType { 246 | T::pg_type() 247 | } 248 | 249 | fn is_option() -> bool { 250 | true 251 | } 252 | } 253 | 254 | impl PgTypeInfo for Text<'_> { 255 | fn pg_type() -> PgType { 256 | PgType::Text 257 | } 258 | } 259 | 260 | impl PgTypeInfo for &[T] 261 | where 262 | T: PgTypeInfo, 263 | { 264 | fn pg_type() -> PgType { 265 | T::pg_type() 266 | } 267 | 268 | fn is_array() -> bool { 269 | true 270 | } 271 | } 272 | -------------------------------------------------------------------------------- /pg-extend/wrapper.h: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
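(An illustrative aside on the `PgType`/`PgTypeInfo` mapping defined in pg_type.rs above, not part of the repository files: a minimal sketch of querying it the same way the generated code does, with the expected strings taken from the `as_str` table above.)

```rust
use pg_extend::pg_type::{PgType, PgTypeInfo};

fn describe_types() {
    // i32 maps to PgType::Int4 through its PgTypeInfo impl
    let ty = PgType::from_rust::<i32>();
    assert_eq!(ty.as_str(<i32 as PgTypeInfo>::is_array()), "int4");
    assert_eq!(ty.return_stmt(false), "RETURNS int4");

    // Option<T> reuses T's type but reports is_option(), which suppresses STRICT
    assert!(<Option<i32> as PgTypeInfo>::is_option());

    // slices report is_array(), which selects the "[]" variants of as_str
    let arr = PgType::from_rust::<&[i64]>();
    assert_eq!(arr.as_str(<&[i64] as PgTypeInfo>::is_array()), "int8[]");
}
```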
7 | 8 | #include "stdbool.h" 9 | #include "postgres.h" 10 | #include "postgres_ext.h" 11 | #include "access/relscan.h" 12 | #include "access/sysattr.h" 13 | #include "catalog/pg_type.h" 14 | #include "executor/spi.h" 15 | #include "foreign/fdwapi.h" 16 | #include "foreign/foreign.h" 17 | #include "lib/stringinfo.h" 18 | #include "nodes/makefuncs.h" 19 | #include "nodes/pg_list.h" 20 | #include "nodes/memnodes.h" 21 | #include "optimizer/pathnode.h" 22 | #include "optimizer/planmain.h" 23 | #include "optimizer/restrictinfo.h" 24 | #include "utils/builtins.h" 25 | #include "utils/rel.h" 26 | #include "utils/lsyscache.h" 27 | #include "utils/palloc.h" 28 | -------------------------------------------------------------------------------- /pg-extern-attr/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pg-extern-attr" 3 | version = "0.2.2" 4 | authors = ["Benjamin Fry "] 5 | edition = "2018" 6 | 7 | description = """ 8 | A attribute proc-macro for deriving Postgres extension function information, akin to PG_FUNCTION_INFO_V1 in Postgres 9 | """ 10 | 11 | documentation = "https://docs.rs/pg-extern-attr" 12 | repository = "https://github.com/bluejekyll/pg-extend-rs" 13 | 14 | readme = "README.md" 15 | license = "MIT/Apache-2.0" 16 | 17 | [lib] 18 | proc-macro = true 19 | 20 | [dependencies] 21 | syn = { version = "1.0.33", features = ["extra-traits", "full", "fold", "parsing"] } 22 | quote = "1.0.7" 23 | proc-macro2 = "1.0.17" 24 | 25 | [dev-dependencies] 26 | pg-extend = { path = "../pg-extend" } -------------------------------------------------------------------------------- /pg-extern-attr/Makefile.toml: -------------------------------------------------------------------------------- 1 | [config] 2 | skip_core_tasks = true 3 | 4 | [config.modify_core_tasks] 5 | # if true, all core tasks are set to private (default false) 6 | private = true 7 | 8 | ## Feature profiles 9 | [env] 10 | CARGO_MAKE_EXTEND_WORKSPACE_MAKEFILE = "true" 11 | ALL_FEATURES = "--all-features" 12 | -------------------------------------------------------------------------------- /pg-extern-attr/README.md: -------------------------------------------------------------------------------- 1 | # pg-extern-attr 2 | 3 | Attribute macro for externalizing Postgres extension functions from Rust. -------------------------------------------------------------------------------- /pg-extern-attr/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | #![recursion_limit = "1024"] 9 | 10 | extern crate proc_macro; 11 | extern crate proc_macro2; 12 | #[macro_use] 13 | extern crate quote; 14 | #[macro_use] 15 | extern crate syn; 16 | 17 | use proc_macro2::{Ident, Span, TokenStream}; 18 | use quote::ToTokens; 19 | use syn::punctuated::Punctuated; 20 | use syn::spanned::Spanned; 21 | use syn::token::Comma; 22 | use syn::Type; 23 | 24 | mod lifetime; 25 | 26 | /// A type that represents that PgAllocator is an argument to the Rust function. 
27 | type HasPgAllocatorArg = bool;
28 |
29 | fn create_function_params(num_args: usize, has_pg_allocator: HasPgAllocatorArg) -> TokenStream {
30 | let mut tokens = TokenStream::new();
31 |
32 | // if the allocator is the first arg we want to start at 1
33 | if has_pg_allocator {
34 | tokens.extend(quote!(&memory_context,));
35 | };
36 |
37 | for i in 0..num_args {
38 | let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
39 |
40 | tokens.extend(quote!(
41 | #arg_name,
42 | ));
43 | }
44 |
45 | tokens
46 | }
47 |
48 | fn get_arg_types(inputs: &Punctuated<syn::FnArg, Comma>) -> Vec<Type> {
49 | let mut types = Vec::new();
50 |
51 | for arg in inputs.iter() {
52 | let arg_type: &syn::Type = match *arg {
53 | syn::FnArg::Receiver(_) => panic!("self functions not supported"),
54 | syn::FnArg::Typed(ref ty) => &ty.ty,
55 | };
56 |
57 | // if it's carrying a lifetime, we're going to replace it with the anonymous one.
58 | let mut arg_type = arg_type.clone();
59 | lifetime::strip_type(&mut arg_type);
60 |
61 | types.push(arg_type);
62 | }
63 |
64 | types
65 | }
66 |
67 | /// Check if the argument is the PgAllocator (aka MemoryContext)
68 | fn check_for_pg_allocator(ty: &Type) -> bool {
69 | // we only accept references, i.e. &PgAllocator
70 | let type_ref = match ty {
71 | Type::Reference(type_ref) => type_ref,
72 | _ => return false,
73 | };
74 |
75 | // find the path and ident
76 | match *type_ref.elem {
77 | Type::Path(ref path) => path
78 | .path
79 | .segments
80 | .iter()
81 | .last()
82 | .map_or(false, |p| p.ident == stringify!(PgAllocator)),
83 | _ => false,
84 | }
85 | }
86 |
87 | /// Returns a token stream of all the argument data extracted from the SQL function parameters
88 | /// PgDatums, and converts them to the arg list for the Rust function.
89 | ///
90 | /// # Return
91 | ///
92 | /// The TokenStream of all the args, and a boolean if the first arg is the PgAllocator
93 | fn extract_arg_data(arg_types: &[Type]) -> (TokenStream, HasPgAllocatorArg) {
94 | let mut get_args_stream = TokenStream::new();
95 |
96 | // 1 to skip first 0, to use first arg.
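// Descriptive note (added commentary): `skip_first` below becomes 1 when the Rust signature's
// first parameter is `&PgAllocator`, so the allocator is not counted as a SQL argument and the
// remaining parameters are bound to the incoming datums as `arg_0`, `arg_1`, ...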
97 | let first_param_pg_allocator = arg_types 98 | .first() 99 | .map_or(false, |ty| check_for_pg_allocator(ty)); 100 | let skip_first = if first_param_pg_allocator { 1 } else { 0 }; 101 | 102 | for (i, arg_type) in arg_types.iter().skip(skip_first).enumerate() { 103 | let arg_name = Ident::new(&format!("arg_{}", i), i.span()); 104 | let arg_error = format!("unsupported function argument type for {}", arg_name); 105 | 106 | let get_arg = quote_spanned!( arg_type.span()=> 107 | let datum = args.next().expect("wrong number of args passed into get_args for args?"); 108 | let #arg_name: #arg_type = unsafe { 109 | pg_extend::pg_datum::TryFromPgDatum::try_from( 110 | &memory_context, 111 | pg_extend::pg_datum::PgDatum::from_option(&memory_context, datum), 112 | ) 113 | .expect(#arg_error) 114 | }; 115 | ); 116 | 117 | get_args_stream.extend(get_arg); 118 | } 119 | 120 | (get_args_stream, first_param_pg_allocator) 121 | } 122 | 123 | fn sql_param_list(num_args: usize) -> String { 124 | let mut tokens = String::new(); 125 | if num_args == 0 { 126 | return tokens; 127 | } 128 | 129 | let arg_name = |num: usize| format!("{{sql_{}}}", num); 130 | 131 | for i in 0..(num_args - 1) { 132 | let arg_name = arg_name(i); 133 | tokens.push_str(&format!("{},", arg_name)); 134 | } 135 | 136 | let arg_name = arg_name(num_args - 1); 137 | tokens.push_str(&arg_name); 138 | 139 | tokens 140 | } 141 | 142 | /// Returns a token stream for the function that creates the function 143 | /// 144 | /// # Return 145 | /// 146 | /// The TokenStream of all the args, and a boolean if the first arg is the PgAllocator 147 | fn sql_param_types(arg_types: &[Type]) -> (TokenStream, bool) { 148 | let mut tokens = TokenStream::new(); 149 | 150 | // 1 to skip first 0, to use first arg. 151 | let first_param_pg_allocator = arg_types 152 | .first() 153 | .map_or(false, |ty| check_for_pg_allocator(ty)); 154 | 155 | let arg_types = if first_param_pg_allocator { 156 | &arg_types[1..] 157 | } else { 158 | arg_types 159 | }; 160 | 161 | for (i, arg_type) in arg_types.iter().enumerate() { 162 | let sql_name = Ident::new(&format!("sql_{}", i), arg_type.span()); 163 | 164 | let sql_param = quote!( 165 | #sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(<#arg_type>::is_array()), 166 | ); 167 | 168 | tokens.extend(sql_param); 169 | } 170 | 171 | (tokens, first_param_pg_allocator) 172 | } 173 | 174 | fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream { 175 | let mut outputs = outputs.clone(); 176 | lifetime::strip_return_type(&mut outputs); 177 | 178 | let ty = match outputs { 179 | syn::ReturnType::Default => quote!(()), 180 | syn::ReturnType::Type(_, ty) => quote!(#ty), 181 | }; 182 | 183 | quote_spanned!(ty.span() => pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt(<#ty>::is_array())) 184 | } 185 | 186 | /// Returns Rust code to figure out if the function takes optional arguments. Functions with 187 | /// non-optional arguments will be declared with the STRICT option. PostgreSQL behavior: 188 | /// 189 | /// > If this parameter is specified, the function is not executed when there are null arguments; 190 | /// > instead a null result is assumed automatically. 191 | fn sql_function_options(arg_types: &[Type]) -> TokenStream { 192 | if arg_types.is_empty() { 193 | return quote!("",); 194 | } 195 | 196 | let first_param_pg_allocator = arg_types 197 | .first() 198 | .map_or(false, |ty| check_for_pg_allocator(ty)); 199 | 200 | let arg_types = if first_param_pg_allocator { 201 | &arg_types[1..] 
202 | } else { 203 | arg_types 204 | }; 205 | 206 | // if it's empty param list, return empty param list 207 | if arg_types.is_empty() { 208 | return quote!("",); 209 | } 210 | 211 | quote!( 212 | { 213 | let optional_args = [ #( <#arg_types>::is_option() ),* ]; 214 | if optional_args.iter().all(|&x| x) { "" } 215 | else if !optional_args.iter().any(|&x| x) { " STRICT" } 216 | else { 217 | panic!("Cannot mix Option and non-Option arguments."); 218 | } 219 | }, 220 | ) 221 | } 222 | 223 | fn impl_info_for_fdw(item: &syn::Item) -> TokenStream { 224 | let typ = if let syn::Item::Struct(typ) = item { 225 | typ 226 | } else { 227 | panic!("Annotation only supported on structs") 228 | }; 229 | 230 | let mut decl = item.clone().into_token_stream(); 231 | 232 | let struct_name = &typ.ident; 233 | let func_name = syn::Ident::new(&format!("fdw_{}", struct_name), Span::call_site()); 234 | 235 | let info_fn = get_info_fn(&func_name); 236 | 237 | let fdw_fn = quote!( 238 | #[no_mangle] 239 | pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { 240 | unsafe { pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum() } 241 | } 242 | ); 243 | 244 | let create_sql_name = syn::Ident::new( 245 | &format!("{}_pg_create_stmt", struct_name), 246 | Span::call_site(), 247 | ); 248 | 249 | let sql_stmt = format!( 250 | " 251 | CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT; 252 | CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR; 253 | ", 254 | struct_name, func_name, 255 | ); 256 | 257 | // declare a function that can be used to output a create statement for the externed function 258 | // all create statements will be put into a common module for access 259 | let create_sql_def = quote!( 260 | #[allow(unused)] 261 | pub fn #create_sql_name(library_path: &str) -> String { 262 | use pg_extend::pg_type::PgTypeInfo; 263 | 264 | format!( 265 | #sql_stmt, 266 | library_path = library_path 267 | ) 268 | } 269 | ); 270 | 271 | decl.extend(info_fn); 272 | decl.extend(create_sql_def); 273 | decl.extend(fdw_fn); 274 | 275 | decl 276 | } 277 | 278 | fn get_info_fn(func_name: &syn::Ident) -> TokenStream { 279 | let func_info_name = syn::Ident::new(&format!("pg_finfo_{}", func_name), Span::call_site()); 280 | 281 | // create the Postgres info 282 | quote!( 283 | #[no_mangle] 284 | pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record { 285 | const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 }; 286 | &my_finfo 287 | } 288 | ) 289 | } 290 | 291 | fn impl_info_for_fn(item: &syn::Item) -> TokenStream { 292 | let func = if let syn::Item::Fn(func) = item { 293 | &func.sig 294 | } else { 295 | panic!("annotation only supported on functions"); 296 | }; 297 | 298 | let func_name = &func.ident; 299 | 300 | if func.variadic.is_some() { 301 | panic!("variadic functions (...) 
not supported") 302 | } 303 | 304 | let inputs = &func.inputs; 305 | let output = &func.output; 306 | 307 | // declare the function 308 | let mut function = TokenStream::default(); 309 | 310 | let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site()); 311 | let func_info = get_info_fn(&func_wrapper_name); 312 | // join the function information in 313 | function.extend(func_info); 314 | 315 | let arg_types = get_arg_types(inputs); 316 | let (get_args_from_datums, has_pg_allocator) = extract_arg_data(&arg_types); 317 | // remove the optional Rust arguments from the sql argument count 318 | let num_sql_args = if has_pg_allocator { 319 | arg_types.len() - 1 320 | } else { 321 | arg_types.len() 322 | }; 323 | 324 | let func_params = create_function_params(num_sql_args, has_pg_allocator); 325 | 326 | // wrap the original function in a pg_wrapper function 327 | let func_wrapper = quote_spanned!( func_name.span() => 328 | #[no_mangle] 329 | #[allow(unused_variables, unused_mut, clippy::suspicious_else_formatting, clippy::unit_arg, clippy::let_unit_value)] 330 | pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { 331 | use std::panic; 332 | use pg_extend::pg_alloc::PgAllocator; 333 | 334 | // All params will be in the "current" memory context at the call-site 335 | let memory_context = PgAllocator::current_context(); 336 | 337 | let func_info = unsafe { 338 | func_call_info 339 | .as_mut() 340 | .expect("func_call_info was unexpectedly NULL") 341 | }; 342 | 343 | // guard the Postgres process against the panic, and give us an oportunity to cleanup 344 | let panic_result = panic::catch_unwind(|| { 345 | // extract the argument list 346 | let mut args = pg_extend::get_args(func_info); 347 | 348 | // arbitrary Datum conversions occur here, and could panic 349 | // so this is inside the catch unwind 350 | #get_args_from_datums 351 | 352 | // this is the meat of the function call into the extension code 353 | let result = #func_name(#func_params); 354 | 355 | // arbitrary Rust code could panic, so this is guarded 356 | pg_extend::pg_datum::PgDatum::from(result) 357 | }); 358 | 359 | // see if we caught a panic 360 | match panic_result { 361 | Ok(result) => { 362 | // in addition to the null case, we should handle result types probably 363 | let isnull: pg_extend::pg_bool::Bool = result.is_null().into(); 364 | func_info.isnull = isnull.into(); 365 | 366 | // return the datum 367 | unsafe { 368 | result.into_datum() 369 | } 370 | } 371 | Err(err) => { 372 | use std::sync::atomic::compiler_fence; 373 | use std::sync::atomic::Ordering; 374 | use pg_extend::error; 375 | 376 | // ensure the return value is null 377 | func_info.isnull = pg_extend::pg_bool::Bool::from(true).into(); 378 | 379 | // The Rust code paniced, we need to recover to Postgres via a longjump 380 | // A postgres logging error of Error will do this for us. 
381 | compiler_fence(Ordering::SeqCst); 382 | if let Some(msg) = err.downcast_ref::<&'static str>() { 383 | error!("panic executing Rust '{}': {}", stringify!(#func_name), msg); 384 | } 385 | 386 | if let Some(msg) = err.downcast_ref::() { 387 | error!("panic executing Rust '{}': {}", stringify!(#func_name), msg); 388 | } 389 | 390 | error!("panic executing Rust '{}'", stringify!(#func_name)); 391 | 392 | unreachable!("log should have longjmped above, this is a bug in pg-extend-rs"); 393 | } 394 | } 395 | } 396 | ); 397 | 398 | let create_sql_name = 399 | syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site()); 400 | 401 | let (sql_param_types, _has_pg_allocator) = sql_param_types(&arg_types); 402 | let sql_params = sql_param_list(num_sql_args); 403 | let sql_options = sql_function_options(&arg_types); 404 | let sql_return = sql_return_type(output); 405 | 406 | // ret and library_path are replacements at runtime 407 | let sql_stmt = format!( 408 | "CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C{{opts}};", 409 | func_name, sql_params, func_wrapper_name, 410 | ); 411 | 412 | // declare a function that can be used to output a create statement for the externed function 413 | // all create statements will be put into a common module for access 414 | let create_sql_def = quote!( 415 | #[allow(unused)] 416 | pub fn #create_sql_name(library_path: &str) -> String { 417 | use pg_extend::pg_type::PgTypeInfo; 418 | format!( 419 | #sql_stmt, 420 | #sql_param_types 421 | ret = #sql_return, 422 | opts = #sql_options 423 | library_path = library_path 424 | ) 425 | } 426 | ); 427 | 428 | function.extend(func_wrapper); 429 | function.extend(create_sql_def); 430 | 431 | function 432 | } 433 | 434 | /// An attribute macro for wrapping Rust functions with boiler plate for defining and 435 | /// calling conventions between Postgres and Rust. 
436 | /// 437 | /// This mimics the C macro for defining functions 438 | /// 439 | /// ```c 440 | /// #define PG_FUNCTION_INFO_V1(funcname) \ 441 | /// extern Datum funcname(PG_FUNCTION_ARGS); \ 442 | /// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \ 443 | /// const Pg_finfo_record * \ 444 | /// CppConcat(pg_finfo_,funcname) (void) \ 445 | /// { \ 446 | /// static const Pg_finfo_record my_finfo = { 1 }; \ 447 | /// return &my_finfo; \ 448 | /// } \ 449 | /// ``` 450 | /// 451 | /// # Returns 452 | /// 453 | /// The result of this macro will be to produce a new function wrapping the one annotated but prepended with 454 | /// `pg_` to distinquish them and also declares a function for Postgres to get the Function information; 455 | /// 456 | /// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced, 457 | /// the wrapper function with a signature of: 458 | /// 459 | /// ```rust,no_run 460 | /// extern crate pg_extend; 461 | /// use pg_extend::pg_sys; 462 | /// 463 | /// #[no_mangle] 464 | /// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum 465 | /// # { 466 | /// # unimplemented!() 467 | /// # } 468 | /// ``` 469 | /// 470 | /// and the info function with a signature of: 471 | /// 472 | /// ```rust,no_run 473 | /// extern crate pg_extend; 474 | /// use pg_extend::pg_sys; 475 | /// 476 | /// #[no_mangle] 477 | /// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record 478 | /// # { 479 | /// # unimplemented!() 480 | /// # } 481 | /// ``` 482 | /// 483 | #[proc_macro_attribute] 484 | #[allow(clippy::needless_pass_by_value)] 485 | pub fn pg_extern( 486 | _attr: proc_macro::TokenStream, 487 | item: proc_macro::TokenStream, 488 | ) -> proc_macro::TokenStream { 489 | // get a usable token stream 490 | let ast: syn::Item = parse_macro_input!(item as syn::Item); 491 | 492 | // output the original function definition. 493 | let mut expanded: TokenStream = ast.clone().into_token_stream(); 494 | 495 | // Build the impl 496 | expanded.extend(impl_info_for_fn(&ast)); 497 | 498 | // Return the generated impl 499 | proc_macro::TokenStream::from(expanded) 500 | } 501 | 502 | /// An attribute macro for wrapping Rust structs with boiler plate for defining and exposing a foreign data wrapper 503 | /// This is mostly a slimmed down version of pg_extern, with none of the data argument handling. 504 | #[proc_macro_attribute] 505 | #[allow(clippy::needless_pass_by_value)] 506 | pub fn pg_foreignwrapper( 507 | _attr: proc_macro::TokenStream, 508 | item: proc_macro::TokenStream, 509 | ) -> proc_macro::TokenStream { 510 | // get a usable token stream 511 | let ast: syn::Item = parse_macro_input!(item as syn::Item); 512 | 513 | // Build the impl 514 | let expanded: TokenStream = impl_info_for_fdw(&ast); 515 | 516 | // Return the generated impl 517 | proc_macro::TokenStream::from(expanded) 518 | } 519 | -------------------------------------------------------------------------------- /pg-extern-attr/src/lifetime.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018-2019 Benjamin Fry 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 
7 | 8 | fn lifetime_to_anon(lifetime: &mut syn::Lifetime) { 9 | let anon_lifetime = syn::Ident::new("_", lifetime.ident.span()); 10 | lifetime.ident = anon_lifetime; 11 | } 12 | 13 | fn sl_lifetime_def(lifetime_def: &mut syn::LifetimeDef) { 14 | lifetime_to_anon(&mut lifetime_def.lifetime); 15 | 16 | for lifetime in &mut lifetime_def.bounds { 17 | lifetime_to_anon(lifetime); 18 | } 19 | } 20 | 21 | fn sl_type_param_bound(bound: &mut syn::TypeParamBound) { 22 | use syn::TypeParamBound::*; 23 | match bound { 24 | Trait(ref mut trait_bound) => { 25 | if let Some(bound_lifetimes) = trait_bound.lifetimes.as_mut() { 26 | for lifetime_def in &mut bound_lifetimes.lifetimes { 27 | sl_lifetime_def(lifetime_def); 28 | } 29 | }; 30 | 31 | sl_path(&mut trait_bound.path); 32 | } 33 | Lifetime(ref mut lifetime) => lifetime_to_anon(lifetime), 34 | } 35 | } 36 | 37 | fn sl_generic_argument(args: &mut syn::GenericArgument) { 38 | use syn::GenericArgument::*; 39 | match args { 40 | Lifetime(ref mut lifetime) => lifetime_to_anon(lifetime), 41 | Type(ref mut ty) => strip_type(ty), 42 | Binding(ref mut binding) => strip_type(&mut binding.ty), 43 | Constraint(ref mut constraint) => { 44 | for mut bound in &mut constraint.bounds { 45 | sl_type_param_bound(&mut bound); 46 | } 47 | } 48 | Const(expr) => unimplemented!("Const not supported by pg-extern: {:?}", expr), 49 | } 50 | } 51 | 52 | fn sl_path(path: &mut syn::Path) { 53 | for p in &mut path.segments { 54 | use syn::PathArguments::*; 55 | 56 | let path_arguments = &mut p.arguments; 57 | 58 | match path_arguments { 59 | None => (), 60 | AngleBracketed(ref mut angle_bracketed_generic_arguments) => { 61 | for generic_argument in &mut angle_bracketed_generic_arguments.args { 62 | sl_generic_argument(generic_argument); 63 | } 64 | } 65 | Parenthesized(ref mut parenthesizedgeneric_arguments) => { 66 | for ty in &mut parenthesizedgeneric_arguments.inputs { 67 | strip_type(ty); 68 | } 69 | 70 | strip_return_type(&mut parenthesizedgeneric_arguments.output); 71 | } 72 | } 73 | } 74 | } 75 | 76 | fn sl_type_path(type_path: &mut syn::TypePath) { 77 | if let Some(ref mut qself) = type_path.qself.as_mut() { 78 | strip_type(&mut qself.ty); 79 | }; 80 | 81 | sl_path(&mut type_path.path); 82 | } 83 | 84 | pub(crate) fn strip_return_type(return_type: &mut syn::ReturnType) { 85 | use syn::ReturnType::*; 86 | match return_type { 87 | Default => (), 88 | Type(_, ref mut ty) => strip_type(ty), 89 | } 90 | } 91 | 92 | pub(crate) fn strip_type(ty: &mut syn::Type) { 93 | use syn::Type::*; 94 | 95 | match ty { 96 | Slice(ref mut type_slice) => strip_type(&mut type_slice.elem), 97 | Array(type_array) => strip_type(&mut type_array.elem), 98 | Ptr(type_ptr) => strip_type(&mut type_ptr.elem), 99 | Reference(type_reference) => strip_type(&mut type_reference.elem), 100 | BareFn(type_bare_fn) => { 101 | unimplemented!("BareFn not supported by pg-extern: {:?}", type_bare_fn) 102 | } 103 | Never(_type_never) => (), 104 | Tuple(type_tuple) => { 105 | for mut i in &mut type_tuple.elems { 106 | strip_type(&mut i); 107 | } 108 | } 109 | Path(ref mut type_path) => sl_type_path(type_path), 110 | TraitObject(type_trait_object) => unimplemented!( 111 | "TraitObject not supported by pg-extern: {:?}", 112 | type_trait_object 113 | ), 114 | ImplTrait(type_impl_trait) => unimplemented!( 115 | "ImplTrait not supported by pg-extern: {:?}", 116 | type_impl_trait 117 | ), 118 | Paren(type_paren) => unimplemented!("Paren not supported by pg-extern: {:?}", type_paren), 119 | Group(type_group) => 
unimplemented!("Group not supported by pg-extern: {:?}", type_group), 120 | Infer(type_infer) => unimplemented!("Infer not supported by pg-extern: {:?}", type_infer), 121 | Macro(type_macro) => unimplemented!("Macro not supported by pg-extern: {:?}", type_macro), 122 | Verbatim(type_verbatim) => { 123 | unimplemented!("Verbatim not supported by pg-extern: {:?}", type_verbatim) 124 | } 125 | t => unimplemented!("Unsupported type: {:?}", t), 126 | } 127 | } 128 | --------------------------------------------------------------------------------