├── .gitignore ├── AUTHORS ├── LICENSE ├── README.rst ├── __init__.pxd ├── __init__.py ├── buffer.pxd ├── buffer.pxi ├── buffer.pyx ├── codecs ├── __init__.pxd ├── bits.pyx ├── bytea.pyx ├── context.pyx ├── datetime.pyx ├── float.pyx ├── geometry.pyx ├── hstore.pyx ├── int.pyx ├── json.pyx ├── jsonpath.pyx ├── misc.pyx ├── network.pyx ├── numeric.pyx ├── pg_snapshot.pyx ├── text.pyx ├── tid.pyx └── uuid.pyx ├── consts.pxi ├── cpythonx.pxd ├── debug.h ├── debug.pxd ├── frb.pxd ├── frb.pyx ├── hton.h ├── hton.pxd ├── pgproto.pxd ├── pgproto.pyi ├── pgproto.pyx ├── tohex.h ├── tohex.pxd ├── types.py └── uuid.pyx /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.pyo 3 | *.so 4 | *.pyd 5 | *~ 6 | .DS_Store 7 | __pycache__/ 8 | /*.c 9 | *.html 10 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Main contributors 2 | ================= 3 | 4 | MagicStack Inc.: 5 | Elvis Pranskevichus 6 | Yury Selivanov 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2016-present the asyncpg authors and contributors. 2 | 3 | Apache License 4 | Version 2.0, January 2004 5 | http://www.apache.org/licenses/ 6 | 7 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 8 | 9 | 1. Definitions. 10 | 11 | "License" shall mean the terms and conditions for use, reproduction, 12 | and distribution as defined by Sections 1 through 9 of this document. 13 | 14 | "Licensor" shall mean the copyright owner or entity authorized by 15 | the copyright owner that is granting the License. 16 | 17 | "Legal Entity" shall mean the union of the acting entity and all 18 | other entities that control, are controlled by, or are under common 19 | control with that entity. For the purposes of this definition, 20 | "control" means (i) the power, direct or indirect, to cause the 21 | direction or management of such entity, whether by contract or 22 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 23 | outstanding shares, or (iii) beneficial ownership of such entity. 24 | 25 | "You" (or "Your") shall mean an individual or Legal Entity 26 | exercising permissions granted by this License. 27 | 28 | "Source" form shall mean the preferred form for making modifications, 29 | including but not limited to software source code, documentation 30 | source, and configuration files. 31 | 32 | "Object" form shall mean any form resulting from mechanical 33 | transformation or translation of a Source form, including but 34 | not limited to compiled object code, generated documentation, 35 | and conversions to other media types. 36 | 37 | "Work" shall mean the work of authorship, whether in Source or 38 | Object form, made available under the License, as indicated by a 39 | copyright notice that is included in or attached to the work 40 | (an example is provided in the Appendix below). 41 | 42 | "Derivative Works" shall mean any work, whether in Source or Object 43 | form, that is based on (or derived from) the Work and for which the 44 | editorial revisions, annotations, elaborations, or other modifications 45 | represent, as a whole, an original work of authorship. 
For the purposes 46 | of this License, Derivative Works shall not include works that remain 47 | separable from, or merely link (or bind by name) to the interfaces of, 48 | the Work and Derivative Works thereof. 49 | 50 | "Contribution" shall mean any work of authorship, including 51 | the original version of the Work and any modifications or additions 52 | to that Work or Derivative Works thereof, that is intentionally 53 | submitted to Licensor for inclusion in the Work by the copyright owner 54 | or by an individual or Legal Entity authorized to submit on behalf of 55 | the copyright owner. For the purposes of this definition, "submitted" 56 | means any form of electronic, verbal, or written communication sent 57 | to the Licensor or its representatives, including but not limited to 58 | communication on electronic mailing lists, source code control systems, 59 | and issue tracking systems that are managed by, or on behalf of, the 60 | Licensor for the purpose of discussing and improving the Work, but 61 | excluding communication that is conspicuously marked or otherwise 62 | designated in writing by the copyright owner as "Not a Contribution." 63 | 64 | "Contributor" shall mean Licensor and any individual or Legal Entity 65 | on behalf of whom a Contribution has been received by Licensor and 66 | subsequently incorporated within the Work. 67 | 68 | 2. Grant of Copyright License. Subject to the terms and conditions of 69 | this License, each Contributor hereby grants to You a perpetual, 70 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 71 | copyright license to reproduce, prepare Derivative Works of, 72 | publicly display, publicly perform, sublicense, and distribute the 73 | Work and such Derivative Works in Source or Object form. 74 | 75 | 3. Grant of Patent License. Subject to the terms and conditions of 76 | this License, each Contributor hereby grants to You a perpetual, 77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 78 | (except as stated in this section) patent license to make, have made, 79 | use, offer to sell, sell, import, and otherwise transfer the Work, 80 | where such license applies only to those patent claims licensable 81 | by such Contributor that are necessarily infringed by their 82 | Contribution(s) alone or by combination of their Contribution(s) 83 | with the Work to which such Contribution(s) was submitted. If You 84 | institute patent litigation against any entity (including a 85 | cross-claim or counterclaim in a lawsuit) alleging that the Work 86 | or a Contribution incorporated within the Work constitutes direct 87 | or contributory patent infringement, then any patent licenses 88 | granted to You under this License for that Work shall terminate 89 | as of the date such litigation is filed. 90 | 91 | 4. Redistribution. 
You may reproduce and distribute copies of the 92 | Work or Derivative Works thereof in any medium, with or without 93 | modifications, and in Source or Object form, provided that You 94 | meet the following conditions: 95 | 96 | (a) You must give any other recipients of the Work or 97 | Derivative Works a copy of this License; and 98 | 99 | (b) You must cause any modified files to carry prominent notices 100 | stating that You changed the files; and 101 | 102 | (c) You must retain, in the Source form of any Derivative Works 103 | that You distribute, all copyright, patent, trademark, and 104 | attribution notices from the Source form of the Work, 105 | excluding those notices that do not pertain to any part of 106 | the Derivative Works; and 107 | 108 | (d) If the Work includes a "NOTICE" text file as part of its 109 | distribution, then any Derivative Works that You distribute must 110 | include a readable copy of the attribution notices contained 111 | within such NOTICE file, excluding those notices that do not 112 | pertain to any part of the Derivative Works, in at least one 113 | of the following places: within a NOTICE text file distributed 114 | as part of the Derivative Works; within the Source form or 115 | documentation, if provided along with the Derivative Works; or, 116 | within a display generated by the Derivative Works, if and 117 | wherever such third-party notices normally appear. The contents 118 | of the NOTICE file are for informational purposes only and 119 | do not modify the License. You may add Your own attribution 120 | notices within Derivative Works that You distribute, alongside 121 | or as an addendum to the NOTICE text from the Work, provided 122 | that such additional attribution notices cannot be construed 123 | as modifying the License. 124 | 125 | You may add Your own copyright statement to Your modifications and 126 | may provide additional or different license terms and conditions 127 | for use, reproduction, or distribution of Your modifications, or 128 | for any such Derivative Works as a whole, provided Your use, 129 | reproduction, and distribution of the Work otherwise complies with 130 | the conditions stated in this License. 131 | 132 | 5. Submission of Contributions. Unless You explicitly state otherwise, 133 | any Contribution intentionally submitted for inclusion in the Work 134 | by You to the Licensor shall be under the terms and conditions of 135 | this License, without any additional terms or conditions. 136 | Notwithstanding the above, nothing herein shall supersede or modify 137 | the terms of any separate license agreement you may have executed 138 | with Licensor regarding such Contributions. 139 | 140 | 6. Trademarks. This License does not grant permission to use the trade 141 | names, trademarks, service marks, or product names of the Licensor, 142 | except as required for reasonable and customary use in describing the 143 | origin of the Work and reproducing the content of the NOTICE file. 144 | 145 | 7. Disclaimer of Warranty. Unless required by applicable law or 146 | agreed to in writing, Licensor provides the Work (and each 147 | Contributor provides its Contributions) on an "AS IS" BASIS, 148 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 149 | implied, including, without limitation, any warranties or conditions 150 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 151 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 152 | appropriateness of using or redistributing the Work and assume any 153 | risks associated with Your exercise of permissions under this License. 154 | 155 | 8. Limitation of Liability. In no event and under no legal theory, 156 | whether in tort (including negligence), contract, or otherwise, 157 | unless required by applicable law (such as deliberate and grossly 158 | negligent acts) or agreed to in writing, shall any Contributor be 159 | liable to You for damages, including any direct, indirect, special, 160 | incidental, or consequential damages of any character arising as a 161 | result of this License or out of the use or inability to use the 162 | Work (including but not limited to damages for loss of goodwill, 163 | work stoppage, computer failure or malfunction, or any and all 164 | other commercial damages or losses), even if such Contributor 165 | has been advised of the possibility of such damages. 166 | 167 | 9. Accepting Warranty or Additional Liability. While redistributing 168 | the Work or Derivative Works thereof, You may choose to offer, 169 | and charge a fee for, acceptance of support, warranty, indemnity, 170 | or other liability obligations and/or rights consistent with this 171 | License. However, in accepting such obligations, You may act only 172 | on Your own behalf and on Your sole responsibility, not on behalf 173 | of any other Contributor, and only if You agree to indemnify, 174 | defend, and hold each Contributor harmless for any liability 175 | incurred by, or claims asserted against, such Contributor by reason 176 | of your accepting any such warranty or additional liability. 177 | 178 | END OF TERMS AND CONDITIONS 179 | 180 | APPENDIX: How to apply the Apache License to your work. 181 | 182 | To apply the Apache License to your work, attach the following 183 | boilerplate notice, with the fields enclosed by brackets "[]" 184 | replaced with your own identifying information. (Don't include 185 | the brackets!) The text should be enclosed in the appropriate 186 | comment syntax for the file format. We also recommend that a 187 | file or class name and description of purpose be included on the 188 | same "printed page" as the copyright notice for easier 189 | identification within third-party archives. 190 | 191 | Copyright (C) 2016-present the asyncpg authors and contributors 192 | 193 | 194 | Licensed under the Apache License, Version 2.0 (the "License"); 195 | you may not use this file except in compliance with the License. 196 | You may obtain a copy of the License at 197 | 198 | http://www.apache.org/licenses/LICENSE-2.0 199 | 200 | Unless required by applicable law or agreed to in writing, software 201 | distributed under the License is distributed on an "AS IS" BASIS, 202 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 203 | See the License for the specific language governing permissions and 204 | limitations under the License. 205 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Low-level IO utilities for PostgreSQL drivers. 
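The package provides Cython-level ``WriteBuffer``/``ReadBuffer`` classes for assembling and parsing PostgreSQL frontend/backend messages, plus binary codecs for common PostgreSQL data types. As a rough, standalone illustration (plain Python, not this package's API) of the framing those buffers implement, a message is a one-byte type followed by a big-endian int32 length that covers the length field itself but not the type byte::

    import struct

    def frame_message(msg_type: bytes, payload: bytes) -> bytes:
        # 1-byte type + int32 length (4 bytes of length + payload) + payload
        return msg_type + struct.pack('!i', len(payload) + 4) + payload

    # e.g. a Terminate ('X') message carries an empty payload:
    assert frame_message(b'X', b'') == b'X\x00\x00\x00\x04'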
2 | -------------------------------------------------------------------------------- /__init__.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | -------------------------------------------------------------------------------- /buffer.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | from libc.stdint cimport int8_t, uint8_t, int16_t, uint16_t, \ 9 | int32_t, uint32_t, int64_t, uint64_t 10 | 11 | 12 | include "./buffer.pxi" 13 | 14 | 15 | cdef class WriteBuffer: 16 | cdef: 17 | # Preallocated small buffer 18 | bint _smallbuf_inuse 19 | char _smallbuf[_BUFFER_INITIAL_SIZE] 20 | 21 | char *_buf 22 | 23 | # Allocated size 24 | ssize_t _size 25 | 26 | # Length of data in the buffer 27 | ssize_t _length 28 | 29 | # Number of memoryviews attached to the buffer 30 | int _view_count 31 | 32 | # True is start_message was used 33 | bint _message_mode 34 | 35 | cdef inline len(self): 36 | return self._length 37 | 38 | cdef inline write_len_prefixed_utf8(self, str s): 39 | return self.write_len_prefixed_bytes(s.encode('utf-8')) 40 | 41 | cdef inline _check_readonly(self) 42 | cdef inline _ensure_alloced(self, ssize_t extra_length) 43 | cdef _reallocate(self, ssize_t new_size) 44 | cdef inline reset(self) 45 | cdef inline start_message(self, char type) 46 | cdef inline end_message(self) 47 | cdef write_buffer(self, WriteBuffer buf) 48 | cdef write_byte(self, char b) 49 | cdef write_bytes(self, bytes data) 50 | cdef write_len_prefixed_buffer(self, WriteBuffer buf) 51 | cdef write_len_prefixed_bytes(self, bytes data) 52 | cdef write_bytestring(self, bytes string) 53 | cdef write_str(self, str string, str encoding) 54 | cdef write_frbuf(self, FRBuffer *buf) 55 | cdef write_cstr(self, const char *data, ssize_t len) 56 | cdef write_int16(self, int16_t i) 57 | cdef write_int32(self, int32_t i) 58 | cdef write_int64(self, int64_t i) 59 | cdef write_float(self, float f) 60 | cdef write_double(self, double d) 61 | 62 | @staticmethod 63 | cdef WriteBuffer new_message(char type) 64 | 65 | @staticmethod 66 | cdef WriteBuffer new() 67 | 68 | 69 | ctypedef const char * (*try_consume_message_method)(object, ssize_t*) 70 | ctypedef int32_t (*take_message_type_method)(object, char) except -1 71 | ctypedef int32_t (*take_message_method)(object) except -1 72 | ctypedef char (*get_message_type_method)(object) 73 | 74 | 75 | cdef class ReadBuffer: 76 | cdef: 77 | # A deque of buffers (bytes objects) 78 | object _bufs 79 | object _bufs_append 80 | object _bufs_popleft 81 | 82 | # A pointer to the first buffer in `_bufs` 83 | bytes _buf0 84 | 85 | # A pointer to the previous first buffer 86 | # (used to prolong the life of _buf0 when 
using 87 | # methods like _try_read_bytes) 88 | bytes _buf0_prev 89 | 90 | # Number of buffers in `_bufs` 91 | int32_t _bufs_len 92 | 93 | # A read position in the first buffer in `_bufs` 94 | ssize_t _pos0 95 | 96 | # Length of the first buffer in `_bufs` 97 | ssize_t _len0 98 | 99 | # A total number of buffered bytes in ReadBuffer 100 | ssize_t _length 101 | 102 | char _current_message_type 103 | int32_t _current_message_len 104 | ssize_t _current_message_len_unread 105 | bint _current_message_ready 106 | 107 | cdef inline len(self): 108 | return self._length 109 | 110 | cdef inline char get_message_type(self): 111 | return self._current_message_type 112 | 113 | cdef inline int32_t get_message_length(self): 114 | return self._current_message_len 115 | 116 | cdef feed_data(self, data) 117 | cdef inline _ensure_first_buf(self) 118 | cdef _switch_to_next_buf(self) 119 | cdef inline char read_byte(self) except? -1 120 | cdef inline const char* _try_read_bytes(self, ssize_t nbytes) 121 | cdef inline _read_into(self, char *buf, ssize_t nbytes) 122 | cdef inline _read_and_discard(self, ssize_t nbytes) 123 | cdef bytes read_bytes(self, ssize_t nbytes) 124 | cdef bytes read_len_prefixed_bytes(self) 125 | cdef str read_len_prefixed_utf8(self) 126 | cdef read_uuid(self) 127 | cdef inline int64_t read_int64(self) except? -1 128 | cdef inline int32_t read_int32(self) except? -1 129 | cdef inline int16_t read_int16(self) except? -1 130 | cdef inline read_null_str(self) 131 | cdef int32_t take_message(self) except -1 132 | cdef inline int32_t take_message_type(self, char mtype) except -1 133 | cdef int32_t put_message(self) except -1 134 | cdef inline const char* try_consume_message(self, ssize_t* len) 135 | cdef bytes consume_message(self) 136 | cdef discard_message(self) 137 | cdef int32_t redirect_messages(self, WriteBuffer buf, char mtype, int stop_at=?) 
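    # redirect_messages() copies consecutive messages of type `mtype` from
    # this buffer into `buf`, stopping at the first message of a different
    # type (or, if `stop_at` is non-zero, once `buf` holds at least that
    # many bytes), and returns the number of messages copied.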
138 | cdef bytearray consume_messages(self, char mtype) 139 | cdef finish_message(self) 140 | cdef inline _finish_message(self) 141 | 142 | @staticmethod 143 | cdef ReadBuffer new_message_parser(object data) 144 | -------------------------------------------------------------------------------- /buffer.pxi: -------------------------------------------------------------------------------- 1 | DEF _BUFFER_INITIAL_SIZE = 1024 2 | DEF _BUFFER_MAX_GROW = 65536 3 | DEF _BUFFER_FREELIST_SIZE = 256 4 | -------------------------------------------------------------------------------- /buffer.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | from libc.string cimport memcpy 9 | 10 | import collections 11 | 12 | class BufferError(Exception): 13 | pass 14 | 15 | include "./buffer.pxi" 16 | 17 | @cython.no_gc_clear 18 | @cython.final 19 | @cython.freelist(_BUFFER_FREELIST_SIZE) 20 | cdef class WriteBuffer: 21 | 22 | def __cinit__(self): 23 | self._smallbuf_inuse = True 24 | self._buf = self._smallbuf 25 | self._size = _BUFFER_INITIAL_SIZE 26 | self._length = 0 27 | self._message_mode = 0 28 | 29 | def __dealloc__(self): 30 | if self._buf is not NULL and not self._smallbuf_inuse: 31 | cpython.PyMem_Free(self._buf) 32 | self._buf = NULL 33 | self._size = 0 34 | 35 | if self._view_count: 36 | raise BufferError( 37 | 'Deallocating buffer with attached memoryviews') 38 | 39 | def __getbuffer__(self, Py_buffer *buffer, int flags): 40 | self._view_count += 1 41 | 42 | cpython.PyBuffer_FillInfo( 43 | buffer, self, self._buf, self._length, 44 | 1, # read-only 45 | flags) 46 | 47 | def __releasebuffer__(self, Py_buffer *buffer): 48 | self._view_count -= 1 49 | 50 | cdef inline _check_readonly(self): 51 | if self._view_count: 52 | raise BufferError('the buffer is in read-only mode') 53 | 54 | cdef inline _ensure_alloced(self, ssize_t extra_length): 55 | cdef ssize_t new_size = extra_length + self._length 56 | 57 | if new_size > self._size: 58 | self._reallocate(new_size) 59 | 60 | cdef _reallocate(self, ssize_t new_size): 61 | cdef char *new_buf 62 | 63 | if new_size < _BUFFER_MAX_GROW: 64 | new_size = _BUFFER_MAX_GROW 65 | else: 66 | # Add a little extra 67 | new_size += _BUFFER_INITIAL_SIZE 68 | 69 | if self._smallbuf_inuse: 70 | new_buf = cpython.PyMem_Malloc( 71 | sizeof(char) * new_size) 72 | if new_buf is NULL: 73 | self._buf = NULL 74 | self._size = 0 75 | self._length = 0 76 | raise MemoryError 77 | memcpy(new_buf, self._buf, self._size) 78 | self._size = new_size 79 | self._buf = new_buf 80 | self._smallbuf_inuse = False 81 | else: 82 | new_buf = cpython.PyMem_Realloc( 83 | self._buf, new_size) 84 | if new_buf is NULL: 85 | cpython.PyMem_Free(self._buf) 86 | self._buf = NULL 87 | self._size = 0 88 | self._length = 0 89 | raise MemoryError 90 | self._buf = new_buf 91 | self._size = new_size 92 | 93 | cdef inline start_message(self, char type): 94 | if self._length != 0: 95 | raise BufferError( 96 | 'cannot start_message for a non-empty buffer') 97 | self._ensure_alloced(5) 98 | self._message_mode = 1 99 | self._buf[0] = type 100 | self._length = 5 101 | 102 | cdef inline end_message(self): 103 | # "length-1" to exclude the message type byte 104 | cdef ssize_t mlen = self._length - 1 105 | 106 | self._check_readonly() 107 | if not self._message_mode: 
108 | raise BufferError( 109 | 'end_message can only be called with start_message') 110 | if self._length < 5: 111 | raise BufferError('end_message: buffer is too small') 112 | if mlen > _MAXINT32: 113 | raise BufferError('end_message: message is too large') 114 | 115 | hton.pack_int32(&self._buf[1], mlen) 116 | return self 117 | 118 | cdef inline reset(self): 119 | self._length = 0 120 | self._message_mode = 0 121 | 122 | cdef write_buffer(self, WriteBuffer buf): 123 | self._check_readonly() 124 | 125 | if not buf._length: 126 | return 127 | 128 | self._ensure_alloced(buf._length) 129 | memcpy(self._buf + self._length, 130 | buf._buf, 131 | buf._length) 132 | self._length += buf._length 133 | 134 | cdef write_byte(self, char b): 135 | self._check_readonly() 136 | 137 | self._ensure_alloced(1) 138 | self._buf[self._length] = b 139 | self._length += 1 140 | 141 | cdef write_bytes(self, bytes data): 142 | cdef char* buf 143 | cdef ssize_t len 144 | 145 | cpython.PyBytes_AsStringAndSize(data, &buf, &len) 146 | self.write_cstr(buf, len) 147 | 148 | cdef write_bytestring(self, bytes string): 149 | cdef char* buf 150 | cdef ssize_t len 151 | 152 | cpython.PyBytes_AsStringAndSize(string, &buf, &len) 153 | # PyBytes_AsStringAndSize returns a null-terminated buffer, 154 | # but the null byte is not counted in len. hence the + 1 155 | self.write_cstr(buf, len + 1) 156 | 157 | cdef write_str(self, str string, str encoding): 158 | self.write_bytestring(string.encode(encoding)) 159 | 160 | cdef write_len_prefixed_buffer(self, WriteBuffer buf): 161 | # Write a length-prefixed (not NULL-terminated) bytes sequence. 162 | self.write_int32(buf.len()) 163 | self.write_buffer(buf) 164 | 165 | cdef write_len_prefixed_bytes(self, bytes data): 166 | # Write a length-prefixed (not NULL-terminated) bytes sequence. 167 | cdef: 168 | char *buf 169 | ssize_t size 170 | 171 | cpython.PyBytes_AsStringAndSize(data, &buf, &size) 172 | if size > _MAXINT32: 173 | raise BufferError('string is too large') 174 | # `size` does not account for the NULL at the end. 
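        # The value is thus wired as a big-endian int32 length followed by
        # exactly `size` bytes of payload, with no trailing NUL.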
175 | self.write_int32(size) 176 | self.write_cstr(buf, size) 177 | 178 | cdef write_frbuf(self, FRBuffer *buf): 179 | cdef: 180 | ssize_t buf_len = buf.len 181 | if buf_len > 0: 182 | self.write_cstr(frb_read_all(buf), buf_len) 183 | 184 | cdef write_cstr(self, const char *data, ssize_t len): 185 | self._check_readonly() 186 | self._ensure_alloced(len) 187 | 188 | memcpy(self._buf + self._length, data, len) 189 | self._length += len 190 | 191 | cdef write_int16(self, int16_t i): 192 | self._check_readonly() 193 | self._ensure_alloced(2) 194 | 195 | hton.pack_int16(&self._buf[self._length], i) 196 | self._length += 2 197 | 198 | cdef write_int32(self, int32_t i): 199 | self._check_readonly() 200 | self._ensure_alloced(4) 201 | 202 | hton.pack_int32(&self._buf[self._length], i) 203 | self._length += 4 204 | 205 | cdef write_int64(self, int64_t i): 206 | self._check_readonly() 207 | self._ensure_alloced(8) 208 | 209 | hton.pack_int64(&self._buf[self._length], i) 210 | self._length += 8 211 | 212 | cdef write_float(self, float f): 213 | self._check_readonly() 214 | self._ensure_alloced(4) 215 | 216 | hton.pack_float(&self._buf[self._length], f) 217 | self._length += 4 218 | 219 | cdef write_double(self, double d): 220 | self._check_readonly() 221 | self._ensure_alloced(8) 222 | 223 | hton.pack_double(&self._buf[self._length], d) 224 | self._length += 8 225 | 226 | @staticmethod 227 | cdef WriteBuffer new_message(char type): 228 | cdef WriteBuffer buf 229 | buf = WriteBuffer.__new__(WriteBuffer) 230 | buf.start_message(type) 231 | return buf 232 | 233 | @staticmethod 234 | cdef WriteBuffer new(): 235 | cdef WriteBuffer buf 236 | buf = WriteBuffer.__new__(WriteBuffer) 237 | return buf 238 | 239 | 240 | @cython.no_gc_clear 241 | @cython.final 242 | @cython.freelist(_BUFFER_FREELIST_SIZE) 243 | cdef class ReadBuffer: 244 | 245 | def __cinit__(self): 246 | self._bufs = collections.deque() 247 | self._bufs_append = self._bufs.append 248 | self._bufs_popleft = self._bufs.popleft 249 | self._bufs_len = 0 250 | self._buf0 = None 251 | self._buf0_prev = None 252 | self._pos0 = 0 253 | self._len0 = 0 254 | self._length = 0 255 | 256 | self._current_message_type = 0 257 | self._current_message_len = 0 258 | self._current_message_len_unread = 0 259 | self._current_message_ready = 0 260 | 261 | cdef feed_data(self, data): 262 | cdef: 263 | ssize_t dlen 264 | bytes data_bytes 265 | 266 | if not cpython.PyBytes_CheckExact(data): 267 | if cpythonx.PyByteArray_CheckExact(data): 268 | # ProactorEventLoop in Python 3.10+ seems to be sending 269 | # bytearray objects instead of bytes. Handle this here 270 | # to avoid duplicating this check in every data_received(). 271 | data = bytes(data) 272 | else: 273 | raise BufferError( 274 | 'feed_data: a bytes or bytearray object expected') 275 | 276 | # Uncomment the below code to test code paths that 277 | # read single int/str/bytes sequences are split over 278 | # multiple received buffers. 279 | # 280 | # ll = 107 281 | # if len(data) > ll: 282 | # self.feed_data(data[:ll]) 283 | # self.feed_data(data[ll:]) 284 | # return 285 | 286 | data_bytes = data 287 | 288 | dlen = cpython.Py_SIZE(data_bytes) 289 | if dlen == 0: 290 | # EOF? 
291 | return 292 | 293 | self._bufs_append(data_bytes) 294 | self._length += dlen 295 | 296 | if self._bufs_len == 0: 297 | # First buffer 298 | self._len0 = dlen 299 | self._buf0 = data_bytes 300 | 301 | self._bufs_len += 1 302 | 303 | cdef inline _ensure_first_buf(self): 304 | if PG_DEBUG: 305 | if self._len0 == 0: 306 | raise BufferError('empty first buffer') 307 | if self._length == 0: 308 | raise BufferError('empty buffer') 309 | 310 | if self._pos0 == self._len0: 311 | self._switch_to_next_buf() 312 | 313 | cdef _switch_to_next_buf(self): 314 | # The first buffer is fully read, discard it 315 | self._bufs_popleft() 316 | self._bufs_len -= 1 317 | 318 | # Shouldn't fail, since we've checked that `_length >= 1` 319 | # in _ensure_first_buf() 320 | self._buf0_prev = self._buf0 321 | self._buf0 = self._bufs[0] 322 | 323 | self._pos0 = 0 324 | self._len0 = len(self._buf0) 325 | 326 | if PG_DEBUG: 327 | if self._len0 < 1: 328 | raise BufferError( 329 | 'debug: second buffer of ReadBuffer is empty') 330 | 331 | cdef inline const char* _try_read_bytes(self, ssize_t nbytes): 332 | # Try to read *nbytes* from the first buffer. 333 | # 334 | # Returns pointer to data if there is at least *nbytes* 335 | # in the buffer, NULL otherwise. 336 | # 337 | # Important: caller must call _ensure_first_buf() prior 338 | # to calling try_read_bytes, and must not overread 339 | 340 | cdef: 341 | const char *result 342 | 343 | if PG_DEBUG: 344 | if nbytes > self._length: 345 | return NULL 346 | 347 | if self._current_message_ready: 348 | if self._current_message_len_unread < nbytes: 349 | return NULL 350 | 351 | if self._pos0 + nbytes <= self._len0: 352 | result = cpython.PyBytes_AS_STRING(self._buf0) 353 | result += self._pos0 354 | self._pos0 += nbytes 355 | self._length -= nbytes 356 | if self._current_message_ready: 357 | self._current_message_len_unread -= nbytes 358 | return result 359 | else: 360 | return NULL 361 | 362 | cdef inline _read_into(self, char *buf, ssize_t nbytes): 363 | cdef: 364 | ssize_t nread 365 | char *buf0 366 | 367 | while True: 368 | buf0 = cpython.PyBytes_AS_STRING(self._buf0) 369 | 370 | if self._pos0 + nbytes > self._len0: 371 | nread = self._len0 - self._pos0 372 | memcpy(buf, buf0 + self._pos0, nread) 373 | self._pos0 = self._len0 374 | self._length -= nread 375 | nbytes -= nread 376 | buf += nread 377 | self._ensure_first_buf() 378 | 379 | else: 380 | memcpy(buf, buf0 + self._pos0, nbytes) 381 | self._pos0 += nbytes 382 | self._length -= nbytes 383 | break 384 | 385 | cdef inline _read_and_discard(self, ssize_t nbytes): 386 | cdef: 387 | ssize_t nread 388 | 389 | self._ensure_first_buf() 390 | while True: 391 | if self._pos0 + nbytes > self._len0: 392 | nread = self._len0 - self._pos0 393 | self._pos0 = self._len0 394 | self._length -= nread 395 | nbytes -= nread 396 | self._ensure_first_buf() 397 | 398 | else: 399 | self._pos0 += nbytes 400 | self._length -= nbytes 401 | break 402 | 403 | cdef bytes read_bytes(self, ssize_t nbytes): 404 | cdef: 405 | bytes result 406 | ssize_t nread 407 | const char *cbuf 408 | char *buf 409 | 410 | self._ensure_first_buf() 411 | cbuf = self._try_read_bytes(nbytes) 412 | if cbuf != NULL: 413 | return cpython.PyBytes_FromStringAndSize(cbuf, nbytes) 414 | 415 | if nbytes > self._length: 416 | raise BufferError( 417 | 'not enough data to read {} bytes'.format(nbytes)) 418 | 419 | if self._current_message_ready: 420 | self._current_message_len_unread -= nbytes 421 | if self._current_message_len_unread < 0: 422 | raise BufferError('buffer 
overread') 423 | 424 | result = cpython.PyBytes_FromStringAndSize(NULL, nbytes) 425 | buf = cpython.PyBytes_AS_STRING(result) 426 | self._read_into(buf, nbytes) 427 | return result 428 | 429 | cdef bytes read_len_prefixed_bytes(self): 430 | cdef int32_t size = self.read_int32() 431 | if size < 0: 432 | raise BufferError( 433 | 'negative length for a len-prefixed bytes value') 434 | if size == 0: 435 | return b'' 436 | return self.read_bytes(size) 437 | 438 | cdef str read_len_prefixed_utf8(self): 439 | cdef: 440 | int32_t size 441 | const char *cbuf 442 | 443 | size = self.read_int32() 444 | if size < 0: 445 | raise BufferError( 446 | 'negative length for a len-prefixed bytes value') 447 | 448 | if size == 0: 449 | return '' 450 | 451 | self._ensure_first_buf() 452 | cbuf = self._try_read_bytes(size) 453 | if cbuf != NULL: 454 | return cpython.PyUnicode_DecodeUTF8(cbuf, size, NULL) 455 | else: 456 | return self.read_bytes(size).decode('utf-8') 457 | 458 | cdef read_uuid(self): 459 | cdef: 460 | bytes mem 461 | const char *cbuf 462 | 463 | self._ensure_first_buf() 464 | cbuf = self._try_read_bytes(16) 465 | if cbuf != NULL: 466 | return pg_uuid_from_buf(cbuf) 467 | else: 468 | return pg_UUID(self.read_bytes(16)) 469 | 470 | cdef inline char read_byte(self) except? -1: 471 | cdef const char *first_byte 472 | 473 | if PG_DEBUG: 474 | if not self._buf0: 475 | raise BufferError( 476 | 'debug: first buffer of ReadBuffer is empty') 477 | 478 | self._ensure_first_buf() 479 | first_byte = self._try_read_bytes(1) 480 | if first_byte is NULL: 481 | raise BufferError('not enough data to read one byte') 482 | 483 | return first_byte[0] 484 | 485 | cdef inline int64_t read_int64(self) except? -1: 486 | cdef: 487 | bytes mem 488 | const char *cbuf 489 | 490 | self._ensure_first_buf() 491 | cbuf = self._try_read_bytes(8) 492 | if cbuf != NULL: 493 | return hton.unpack_int64(cbuf) 494 | else: 495 | mem = self.read_bytes(8) 496 | return hton.unpack_int64(cpython.PyBytes_AS_STRING(mem)) 497 | 498 | cdef inline int32_t read_int32(self) except? -1: 499 | cdef: 500 | bytes mem 501 | const char *cbuf 502 | 503 | self._ensure_first_buf() 504 | cbuf = self._try_read_bytes(4) 505 | if cbuf != NULL: 506 | return hton.unpack_int32(cbuf) 507 | else: 508 | mem = self.read_bytes(4) 509 | return hton.unpack_int32(cpython.PyBytes_AS_STRING(mem)) 510 | 511 | cdef inline int16_t read_int16(self) except? 
-1: 512 | cdef: 513 | bytes mem 514 | const char *cbuf 515 | 516 | self._ensure_first_buf() 517 | cbuf = self._try_read_bytes(2) 518 | if cbuf != NULL: 519 | return hton.unpack_int16(cbuf) 520 | else: 521 | mem = self.read_bytes(2) 522 | return hton.unpack_int16(cpython.PyBytes_AS_STRING(mem)) 523 | 524 | cdef inline read_null_str(self): 525 | if not self._current_message_ready: 526 | raise BufferError( 527 | 'read_null_str only works when the message guaranteed ' 528 | 'to be in the buffer') 529 | 530 | cdef: 531 | ssize_t pos 532 | ssize_t nread 533 | bytes result 534 | const char *buf 535 | const char *buf_start 536 | 537 | self._ensure_first_buf() 538 | 539 | buf_start = cpython.PyBytes_AS_STRING(self._buf0) 540 | buf = buf_start + self._pos0 541 | while buf - buf_start < self._len0: 542 | if buf[0] == 0: 543 | pos = buf - buf_start 544 | nread = pos - self._pos0 545 | buf = self._try_read_bytes(nread + 1) 546 | if buf != NULL: 547 | return cpython.PyBytes_FromStringAndSize(buf, nread) 548 | else: 549 | break 550 | else: 551 | buf += 1 552 | 553 | result = b'' 554 | while True: 555 | pos = self._buf0.find(b'\x00', self._pos0) 556 | if pos >= 0: 557 | result += self._buf0[self._pos0 : pos] 558 | nread = pos - self._pos0 + 1 559 | self._pos0 = pos + 1 560 | self._length -= nread 561 | 562 | self._current_message_len_unread -= nread 563 | if self._current_message_len_unread < 0: 564 | raise BufferError( 565 | 'read_null_str: buffer overread') 566 | 567 | return result 568 | 569 | else: 570 | result += self._buf0[self._pos0:] 571 | nread = self._len0 - self._pos0 572 | self._pos0 = self._len0 573 | self._length -= nread 574 | 575 | self._current_message_len_unread -= nread 576 | if self._current_message_len_unread < 0: 577 | raise BufferError( 578 | 'read_null_str: buffer overread') 579 | 580 | self._ensure_first_buf() 581 | 582 | cdef int32_t take_message(self) except -1: 583 | cdef: 584 | const char *cbuf 585 | 586 | if self._current_message_ready: 587 | return 1 588 | 589 | if self._current_message_type == 0: 590 | if self._length < 1: 591 | return 0 592 | self._ensure_first_buf() 593 | cbuf = self._try_read_bytes(1) 594 | if cbuf == NULL: 595 | raise BufferError( 596 | 'failed to read one byte on a non-empty buffer') 597 | self._current_message_type = cbuf[0] 598 | 599 | if self._current_message_len == 0: 600 | if self._length < 4: 601 | return 0 602 | 603 | self._ensure_first_buf() 604 | cbuf = self._try_read_bytes(4) 605 | if cbuf != NULL: 606 | self._current_message_len = hton.unpack_int32(cbuf) 607 | else: 608 | self._current_message_len = self.read_int32() 609 | 610 | self._current_message_len_unread = self._current_message_len - 4 611 | 612 | if self._length < self._current_message_len_unread: 613 | return 0 614 | 615 | self._current_message_ready = 1 616 | return 1 617 | 618 | cdef inline int32_t take_message_type(self, char mtype) except -1: 619 | cdef const char *buf0 620 | 621 | if self._current_message_ready: 622 | return self._current_message_type == mtype 623 | elif self._length >= 1: 624 | self._ensure_first_buf() 625 | buf0 = cpython.PyBytes_AS_STRING(self._buf0) 626 | 627 | return buf0[self._pos0] == mtype and self.take_message() 628 | else: 629 | return 0 630 | 631 | cdef int32_t put_message(self) except -1: 632 | if not self._current_message_ready: 633 | raise BufferError( 634 | 'cannot put message: no message taken') 635 | self._current_message_ready = False 636 | return 0 637 | 638 | cdef inline const char* try_consume_message(self, ssize_t* len): 639 | cdef: 640 | 
ssize_t buf_len 641 | const char *buf 642 | 643 | if not self._current_message_ready: 644 | return NULL 645 | 646 | self._ensure_first_buf() 647 | buf_len = self._current_message_len_unread 648 | buf = self._try_read_bytes(buf_len) 649 | if buf != NULL: 650 | len[0] = buf_len 651 | self._finish_message() 652 | return buf 653 | 654 | cdef discard_message(self): 655 | if not self._current_message_ready: 656 | raise BufferError('no message to discard') 657 | if self._current_message_len_unread > 0: 658 | self._read_and_discard(self._current_message_len_unread) 659 | self._current_message_len_unread = 0 660 | self._finish_message() 661 | 662 | cdef bytes consume_message(self): 663 | if not self._current_message_ready: 664 | raise BufferError('no message to consume') 665 | if self._current_message_len_unread > 0: 666 | mem = self.read_bytes(self._current_message_len_unread) 667 | else: 668 | mem = b'' 669 | self._finish_message() 670 | return mem 671 | 672 | cdef int32_t redirect_messages(self, WriteBuffer buf, char mtype, 673 | int stop_at=0): 674 | # Redirects messages from self into buf until either 675 | # a message with a type different than mtype is encountered, or 676 | # buf contains stop_at bytes. 677 | # Returns the number of messages redirected. 678 | 679 | if not self._current_message_ready: 680 | raise BufferError( 681 | 'consume_full_messages called on a buffer without a ' 682 | 'complete first message') 683 | if mtype != self._current_message_type: 684 | raise BufferError( 685 | 'consume_full_messages called with a wrong mtype') 686 | if self._current_message_len_unread != self._current_message_len - 4: 687 | raise BufferError( 688 | 'consume_full_messages called on a partially read message') 689 | 690 | cdef: 691 | const char* cbuf 692 | ssize_t cbuf_len 693 | int32_t msg_len 694 | ssize_t new_pos0 695 | ssize_t pos_delta 696 | int32_t done 697 | int32_t count 698 | 699 | count = 0 700 | while True: 701 | count += 1 702 | buf.write_byte(mtype) 703 | buf.write_int32(self._current_message_len) 704 | 705 | cbuf = self.try_consume_message(&cbuf_len) 706 | if cbuf != NULL: 707 | buf.write_cstr(cbuf, cbuf_len) 708 | else: 709 | buf.write_bytes(self.consume_message()) 710 | 711 | if self._length > 0: 712 | self._ensure_first_buf() 713 | else: 714 | return count 715 | 716 | if stop_at and buf._length >= stop_at: 717 | return count 718 | 719 | # Fast path: exhaust buf0 as efficiently as possible. 720 | if self._pos0 + 5 <= self._len0: 721 | cbuf = cpython.PyBytes_AS_STRING(self._buf0) 722 | new_pos0 = self._pos0 723 | cbuf_len = self._len0 724 | 725 | done = 0 726 | # Scan the first buffer and find the position of the 727 | # end of the last "mtype" message. 728 | while new_pos0 + 5 <= cbuf_len: 729 | if (cbuf + new_pos0)[0] != mtype: 730 | done = 1 731 | break 732 | if (stop_at and 733 | (buf._length + new_pos0 - self._pos0) > stop_at): 734 | done = 1 735 | break 736 | msg_len = hton.unpack_int32(cbuf + new_pos0 + 1) + 1 737 | if new_pos0 + msg_len > cbuf_len: 738 | break 739 | new_pos0 += msg_len 740 | count += 1 741 | 742 | if new_pos0 != self._pos0: 743 | assert self._pos0 < new_pos0 <= self._len0 744 | 745 | pos_delta = new_pos0 - self._pos0 746 | buf.write_cstr( 747 | cbuf + self._pos0, 748 | pos_delta 749 | ) 750 | 751 | self._pos0 = new_pos0 752 | self._length -= pos_delta 753 | 754 | assert self._length >= 0 755 | 756 | if done: 757 | # The next message is of a different type. 758 | return count 759 | 760 | # Back to slow path. 
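            # take_message_type() peeks at the next message's type byte and,
            # if it matches `mtype`, parses that message's header so the loop
            # can continue; otherwise we are done.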
761 | if not self.take_message_type(mtype): 762 | return count 763 | 764 | cdef bytearray consume_messages(self, char mtype): 765 | """Consume consecutive messages of the same type.""" 766 | cdef: 767 | char *buf 768 | ssize_t nbytes 769 | ssize_t total_bytes = 0 770 | bytearray result 771 | 772 | if not self.take_message_type(mtype): 773 | return None 774 | 775 | # consume_messages is a volume-oriented method, so 776 | # we assume that the remainder of the buffer will contain 777 | # messages of the requested type. 778 | result = cpythonx.PyByteArray_FromStringAndSize(NULL, self._length) 779 | buf = cpythonx.PyByteArray_AsString(result) 780 | 781 | while self.take_message_type(mtype): 782 | self._ensure_first_buf() 783 | nbytes = self._current_message_len_unread 784 | self._read_into(buf, nbytes) 785 | buf += nbytes 786 | total_bytes += nbytes 787 | self._finish_message() 788 | 789 | # Clamp the result to an actual size read. 790 | cpythonx.PyByteArray_Resize(result, total_bytes) 791 | 792 | return result 793 | 794 | cdef finish_message(self): 795 | if self._current_message_type == 0 or not self._current_message_ready: 796 | # The message has already been finished (e.g by consume_message()), 797 | # or has been put back by put_message(). 798 | return 799 | 800 | if self._current_message_len_unread: 801 | if PG_DEBUG: 802 | mtype = chr(self._current_message_type) 803 | 804 | discarded = self.consume_message() 805 | 806 | if PG_DEBUG: 807 | print('!!! discarding message {!r} unread data: {!r}'.format( 808 | mtype, 809 | discarded)) 810 | 811 | self._finish_message() 812 | 813 | cdef inline _finish_message(self): 814 | self._current_message_type = 0 815 | self._current_message_len = 0 816 | self._current_message_ready = 0 817 | self._current_message_len_unread = 0 818 | 819 | @staticmethod 820 | cdef ReadBuffer new_message_parser(object data): 821 | cdef ReadBuffer buf 822 | 823 | buf = ReadBuffer.__new__(ReadBuffer) 824 | buf.feed_data(data) 825 | 826 | buf._current_message_ready = 1 827 | buf._current_message_len_unread = buf._len0 828 | 829 | return buf 830 | -------------------------------------------------------------------------------- /codecs/__init__.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef class CodecContext: 9 | 10 | cpdef get_text_codec(self) 11 | cdef is_encoding_utf8(self) 12 | cpdef get_json_decoder(self) 13 | cdef is_decoding_json(self) 14 | cpdef get_json_encoder(self) 15 | cdef is_encoding_json(self) 16 | 17 | 18 | ctypedef object (*encode_func)(CodecContext settings, 19 | WriteBuffer buf, 20 | object obj) 21 | 22 | ctypedef object (*decode_func)(CodecContext settings, 23 | FRBuffer *buf) 24 | 25 | 26 | # Datetime 27 | cdef date_encode(CodecContext settings, WriteBuffer buf, obj) 28 | cdef date_decode(CodecContext settings, FRBuffer * buf) 29 | cdef date_encode_tuple(CodecContext settings, WriteBuffer buf, obj) 30 | cdef date_decode_tuple(CodecContext settings, FRBuffer * buf) 31 | cdef timestamp_encode(CodecContext settings, WriteBuffer buf, obj) 32 | cdef timestamp_decode(CodecContext settings, FRBuffer * buf) 33 | cdef timestamp_encode_tuple(CodecContext settings, WriteBuffer buf, obj) 34 | cdef timestamp_decode_tuple(CodecContext settings, FRBuffer * buf) 35 | cdef timestamptz_encode(CodecContext 
settings, WriteBuffer buf, obj) 36 | cdef timestamptz_decode(CodecContext settings, FRBuffer * buf) 37 | cdef time_encode(CodecContext settings, WriteBuffer buf, obj) 38 | cdef time_decode(CodecContext settings, FRBuffer * buf) 39 | cdef time_encode_tuple(CodecContext settings, WriteBuffer buf, obj) 40 | cdef time_decode_tuple(CodecContext settings, FRBuffer * buf) 41 | cdef timetz_encode(CodecContext settings, WriteBuffer buf, obj) 42 | cdef timetz_decode(CodecContext settings, FRBuffer * buf) 43 | cdef timetz_encode_tuple(CodecContext settings, WriteBuffer buf, obj) 44 | cdef timetz_decode_tuple(CodecContext settings, FRBuffer * buf) 45 | cdef interval_encode(CodecContext settings, WriteBuffer buf, obj) 46 | cdef interval_decode(CodecContext settings, FRBuffer * buf) 47 | cdef interval_encode_tuple(CodecContext settings, WriteBuffer buf, tuple obj) 48 | cdef interval_decode_tuple(CodecContext settings, FRBuffer * buf) 49 | 50 | 51 | # Bits 52 | cdef bits_encode(CodecContext settings, WriteBuffer wbuf, obj) 53 | cdef bits_decode(CodecContext settings, FRBuffer * buf) 54 | 55 | 56 | # Bools 57 | cdef bool_encode(CodecContext settings, WriteBuffer buf, obj) 58 | cdef bool_decode(CodecContext settings, FRBuffer * buf) 59 | 60 | 61 | # Geometry 62 | cdef box_encode(CodecContext settings, WriteBuffer wbuf, obj) 63 | cdef box_decode(CodecContext settings, FRBuffer * buf) 64 | cdef line_encode(CodecContext settings, WriteBuffer wbuf, obj) 65 | cdef line_decode(CodecContext settings, FRBuffer * buf) 66 | cdef lseg_encode(CodecContext settings, WriteBuffer wbuf, obj) 67 | cdef lseg_decode(CodecContext settings, FRBuffer * buf) 68 | cdef point_encode(CodecContext settings, WriteBuffer wbuf, obj) 69 | cdef point_decode(CodecContext settings, FRBuffer * buf) 70 | cdef path_encode(CodecContext settings, WriteBuffer wbuf, obj) 71 | cdef path_decode(CodecContext settings, FRBuffer * buf) 72 | cdef poly_encode(CodecContext settings, WriteBuffer wbuf, obj) 73 | cdef poly_decode(CodecContext settings, FRBuffer * buf) 74 | cdef circle_encode(CodecContext settings, WriteBuffer wbuf, obj) 75 | cdef circle_decode(CodecContext settings, FRBuffer * buf) 76 | 77 | 78 | # Hstore 79 | cdef hstore_encode(CodecContext settings, WriteBuffer buf, obj) 80 | cdef hstore_decode(CodecContext settings, FRBuffer * buf) 81 | 82 | 83 | # Ints 84 | cdef int2_encode(CodecContext settings, WriteBuffer buf, obj) 85 | cdef int2_decode(CodecContext settings, FRBuffer * buf) 86 | cdef int4_encode(CodecContext settings, WriteBuffer buf, obj) 87 | cdef int4_decode(CodecContext settings, FRBuffer * buf) 88 | cdef uint4_encode(CodecContext settings, WriteBuffer buf, obj) 89 | cdef uint4_decode(CodecContext settings, FRBuffer * buf) 90 | cdef int8_encode(CodecContext settings, WriteBuffer buf, obj) 91 | cdef int8_decode(CodecContext settings, FRBuffer * buf) 92 | cdef uint8_encode(CodecContext settings, WriteBuffer buf, obj) 93 | cdef uint8_decode(CodecContext settings, FRBuffer * buf) 94 | 95 | 96 | # Floats 97 | cdef float4_encode(CodecContext settings, WriteBuffer buf, obj) 98 | cdef float4_decode(CodecContext settings, FRBuffer * buf) 99 | cdef float8_encode(CodecContext settings, WriteBuffer buf, obj) 100 | cdef float8_decode(CodecContext settings, FRBuffer * buf) 101 | 102 | 103 | # JSON 104 | cdef jsonb_encode(CodecContext settings, WriteBuffer buf, obj) 105 | cdef jsonb_decode(CodecContext settings, FRBuffer * buf) 106 | cdef json_encode(CodecContext settings, WriteBuffer buf, obj) 107 | cdef json_decode(CodecContext settings, 
FRBuffer *buf) 108 | 109 | 110 | # JSON path 111 | cdef jsonpath_encode(CodecContext settings, WriteBuffer buf, obj) 112 | cdef jsonpath_decode(CodecContext settings, FRBuffer * buf) 113 | 114 | 115 | # Text 116 | cdef as_pg_string_and_size( 117 | CodecContext settings, obj, char **cstr, ssize_t *size) 118 | cdef text_encode(CodecContext settings, WriteBuffer buf, obj) 119 | cdef text_decode(CodecContext settings, FRBuffer * buf) 120 | 121 | # Bytea 122 | cdef bytea_encode(CodecContext settings, WriteBuffer wbuf, obj) 123 | cdef bytea_decode(CodecContext settings, FRBuffer * buf) 124 | 125 | 126 | # UUID 127 | cdef uuid_encode(CodecContext settings, WriteBuffer wbuf, obj) 128 | cdef uuid_decode(CodecContext settings, FRBuffer * buf) 129 | 130 | 131 | # Numeric 132 | cdef numeric_encode_text(CodecContext settings, WriteBuffer buf, obj) 133 | cdef numeric_decode_text(CodecContext settings, FRBuffer * buf) 134 | cdef numeric_encode_binary(CodecContext settings, WriteBuffer buf, obj) 135 | cdef numeric_decode_binary(CodecContext settings, FRBuffer * buf) 136 | cdef numeric_decode_binary_ex(CodecContext settings, FRBuffer * buf, 137 | bint trail_fract_zero) 138 | 139 | 140 | # Void 141 | cdef void_encode(CodecContext settings, WriteBuffer buf, obj) 142 | cdef void_decode(CodecContext settings, FRBuffer * buf) 143 | 144 | 145 | # tid 146 | cdef tid_encode(CodecContext settings, WriteBuffer buf, obj) 147 | cdef tid_decode(CodecContext settings, FRBuffer * buf) 148 | 149 | 150 | # Network 151 | cdef cidr_encode(CodecContext settings, WriteBuffer buf, obj) 152 | cdef cidr_decode(CodecContext settings, FRBuffer * buf) 153 | cdef inet_encode(CodecContext settings, WriteBuffer buf, obj) 154 | cdef inet_decode(CodecContext settings, FRBuffer * buf) 155 | 156 | 157 | # pg_snapshot 158 | cdef pg_snapshot_encode(CodecContext settings, WriteBuffer buf, obj) 159 | cdef pg_snapshot_decode(CodecContext settings, FRBuffer * buf) 160 | -------------------------------------------------------------------------------- /codecs/bits.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef bits_encode(CodecContext settings, WriteBuffer wbuf, obj): 9 | cdef: 10 | Py_buffer pybuf 11 | bint pybuf_used = False 12 | char *buf 13 | ssize_t len 14 | ssize_t bitlen 15 | 16 | if cpython.PyBytes_CheckExact(obj): 17 | buf = cpython.PyBytes_AS_STRING(obj) 18 | len = cpython.Py_SIZE(obj) 19 | bitlen = len * 8 20 | elif isinstance(obj, pgproto_types.BitString): 21 | cpython.PyBytes_AsStringAndSize(obj.bytes, &buf, &len) 22 | bitlen = obj.__len__() 23 | else: 24 | cpython.PyObject_GetBuffer(obj, &pybuf, cpython.PyBUF_SIMPLE) 25 | pybuf_used = True 26 | buf = pybuf.buf 27 | len = pybuf.len 28 | bitlen = len * 8 29 | 30 | try: 31 | if bitlen > _MAXINT32: 32 | raise ValueError('bit value too long') 33 | wbuf.write_int32(4 + len) 34 | wbuf.write_int32(bitlen) 35 | wbuf.write_cstr(buf, len) 36 | finally: 37 | if pybuf_used: 38 | cpython.PyBuffer_Release(&pybuf) 39 | 40 | 41 | cdef bits_decode(CodecContext settings, FRBuffer *buf): 42 | cdef: 43 | int32_t bitlen = hton.unpack_int32(frb_read(buf, 4)) 44 | ssize_t buf_len = buf.len 45 | 46 | bytes_ = cpython.PyBytes_FromStringAndSize(frb_read_all(buf), buf_len) 47 | return pgproto_types.BitString.frombytes(bytes_, bitlen) 48 | 
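# Note: the varbit wire format handled above is an int32 bit count followed
# by the bits packed MSB-first into ceil(bitlen / 8) bytes.  A rough
# standalone sketch of that layout in plain Python (illustration only, not
# part of this module):

import struct

def encode_varbit(bits: str) -> bytes:
    # e.g. '10110' -> b'\x00\x00\x00\x05\xb0'
    nbits = len(bits)
    padded = bits.ljust(-(-nbits // 8) * 8, '0')
    packed = bytes(int(padded[i:i + 8], 2) for i in range(0, len(padded), 8))
    return struct.pack('!i', nbits) + packed

def decode_varbit(payload: bytes) -> str:
    nbits, = struct.unpack_from('!i', payload)
    return ''.join('{:08b}'.format(b) for b in payload[4:])[:nbits]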
-------------------------------------------------------------------------------- /codecs/bytea.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef bytea_encode(CodecContext settings, WriteBuffer wbuf, obj): 9 | cdef: 10 | Py_buffer pybuf 11 | bint pybuf_used = False 12 | char *buf 13 | ssize_t len 14 | 15 | if cpython.PyBytes_CheckExact(obj): 16 | buf = cpython.PyBytes_AS_STRING(obj) 17 | len = cpython.Py_SIZE(obj) 18 | else: 19 | cpython.PyObject_GetBuffer(obj, &pybuf, cpython.PyBUF_SIMPLE) 20 | pybuf_used = True 21 | buf = pybuf.buf 22 | len = pybuf.len 23 | 24 | try: 25 | wbuf.write_int32(len) 26 | wbuf.write_cstr(buf, len) 27 | finally: 28 | if pybuf_used: 29 | cpython.PyBuffer_Release(&pybuf) 30 | 31 | 32 | cdef bytea_decode(CodecContext settings, FRBuffer *buf): 33 | cdef ssize_t buf_len = buf.len 34 | return cpython.PyBytes_FromStringAndSize(frb_read_all(buf), buf_len) 35 | -------------------------------------------------------------------------------- /codecs/context.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef class CodecContext: 9 | 10 | cpdef get_text_codec(self): 11 | raise NotImplementedError 12 | 13 | cdef is_encoding_utf8(self): 14 | raise NotImplementedError 15 | 16 | cpdef get_json_decoder(self): 17 | raise NotImplementedError 18 | 19 | cdef is_decoding_json(self): 20 | return False 21 | 22 | cpdef get_json_encoder(self): 23 | raise NotImplementedError 24 | 25 | cdef is_encoding_json(self): 26 | return False 27 | -------------------------------------------------------------------------------- /codecs/datetime.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cimport cpython.datetime 9 | import datetime 10 | 11 | cpython.datetime.import_datetime() 12 | 13 | utc = datetime.timezone.utc 14 | date_from_ordinal = datetime.date.fromordinal 15 | timedelta = datetime.timedelta 16 | 17 | pg_epoch_datetime = datetime.datetime(2000, 1, 1) 18 | cdef int32_t pg_epoch_datetime_ts = \ 19 | cpython.PyLong_AsLong(int(pg_epoch_datetime.timestamp())) 20 | 21 | pg_epoch_datetime_utc = datetime.datetime(2000, 1, 1, tzinfo=utc) 22 | cdef int32_t pg_epoch_datetime_utc_ts = \ 23 | cpython.PyLong_AsLong(int(pg_epoch_datetime_utc.timestamp())) 24 | 25 | pg_epoch_date = datetime.date(2000, 1, 1) 26 | cdef int32_t pg_date_offset_ord = \ 27 | cpython.PyLong_AsLong(pg_epoch_date.toordinal()) 28 | 29 | # Binary representations of infinity for datetimes. 
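# PostgreSQL uses INT64_MAX / INT64_MIN as the 'infinity' / '-infinity'
# sentinels for timestamp values, and INT32_MAX / INT32_MIN for dates.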
30 | cdef int64_t pg_time64_infinity = 0x7fffffffffffffff 31 | cdef int64_t pg_time64_negative_infinity = 0x8000000000000000 32 | cdef int32_t pg_date_infinity = 0x7fffffff 33 | cdef int32_t pg_date_negative_infinity = 0x80000000 34 | 35 | infinity_datetime = datetime.datetime( 36 | datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) 37 | 38 | cdef int32_t infinity_datetime_ord = cpython.PyLong_AsLong( 39 | infinity_datetime.toordinal()) 40 | 41 | cdef int64_t infinity_datetime_ts = 252455615999999999 42 | 43 | negative_infinity_datetime = datetime.datetime( 44 | datetime.MINYEAR, 1, 1, 0, 0, 0, 0) 45 | 46 | cdef int32_t negative_infinity_datetime_ord = cpython.PyLong_AsLong( 47 | negative_infinity_datetime.toordinal()) 48 | 49 | cdef int64_t negative_infinity_datetime_ts = -63082281600000000 50 | 51 | infinity_date = datetime.date(datetime.MAXYEAR, 12, 31) 52 | 53 | cdef int32_t infinity_date_ord = cpython.PyLong_AsLong( 54 | infinity_date.toordinal()) 55 | 56 | negative_infinity_date = datetime.date(datetime.MINYEAR, 1, 1) 57 | 58 | cdef int32_t negative_infinity_date_ord = cpython.PyLong_AsLong( 59 | negative_infinity_date.toordinal()) 60 | 61 | 62 | cdef inline _local_timezone(): 63 | d = datetime.datetime.now(datetime.timezone.utc).astimezone() 64 | return datetime.timezone(d.utcoffset()) 65 | 66 | 67 | cdef inline _encode_time(WriteBuffer buf, int64_t seconds, 68 | int32_t microseconds): 69 | # XXX: add support for double timestamps 70 | # int64 timestamps, 71 | cdef int64_t ts = seconds * 1000000 + microseconds 72 | 73 | if ts == infinity_datetime_ts: 74 | buf.write_int64(pg_time64_infinity) 75 | elif ts == negative_infinity_datetime_ts: 76 | buf.write_int64(pg_time64_negative_infinity) 77 | else: 78 | buf.write_int64(ts) 79 | 80 | 81 | cdef inline int32_t _decode_time(FRBuffer *buf, int64_t *seconds, 82 | int32_t *microseconds): 83 | cdef int64_t ts = hton.unpack_int64(frb_read(buf, 8)) 84 | 85 | if ts == pg_time64_infinity: 86 | return 1 87 | elif ts == pg_time64_negative_infinity: 88 | return -1 89 | else: 90 | seconds[0] = ts // 1000000 91 | microseconds[0] = (ts % 1000000) 92 | return 0 93 | 94 | 95 | cdef date_encode(CodecContext settings, WriteBuffer buf, obj): 96 | cdef: 97 | int32_t ordinal = cpython.PyLong_AsLong(obj.toordinal()) 98 | int32_t pg_ordinal 99 | 100 | if ordinal == infinity_date_ord: 101 | pg_ordinal = pg_date_infinity 102 | elif ordinal == negative_infinity_date_ord: 103 | pg_ordinal = pg_date_negative_infinity 104 | else: 105 | pg_ordinal = ordinal - pg_date_offset_ord 106 | 107 | buf.write_int32(4) 108 | buf.write_int32(pg_ordinal) 109 | 110 | 111 | cdef date_encode_tuple(CodecContext settings, WriteBuffer buf, obj): 112 | cdef: 113 | int32_t pg_ordinal 114 | 115 | if len(obj) != 1: 116 | raise ValueError( 117 | 'date tuple encoder: expecting 1 element ' 118 | 'in tuple, got {}'.format(len(obj))) 119 | 120 | pg_ordinal = obj[0] 121 | buf.write_int32(4) 122 | buf.write_int32(pg_ordinal) 123 | 124 | 125 | cdef date_decode(CodecContext settings, FRBuffer *buf): 126 | cdef int32_t pg_ordinal = hton.unpack_int32(frb_read(buf, 4)) 127 | 128 | if pg_ordinal == pg_date_infinity: 129 | return infinity_date 130 | elif pg_ordinal == pg_date_negative_infinity: 131 | return negative_infinity_date 132 | else: 133 | return date_from_ordinal(pg_ordinal + pg_date_offset_ord) 134 | 135 | 136 | cdef date_decode_tuple(CodecContext settings, FRBuffer *buf): 137 | cdef int32_t pg_ordinal = hton.unpack_int32(frb_read(buf, 4)) 138 | 139 | return (pg_ordinal,) 140 | 141 | 142 | cdef 
timestamp_encode(CodecContext settings, WriteBuffer buf, obj): 143 | if not cpython.datetime.PyDateTime_Check(obj): 144 | if cpython.datetime.PyDate_Check(obj): 145 | obj = datetime.datetime(obj.year, obj.month, obj.day) 146 | else: 147 | raise TypeError( 148 | 'expected a datetime.date or datetime.datetime instance, ' 149 | 'got {!r}'.format(type(obj).__name__) 150 | ) 151 | 152 | delta = obj - pg_epoch_datetime 153 | cdef: 154 | int64_t seconds = cpython.PyLong_AsLongLong(delta.days) * 86400 + \ 155 | cpython.PyLong_AsLong(delta.seconds) 156 | int32_t microseconds = cpython.PyLong_AsLong( 157 | delta.microseconds) 158 | 159 | buf.write_int32(8) 160 | _encode_time(buf, seconds, microseconds) 161 | 162 | 163 | cdef timestamp_encode_tuple(CodecContext settings, WriteBuffer buf, obj): 164 | cdef: 165 | int64_t microseconds 166 | 167 | if len(obj) != 1: 168 | raise ValueError( 169 | 'timestamp tuple encoder: expecting 1 element ' 170 | 'in tuple, got {}'.format(len(obj))) 171 | 172 | microseconds = obj[0] 173 | 174 | buf.write_int32(8) 175 | buf.write_int64(microseconds) 176 | 177 | 178 | cdef timestamp_decode(CodecContext settings, FRBuffer *buf): 179 | cdef: 180 | int64_t seconds = 0 181 | int32_t microseconds = 0 182 | int32_t inf = _decode_time(buf, &seconds, µseconds) 183 | 184 | if inf > 0: 185 | # positive infinity 186 | return infinity_datetime 187 | elif inf < 0: 188 | # negative infinity 189 | return negative_infinity_datetime 190 | else: 191 | return pg_epoch_datetime.__add__( 192 | timedelta(0, seconds, microseconds)) 193 | 194 | 195 | cdef timestamp_decode_tuple(CodecContext settings, FRBuffer *buf): 196 | cdef: 197 | int64_t ts = hton.unpack_int64(frb_read(buf, 8)) 198 | 199 | return (ts,) 200 | 201 | 202 | cdef timestamptz_encode(CodecContext settings, WriteBuffer buf, obj): 203 | if not cpython.datetime.PyDateTime_Check(obj): 204 | if cpython.datetime.PyDate_Check(obj): 205 | obj = datetime.datetime(obj.year, obj.month, obj.day, 206 | tzinfo=_local_timezone()) 207 | else: 208 | raise TypeError( 209 | 'expected a datetime.date or datetime.datetime instance, ' 210 | 'got {!r}'.format(type(obj).__name__) 211 | ) 212 | 213 | buf.write_int32(8) 214 | 215 | if obj == infinity_datetime: 216 | buf.write_int64(pg_time64_infinity) 217 | return 218 | elif obj == negative_infinity_datetime: 219 | buf.write_int64(pg_time64_negative_infinity) 220 | return 221 | 222 | utc_dt = obj.astimezone(utc) 223 | 224 | delta = utc_dt - pg_epoch_datetime_utc 225 | cdef: 226 | int64_t seconds = cpython.PyLong_AsLongLong(delta.days) * 86400 + \ 227 | cpython.PyLong_AsLong(delta.seconds) 228 | int32_t microseconds = cpython.PyLong_AsLong( 229 | delta.microseconds) 230 | 231 | _encode_time(buf, seconds, microseconds) 232 | 233 | 234 | cdef timestamptz_decode(CodecContext settings, FRBuffer *buf): 235 | cdef: 236 | int64_t seconds = 0 237 | int32_t microseconds = 0 238 | int32_t inf = _decode_time(buf, &seconds, µseconds) 239 | 240 | if inf > 0: 241 | # positive infinity 242 | return infinity_datetime 243 | elif inf < 0: 244 | # negative infinity 245 | return negative_infinity_datetime 246 | else: 247 | return pg_epoch_datetime_utc.__add__( 248 | timedelta(0, seconds, microseconds)) 249 | 250 | 251 | cdef time_encode(CodecContext settings, WriteBuffer buf, obj): 252 | cdef: 253 | int64_t seconds = cpython.PyLong_AsLong(obj.hour) * 3600 + \ 254 | cpython.PyLong_AsLong(obj.minute) * 60 + \ 255 | cpython.PyLong_AsLong(obj.second) 256 | int32_t microseconds = cpython.PyLong_AsLong(obj.microsecond) 257 | 258 | 
buf.write_int32(8) 259 | _encode_time(buf, seconds, microseconds) 260 | 261 | 262 | cdef time_encode_tuple(CodecContext settings, WriteBuffer buf, obj): 263 | cdef: 264 | int64_t microseconds 265 | 266 | if len(obj) != 1: 267 | raise ValueError( 268 | 'time tuple encoder: expecting 1 element ' 269 | 'in tuple, got {}'.format(len(obj))) 270 | 271 | microseconds = obj[0] 272 | 273 | buf.write_int32(8) 274 | buf.write_int64(microseconds) 275 | 276 | 277 | cdef time_decode(CodecContext settings, FRBuffer *buf): 278 | cdef: 279 | int64_t seconds = 0 280 | int32_t microseconds = 0 281 | 282 | _decode_time(buf, &seconds, µseconds) 283 | 284 | cdef: 285 | int64_t minutes = (seconds / 60) 286 | int64_t sec = seconds % 60 287 | int64_t hours = (minutes / 60) 288 | int64_t min = minutes % 60 289 | 290 | return datetime.time(hours, min, sec, microseconds) 291 | 292 | 293 | cdef time_decode_tuple(CodecContext settings, FRBuffer *buf): 294 | cdef: 295 | int64_t ts = hton.unpack_int64(frb_read(buf, 8)) 296 | 297 | return (ts,) 298 | 299 | 300 | cdef timetz_encode(CodecContext settings, WriteBuffer buf, obj): 301 | offset = obj.tzinfo.utcoffset(None) 302 | 303 | cdef: 304 | int32_t offset_sec = \ 305 | cpython.PyLong_AsLong(offset.days) * 24 * 60 * 60 + \ 306 | cpython.PyLong_AsLong(offset.seconds) 307 | 308 | int64_t seconds = cpython.PyLong_AsLong(obj.hour) * 3600 + \ 309 | cpython.PyLong_AsLong(obj.minute) * 60 + \ 310 | cpython.PyLong_AsLong(obj.second) 311 | 312 | int32_t microseconds = cpython.PyLong_AsLong(obj.microsecond) 313 | 314 | buf.write_int32(12) 315 | _encode_time(buf, seconds, microseconds) 316 | # In Python utcoffset() is the difference between the local time 317 | # and the UTC, whereas in PostgreSQL it's the opposite, 318 | # so we need to flip the sign. 319 | buf.write_int32(-offset_sec) 320 | 321 | 322 | cdef timetz_encode_tuple(CodecContext settings, WriteBuffer buf, obj): 323 | cdef: 324 | int64_t microseconds 325 | int32_t offset_sec 326 | 327 | if len(obj) != 2: 328 | raise ValueError( 329 | 'time tuple encoder: expecting 2 elements2 ' 330 | 'in tuple, got {}'.format(len(obj))) 331 | 332 | microseconds = obj[0] 333 | offset_sec = obj[1] 334 | 335 | buf.write_int32(12) 336 | buf.write_int64(microseconds) 337 | buf.write_int32(offset_sec) 338 | 339 | 340 | cdef timetz_decode(CodecContext settings, FRBuffer *buf): 341 | time = time_decode(settings, buf) 342 | cdef int32_t offset = (hton.unpack_int32(frb_read(buf, 4)) / 60) 343 | # See the comment in the `timetz_encode` method. 
344 | return time.replace(tzinfo=datetime.timezone(timedelta(minutes=-offset))) 345 | 346 | 347 | cdef timetz_decode_tuple(CodecContext settings, FRBuffer *buf): 348 | cdef: 349 | int64_t microseconds = hton.unpack_int64(frb_read(buf, 8)) 350 | int32_t offset_sec = hton.unpack_int32(frb_read(buf, 4)) 351 | 352 | return (microseconds, offset_sec) 353 | 354 | 355 | cdef interval_encode(CodecContext settings, WriteBuffer buf, obj): 356 | cdef: 357 | int32_t days = cpython.PyLong_AsLong(obj.days) 358 | int64_t seconds = cpython.PyLong_AsLongLong(obj.seconds) 359 | int32_t microseconds = cpython.PyLong_AsLong(obj.microseconds) 360 | 361 | buf.write_int32(16) 362 | _encode_time(buf, seconds, microseconds) 363 | buf.write_int32(days) 364 | buf.write_int32(0) # Months 365 | 366 | 367 | cdef interval_encode_tuple(CodecContext settings, WriteBuffer buf, 368 | tuple obj): 369 | cdef: 370 | int32_t months 371 | int32_t days 372 | int64_t microseconds 373 | 374 | if len(obj) != 3: 375 | raise ValueError( 376 | 'interval tuple encoder: expecting 3 elements ' 377 | 'in tuple, got {}'.format(len(obj))) 378 | 379 | months = obj[0] 380 | days = obj[1] 381 | microseconds = obj[2] 382 | 383 | buf.write_int32(16) 384 | buf.write_int64(microseconds) 385 | buf.write_int32(days) 386 | buf.write_int32(months) 387 | 388 | 389 | cdef interval_decode(CodecContext settings, FRBuffer *buf): 390 | cdef: 391 | int32_t days 392 | int32_t months 393 | int32_t years 394 | int64_t seconds = 0 395 | int32_t microseconds = 0 396 | 397 | _decode_time(buf, &seconds, µseconds) 398 | 399 | days = hton.unpack_int32(frb_read(buf, 4)) 400 | months = hton.unpack_int32(frb_read(buf, 4)) 401 | 402 | if months < 0: 403 | years = -(-months // 12) 404 | months = -(-months % 12) 405 | else: 406 | years = (months // 12) 407 | months = (months % 12) 408 | 409 | return datetime.timedelta(days=days + months * 30 + years * 365, 410 | seconds=seconds, microseconds=microseconds) 411 | 412 | 413 | cdef interval_decode_tuple(CodecContext settings, FRBuffer *buf): 414 | cdef: 415 | int32_t days 416 | int32_t months 417 | int64_t microseconds 418 | 419 | microseconds = hton.unpack_int64(frb_read(buf, 8)) 420 | days = hton.unpack_int32(frb_read(buf, 4)) 421 | months = hton.unpack_int32(frb_read(buf, 4)) 422 | 423 | return (months, days, microseconds) 424 | -------------------------------------------------------------------------------- /codecs/float.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | from libc cimport math 9 | 10 | 11 | cdef float4_encode(CodecContext settings, WriteBuffer buf, obj): 12 | cdef double dval = cpython.PyFloat_AsDouble(obj) 13 | cdef float fval = dval 14 | if math.isinf(fval) and not math.isinf(dval): 15 | raise ValueError('value out of float32 range') 16 | 17 | buf.write_int32(4) 18 | buf.write_float(fval) 19 | 20 | 21 | cdef float4_decode(CodecContext settings, FRBuffer *buf): 22 | cdef float f = hton.unpack_float(frb_read(buf, 4)) 23 | return cpython.PyFloat_FromDouble(f) 24 | 25 | 26 | cdef float8_encode(CodecContext settings, WriteBuffer buf, obj): 27 | cdef double dval = cpython.PyFloat_AsDouble(obj) 28 | buf.write_int32(8) 29 | buf.write_double(dval) 30 | 31 | 32 | cdef float8_decode(CodecContext settings, FRBuffer *buf): 33 | cdef double f = 
hton.unpack_double(frb_read(buf, 8)) 34 | return cpython.PyFloat_FromDouble(f) 35 | -------------------------------------------------------------------------------- /codecs/geometry.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef inline _encode_points(WriteBuffer wbuf, object points): 9 | cdef object point 10 | 11 | for point in points: 12 | wbuf.write_double(point[0]) 13 | wbuf.write_double(point[1]) 14 | 15 | 16 | cdef inline _decode_points(FRBuffer *buf): 17 | cdef: 18 | int32_t npts = hton.unpack_int32(frb_read(buf, 4)) 19 | pts = cpython.PyTuple_New(npts) 20 | int32_t i 21 | object point 22 | double x 23 | double y 24 | 25 | for i in range(npts): 26 | x = hton.unpack_double(frb_read(buf, 8)) 27 | y = hton.unpack_double(frb_read(buf, 8)) 28 | point = pgproto_types.Point(x, y) 29 | cpython.Py_INCREF(point) 30 | cpython.PyTuple_SET_ITEM(pts, i, point) 31 | 32 | return pts 33 | 34 | 35 | cdef box_encode(CodecContext settings, WriteBuffer wbuf, obj): 36 | wbuf.write_int32(32) 37 | _encode_points(wbuf, (obj[0], obj[1])) 38 | 39 | 40 | cdef box_decode(CodecContext settings, FRBuffer *buf): 41 | cdef: 42 | double high_x = hton.unpack_double(frb_read(buf, 8)) 43 | double high_y = hton.unpack_double(frb_read(buf, 8)) 44 | double low_x = hton.unpack_double(frb_read(buf, 8)) 45 | double low_y = hton.unpack_double(frb_read(buf, 8)) 46 | 47 | return pgproto_types.Box( 48 | pgproto_types.Point(high_x, high_y), 49 | pgproto_types.Point(low_x, low_y)) 50 | 51 | 52 | cdef line_encode(CodecContext settings, WriteBuffer wbuf, obj): 53 | wbuf.write_int32(24) 54 | wbuf.write_double(obj[0]) 55 | wbuf.write_double(obj[1]) 56 | wbuf.write_double(obj[2]) 57 | 58 | 59 | cdef line_decode(CodecContext settings, FRBuffer *buf): 60 | cdef: 61 | double A = hton.unpack_double(frb_read(buf, 8)) 62 | double B = hton.unpack_double(frb_read(buf, 8)) 63 | double C = hton.unpack_double(frb_read(buf, 8)) 64 | 65 | return pgproto_types.Line(A, B, C) 66 | 67 | 68 | cdef lseg_encode(CodecContext settings, WriteBuffer wbuf, obj): 69 | wbuf.write_int32(32) 70 | _encode_points(wbuf, (obj[0], obj[1])) 71 | 72 | 73 | cdef lseg_decode(CodecContext settings, FRBuffer *buf): 74 | cdef: 75 | double p1_x = hton.unpack_double(frb_read(buf, 8)) 76 | double p1_y = hton.unpack_double(frb_read(buf, 8)) 77 | double p2_x = hton.unpack_double(frb_read(buf, 8)) 78 | double p2_y = hton.unpack_double(frb_read(buf, 8)) 79 | 80 | return pgproto_types.LineSegment((p1_x, p1_y), (p2_x, p2_y)) 81 | 82 | 83 | cdef point_encode(CodecContext settings, WriteBuffer wbuf, obj): 84 | wbuf.write_int32(16) 85 | wbuf.write_double(obj[0]) 86 | wbuf.write_double(obj[1]) 87 | 88 | 89 | cdef point_decode(CodecContext settings, FRBuffer *buf): 90 | cdef: 91 | double x = hton.unpack_double(frb_read(buf, 8)) 92 | double y = hton.unpack_double(frb_read(buf, 8)) 93 | 94 | return pgproto_types.Point(x, y) 95 | 96 | 97 | cdef path_encode(CodecContext settings, WriteBuffer wbuf, obj): 98 | cdef: 99 | int8_t is_closed = 0 100 | ssize_t npts 101 | ssize_t encoded_len 102 | int32_t i 103 | 104 | if cpython.PyTuple_Check(obj): 105 | is_closed = 1 106 | elif cpython.PyList_Check(obj): 107 | is_closed = 0 108 | elif isinstance(obj, pgproto_types.Path): 109 | is_closed = obj.is_closed 110 | 111 | npts = len(obj) 112 | 
encoded_len = 1 + 4 + 16 * npts 113 | if encoded_len > _MAXINT32: 114 | raise ValueError('path value too long') 115 | 116 | wbuf.write_int32(encoded_len) 117 | wbuf.write_byte(is_closed) 118 | wbuf.write_int32(npts) 119 | 120 | _encode_points(wbuf, obj) 121 | 122 | 123 | cdef path_decode(CodecContext settings, FRBuffer *buf): 124 | cdef: 125 | int8_t is_closed = (frb_read(buf, 1)[0]) 126 | 127 | return pgproto_types.Path(*_decode_points(buf), is_closed=is_closed == 1) 128 | 129 | 130 | cdef poly_encode(CodecContext settings, WriteBuffer wbuf, obj): 131 | cdef: 132 | bint is_closed 133 | ssize_t npts 134 | ssize_t encoded_len 135 | int32_t i 136 | 137 | npts = len(obj) 138 | encoded_len = 4 + 16 * npts 139 | if encoded_len > _MAXINT32: 140 | raise ValueError('polygon value too long') 141 | 142 | wbuf.write_int32(encoded_len) 143 | wbuf.write_int32(npts) 144 | _encode_points(wbuf, obj) 145 | 146 | 147 | cdef poly_decode(CodecContext settings, FRBuffer *buf): 148 | return pgproto_types.Polygon(*_decode_points(buf)) 149 | 150 | 151 | cdef circle_encode(CodecContext settings, WriteBuffer wbuf, obj): 152 | wbuf.write_int32(24) 153 | wbuf.write_double(obj[0][0]) 154 | wbuf.write_double(obj[0][1]) 155 | wbuf.write_double(obj[1]) 156 | 157 | 158 | cdef circle_decode(CodecContext settings, FRBuffer *buf): 159 | cdef: 160 | double center_x = hton.unpack_double(frb_read(buf, 8)) 161 | double center_y = hton.unpack_double(frb_read(buf, 8)) 162 | double radius = hton.unpack_double(frb_read(buf, 8)) 163 | 164 | return pgproto_types.Circle((center_x, center_y), radius) 165 | -------------------------------------------------------------------------------- /codecs/hstore.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef hstore_encode(CodecContext settings, WriteBuffer buf, obj): 9 | cdef: 10 | char *str 11 | ssize_t size 12 | ssize_t count 13 | object items 14 | WriteBuffer item_buf = WriteBuffer.new() 15 | 16 | count = len(obj) 17 | if count > _MAXINT32: 18 | raise ValueError('hstore value is too large') 19 | item_buf.write_int32(count) 20 | 21 | if hasattr(obj, 'items'): 22 | items = obj.items() 23 | else: 24 | items = obj 25 | 26 | for k, v in items: 27 | if k is None: 28 | raise ValueError('null value not allowed in hstore key') 29 | as_pg_string_and_size(settings, k, &str, &size) 30 | item_buf.write_int32(size) 31 | item_buf.write_cstr(str, size) 32 | if v is None: 33 | item_buf.write_int32(-1) 34 | else: 35 | as_pg_string_and_size(settings, v, &str, &size) 36 | item_buf.write_int32(size) 37 | item_buf.write_cstr(str, size) 38 | 39 | buf.write_int32(item_buf.len()) 40 | buf.write_buffer(item_buf) 41 | 42 | 43 | cdef hstore_decode(CodecContext settings, FRBuffer *buf): 44 | cdef: 45 | dict result 46 | uint32_t elem_count 47 | int32_t elem_len 48 | uint32_t i 49 | str k 50 | str v 51 | 52 | result = {} 53 | 54 | elem_count = hton.unpack_int32(frb_read(buf, 4)) 55 | if elem_count == 0: 56 | return result 57 | 58 | for i in range(elem_count): 59 | elem_len = hton.unpack_int32(frb_read(buf, 4)) 60 | if elem_len < 0: 61 | raise ValueError('null value not allowed in hstore key') 62 | 63 | k = decode_pg_string(settings, frb_read(buf, elem_len), elem_len) 64 | 65 | elem_len = hton.unpack_int32(frb_read(buf, 4)) 66 | if elem_len < 0: 67 | v = None 68 
| else: 69 | v = decode_pg_string(settings, frb_read(buf, elem_len), elem_len) 70 | 71 | result[k] = v 72 | 73 | return result 74 | -------------------------------------------------------------------------------- /codecs/int.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef bool_encode(CodecContext settings, WriteBuffer buf, obj): 9 | if not cpython.PyBool_Check(obj): 10 | raise TypeError('a boolean is required (got type {})'.format( 11 | type(obj).__name__)) 12 | 13 | buf.write_int32(1) 14 | buf.write_byte(b'\x01' if obj is True else b'\x00') 15 | 16 | 17 | cdef bool_decode(CodecContext settings, FRBuffer *buf): 18 | return frb_read(buf, 1)[0] is b'\x01' 19 | 20 | 21 | cdef int2_encode(CodecContext settings, WriteBuffer buf, obj): 22 | cdef int overflow = 0 23 | cdef long val 24 | 25 | try: 26 | if type(obj) is not int and hasattr(type(obj), '__int__'): 27 | # Silence a Python warning about implicit __int__ 28 | # conversion. 29 | obj = int(obj) 30 | val = cpython.PyLong_AsLong(obj) 31 | except OverflowError: 32 | overflow = 1 33 | 34 | if overflow or val < INT16_MIN or val > INT16_MAX: 35 | raise OverflowError('value out of int16 range') 36 | 37 | buf.write_int32(2) 38 | buf.write_int16(val) 39 | 40 | 41 | cdef int2_decode(CodecContext settings, FRBuffer *buf): 42 | return cpython.PyLong_FromLong(hton.unpack_int16(frb_read(buf, 2))) 43 | 44 | 45 | cdef int4_encode(CodecContext settings, WriteBuffer buf, obj): 46 | cdef int overflow = 0 47 | cdef long val = 0 48 | 49 | try: 50 | if type(obj) is not int and hasattr(type(obj), '__int__'): 51 | # Silence a Python warning about implicit __int__ 52 | # conversion. 53 | obj = int(obj) 54 | val = cpython.PyLong_AsLong(obj) 55 | except OverflowError: 56 | overflow = 1 57 | 58 | # "long" and "long long" have the same size for x86_64, need an extra check 59 | if overflow or (sizeof(val) > 4 and (val < INT32_MIN or val > INT32_MAX)): 60 | raise OverflowError('value out of int32 range') 61 | 62 | buf.write_int32(4) 63 | buf.write_int32(val) 64 | 65 | 66 | cdef int4_decode(CodecContext settings, FRBuffer *buf): 67 | return cpython.PyLong_FromLong(hton.unpack_int32(frb_read(buf, 4))) 68 | 69 | 70 | cdef uint4_encode(CodecContext settings, WriteBuffer buf, obj): 71 | cdef int overflow = 0 72 | cdef unsigned long val = 0 73 | 74 | try: 75 | if type(obj) is not int and hasattr(type(obj), '__int__'): 76 | # Silence a Python warning about implicit __int__ 77 | # conversion. 78 | obj = int(obj) 79 | val = cpython.PyLong_AsUnsignedLong(obj) 80 | except OverflowError: 81 | overflow = 1 82 | 83 | # "long" and "long long" have the same size for x86_64, need an extra check 84 | if overflow or (sizeof(val) > 4 and val > UINT32_MAX): 85 | raise OverflowError('value out of uint32 range') 86 | 87 | buf.write_int32(4) 88 | buf.write_int32(val) 89 | 90 | 91 | cdef uint4_decode(CodecContext settings, FRBuffer *buf): 92 | return cpython.PyLong_FromUnsignedLong( 93 | hton.unpack_int32(frb_read(buf, 4))) 94 | 95 | 96 | cdef int8_encode(CodecContext settings, WriteBuffer buf, obj): 97 | cdef int overflow = 0 98 | cdef long long val 99 | 100 | try: 101 | if type(obj) is not int and hasattr(type(obj), '__int__'): 102 | # Silence a Python warning about implicit __int__ 103 | # conversion. 
104 | obj = int(obj) 105 | val = cpython.PyLong_AsLongLong(obj) 106 | except OverflowError: 107 | overflow = 1 108 | 109 | # Just in case for systems with "long long" bigger than 8 bytes 110 | if overflow or (sizeof(val) > 8 and (val < INT64_MIN or val > INT64_MAX)): 111 | raise OverflowError('value out of int64 range') 112 | 113 | buf.write_int32(8) 114 | buf.write_int64(val) 115 | 116 | 117 | cdef int8_decode(CodecContext settings, FRBuffer *buf): 118 | return cpython.PyLong_FromLongLong(hton.unpack_int64(frb_read(buf, 8))) 119 | 120 | 121 | cdef uint8_encode(CodecContext settings, WriteBuffer buf, obj): 122 | cdef int overflow = 0 123 | cdef unsigned long long val = 0 124 | 125 | try: 126 | if type(obj) is not int and hasattr(type(obj), '__int__'): 127 | # Silence a Python warning about implicit __int__ 128 | # conversion. 129 | obj = int(obj) 130 | val = cpython.PyLong_AsUnsignedLongLong(obj) 131 | except OverflowError: 132 | overflow = 1 133 | 134 | # Just in case for systems with "long long" bigger than 8 bytes 135 | if overflow or (sizeof(val) > 8 and val > UINT64_MAX): 136 | raise OverflowError('value out of uint64 range') 137 | 138 | buf.write_int32(8) 139 | buf.write_int64(val) 140 | 141 | 142 | cdef uint8_decode(CodecContext settings, FRBuffer *buf): 143 | return cpython.PyLong_FromUnsignedLongLong( 144 | hton.unpack_int64(frb_read(buf, 8))) -------------------------------------------------------------------------------- /codecs/json.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef jsonb_encode(CodecContext settings, WriteBuffer buf, obj): 9 | cdef: 10 | char *str 11 | ssize_t size 12 | 13 | if settings.is_encoding_json(): 14 | obj = settings.get_json_encoder().encode(obj) 15 | 16 | as_pg_string_and_size(settings, obj, &str, &size) 17 | 18 | if size > 0x7fffffff - 1: 19 | raise ValueError('string too long') 20 | 21 | buf.write_int32(size + 1) 22 | buf.write_byte(1) # JSONB format version 23 | buf.write_cstr(str, size) 24 | 25 | 26 | cdef jsonb_decode(CodecContext settings, FRBuffer *buf): 27 | cdef uint8_t format = (frb_read(buf, 1)[0]) 28 | 29 | if format != 1: 30 | raise ValueError('unexpected JSONB format: {}'.format(format)) 31 | 32 | rv = text_decode(settings, buf) 33 | 34 | if settings.is_decoding_json(): 35 | rv = settings.get_json_decoder().decode(rv) 36 | 37 | return rv 38 | 39 | 40 | cdef json_encode(CodecContext settings, WriteBuffer buf, obj): 41 | cdef: 42 | char *str 43 | ssize_t size 44 | 45 | if settings.is_encoding_json(): 46 | obj = settings.get_json_encoder().encode(obj) 47 | 48 | text_encode(settings, buf, obj) 49 | 50 | 51 | cdef json_decode(CodecContext settings, FRBuffer *buf): 52 | rv = text_decode(settings, buf) 53 | 54 | if settings.is_decoding_json(): 55 | rv = settings.get_json_decoder().decode(rv) 56 | 57 | return rv 58 | -------------------------------------------------------------------------------- /codecs/jsonpath.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef jsonpath_encode(CodecContext settings, WriteBuffer buf, 
obj): 9 | cdef: 10 | char *str 11 | ssize_t size 12 | 13 | as_pg_string_and_size(settings, obj, &str, &size) 14 | 15 | if size > 0x7fffffff - 1: 16 | raise ValueError('string too long') 17 | 18 | buf.write_int32(size + 1) 19 | buf.write_byte(1) # jsonpath format version 20 | buf.write_cstr(str, size) 21 | 22 | 23 | cdef jsonpath_decode(CodecContext settings, FRBuffer *buf): 24 | cdef uint8_t format = (frb_read(buf, 1)[0]) 25 | 26 | if format != 1: 27 | raise ValueError('unexpected jsonpath format: {}'.format(format)) 28 | 29 | return text_decode(settings, buf) 30 | -------------------------------------------------------------------------------- /codecs/misc.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef void_encode(CodecContext settings, WriteBuffer buf, obj): 9 | # Void is zero bytes 10 | buf.write_int32(0) 11 | 12 | 13 | cdef void_decode(CodecContext settings, FRBuffer *buf): 14 | # Do nothing; void will be passed as NULL so this function 15 | # will never be called. 16 | pass 17 | -------------------------------------------------------------------------------- /codecs/network.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | import ipaddress 9 | 10 | 11 | # defined in postgresql/src/include/inet.h 12 | # 13 | DEF PGSQL_AF_INET = 2 # AF_INET 14 | DEF PGSQL_AF_INET6 = 3 # AF_INET + 1 15 | 16 | 17 | _ipaddr = ipaddress.ip_address 18 | _ipiface = ipaddress.ip_interface 19 | _ipnet = ipaddress.ip_network 20 | 21 | 22 | cdef inline uint8_t _ip_max_prefix_len(int32_t family): 23 | # Maximum number of bits in the network prefix of the specified 24 | # IP protocol version. 25 | if family == PGSQL_AF_INET: 26 | return 32 27 | else: 28 | return 128 29 | 30 | 31 | cdef inline int32_t _ip_addr_len(int32_t family): 32 | # Length of address in bytes for the specified IP protocol version. 
33 | if family == PGSQL_AF_INET: 34 | return 4 35 | else: 36 | return 16 37 | 38 | 39 | cdef inline int8_t _ver_to_family(int32_t version): 40 | if version == 4: 41 | return PGSQL_AF_INET 42 | else: 43 | return PGSQL_AF_INET6 44 | 45 | 46 | cdef inline _net_encode(WriteBuffer buf, int8_t family, uint32_t bits, 47 | int8_t is_cidr, bytes addr): 48 | 49 | cdef: 50 | char *addrbytes 51 | ssize_t addrlen 52 | 53 | cpython.PyBytes_AsStringAndSize(addr, &addrbytes, &addrlen) 54 | 55 | buf.write_int32(4 + addrlen) 56 | buf.write_byte(family) 57 | buf.write_byte(bits) 58 | buf.write_byte(is_cidr) 59 | buf.write_byte(addrlen) 60 | buf.write_cstr(addrbytes, addrlen) 61 | 62 | 63 | cdef net_decode(CodecContext settings, FRBuffer *buf, bint as_cidr): 64 | cdef: 65 | int32_t family = frb_read(buf, 1)[0] 66 | uint8_t bits = frb_read(buf, 1)[0] 67 | int prefix_len 68 | int32_t is_cidr = frb_read(buf, 1)[0] 69 | int32_t addrlen = frb_read(buf, 1)[0] 70 | bytes addr 71 | uint8_t max_prefix_len = _ip_max_prefix_len(family) 72 | 73 | if is_cidr != as_cidr: 74 | raise ValueError('unexpected CIDR flag set in non-cidr value') 75 | 76 | if family != PGSQL_AF_INET and family != PGSQL_AF_INET6: 77 | raise ValueError('invalid address family in "{}" value'.format( 78 | 'cidr' if is_cidr else 'inet' 79 | )) 80 | 81 | max_prefix_len = _ip_max_prefix_len(family) 82 | 83 | if bits > max_prefix_len: 84 | raise ValueError('invalid network prefix length in "{}" value'.format( 85 | 'cidr' if is_cidr else 'inet' 86 | )) 87 | 88 | if addrlen != _ip_addr_len(family): 89 | raise ValueError('invalid address length in "{}" value'.format( 90 | 'cidr' if is_cidr else 'inet' 91 | )) 92 | 93 | addr = cpython.PyBytes_FromStringAndSize(frb_read(buf, addrlen), addrlen) 94 | 95 | if as_cidr or bits != max_prefix_len: 96 | prefix_len = cpython.PyLong_FromLong(bits) 97 | 98 | if as_cidr: 99 | return _ipnet((addr, prefix_len)) 100 | else: 101 | return _ipiface((addr, prefix_len)) 102 | else: 103 | return _ipaddr(addr) 104 | 105 | 106 | cdef cidr_encode(CodecContext settings, WriteBuffer buf, obj): 107 | cdef: 108 | object ipnet 109 | int8_t family 110 | 111 | ipnet = _ipnet(obj) 112 | family = _ver_to_family(ipnet.version) 113 | _net_encode(buf, family, ipnet.prefixlen, 1, ipnet.network_address.packed) 114 | 115 | 116 | cdef cidr_decode(CodecContext settings, FRBuffer *buf): 117 | return net_decode(settings, buf, True) 118 | 119 | 120 | cdef inet_encode(CodecContext settings, WriteBuffer buf, obj): 121 | cdef: 122 | object ipaddr 123 | int8_t family 124 | 125 | try: 126 | ipaddr = _ipaddr(obj) 127 | except ValueError: 128 | # PostgreSQL accepts *both* CIDR and host values 129 | # for the host datatype. 
130 | ipaddr = _ipiface(obj) 131 | family = _ver_to_family(ipaddr.version) 132 | _net_encode(buf, family, ipaddr.network.prefixlen, 1, ipaddr.packed) 133 | else: 134 | family = _ver_to_family(ipaddr.version) 135 | _net_encode(buf, family, _ip_max_prefix_len(family), 0, ipaddr.packed) 136 | 137 | 138 | cdef inet_decode(CodecContext settings, FRBuffer *buf): 139 | return net_decode(settings, buf, False) 140 | -------------------------------------------------------------------------------- /codecs/numeric.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | from libc.math cimport abs, log10 9 | from libc.stdio cimport snprintf 10 | 11 | import decimal 12 | 13 | # defined in postgresql/src/backend/utils/adt/numeric.c 14 | DEF DEC_DIGITS = 4 15 | DEF MAX_DSCALE = 0x3FFF 16 | DEF NUMERIC_POS = 0x0000 17 | DEF NUMERIC_NEG = 0x4000 18 | DEF NUMERIC_NAN = 0xC000 19 | DEF NUMERIC_PINF = 0xD000 20 | DEF NUMERIC_NINF = 0xF000 21 | 22 | _Dec = decimal.Decimal 23 | 24 | 25 | cdef numeric_encode_text(CodecContext settings, WriteBuffer buf, obj): 26 | text_encode(settings, buf, str(obj)) 27 | 28 | 29 | cdef numeric_decode_text(CodecContext settings, FRBuffer *buf): 30 | return _Dec(text_decode(settings, buf)) 31 | 32 | 33 | cdef numeric_encode_binary(CodecContext settings, WriteBuffer buf, obj): 34 | cdef: 35 | object dec 36 | object dt 37 | int64_t exponent 38 | int64_t i 39 | int64_t j 40 | tuple pydigits 41 | int64_t num_pydigits 42 | int16_t pgdigit 43 | int64_t num_pgdigits 44 | int16_t dscale 45 | int64_t dweight 46 | int64_t weight 47 | uint16_t sign 48 | int64_t padding_size = 0 49 | 50 | if isinstance(obj, _Dec): 51 | dec = obj 52 | else: 53 | dec = _Dec(obj) 54 | 55 | dt = dec.as_tuple() 56 | 57 | if dt.exponent == 'n' or dt.exponent == 'N': 58 | # NaN 59 | sign = NUMERIC_NAN 60 | num_pgdigits = 0 61 | weight = 0 62 | dscale = 0 63 | elif dt.exponent == 'F': 64 | # Infinity 65 | if dt.sign: 66 | sign = NUMERIC_NINF 67 | else: 68 | sign = NUMERIC_PINF 69 | num_pgdigits = 0 70 | weight = 0 71 | dscale = 0 72 | else: 73 | exponent = dt.exponent 74 | if exponent < 0 and -exponent > MAX_DSCALE: 75 | raise ValueError( 76 | 'cannot encode Decimal value into numeric: ' 77 | 'exponent is too small') 78 | 79 | if dt.sign: 80 | sign = NUMERIC_NEG 81 | else: 82 | sign = NUMERIC_POS 83 | 84 | pydigits = dt.digits 85 | num_pydigits = len(pydigits) 86 | 87 | dweight = num_pydigits + exponent - 1 88 | if dweight >= 0: 89 | weight = (dweight + DEC_DIGITS) // DEC_DIGITS - 1 90 | else: 91 | weight = -((-dweight - 1) // DEC_DIGITS + 1) 92 | 93 | if weight > 2 ** 16 - 1: 94 | raise ValueError( 95 | 'cannot encode Decimal value into numeric: ' 96 | 'exponent is too large') 97 | 98 | padding_size = \ 99 | (weight + 1) * DEC_DIGITS - (dweight + 1) 100 | num_pgdigits = \ 101 | (num_pydigits + padding_size + DEC_DIGITS - 1) // DEC_DIGITS 102 | 103 | if num_pgdigits > 2 ** 16 - 1: 104 | raise ValueError( 105 | 'cannot encode Decimal value into numeric: ' 106 | 'number of digits is too large') 107 | 108 | # Pad decimal digits to provide room for correct Postgres 109 | # digit alignment in the digit computation loop. 
110 | pydigits = (0,) * DEC_DIGITS + pydigits + (0,) * DEC_DIGITS 111 | 112 | if exponent < 0: 113 | if -exponent > MAX_DSCALE: 114 | raise ValueError( 115 | 'cannot encode Decimal value into numeric: ' 116 | 'exponent is too small') 117 | dscale = -exponent 118 | else: 119 | dscale = 0 120 | 121 | buf.write_int32(2 + 2 + 2 + 2 + 2 * num_pgdigits) 122 | buf.write_int16(num_pgdigits) 123 | buf.write_int16(weight) 124 | buf.write_int16(sign) 125 | buf.write_int16(dscale) 126 | 127 | j = DEC_DIGITS - padding_size 128 | 129 | for i in range(num_pgdigits): 130 | pgdigit = (pydigits[j] * 1000 + pydigits[j + 1] * 100 + 131 | pydigits[j + 2] * 10 + pydigits[j + 3]) 132 | j += DEC_DIGITS 133 | buf.write_int16(pgdigit) 134 | 135 | 136 | # The decoding strategy here is to form a string representation of 137 | # the numeric var, as it is faster than passing an iterable of digits. 138 | # For this reason the below code is pure overhead and is ~25% slower 139 | # than the simple text decoder above. That said, we need the binary 140 | # decoder to support binary COPY with numeric values. 141 | cdef numeric_decode_binary_ex( 142 | CodecContext settings, 143 | FRBuffer *buf, 144 | bint trail_fract_zero, 145 | ): 146 | cdef: 147 | uint16_t num_pgdigits = hton.unpack_int16(frb_read(buf, 2)) 148 | int16_t weight = hton.unpack_int16(frb_read(buf, 2)) 149 | uint16_t sign = hton.unpack_int16(frb_read(buf, 2)) 150 | uint16_t dscale = hton.unpack_int16(frb_read(buf, 2)) 151 | int16_t pgdigit0 152 | ssize_t i 153 | int16_t pgdigit 154 | object pydigits 155 | ssize_t num_pydigits 156 | ssize_t actual_num_pydigits 157 | ssize_t buf_size 158 | int64_t exponent 159 | int64_t abs_exponent 160 | ssize_t exponent_chars 161 | ssize_t front_padding = 0 162 | ssize_t num_fract_digits 163 | ssize_t trailing_fract_zeros_adj 164 | char smallbuf[_NUMERIC_DECODER_SMALLBUF_SIZE] 165 | char *charbuf 166 | char *bufptr 167 | bint buf_allocated = False 168 | 169 | if sign == NUMERIC_NAN: 170 | # Not-a-number 171 | return _Dec('NaN') 172 | elif sign == NUMERIC_PINF: 173 | # +Infinity 174 | return _Dec('Infinity') 175 | elif sign == NUMERIC_NINF: 176 | # -Infinity 177 | return _Dec('-Infinity') 178 | 179 | if num_pgdigits == 0: 180 | # Zero 181 | return _Dec('0e-' + str(dscale)) 182 | 183 | pgdigit0 = hton.unpack_int16(frb_read(buf, 2)) 184 | if weight >= 0: 185 | if pgdigit0 < 10: 186 | front_padding = 3 187 | elif pgdigit0 < 100: 188 | front_padding = 2 189 | elif pgdigit0 < 1000: 190 | front_padding = 1 191 | 192 | # The number of fractional decimal digits actually encoded in 193 | # base-DEC_DEIGITS digits sent by Postgres. 194 | num_fract_digits = (num_pgdigits - weight - 1) * DEC_DIGITS 195 | 196 | # The trailing zero adjustment necessary to obtain exactly 197 | # dscale number of fractional digits in output. May be negative, 198 | # which indicates that trailing zeros in the last input digit 199 | # should be discarded. 200 | trailing_fract_zeros_adj = dscale - num_fract_digits 201 | 202 | # Maximum possible number of decimal digits in base 10. 203 | # The actual number might be up to 3 digits smaller due to 204 | # leading zeros in first input digit. 205 | num_pydigits = num_pgdigits * DEC_DIGITS 206 | if trailing_fract_zeros_adj > 0: 207 | num_pydigits += trailing_fract_zeros_adj 208 | 209 | # Exponent. 210 | exponent = (weight + 1) * DEC_DIGITS - front_padding 211 | abs_exponent = abs(exponent) 212 | if abs_exponent != 0: 213 | # Number of characters required to render absolute exponent value 214 | # in decimal. 
215 | exponent_chars = log10(abs_exponent) + 1 216 | else: 217 | exponent_chars = 0 218 | 219 | # Output buffer size. 220 | buf_size = ( 221 | 1 + # sign 222 | 1 + # leading zero 223 | 1 + # decimal dot 224 | num_pydigits + # digits 225 | 1 + # possible trailing zero padding 226 | 2 + # exponent indicator (E-,E+) 227 | exponent_chars + # exponent 228 | 1 # null terminator char 229 | ) 230 | 231 | if buf_size > _NUMERIC_DECODER_SMALLBUF_SIZE: 232 | charbuf = cpython.PyMem_Malloc(buf_size) 233 | buf_allocated = True 234 | else: 235 | charbuf = smallbuf 236 | 237 | try: 238 | bufptr = charbuf 239 | 240 | if sign == NUMERIC_NEG: 241 | bufptr[0] = b'-' 242 | bufptr += 1 243 | 244 | bufptr[0] = b'0' 245 | bufptr[1] = b'.' 246 | bufptr += 2 247 | 248 | if weight >= 0: 249 | bufptr = _unpack_digit_stripping_lzeros(bufptr, pgdigit0) 250 | else: 251 | bufptr = _unpack_digit(bufptr, pgdigit0) 252 | 253 | for i in range(1, num_pgdigits): 254 | pgdigit = hton.unpack_int16(frb_read(buf, 2)) 255 | bufptr = _unpack_digit(bufptr, pgdigit) 256 | 257 | if dscale: 258 | if trailing_fract_zeros_adj > 0: 259 | for i in range(trailing_fract_zeros_adj): 260 | bufptr[i] = b'0' 261 | 262 | # If display scale is _less_ than the number of rendered digits, 263 | # trailing_fract_zeros_adj will be negative and this will strip 264 | # the excess trailing zeros. 265 | bufptr += trailing_fract_zeros_adj 266 | 267 | if trail_fract_zero: 268 | # Check if the number of rendered digits matches the exponent, 269 | # and if so, add another trailing zero, so the result always 270 | # appears with a decimal point. 271 | actual_num_pydigits = bufptr - charbuf - 2 272 | if sign == NUMERIC_NEG: 273 | actual_num_pydigits -= 1 274 | 275 | if actual_num_pydigits == abs_exponent: 276 | bufptr[0] = b'0' 277 | bufptr += 1 278 | 279 | if exponent != 0: 280 | bufptr[0] = b'E' 281 | if exponent < 0: 282 | bufptr[1] = b'-' 283 | else: 284 | bufptr[1] = b'+' 285 | bufptr += 2 286 | snprintf(bufptr, exponent_chars + 1, '%d', 287 | abs_exponent) 288 | bufptr += exponent_chars 289 | 290 | bufptr[0] = 0 291 | 292 | pydigits = cpythonx.PyUnicode_FromString(charbuf) 293 | 294 | return _Dec(pydigits) 295 | 296 | finally: 297 | if buf_allocated: 298 | cpython.PyMem_Free(charbuf) 299 | 300 | 301 | cdef numeric_decode_binary(CodecContext settings, FRBuffer *buf): 302 | return numeric_decode_binary_ex(settings, buf, False) 303 | 304 | 305 | cdef inline char *_unpack_digit_stripping_lzeros(char *buf, int64_t pgdigit): 306 | cdef: 307 | int64_t d 308 | bint significant 309 | 310 | d = pgdigit // 1000 311 | significant = (d > 0) 312 | if significant: 313 | pgdigit -= d * 1000 314 | buf[0] = (d + b'0') 315 | buf += 1 316 | 317 | d = pgdigit // 100 318 | significant |= (d > 0) 319 | if significant: 320 | pgdigit -= d * 100 321 | buf[0] = (d + b'0') 322 | buf += 1 323 | 324 | d = pgdigit // 10 325 | significant |= (d > 0) 326 | if significant: 327 | pgdigit -= d * 10 328 | buf[0] = (d + b'0') 329 | buf += 1 330 | 331 | buf[0] = (pgdigit + b'0') 332 | buf += 1 333 | 334 | return buf 335 | 336 | 337 | cdef inline char *_unpack_digit(char *buf, int64_t pgdigit): 338 | cdef: 339 | int64_t d 340 | 341 | d = pgdigit // 1000 342 | pgdigit -= d * 1000 343 | buf[0] = (d + b'0') 344 | 345 | d = pgdigit // 100 346 | pgdigit -= d * 100 347 | buf[1] = (d + b'0') 348 | 349 | d = pgdigit // 10 350 | pgdigit -= d * 10 351 | buf[2] = (d + b'0') 352 | 353 | buf[3] = (pgdigit + b'0') 354 | buf += 4 355 | 356 | return buf 357 | 
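The binary numeric encoder above packs a Decimal into PostgreSQL's on-the-wire layout: an int16 header of ndigits, weight, sign and dscale, followed by base-10000 digit groups. Below is a minimal pure-Python sketch of that same packing, given here only to make the layout concrete; the helper name, the use of the struct module and the final assert are assumptions made for this illustration and are not part of pgproto.

# Illustrative sketch only -- not part of pgproto.  Mirrors the
# dweight/weight/padding arithmetic of numeric_encode_binary above.
# Finite, non-NaN values only.
import struct
from decimal import Decimal

DEC_DIGITS = 4          # decimal digits per base-10000 "pgdigit"
NUMERIC_POS = 0x0000
NUMERIC_NEG = 0x4000

def encode_numeric_sketch(value: Decimal) -> bytes:
    dt = value.as_tuple()
    exponent = dt.exponent
    dscale = -exponent if exponent < 0 else 0
    # Align decimal digits on a DEC_DIGITS boundary, as the encoder above does.
    dweight = len(dt.digits) + exponent - 1
    if dweight >= 0:
        weight = (dweight + DEC_DIGITS) // DEC_DIGITS - 1
    else:
        weight = -((-dweight - 1) // DEC_DIGITS + 1)
    padding = (weight + 1) * DEC_DIGITS - (dweight + 1)
    digits = (0,) * padding + dt.digits
    digits += (0,) * (-len(digits) % DEC_DIGITS)     # trailing zero fill
    pgdigits = [
        digits[i] * 1000 + digits[i + 1] * 100
        + digits[i + 2] * 10 + digits[i + 3]
        for i in range(0, len(digits), DEC_DIGITS)
    ]
    sign = NUMERIC_NEG if dt.sign else NUMERIC_POS
    header = struct.pack('!hhHH', len(pgdigits), weight, sign, dscale)
    return header + struct.pack('!%dh' % len(pgdigits), *pgdigits)

# Decimal('1234.5') -> ndigits=2, weight=0, dscale=1, digits [1234, 5000]
assert (encode_numeric_sketch(Decimal('1234.5'))
        == struct.pack('!hhHHhh', 2, 0, NUMERIC_POS, 1, 1234, 5000))
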
-------------------------------------------------------------------------------- /codecs/pg_snapshot.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef pg_snapshot_encode(CodecContext settings, WriteBuffer buf, obj): 9 | cdef: 10 | ssize_t nxip 11 | uint64_t xmin 12 | uint64_t xmax 13 | int i 14 | WriteBuffer xip_buf = WriteBuffer.new() 15 | 16 | if not (cpython.PyTuple_Check(obj) or cpython.PyList_Check(obj)): 17 | raise TypeError( 18 | 'list or tuple expected (got type {})'.format(type(obj))) 19 | 20 | if len(obj) != 3: 21 | raise ValueError( 22 | 'invalid number of elements in txid_snapshot tuple, expecting 4') 23 | 24 | nxip = len(obj[2]) 25 | if nxip > _MAXINT32: 26 | raise ValueError('txid_snapshot value is too long') 27 | 28 | xmin = obj[0] 29 | xmax = obj[1] 30 | 31 | for i in range(nxip): 32 | xip_buf.write_int64( 33 | cpython.PyLong_AsUnsignedLongLong(obj[2][i])) 34 | 35 | buf.write_int32(20 + xip_buf.len()) 36 | 37 | buf.write_int32(nxip) 38 | buf.write_int64(xmin) 39 | buf.write_int64(xmax) 40 | buf.write_buffer(xip_buf) 41 | 42 | 43 | cdef pg_snapshot_decode(CodecContext settings, FRBuffer *buf): 44 | cdef: 45 | int32_t nxip 46 | uint64_t xmin 47 | uint64_t xmax 48 | tuple xip_tup 49 | int32_t i 50 | object xip 51 | 52 | nxip = hton.unpack_int32(frb_read(buf, 4)) 53 | xmin = hton.unpack_int64(frb_read(buf, 8)) 54 | xmax = hton.unpack_int64(frb_read(buf, 8)) 55 | 56 | xip_tup = cpython.PyTuple_New(nxip) 57 | for i in range(nxip): 58 | xip = cpython.PyLong_FromUnsignedLongLong( 59 | hton.unpack_int64(frb_read(buf, 8))) 60 | cpython.Py_INCREF(xip) 61 | cpython.PyTuple_SET_ITEM(xip_tup, i, xip) 62 | 63 | return (xmin, xmax, xip_tup) 64 | -------------------------------------------------------------------------------- /codecs/text.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef inline as_pg_string_and_size( 9 | CodecContext settings, obj, char **cstr, ssize_t *size): 10 | 11 | if not cpython.PyUnicode_Check(obj): 12 | raise TypeError('expected str, got {}'.format(type(obj).__name__)) 13 | 14 | if settings.is_encoding_utf8(): 15 | cstr[0] = cpythonx.PyUnicode_AsUTF8AndSize(obj, size) 16 | else: 17 | encoded = settings.get_text_codec().encode(obj)[0] 18 | cpython.PyBytes_AsStringAndSize(encoded, cstr, size) 19 | 20 | if size[0] > 0x7fffffff: 21 | raise ValueError('string too long') 22 | 23 | 24 | cdef text_encode(CodecContext settings, WriteBuffer buf, obj): 25 | cdef: 26 | char *str 27 | ssize_t size 28 | 29 | as_pg_string_and_size(settings, obj, &str, &size) 30 | 31 | buf.write_int32(size) 32 | buf.write_cstr(str, size) 33 | 34 | 35 | cdef inline decode_pg_string(CodecContext settings, const char* data, 36 | ssize_t len): 37 | 38 | if settings.is_encoding_utf8(): 39 | # decode UTF-8 in strict mode 40 | return cpython.PyUnicode_DecodeUTF8(data, len, NULL) 41 | else: 42 | bytes = cpython.PyBytes_FromStringAndSize(data, len) 43 | return settings.get_text_codec().decode(bytes)[0] 44 | 45 | 46 | cdef text_decode(CodecContext settings, FRBuffer *buf): 47 | cdef 
ssize_t buf_len = buf.len 48 | return decode_pg_string(settings, frb_read_all(buf), buf_len) 49 | -------------------------------------------------------------------------------- /codecs/tid.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef tid_encode(CodecContext settings, WriteBuffer buf, obj): 9 | cdef int overflow = 0 10 | cdef unsigned long block, offset 11 | 12 | if not (cpython.PyTuple_Check(obj) or cpython.PyList_Check(obj)): 13 | raise TypeError( 14 | 'list or tuple expected (got type {})'.format(type(obj))) 15 | 16 | if len(obj) != 2: 17 | raise ValueError( 18 | 'invalid number of elements in tid tuple, expecting 2') 19 | 20 | try: 21 | block = cpython.PyLong_AsUnsignedLong(obj[0]) 22 | except OverflowError: 23 | overflow = 1 24 | 25 | # "long" and "long long" have the same size for x86_64, need an extra check 26 | if overflow or (sizeof(block) > 4 and block > UINT32_MAX): 27 | raise OverflowError('tuple id block value out of uint32 range') 28 | 29 | try: 30 | offset = cpython.PyLong_AsUnsignedLong(obj[1]) 31 | overflow = 0 32 | except OverflowError: 33 | overflow = 1 34 | 35 | if overflow or offset > 65535: 36 | raise OverflowError('tuple id offset value out of uint16 range') 37 | 38 | buf.write_int32(6) 39 | buf.write_int32(block) 40 | buf.write_int16(offset) 41 | 42 | 43 | cdef tid_decode(CodecContext settings, FRBuffer *buf): 44 | cdef: 45 | uint32_t block 46 | uint16_t offset 47 | 48 | block = hton.unpack_int32(frb_read(buf, 4)) 49 | offset = hton.unpack_int16(frb_read(buf, 2)) 50 | 51 | return (block, offset) 52 | -------------------------------------------------------------------------------- /codecs/uuid.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef uuid_encode(CodecContext settings, WriteBuffer wbuf, obj): 9 | cdef: 10 | char buf[16] 11 | 12 | if type(obj) is pg_UUID: 13 | wbuf.write_int32(16) 14 | wbuf.write_cstr((obj)._data, 16) 15 | elif cpython.PyUnicode_Check(obj): 16 | pg_uuid_bytes_from_str(obj, buf) 17 | wbuf.write_int32(16) 18 | wbuf.write_cstr(buf, 16) 19 | else: 20 | bytea_encode(settings, wbuf, obj.bytes) 21 | 22 | 23 | cdef uuid_decode(CodecContext settings, FRBuffer *buf): 24 | if buf.len != 16: 25 | raise TypeError( 26 | f'cannot decode UUID, expected 16 bytes, got {buf.len}') 27 | return pg_uuid_from_buf(frb_read_all(buf)) 28 | -------------------------------------------------------------------------------- /consts.pxi: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | DEF _MAXINT32 = 2**31 - 1 9 | DEF _NUMERIC_DECODER_SMALLBUF_SIZE = 256 10 | -------------------------------------------------------------------------------- /cpythonx.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and 
contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | from cpython cimport Py_buffer 9 | 10 | cdef extern from "Python.h": 11 | int PyUnicode_1BYTE_KIND 12 | 13 | int PyByteArray_CheckExact(object) 14 | int PyByteArray_Resize(object, ssize_t) except -1 15 | object PyByteArray_FromStringAndSize(const char *, ssize_t) 16 | char* PyByteArray_AsString(object) 17 | 18 | object PyUnicode_FromString(const char *u) 19 | const char* PyUnicode_AsUTF8AndSize( 20 | object unicode, ssize_t *size) except NULL 21 | 22 | object PyUnicode_FromKindAndData( 23 | int kind, const void *buffer, Py_ssize_t size) 24 | -------------------------------------------------------------------------------- /debug.h: -------------------------------------------------------------------------------- 1 | #ifndef PG_DEBUG 2 | #define PG_DEBUG 0 3 | #endif 4 | -------------------------------------------------------------------------------- /debug.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef extern from "debug.h": 9 | 10 | cdef int PG_DEBUG 11 | -------------------------------------------------------------------------------- /frb.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef: 9 | 10 | struct FRBuffer: 11 | const char* buf 12 | ssize_t len 13 | 14 | inline ssize_t frb_get_len(FRBuffer *frb): 15 | return frb.len 16 | 17 | inline void frb_set_len(FRBuffer *frb, ssize_t new_len): 18 | frb.len = new_len 19 | 20 | inline void frb_init(FRBuffer *frb, const char *buf, ssize_t len): 21 | frb.buf = buf 22 | frb.len = len 23 | 24 | inline const char* frb_read(FRBuffer *frb, ssize_t n) except NULL: 25 | cdef const char *result 26 | 27 | frb_check(frb, n) 28 | 29 | result = frb.buf 30 | frb.buf += n 31 | frb.len -= n 32 | 33 | return result 34 | 35 | inline const char* frb_read_all(FRBuffer *frb): 36 | cdef const char *result 37 | result = frb.buf 38 | frb.buf += frb.len 39 | frb.len = 0 40 | return result 41 | 42 | inline FRBuffer *frb_slice_from(FRBuffer *frb, 43 | FRBuffer* source, ssize_t len): 44 | frb.buf = frb_read(source, len) 45 | frb.len = len 46 | return frb 47 | 48 | object frb_check(FRBuffer *frb, ssize_t n) 49 | -------------------------------------------------------------------------------- /frb.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef object frb_check(FRBuffer *frb, ssize_t n): 9 | if n > frb.len: 10 | raise AssertionError( 11 | f'insufficient data in buffer: requested {n} ' 12 | f'remaining {frb.len}') 13 | -------------------------------------------------------------------------------- /hton.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #if 
defined(__linux__) || defined(__CYGWIN__) || defined(__sun) 4 | #include 5 | #elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__) \ 6 | || defined(__DragonFly__) 7 | #include 8 | #elif defined(__APPLE__) 9 | #include 10 | #elif defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__) 11 | /* Assume Windows is always LE. There seems to be no reliable way 12 | to detect endianness there */ 13 | #define __LITTLE_ENDIAN 1234 14 | #define __BIG_ENDIAN 4321 15 | #define __BYTE_ORDER __LITTLE_ENDIAN 16 | #endif 17 | 18 | #if defined(_BYTE_ORDER) && !defined(__BYTE_ORDER) 19 | #define __BYTE_ORDER _BYTE_ORDER 20 | #endif 21 | 22 | #if defined(BYTE_ORDER) && !defined(__BYTE_ORDER) 23 | #define __BYTE_ORDER BYTE_ORDER 24 | #endif 25 | 26 | #if defined(_LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN) 27 | #define __LITTLE_ENDIAN _LITTLE_ENDIAN 28 | #endif 29 | 30 | #if defined(LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN) 31 | #define __LITTLE_ENDIAN LITTLE_ENDIAN 32 | #endif 33 | 34 | #if defined(_BIG_ENDIAN) && !defined(__BIG_ENDIAN) 35 | #define __BIG_ENDIAN _BIG_ENDIAN 36 | #endif 37 | 38 | #if defined(BIG_ENDIAN) && !defined(__BIG_ENDIAN) 39 | #define __BIG_ENDIAN BIG_ENDIAN 40 | #endif 41 | 42 | #if !defined(__BYTE_ORDER) || !defined(__LITTLE_ENDIAN) \ 43 | || !defined(__BIG_ENDIAN) 44 | #error Cannot determine platform byte order. 45 | #endif 46 | 47 | #if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) 48 | 49 | #define apg_bswap16(x) __builtin_bswap16(x) 50 | #define apg_bswap32(x) __builtin_bswap32(x) 51 | #define apg_bswap64(x) __builtin_bswap64(x) 52 | 53 | #elif defined(_MSC_VER) 54 | 55 | #define apg_bswap16(x) _byteswap_ushort(x) 56 | #define apg_bswap32(x) _byteswap_ulong(x) 57 | #define apg_bswap64(x) _byteswap_uint64(x) 58 | 59 | #else 60 | 61 | static inline uint16_t 62 | apg_bswap16(uint16_t) 63 | { 64 | return ((x << 8) & 0xff00) | (x >> 8) & 0x00ff)); 65 | } 66 | 67 | static inline uint32_t 68 | apg_bswap32(uint32_t x) 69 | { 70 | return ( 71 | ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) | 72 | ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff) 73 | ); 74 | } 75 | 76 | static inline uint64_t 77 | apg_bswap64(uint64_t x) 78 | { 79 | return ( 80 | ((x << 56) & 0xff00000000000000ULL) | 81 | ((x << 40) & 0x00ff000000000000ULL) | 82 | ((x << 24) & 0x0000ff0000000000ULL) | 83 | ((x << 8) & 0x000000ff00000000ULL) | 84 | ((x >> 8) & 0x00000000ff000000ULL) | 85 | ((x >> 24) & 0x0000000000ff0000ULL) | 86 | ((x >> 40) & 0x000000000000ff00ULL) | 87 | ((x >> 56) & 0x00000000000000ffULL); 88 | ); 89 | } 90 | 91 | #endif 92 | 93 | #if __BYTE_ORDER == __BIG_ENDIAN 94 | 95 | #define apg_hton16(x) (x) 96 | #define apg_hton32(x) (x) 97 | #define apg_hton64(x) (x) 98 | 99 | #define apg_ntoh16(x) (x) 100 | #define apg_ntoh32(x) (x) 101 | #define apg_ntoh64(x) (x) 102 | 103 | #elif __BYTE_ORDER == __LITTLE_ENDIAN 104 | 105 | #define apg_hton16(x) apg_bswap16(x) 106 | #define apg_hton32(x) apg_bswap32(x) 107 | #define apg_hton64(x) apg_bswap64(x) 108 | 109 | #define apg_ntoh16(x) apg_bswap16(x) 110 | #define apg_ntoh32(x) apg_bswap32(x) 111 | #define apg_ntoh64(x) apg_bswap64(x) 112 | 113 | #else 114 | 115 | #error Unsupported byte order. 116 | 117 | #endif 118 | 119 | 120 | static inline void 121 | pack_int16(char *buf, int16_t x) 122 | { 123 | uint16_t nx = apg_hton16((uint16_t)x); 124 | /* NOTE: the memcpy below is _important_ to support systems 125 | which disallow unaligned access. 
On systems, which do 126 | allow unaligned access it will be optimized away by the 127 | compiler 128 | */ 129 | memcpy(buf, &nx, sizeof(uint16_t)); 130 | } 131 | 132 | 133 | static inline void 134 | pack_int32(char *buf, int64_t x) 135 | { 136 | uint32_t nx = apg_hton32((uint32_t)x); 137 | memcpy(buf, &nx, sizeof(uint32_t)); 138 | } 139 | 140 | 141 | static inline void 142 | pack_int64(char *buf, int64_t x) 143 | { 144 | uint64_t nx = apg_hton64((uint64_t)x); 145 | memcpy(buf, &nx, sizeof(uint64_t)); 146 | } 147 | 148 | 149 | 150 | static inline uint16_t 151 | unpack_uint16(const char *buf) 152 | { 153 | uint16_t nx; 154 | memcpy((char *)&nx, buf, sizeof(uint16_t)); 155 | return apg_ntoh16(nx); 156 | } 157 | 158 | 159 | static inline int16_t 160 | unpack_int16(const char *buf) 161 | { 162 | return (int16_t)unpack_uint16(buf); 163 | } 164 | 165 | 166 | static inline uint32_t 167 | unpack_uint32(const char *buf) 168 | { 169 | uint32_t nx; 170 | memcpy((char *)&nx, buf, sizeof(uint32_t)); 171 | return apg_ntoh32(nx); 172 | } 173 | 174 | 175 | static inline int32_t 176 | unpack_int32(const char *buf) 177 | { 178 | return (int32_t)unpack_uint32(buf); 179 | } 180 | 181 | static inline uint64_t 182 | unpack_uint64(const char *buf) 183 | { 184 | uint64_t nx; 185 | memcpy((char *)&nx, buf, sizeof(uint64_t)); 186 | return apg_ntoh64(nx); 187 | } 188 | 189 | static inline int64_t 190 | unpack_int64(const char *buf) 191 | { 192 | return (int64_t)unpack_uint64(buf); 193 | } 194 | 195 | 196 | union _apg_floatconv { 197 | uint32_t i; 198 | float f; 199 | }; 200 | 201 | 202 | union _apg_doubleconv { 203 | uint64_t i; 204 | double f; 205 | }; 206 | 207 | 208 | static inline void 209 | pack_float(char *buf, float f) 210 | { 211 | union _apg_floatconv v; 212 | v.f = f; 213 | pack_int32(buf, (int32_t)v.i); 214 | } 215 | 216 | 217 | static inline void 218 | pack_double(char *buf, double f) 219 | { 220 | union _apg_doubleconv v; 221 | v.f = f; 222 | pack_int64(buf, (int64_t)v.i); 223 | } 224 | 225 | 226 | static inline float 227 | unpack_float(const char *buf) 228 | { 229 | union _apg_floatconv v; 230 | v.i = (uint32_t)unpack_int32(buf); 231 | return v.f; 232 | } 233 | 234 | 235 | static inline double 236 | unpack_double(const char *buf) 237 | { 238 | union _apg_doubleconv v; 239 | v.i = (uint64_t)unpack_int64(buf); 240 | return v.f; 241 | } 242 | -------------------------------------------------------------------------------- /hton.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | from libc.stdint cimport int16_t, int32_t, uint16_t, uint32_t, int64_t, uint64_t 9 | 10 | 11 | cdef extern from "./hton.h": 12 | cdef void pack_int16(char *buf, int16_t x); 13 | cdef void pack_int32(char *buf, int32_t x); 14 | cdef void pack_int64(char *buf, int64_t x); 15 | cdef void pack_float(char *buf, float f); 16 | cdef void pack_double(char *buf, double f); 17 | cdef int16_t unpack_int16(const char *buf); 18 | cdef uint16_t unpack_uint16(const char *buf); 19 | cdef int32_t unpack_int32(const char *buf); 20 | cdef uint32_t unpack_uint32(const char *buf); 21 | cdef int64_t unpack_int64(const char *buf); 22 | cdef uint64_t unpack_uint64(const char *buf); 23 | cdef float unpack_float(const char *buf); 24 | cdef double unpack_double(const char *buf); 25 | 
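The pack_*/unpack_* helpers declared above are plain network-byte-order (big-endian) conversions performed through memcpy so that unaligned buffers stay safe. The pure-Python sketch below shows the equivalent struct calls, only to make the byte order concrete; the *_py names are chosen for this illustration and are not part of pgproto.

# Illustrative sketch only -- pure-Python equivalents of the big-endian
# helpers in hton.h; names are chosen for the example.
import struct

def pack_int32_py(x: int) -> bytes:
    return struct.pack('!i', x)               # same bytes as pack_int32()

def unpack_int32_py(buf: bytes) -> int:
    return struct.unpack('!i', buf[:4])[0]    # same result as unpack_int32()

def unpack_double_py(buf: bytes) -> float:
    return struct.unpack('!d', buf[:8])[0]    # same result as unpack_double()

assert pack_int32_py(1) == b'\x00\x00\x00\x01'
assert unpack_int32_py(b'\xff\xff\xff\xff') == -1
assert unpack_double_py(struct.pack('!d', 2.5)) == 2.5
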
-------------------------------------------------------------------------------- /pgproto.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cimport cython 9 | cimport cpython 10 | 11 | from libc.stdint cimport int16_t, int32_t, uint16_t, uint32_t, int64_t, uint64_t 12 | 13 | 14 | include "./consts.pxi" 15 | include "./frb.pxd" 16 | include "./buffer.pxd" 17 | 18 | 19 | include "./codecs/__init__.pxd" 20 | -------------------------------------------------------------------------------- /pgproto.pyi: -------------------------------------------------------------------------------- 1 | import codecs 2 | import typing 3 | import uuid 4 | 5 | class CodecContext: 6 | def get_text_codec(self) -> codecs.CodecInfo: ... 7 | def get_json_decoder(self) -> object: ... 8 | def get_json_encoder(self) -> object: ... 9 | 10 | @typing.final 11 | class ReadBuffer: ... 12 | 13 | @typing.final 14 | class WriteBuffer: ... 15 | 16 | class BufferError(Exception): ... 17 | 18 | @typing.final 19 | class UUID(uuid.UUID): 20 | def __init__(self, inp: typing.AnyStr) -> None: ... 21 | -------------------------------------------------------------------------------- /pgproto.pyx: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cimport cython 9 | cimport cpython 10 | 11 | from . cimport cpythonx 12 | 13 | from libc.stdint cimport int8_t, uint8_t, int16_t, uint16_t, \ 14 | int32_t, uint32_t, int64_t, uint64_t, \ 15 | INT16_MIN, INT16_MAX, INT32_MIN, INT32_MAX, \ 16 | UINT32_MAX, INT64_MIN, INT64_MAX, UINT64_MAX 17 | 18 | 19 | from . cimport hton 20 | from . cimport tohex 21 | 22 | from .debug cimport PG_DEBUG 23 | from . 
import types as pgproto_types 24 | 25 | 26 | include "./consts.pxi" 27 | include "./frb.pyx" 28 | include "./buffer.pyx" 29 | include "./uuid.pyx" 30 | 31 | include "./codecs/context.pyx" 32 | 33 | include "./codecs/bytea.pyx" 34 | include "./codecs/text.pyx" 35 | 36 | include "./codecs/datetime.pyx" 37 | include "./codecs/float.pyx" 38 | include "./codecs/int.pyx" 39 | include "./codecs/json.pyx" 40 | include "./codecs/jsonpath.pyx" 41 | include "./codecs/uuid.pyx" 42 | include "./codecs/numeric.pyx" 43 | include "./codecs/bits.pyx" 44 | include "./codecs/geometry.pyx" 45 | include "./codecs/hstore.pyx" 46 | include "./codecs/misc.pyx" 47 | include "./codecs/network.pyx" 48 | include "./codecs/tid.pyx" 49 | include "./codecs/pg_snapshot.pyx" 50 | -------------------------------------------------------------------------------- /tohex.h: -------------------------------------------------------------------------------- 1 | #define HEX_PRELUDE \ 2 | const char *__hexm = "0123456789abcdef"; 3 | 4 | #define HEX_1_BYTE(buf, dest) \ 5 | { \ 6 | char byte = (buf)[0]; \ 7 | (dest)[0] = __hexm[(byte >> 4) & 0x0F]; \ 8 | (dest)[1] = __hexm[byte & 0x0F]; \ 9 | } 10 | 11 | #define HEX_2_BYTES(buf, dest) \ 12 | { \ 13 | HEX_1_BYTE(buf, dest) \ 14 | HEX_1_BYTE(buf + 1, dest + 2) \ 15 | } 16 | 17 | #define HEX_4_BYTES(buf, dest) \ 18 | { \ 19 | HEX_2_BYTES(buf, dest) \ 20 | HEX_2_BYTES(buf + 2, dest + 4) \ 21 | } 22 | 23 | #define HEX_8_BYTES(buf, dest) \ 24 | { \ 25 | HEX_4_BYTES(buf, dest) \ 26 | HEX_4_BYTES(buf + 4, dest + 8) \ 27 | } 28 | 29 | 30 | static inline void 31 | uuid_to_str(const char *source, char *dest) 32 | { 33 | HEX_PRELUDE 34 | 35 | HEX_4_BYTES(source, dest) 36 | dest[8] = '-'; 37 | HEX_2_BYTES(source + 4, dest + 9) 38 | dest[13] = '-'; 39 | HEX_2_BYTES(source + 6, dest + 14) 40 | dest[18] = '-'; 41 | HEX_2_BYTES(source + 8, dest + 19) 42 | dest[23] = '-'; 43 | HEX_4_BYTES(source + 10, dest + 24) 44 | HEX_2_BYTES(source + 14, dest + 32) 45 | } 46 | 47 | 48 | static inline void 49 | uuid_to_hex(const char *source, char *dest) 50 | { 51 | HEX_PRELUDE 52 | HEX_8_BYTES(source, dest) 53 | HEX_8_BYTES(source + 8, dest + 16) 54 | } 55 | -------------------------------------------------------------------------------- /tohex.pxd: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | 8 | cdef extern from "./tohex.h": 9 | cdef void uuid_to_str(const char *source, char *dest) 10 | cdef void uuid_to_hex(const char *source, char *dest) 11 | -------------------------------------------------------------------------------- /types.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2016-present the asyncpg authors and contributors 2 | # 3 | # 4 | # This module is part of asyncpg and is released under 5 | # the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | from __future__ import annotations 8 | 9 | import typing 10 | 11 | if typing.TYPE_CHECKING: 12 | import builtins 13 | import sys 14 | 15 | if sys.version_info < (3, 11): 16 | from typing_extensions import Self 17 | else: 18 | from typing import Self 19 | 20 | 21 | __all__ = ( 22 | 'BitString', 'Point', 'Path', 'Polygon', 23 | 'Box', 'Line', 'LineSegment', 'Circle', 24 | ) 25 | 26 | _BitOrderType = typing.Literal['big', 'little'] 27 
| 28 | 29 | class BitString: 30 | """Immutable representation of PostgreSQL `bit` and `varbit` types.""" 31 | 32 | __slots__ = '_bytes', '_bitlength' 33 | 34 | _bytes: bytes 35 | _bitlength: int 36 | 37 | def __init__(self, 38 | bitstring: builtins.bytes | None = None) -> None: 39 | if not bitstring: 40 | self._bytes = bytes() 41 | self._bitlength = 0 42 | else: 43 | bytelen = len(bitstring) // 8 + 1 44 | bytes_ = bytearray(bytelen) 45 | byte = 0 46 | byte_pos = 0 47 | bit_pos = 0 48 | 49 | for i, bit in enumerate(bitstring): 50 | if bit == ' ': # type: ignore 51 | continue 52 | bit = int(bit) 53 | if bit != 0 and bit != 1: 54 | raise ValueError( 55 | 'invalid bit value at position {}'.format(i)) 56 | 57 | byte |= bit << (8 - bit_pos - 1) 58 | bit_pos += 1 59 | if bit_pos == 8: 60 | bytes_[byte_pos] = byte 61 | byte = 0 62 | byte_pos += 1 63 | bit_pos = 0 64 | 65 | if bit_pos != 0: 66 | bytes_[byte_pos] = byte 67 | 68 | bitlen = byte_pos * 8 + bit_pos 69 | bytelen = byte_pos + (1 if bit_pos else 0) 70 | 71 | self._bytes = bytes(bytes_[:bytelen]) 72 | self._bitlength = bitlen 73 | 74 | @classmethod 75 | def frombytes(cls, 76 | bytes_: builtins.bytes | None = None, 77 | bitlength: int | None = None) -> Self: 78 | if bitlength is None: 79 | if bytes_ is None: 80 | bytes_ = bytes() 81 | bitlength = 0 82 | else: 83 | bitlength = len(bytes_) * 8 84 | else: 85 | if bytes_ is None: 86 | bytes_ = bytes(bitlength // 8 + 1) 87 | bitlength = bitlength 88 | else: 89 | bytes_len = len(bytes_) * 8 90 | 91 | if bytes_len == 0 and bitlength != 0: 92 | raise ValueError('invalid bit length specified') 93 | 94 | if bytes_len != 0 and bitlength == 0: 95 | raise ValueError('invalid bit length specified') 96 | 97 | if bitlength < bytes_len - 8: 98 | raise ValueError('invalid bit length specified') 99 | 100 | if bitlength > bytes_len: 101 | raise ValueError('invalid bit length specified') 102 | 103 | result = cls() 104 | result._bytes = bytes_ 105 | result._bitlength = bitlength 106 | 107 | return result 108 | 109 | @property 110 | def bytes(self) -> builtins.bytes: 111 | return self._bytes 112 | 113 | def as_string(self) -> str: 114 | s = '' 115 | 116 | for i in range(self._bitlength): 117 | s += str(self._getitem(i)) 118 | if i % 4 == 3: 119 | s += ' ' 120 | 121 | return s.strip() 122 | 123 | def to_int(self, bitorder: _BitOrderType = 'big', 124 | *, signed: bool = False) -> int: 125 | """Interpret the BitString as a Python int. 126 | Acts similarly to int.from_bytes. 127 | 128 | :param bitorder: 129 | Determines the bit order used to interpret the BitString. By 130 | default, this function uses Postgres conventions for casting bits 131 | to ints. If bitorder is 'big', the most significant bit is at the 132 | start of the string (this is the same as the default). If bitorder 133 | is 'little', the most significant bit is at the end of the string. 134 | 135 | :param bool signed: 136 | Determines whether two's complement is used to interpret the 137 | BitString. If signed is False, the returned value is always 138 | non-negative. 139 | 140 | :return int: An integer representing the BitString. Information about 141 | the BitString's exact length is lost. 142 | 143 | .. 
versionadded:: 0.18.0 144 | """ 145 | x = int.from_bytes(self._bytes, byteorder='big') 146 | x >>= -self._bitlength % 8 147 | if bitorder == 'big': 148 | pass 149 | elif bitorder == 'little': 150 | x = int(bin(x)[:1:-1].ljust(self._bitlength, '0'), 2) 151 | else: 152 | raise ValueError("bitorder must be either 'big' or 'little'") 153 | 154 | if signed and self._bitlength > 0 and x & (1 << (self._bitlength - 1)): 155 | x -= 1 << self._bitlength 156 | return x 157 | 158 | @classmethod 159 | def from_int(cls, x: int, length: int, 160 | bitorder: _BitOrderType = 'big', *, signed: bool = False) \ 161 | -> Self: 162 | """Represent the Python int x as a BitString. 163 | Acts similarly to int.to_bytes. 164 | 165 | :param int x: 166 | An integer to represent. Negative integers are represented in two's 167 | complement form, unless the argument signed is False, in which case 168 | negative integers raise an OverflowError. 169 | 170 | :param int length: 171 | The length of the resulting BitString. An OverflowError is raised 172 | if the integer is not representable in this many bits. 173 | 174 | :param bitorder: 175 | Determines the bit order used in the BitString representation. By 176 | default, this function uses Postgres conventions for casting ints 177 | to bits. If bitorder is 'big', the most significant bit is at the 178 | start of the string (this is the same as the default). If bitorder 179 | is 'little', the most significant bit is at the end of the string. 180 | 181 | :param bool signed: 182 | Determines whether two's complement is used in the BitString 183 | representation. If signed is False and a negative integer is given, 184 | an OverflowError is raised. 185 | 186 | :return BitString: A BitString representing the input integer, in the 187 | form specified by the other input args. 188 | 189 | .. 
versionadded:: 0.18.0
190 | """
191 | # Exception types are by analogy to int.to_bytes
192 | if length < 0:
193 | raise ValueError("length argument must be non-negative")
194 | elif length < x.bit_length():
195 | raise OverflowError("int too big to convert")
196 |
197 | if x < 0:
198 | if not signed:
199 | raise OverflowError("can't convert negative int to unsigned")
200 | x &= (1 << length) - 1
201 |
202 | if bitorder == 'big':
203 | pass
204 | elif bitorder == 'little':
205 | x = int(bin(x)[:1:-1].ljust(length, '0'), 2)
206 | else:
207 | raise ValueError("bitorder must be either 'big' or 'little'")
208 |
209 | x <<= (-length % 8)
210 | bytes_ = x.to_bytes((length + 7) // 8, byteorder='big')
211 | return cls.frombytes(bytes_, length)
212 |
213 | def __repr__(self) -> str:
214 | return '<BitString {}>'.format(self.as_string())
215 |
216 | __str__: typing.Callable[['BitString'], str] = __repr__
217 |
218 | def __eq__(self, other: object) -> bool:
219 | if not isinstance(other, BitString):
220 | return NotImplemented
221 |
222 | return (self._bytes == other._bytes and
223 | self._bitlength == other._bitlength)
224 |
225 | def __hash__(self) -> int:
226 | return hash((self._bytes, self._bitlength))
227 |
228 | def _getitem(self, i: int) -> int:
229 | byte = self._bytes[i // 8]
230 | shift = 8 - i % 8 - 1
231 | return (byte >> shift) & 0x1
232 |
233 | def __getitem__(self, i: int) -> int:
234 | if isinstance(i, slice):
235 | raise NotImplementedError('BitString does not support slices')
236 |
237 | if i >= self._bitlength:
238 | raise IndexError('index out of range')
239 |
240 | return self._getitem(i)
241 |
242 | def __len__(self) -> int:
243 | return self._bitlength
244 |
245 |
246 | class Point(typing.Tuple[float, float]):
247 | """Immutable representation of PostgreSQL `point` type."""
248 |
249 | __slots__ = ()
250 |
251 | def __new__(
252 | cls,
253 | x: (
254 | typing.SupportsFloat |
255 | typing.SupportsIndex |
256 | str |
257 | builtins.bytes |
258 | builtins.bytearray
259 | ),
260 | y: (
261 | typing.SupportsFloat |
262 | typing.SupportsIndex |
263 | str |
264 | builtins.bytes |
265 | builtins.bytearray
266 | )
267 | ) -> Self:
268 | return super().__new__(cls,
269 | typing.cast(typing.Any, (float(x), float(y))))
270 |
271 | def __repr__(self) -> str:
272 | return '{}.{}({})'.format(
273 | type(self).__module__,
274 | type(self).__name__,
275 | tuple.__repr__(self)
276 | )
277 |
278 | @property
279 | def x(self) -> float:
280 | return self[0]
281 |
282 | @property
283 | def y(self) -> float:
284 | return self[1]
285 |
286 |
287 | class Box(typing.Tuple[Point, Point]):
288 | """Immutable representation of PostgreSQL `box` type."""
289 |
290 | __slots__ = ()
291 |
292 | def __new__(cls, high: typing.Sequence[float],
293 | low: typing.Sequence[float]) -> Self:
294 | return super().__new__(cls,
295 | typing.cast(typing.Any, (Point(*high),
296 | Point(*low))))
297 |
298 | def __repr__(self) -> str:
299 | return '{}.{}({})'.format(
300 | type(self).__module__,
301 | type(self).__name__,
302 | tuple.__repr__(self)
303 | )
304 |
305 | @property
306 | def high(self) -> Point:
307 | return self[0]
308 |
309 | @property
310 | def low(self) -> Point:
311 | return self[1]
312 |
313 |
314 | class Line(typing.Tuple[float, float, float]):
315 | """Immutable representation of PostgreSQL `line` type."""
316 |
317 | __slots__ = ()
318 |
319 | def __new__(cls, A: float, B: float, C: float) -> Self:
320 | return super().__new__(cls, typing.cast(typing.Any, (A, B, C)))
321 |
322 | @property
323 | def A(self) -> float:
324 | return self[0] 325 | 326 | @property 327 | def B(self) -> float: 328 | return self[1] 329 | 330 | @property 331 | def C(self) -> float: 332 | return self[2] 333 | 334 | 335 | class LineSegment(typing.Tuple[Point, Point]): 336 | """Immutable representation of PostgreSQL `lseg` type.""" 337 | 338 | __slots__ = () 339 | 340 | def __new__(cls, p1: typing.Sequence[float], 341 | p2: typing.Sequence[float]) -> Self: 342 | return super().__new__(cls, 343 | typing.cast(typing.Any, (Point(*p1), 344 | Point(*p2)))) 345 | 346 | def __repr__(self) -> str: 347 | return '{}.{}({})'.format( 348 | type(self).__module__, 349 | type(self).__name__, 350 | tuple.__repr__(self) 351 | ) 352 | 353 | @property 354 | def p1(self) -> Point: 355 | return self[0] 356 | 357 | @property 358 | def p2(self) -> Point: 359 | return self[1] 360 | 361 | 362 | class Path: 363 | """Immutable representation of PostgreSQL `path` type.""" 364 | 365 | __slots__ = '_is_closed', 'points' 366 | 367 | points: typing.Tuple[Point, ...] 368 | 369 | def __init__(self, *points: typing.Sequence[float], 370 | is_closed: bool = False) -> None: 371 | self.points = tuple(Point(*p) for p in points) 372 | self._is_closed = is_closed 373 | 374 | @property 375 | def is_closed(self) -> bool: 376 | return self._is_closed 377 | 378 | def __eq__(self, other: object) -> bool: 379 | if not isinstance(other, Path): 380 | return NotImplemented 381 | 382 | return (self.points == other.points and 383 | self._is_closed == other._is_closed) 384 | 385 | def __hash__(self) -> int: 386 | return hash((self.points, self.is_closed)) 387 | 388 | def __iter__(self) -> typing.Iterator[Point]: 389 | return iter(self.points) 390 | 391 | def __len__(self) -> int: 392 | return len(self.points) 393 | 394 | @typing.overload 395 | def __getitem__(self, i: int) -> Point: 396 | ... 397 | 398 | @typing.overload 399 | def __getitem__(self, i: slice) -> typing.Tuple[Point, ...]: 400 | ... 
401 | 402 | def __getitem__( 403 | self, i: int | slice 404 | ) -> Point | typing.Tuple[Point, ...]: 405 | return self.points[i] 406 | 407 | def __contains__(self, point: object) -> bool: 408 | return point in self.points 409 | 410 | 411 | class Polygon(Path): 412 | """Immutable representation of PostgreSQL `polygon` type.""" 413 | 414 | __slots__ = () 415 | 416 | def __init__(self, *points: typing.Sequence[float]) -> None: 417 | # polygon is always closed 418 | super().__init__(*points, is_closed=True) 419 | 420 | 421 | class Circle(typing.Tuple[Point, float]): 422 | """Immutable representation of PostgreSQL `circle` type.""" 423 | 424 | __slots__ = () 425 | 426 | def __new__(cls, center: Point, radius: float) -> Self: 427 | return super().__new__(cls, typing.cast(typing.Any, (center, radius))) 428 | 429 | @property 430 | def center(self) -> Point: 431 | return self[0] 432 | 433 | @property 434 | def radius(self) -> float: 435 | return self[1] 436 | -------------------------------------------------------------------------------- /uuid.pyx: -------------------------------------------------------------------------------- 1 | import functools 2 | import uuid 3 | 4 | cimport cython 5 | cimport cpython 6 | 7 | from libc.stdint cimport uint8_t, int8_t 8 | from libc.string cimport memcpy, memcmp 9 | 10 | 11 | cdef extern from "Python.h": 12 | int PyUnicode_1BYTE_KIND 13 | const char* PyUnicode_AsUTF8AndSize( 14 | object unicode, Py_ssize_t *size) except NULL 15 | object PyUnicode_FromKindAndData( 16 | int kind, const void *buffer, Py_ssize_t size) 17 | 18 | 19 | cdef extern from "./tohex.h": 20 | cdef void uuid_to_str(const char *source, char *dest) 21 | cdef void uuid_to_hex(const char *source, char *dest) 22 | 23 | 24 | # A more efficient UUID type implementation 25 | # (6-7x faster than the starndard uuid.UUID): 26 | # 27 | # -= Benchmark results (less is better): =- 28 | # 29 | # std_UUID(bytes): 1.2368 30 | # c_UUID(bytes): * 0.1645 (7.52x) 31 | # object(): 0.1483 32 | # 33 | # std_UUID(str): 1.8038 34 | # c_UUID(str): * 0.2313 (7.80x) 35 | # 36 | # str(std_UUID()): 1.4625 37 | # str(c_UUID()): * 0.2681 (5.46x) 38 | # str(object()): 0.5975 39 | # 40 | # std_UUID().bytes: 0.3508 41 | # c_UUID().bytes: * 0.1068 (3.28x) 42 | # 43 | # std_UUID().int: 0.0871 44 | # c_UUID().int: * 0.0856 45 | # 46 | # std_UUID().hex: 0.4871 47 | # c_UUID().hex: * 0.1405 48 | # 49 | # hash(std_UUID()): 0.3635 50 | # hash(c_UUID()): * 0.1564 (2.32x) 51 | # 52 | # dct[std_UUID()]: 0.3319 53 | # dct[c_UUID()]: * 0.1570 (2.11x) 54 | # 55 | # std_UUID() ==: 0.3478 56 | # c_UUID() ==: * 0.0915 (3.80x) 57 | 58 | 59 | cdef char _hextable[256] 60 | _hextable[:] = [ 61 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 62 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 63 | -1,-1, 0,1,2,3,4,5,6,7,8,9,-1,-1,-1,-1,-1,-1,-1,10,11,12,13,14,15,-1, 64 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 65 | -1,-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 66 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 67 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 68 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 69 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 70 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 71 | -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 72 | ] 73 | 74 | 75 | cdef std_UUID = uuid.UUID 
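# (Illustrative note, not part of the original file: _hextable above maps the
# ASCII code of a hexadecimal digit to its 4-bit value; the entries for
# '0'..'9' hold 0..9, the entries for 'A'..'F' and 'a'..'f' hold 10..15, and
# every other byte maps to -1.  pg_uuid_bytes_from_str() below uses it to
# validate and decode a textual UUID in a single pass, packing two hex digits
# into each of the 16 output bytes.)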
76 |
77 |
78 | cdef pg_uuid_bytes_from_str(str u, char *out):
79 | cdef:
80 | const char *orig_buf
81 | Py_ssize_t size
82 | unsigned char ch
83 | uint8_t acc, part, acc_set
84 | int i, j
85 |
86 | orig_buf = PyUnicode_AsUTF8AndSize(u, &size)
87 | if size > 36 or size < 32:
88 | raise ValueError(
89 | f'invalid UUID {u!r}: '
90 | f'length must be between 32..36 characters, got {size}')
91 |
92 | acc_set = 0
93 | j = 0
94 | for i in range(size):
95 | ch = orig_buf[i]
96 | if ch == b'-':
97 | continue
98 |
99 | part = <uint8_t>_hextable[ch]
100 | if part == <uint8_t>-1:
101 | if ch >= 0x20 and ch <= 0x7e:
102 | raise ValueError(
103 | f'invalid UUID {u!r}: unexpected character {chr(ch)!r}')
104 | else:
105 | raise ValueError(f'invalid UUID {u!r}: unexpected character')
106 |
107 | if acc_set:
108 | acc |= part
109 | out[j] = acc
110 | acc_set = 0
111 | j += 1
112 | else:
113 | acc = (part << 4)
114 | acc_set = 1
115 |
116 | if j > 16 or (j == 16 and acc_set):
117 | raise ValueError(
118 | f'invalid UUID {u!r}: decodes to more than 16 bytes')
119 |
120 | if j != 16:
121 | raise ValueError(
122 | f'invalid UUID {u!r}: decodes to less than 16 bytes')
123 |
124 |
125 | cdef class __UUIDReplaceMe:
126 | pass
127 |
128 |
129 | cdef pg_uuid_from_buf(const char *buf):
130 | cdef:
131 | UUID u = <UUID>UUID.__new__(UUID)
132 | memcpy(u._data, buf, 16)
133 | return u
134 |
135 |
136 | @cython.final
137 | @cython.no_gc_clear
138 | cdef class UUID(__UUIDReplaceMe):
139 |
140 | cdef:
141 | char _data[16]
142 | object _int
143 | object _hash
144 | object __weakref__
145 |
146 | def __cinit__(self):
147 | self._int = None
148 | self._hash = None
149 |
150 | def __init__(self, inp):
151 | cdef:
152 | char *buf
153 | Py_ssize_t size
154 |
155 | if cpython.PyBytes_Check(inp):
156 | cpython.PyBytes_AsStringAndSize(inp, &buf, &size)
157 | if size != 16:
158 | raise ValueError(f'16 bytes were expected, got {size}')
159 | memcpy(self._data, buf, 16)
160 |
161 | elif cpython.PyUnicode_Check(inp):
162 | pg_uuid_bytes_from_str(inp, self._data)
163 | else:
164 | raise TypeError(f'a bytes or str object expected, got {inp!r}')
165 |
166 | @property
167 | def bytes(self):
168 | return cpython.PyBytes_FromStringAndSize(self._data, 16)
169 |
170 | @property
171 | def int(self):
172 | if self._int is None:
173 | # The cache is important because `self.int` can be
174 | # used multiple times by __hash__ etc.
175 | #
176 | # The or 0 works around a bug interaction between cpython
177 | # 3.10 and earlier and Cython ~3.0.11 in which
178 | # int.from_bytes returns a "non-canonical 0" and then
179 | # Cython's implementation of & mishandles it.
180 | # See cython/cython#6480.
181 | self._int = int.from_bytes(self.bytes, 'big') or 0
182 | return self._int
183 |
184 | @property
185 | def is_safe(self):
186 | return uuid.SafeUUID.unknown
187 |
188 | def __str__(self):
189 | cdef char out[36]
190 | uuid_to_str(self._data, out)
191 | return PyUnicode_FromKindAndData(PyUnicode_1BYTE_KIND, out, 36)
192 |
193 | @property
194 | def hex(self):
195 | cdef char out[32]
196 | uuid_to_hex(self._data, out)
197 | return PyUnicode_FromKindAndData(PyUnicode_1BYTE_KIND, out, 32)
198 |
199 | def __repr__(self):
200 | return f"UUID('{self}')"
201 |
202 | def __reduce__(self):
203 | return (type(self), (self.bytes,))
204 |
205 | def __eq__(self, other):
206 | if type(other) is UUID:
207 | return memcmp(self._data, (<UUID>other)._data, 16) == 0
208 | if isinstance(other, std_UUID):
209 | return self.int == other.int
210 | return NotImplemented
211 |
212 | def __ne__(self, other):
213 | if type(other) is UUID:
214 | return memcmp(self._data, (<UUID>other)._data, 16) != 0
215 | if isinstance(other, std_UUID):
216 | return self.int != other.int
217 | return NotImplemented
218 |
219 | def __lt__(self, other):
220 | if type(other) is UUID:
221 | return memcmp(self._data, (<UUID>other)._data, 16) < 0
222 | if isinstance(other, std_UUID):
223 | return self.int < other.int
224 | return NotImplemented
225 |
226 | def __gt__(self, other):
227 | if type(other) is UUID:
228 | return memcmp(self._data, (<UUID>other)._data, 16) > 0
229 | if isinstance(other, std_UUID):
230 | return self.int > other.int
231 | return NotImplemented
232 |
233 | def __le__(self, other):
234 | if type(other) is UUID:
235 | return memcmp(self._data, (<UUID>other)._data, 16) <= 0
236 | if isinstance(other, std_UUID):
237 | return self.int <= other.int
238 | return NotImplemented
239 |
240 | def __ge__(self, other):
241 | if type(other) is UUID:
242 | return memcmp(self._data, (<UUID>other)._data, 16) >= 0
243 | if isinstance(other, std_UUID):
244 | return self.int >= other.int
245 | return NotImplemented
246 |
247 | def __hash__(self):
248 | # In EdgeDB every schema object has a uuid and there are
249 | # huge hash-maps of them. We want UUID.__hash__ to be
250 | # as fast as possible.
251 | if self._hash is not None: 252 | return self._hash 253 | 254 | self._hash = hash(self.int) 255 | return self._hash 256 | 257 | def __int__(self): 258 | return self.int 259 | 260 | @property 261 | def bytes_le(self): 262 | bytes = self.bytes 263 | return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + 264 | bytes[8:]) 265 | 266 | @property 267 | def fields(self): 268 | return (self.time_low, self.time_mid, self.time_hi_version, 269 | self.clock_seq_hi_variant, self.clock_seq_low, self.node) 270 | 271 | @property 272 | def time_low(self): 273 | return self.int >> 96 274 | 275 | @property 276 | def time_mid(self): 277 | return (self.int >> 80) & 0xffff 278 | 279 | @property 280 | def time_hi_version(self): 281 | return (self.int >> 64) & 0xffff 282 | 283 | @property 284 | def clock_seq_hi_variant(self): 285 | return (self.int >> 56) & 0xff 286 | 287 | @property 288 | def clock_seq_low(self): 289 | return (self.int >> 48) & 0xff 290 | 291 | @property 292 | def time(self): 293 | return (((self.time_hi_version & 0x0fff) << 48) | 294 | (self.time_mid << 32) | self.time_low) 295 | 296 | @property 297 | def clock_seq(self): 298 | return (((self.clock_seq_hi_variant & 0x3f) << 8) | 299 | self.clock_seq_low) 300 | 301 | @property 302 | def node(self): 303 | return self.int & 0xffffffffffff 304 | 305 | @property 306 | def urn(self): 307 | return 'urn:uuid:' + str(self) 308 | 309 | @property 310 | def variant(self): 311 | if not self.int & (0x8000 << 48): 312 | return uuid.RESERVED_NCS 313 | elif not self.int & (0x4000 << 48): 314 | return uuid.RFC_4122 315 | elif not self.int & (0x2000 << 48): 316 | return uuid.RESERVED_MICROSOFT 317 | else: 318 | return uuid.RESERVED_FUTURE 319 | 320 | @property 321 | def version(self): 322 | # The version bits are only meaningful for RFC 4122 UUIDs. 323 | if self.variant == uuid.RFC_4122: 324 | return int((self.int >> 76) & 0xf) 325 | 326 | 327 | # 328 | # In order for `isinstance(pgproto.UUID, uuid.UUID)` to work, 329 | # patch __bases__ and __mro__ by injecting `uuid.UUID`. 330 | # 331 | # We apply brute-force here because the following pattern stopped 332 | # working with Python 3.8: 333 | # 334 | # cdef class OurUUID: 335 | # ... 336 | # 337 | # class UUID(OurUUID, uuid.UUID): 338 | # ... 339 | # 340 | # With Python 3.8 it now produces 341 | # 342 | # "TypeError: multiple bases have instance lay-out conflict" 343 | # 344 | # error. Maybe it's possible to fix this some other way, but 345 | # the best solution possible would be to just contribute our 346 | # faster UUID to the standard library and not have this problem 347 | # at all. For now this hack is pretty safe and should be 348 | # compatible with future Pythons for long enough. 349 | # 350 | assert UUID.__bases__[0] is __UUIDReplaceMe 351 | assert UUID.__mro__[1] is __UUIDReplaceMe 352 | cpython.Py_INCREF(std_UUID) 353 | cpython.PyTuple_SET_ITEM(UUID.__bases__, 0, std_UUID) 354 | cpython.Py_INCREF(std_UUID) 355 | cpython.PyTuple_SET_ITEM(UUID.__mro__, 1, std_UUID) 356 | # 357 | 358 | 359 | cdef pg_UUID = UUID 360 | --------------------------------------------------------------------------------
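
A short usage sketch of the Cython UUID type defined in uuid.pyx above (illustrative only; the import path assumes the extension is built and shipped inside asyncpg, where it is typically importable as asyncpg.pgproto.pgproto):

import uuid

from asyncpg.pgproto import pgproto  # assumed import path, see note above

u1 = pgproto.UUID('3f1b5a2e-8d4c-4b6e-9a7f-0c2d4e6f8a1b')  # parse from str
u2 = pgproto.UUID(u1.bytes)                                # parse from 16 raw bytes

assert isinstance(u1, uuid.UUID)        # holds thanks to the __bases__/__mro__ patch
assert u1 == u2 == uuid.UUID(str(u1))   # interoperates with the stdlib type
assert u1.hex == uuid.UUID(str(u1)).hex
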