├── .github └── workflows │ └── ci.yml ├── LICENSE ├── Makefile ├── ODBCLoader.cpp ├── README.md ├── bin └── act ├── ddl ├── install.sql └── uninstall.sql ├── examples ├── Tests.out └── sample_usage.sql └── tests ├── config ├── odbc.ini └── odbcinst.ini ├── copy_test.sql └── expected └── copy_test.out /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | # Allows you to run this workflow manually from the Actions tab 9 | workflow_dispatch: 10 | 11 | jobs: 12 | build: 13 | 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | - name: Set up MySQL server 19 | run: | 20 | docker run -d -p 3306:3306 -e MYSQL_ALLOW_EMPTY_PASSWORD=yes --name test-mysql mysql:8.0 21 | echo "MySQL startup ..." 22 | until docker exec test-mysql mysql -u root -e "SELECT 1;"; do \ 23 | echo "..."; \ 24 | sleep 1; \ 25 | done; 26 | docker exec -u root test-mysql mysql -u root -e "CREATE DATABASE testdb;" 27 | docker exec -u root test-mysql mysql -u root testdb -e "CREATE TABLE test_source (i integer, b boolean, f float, v varchar(32), c char(32), lv varchar(9999), bn binary(32), vb varbinary(32), lvb varbinary(9999), d date, t time, ts timestamp null, tz varchar(80), tsz varchar(80), n numeric(20,4));" 28 | docker exec -u root test-mysql /bin/bash -c "(echo \"INSERT INTO test_source VALUES (null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);\"; for i in \`seq 1 9\`; do echo \"INSERT INTO test_source VALUES (\$i, 1, \$i.5, 'test \$i', 'test \$i', 'test \$i', 'test \$i', 'test \$i', 'test \$i', '\$((\$i+11))00/1/\$i', '4:0\$i', '2038-01-0\$i 03:14:07', '1:2\$i:00', 'June 1, \$((\$i+11))00 03:2\$i EST', '123456.7890');\"; done) | mysql -u root testdb" 29 | docker exec -u root test-mysql mysql -u root testdb -e "select * from test_source;" 30 | - name: Set up a Vertica 
server 31 | timeout-minutes: 15 32 | run: | 33 | docker run -d -p 5433:5433 -p 5444:5444 \ 34 | -e ODBCSYSINI=/var/odbc-loader/tests/config \ 35 | --add-host=host.docker.internal:host-gateway \ 36 | --name vertica_docker \ 37 | opentext/vertica-ce:24.2.0-1 38 | echo "Vertica startup ..." 39 | until docker exec vertica_docker test -f /data/vertica/VMart/agent_start.out; do \ 40 | echo "..."; \ 41 | sleep 3; \ 42 | done; 43 | echo "Vertica is up" 44 | docker exec -u dbadmin vertica_docker /opt/vertica/bin/vsql -c "\l" 45 | docker exec -u dbadmin vertica_docker /opt/vertica/bin/vsql -c "select version()" 46 | - name: Build & Install UDx 47 | run: | 48 | docker exec -u root vertica_docker dnf -y install gcc-toolset-9-gcc-c++.x86_64 49 | docker exec -u root vertica_docker dnf -y install unixODBC-devel 50 | docker exec -u root vertica_docker dnf -y install pcre-devel 51 | docker exec -u root vertica_docker dnf -y install perl 52 | docker cp ${{ github.workspace }} vertica_docker:/var/odbc-loader 53 | docker exec -u root vertica_docker /bin/bash -c "sudo chown -R dbadmin:verticadba /var/odbc-loader" 54 | docker exec -u root -w /var/odbc-loader vertica_docker dnf install -y make 55 | docker exec -u dbadmin -w /var/odbc-loader vertica_docker scl enable gcc-toolset-9 "bash -c 'make; make install'" 56 | 57 | - name: Install ODBC clients 58 | run: | 59 | docker exec -w /var/odbc-loader -u root vertica_docker wget https://repo.mysql.com/mysql80-community-release-el8-9.noarch.rpm 60 | docker exec -w /var/odbc-loader -u root vertica_docker dnf install -y mysql80-community-release-el8-9.noarch.rpm 61 | docker exec -w /var/odbc-loader -u root vertica_docker dnf install -y mysql-connector-odbc 62 | 63 | - name: Run Tests 64 | run: | 65 | docker exec -w /var/odbc-loader -u dbadmin vertica_docker make test 66 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 
Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | TMPDIR ?= /tmp 3 | SDK_HOME ?= /opt/vertica/sdk 4 | SHELL = /bin/bash 5 | VSQL ?= /opt/vertica/bin/vsql 6 | LOADER_DEBUG = 0 7 | TARGET ?= ./lib 8 | 9 | ALL_CXXFLAGS := $(CXXFLAGS) -I $(SDK_HOME)/include -I $(SDK_HOME)/examples/HelperLibraries -fPIC -shared -Wall -g -std=c++11 -D_GLIBCXX_USE_CXX11_ABI=1 10 | ALL_CXXFLAGS += -DLOADER_DEBUG=$(LOADER_DEBUG) 11 | 12 | build: $(TARGET)/ODBCLoader.so 13 | ## See targets below for actual build logic 14 | 15 | clean: 16 | rm $(TARGET)/ODBCLoader.so 17 | 18 | install: build 19 | # install ODBCLoader 20 | @$(VSQL) -f ddl/install.sql 21 | 22 | uninstall: 23 | # uninstall ODBCLoader 24 | @$(VSQL) -f ddl/uninstall.sql 25 | 26 | example: 27 | @# Try uninstalling first, just in case we have a stale version around 28 | -@$(MAKE) -s uninstall >/dev/null 2>&1 29 | @$(MAKE) --no-print-dir install 30 | @# Use bash's "trap" to uninstall and still return an error 31 | @trap '$(MAKE) --no-print-dir uninstall' EXIT; 
$(MAKE) --no-print-dir test_example 32 | 33 | test_example: 34 | # run tests 35 | @$(VSQL) -f examples/sample_usage.sql > examples/Tests.actual 2>&1 36 | @# filter out variable messages (i.e., mariadb vs mysql) 37 | @diff -u examples/Tests.out <(perl -pe 's/^vsql:[\/_:\w\.]* /vsql: /; \ 38 | s/\[ODBC[^\]]*\]/[...]/g; \ 39 | s/\[mysql[^\]]*\]/[...]/g; \ 40 | s/(Error parsing .* )\(.*\)$$/$$1(...)/; \ 41 | s/mariadb/MySQL/ig; ' examples/Tests.actual) 42 | @echo ALL TESTS SUCCESSFUL 43 | 44 | test: 45 | @$(VSQL) -f tests/copy_test.sql > $(TMPDIR)/copy_test.out 2>&1 46 | @diff -u tests/expected/copy_test.out <(perl -pe 's/^vsql:[\/_:\w\.]* /vsql: /; \ 47 | s/\[ODBC[^\]]*\]/[...]/g; \ 48 | s/\[mysql[^\]]*\]/[...]/g; \ 49 | s/(Error parsing .* )\(.*\)$$/$$1(...)/; \ 50 | s/mariadb/MySQL/ig; ' $(TMPDIR)/copy_test.out) 51 | @echo ALL TESTS SUCCESSFUL 52 | 53 | .PHONY: build clean install uninstall example test_example test 54 | 55 | 56 | ## Actual build target 57 | $(TARGET)/ODBCLoader.so: ODBCLoader.cpp $(SDK_HOME)/include/Vertica.cpp $(SDK_HOME)/include/BuildInfo.h 58 | mkdir -p $(TARGET) 59 | $(CXX) $(ALL_CXXFLAGS) -o $@ $(SDK_HOME)/include/Vertica.cpp ODBCLoader.cpp -lodbc -lpcrecpp -lpcre 60 | -------------------------------------------------------------------------------- /ODBCLoader.cpp: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2005 - 2012 Vertica, an HP company -*- C++ -*- */ 2 | // vim:ru:sm:ts=4:et:tw=0 3 | 4 | #include "Vertica.h" 5 | #include "StringParsers.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | // To deal with TimeTz and TimestampTz. 18 | // No standard native-SQL representation for these, 19 | // so we ask for them as strings and re-parse them. 20 | // (Ew...) 
21 | #include "StringParsers.h" 22 | 23 | // To support Vertica SDK before 9.3: 24 | #ifndef SDK_BUILD_ASSERTIONS_H // conveniently doesn't exist before 9.3 25 | #define parseTimeTz(a,b,c,d,e,f) parseTimeTz(a,b,c,d,e) 26 | #define parseTimestampTz(a,b,c,d,e,f) parseTimestampTz(a,b,c,d,e) 27 | #define parseNumeric(a,b,c,d,e,f) parseNumeric(a,b,c,d,e) 28 | #endif 29 | 30 | #define MIN_ROWSET 1 // Min rowset value 31 | #define MAX_ROWSET 10000 // Max rowset value 32 | #define DEF_ROWSET 100 // Default rowset 33 | #define MAX_PRELEN 2048 // Max predicate length 34 | #define MAX_PRENUM 10 // Max predicate number 35 | #define REG_CASTRM R"(::\w+(\(.*?\))*)" 36 | #define REG_ANYMTC R"(\s*=\s*ANY\s*\(ARRAY\[(.*)\])" 37 | #define REG_ANYREP R"( IN(\1)" 38 | #define REG_TILDEM R"(\s*~~\s*)" 39 | #define REG_TILDER R"( LIKE )" 40 | #define REG_ENDSCO R"(\s*;\s*$)" 41 | #define REG_QUERYP R"(^\s*\(*\s*override_query\s*<\s*'\s*(.*)\s*'.*$)" 42 | 43 | using namespace Vertica; 44 | 45 | // Declare global variables: colInTable (# columns in source table), vidx (array containing the index of each column in the SELECT) 46 | int colInTable = 0; 47 | std::vector<int> vidx; 48 | // 49 | static inline TimeADT getTimeFromHMS(uint32 hour, uint8 min, uint8 sec) { 50 | return getTimeFromUnixTime(sec + min*60 + hour*3600); 51 | } 52 | 53 | class ODBCLoader : public UDParser { 54 | public: 55 | ODBCLoader() : quirks(NoQuirks) {} 56 | 57 | // Maximum length of diagnostic-message text 58 | // that we can receive from the ODBC driver. 59 | // Currently must fit on the stack. 60 | // Diagnostics messages are expected to be short, 61 | // but the spec does not define a max length. 62 | static const uint32 MAX_DIAG_MSG_TEXT_LENGTH = 1024; 63 | 64 | // Periodically, we need to break out of our 65 | // loop fetching data from the remote server and 66 | // let Vertica do some accounting. (Mostly check 67 | // to see if this query has been cancelled.) 
68 | // This knob sets how many times we should try to 69 | // read another row before doing so. 70 | // Each such break incurs the cost of a C++ 71 | // virtual function call; this number should be 72 | // big enough to effectively amortize that cost. 73 | static const uint32 ROWS_PER_BREAK = 10000; 74 | 75 | private: 76 | // Keep a copy of the information about each column. 77 | // Note that Vertica doesn't let us safely keep a reference to 78 | // the internal copy of this data structure that it shows us. 79 | // But keeping a copy is fine. 80 | SizedColumnTypes colInfo; 81 | 82 | // ODBC connection/query state 83 | SQLHENV env; 84 | SQLHDBC dbc; 85 | SQLHSTMT stmt; 86 | SQLSMALLINT numcols; 87 | SQLULEN nfrows; // Number of fetched rows 88 | size_t rowset; 89 | 90 | enum PerDBQuirks { 91 | NoQuirks = 0, 92 | Oracle 93 | }; 94 | 95 | PerDBQuirks quirks; 96 | 97 | // MF keeping this to re-use the code in the Fetch loop... 98 | struct Buf { 99 | SQLLEN len; 100 | SQLPOINTER buf; 101 | }; 102 | 103 | //std::vector col_data_bufs; 104 | // MF we're going to use rowset in "column binding format" so we need 105 | // for each retrieved column two arrays: 106 | // one containing "rowset" results 107 | // one containing "rowset" length indicators 108 | // resp and len are the pointers to the pointers array. 109 | SQLPOINTER *resp ; // result array pointers pointer 110 | SQLLEN **lenp ; // length array pointers pointer 111 | 112 | // MF we want to determine Vertica/ODBC types & sizes once and for all... 
113 | BaseDataOID *vtype ; // Vertica types pointer 114 | uint32 *stype ; // Vertica data type size 115 | 116 | StringParsers parser; 117 | 118 | // Gets the Vertica type of the specified column 119 | VerticaType getVerticaTypeOfCol(SQLSMALLINT colnum) { 120 | return colInfo.getColumnType(colnum); 121 | } 122 | 123 | // Gets the ODBC type of the specified column 124 | SQLSMALLINT getODBCTypeOfCol(SQLSMALLINT colnum) { 125 | VerticaType type = getVerticaTypeOfCol(colnum); 126 | switch (type.getTypeOid()) { 127 | case BoolOID: return SQL_BIT; 128 | case Int8OID: return SQL_BIGINT; 129 | case Float8OID: return SQL_DOUBLE; 130 | case CharOID: return SQL_CHAR; 131 | case VarcharOID: return SQL_LONGVARCHAR; 132 | case DateOID: return SQL_DATE; 133 | case TimeOID: return SQL_TIME; 134 | case TimestampOID: return SQL_TIMESTAMP; 135 | case TimestampTzOID: return SQL_VARCHAR; // Don't know how to deal with timezones in ODBC; just get them as a string and parse it 136 | case IntervalOID: return SQL_INTERVAL_DAY_TO_SECOND; 137 | case IntervalYMOID: return SQL_INTERVAL_YEAR_TO_MONTH; 138 | case TimeTzOID: return SQL_VARCHAR; // Don't know how to deal with timezones in ODBC; just get them as a string and parse it 139 | case NumericOID: return SQL_NUMERIC; 140 | case BinaryOID: return SQL_BINARY; 141 | case VarbinaryOID: return SQL_LONGVARBINARY; 142 | 143 | #ifndef NO_LONG_OIDS 144 | case LongVarbinaryOID: return SQL_LONGVARBINARY; 145 | case LongVarcharOID: return SQL_LONGVARCHAR; 146 | #endif // NO_LONG_OIDS 147 | 148 | default: vt_report_error(0, "Unrecognized Vertica type: %s (OID %llu)", type.getTypeStr(), type.getTypeOid()); return SQL_UNKNOWN_TYPE; // Should never get here; vt_report_error() shouldn't return 149 | } 150 | } 151 | 152 | // Gets the ODBC C data-type identifier for the specified column 153 | SQLSMALLINT getCTypeOfCol(SQLSMALLINT colnum) { 154 | VerticaType type = getVerticaTypeOfCol(colnum); 155 | switch (type.getTypeOid()) { 156 | case BoolOID: return 
SQL_C_BIT; 157 | case Int8OID: return (quirks != Oracle ? SQL_C_SBIGINT : SQL_C_CHAR); 158 | case Float8OID: return SQL_C_DOUBLE; 159 | case CharOID: return SQL_C_CHAR; 160 | case VarcharOID: return SQL_C_CHAR; 161 | case DateOID: return SQL_C_DATE; 162 | case TimeOID: return SQL_C_TIME; 163 | case TimestampOID: return SQL_C_TIMESTAMP; 164 | case TimestampTzOID: return SQL_C_CHAR; // Don't know how to deal with timezones in ODBC; just get them as a string and parse it 165 | case IntervalOID: return SQL_C_INTERVAL_DAY_TO_SECOND; 166 | case IntervalYMOID: return SQL_C_INTERVAL_YEAR_TO_MONTH; 167 | case TimeTzOID: return SQL_C_CHAR; // Don't know how to deal with timezones in ODBC; just get them as a string and parse it 168 | case NumericOID: return SQL_C_CHAR; 169 | case BinaryOID: return SQL_C_BINARY; 170 | case VarbinaryOID: return SQL_C_BINARY; 171 | 172 | #ifndef NO_LONG_OIDS 173 | case LongVarbinaryOID: return SQL_C_BINARY; 174 | case LongVarcharOID: return SQL_C_CHAR; 175 | #endif // NO_LONG_OIDS 176 | 177 | default: vt_report_error(0, "Unrecognized Vertica type %s (OID: %llu)", type.getTypeStr(), type.getTypeOid()); return SQL_UNKNOWN_TYPE; // Should never get here; vt_report_error() shouldn't return 178 | } 179 | } 180 | 181 | // Return the size of the memory allocation needed to store ODBC data for column 'colnum' 182 | uint32 getFieldSizeForCol(SQLSMALLINT colnum) { 183 | VerticaType type = getVerticaTypeOfCol(colnum); 184 | switch (type.getTypeOid()) { 185 | // Everything fixed-length is the same size in Vertica as ODBC 186 | case BoolOID: case Int8OID: case Float8OID: 187 | return type.getMaxSize(); 188 | 189 | // Everything string-based is the same size too. 190 | // Except ODBC may decide that we want a trailing null terminator. 
191 | case CharOID: case VarcharOID: case BinaryOID: case VarbinaryOID: 192 | 193 | #ifndef NO_LONG_OIDS 194 | case LongVarbinaryOID: case LongVarcharOID: 195 | #endif // NO_LONG_OIDS 196 | 197 | return type.getMaxSize() + 1; 198 | 199 | // Numeric is a special beast 200 | // Needs to be size of their header plus our(/their) data 201 | // Let's be lazy for now and just do their header plus our total size (includes our header) 202 | // EDIT: Just use strings for Numeric's as well; some DB's seem to have trouble scaling them. 203 | case NumericOID: 204 | return 128; 205 | 206 | // Things represented as char's because there's no good native type 207 | // could be just about any length. 208 | // So just make something up; hope it's long enough. 209 | case TimestampTzOID: case TimeTzOID: 210 | return 80; 211 | 212 | // Everything struct-based needs to be the size of that struct 213 | case DateOID: return sizeof(DATE_STRUCT); 214 | case TimeOID: return sizeof(TIME_STRUCT); 215 | case TimestampOID: return sizeof(TIMESTAMP_STRUCT); 216 | case IntervalOID: case IntervalYMOID: return sizeof(SQL_INTERVAL_STRUCT); 217 | 218 | // Otherwise it's a type we don't know about 219 | default: vt_report_error(0, "Unrecognized Vertica type: %s (OID: %llu)", type.getTypeStr(), type.getTypeOid()); return (uint32)-1; // Should never get here; vt_report_error() shouldn't return 220 | } 221 | } 222 | 223 | void handleReturnCode(ServerInterface &srvInterface, int r, SQLSMALLINT handle_type, SQLHANDLE handle, const char *fn_name) { 224 | // Check for error codes; retrieve error messages if any 225 | bool error = false; 226 | switch (r) { 227 | case SQL_SUCCESS: return; 228 | 229 | case SQL_ERROR: error = true; // Fall through 230 | case SQL_SUCCESS_WITH_INFO: { 231 | SQLCHAR state_rec[6]; 232 | SQLINTEGER native_code; 233 | SQLCHAR message_text[MAX_DIAG_MSG_TEXT_LENGTH]; 234 | SQLSMALLINT msg_length; 235 | SQLRETURN r_diag = SQLGetDiagRec(handle_type, handle, 1, &state_rec[0], &native_code, 
236 | &message_text[0], MAX_DIAG_MSG_TEXT_LENGTH, &msg_length); 237 | 238 | // No infinite loops! 239 | // Throw out secondary 'info' messages; 240 | // if our process for fetching info messages generates info messages, 241 | // we'll be at it for a while... 242 | if (r_diag != SQL_SUCCESS && r_diag != SQL_SUCCESS_WITH_INFO) { 243 | if (error) { 244 | vt_report_error(0, "ODBC Error: Error reported attempting to get the error message for another error! Unable to display the error message. Original error was in function %s.", fn_name); 245 | } else { 246 | srvInterface.log("ODBC Warning: Error reported attempting to get the warning message for another operation! Unable to display the warning message. Original warning was in function %s.", fn_name); 247 | } 248 | } 249 | 250 | const char *truncated = (msg_length > (SQLSMALLINT)MAX_DIAG_MSG_TEXT_LENGTH ? "... (message truncated)" : ""); 251 | 252 | if (error) { 253 | vt_report_error(0, "ODBC Error: %s failed with error code %s, native code %d [%s%s]", 254 | fn_name, &state_rec[0], (int)native_code, &message_text[0], truncated); 255 | } else { 256 | srvInterface.log("ODBC Warning: %s emitted a warning with error code %s, native code %d [%s%s]", 257 | fn_name, &state_rec[0], (int)native_code, &message_text[0], truncated); 258 | } 259 | 260 | break; 261 | } 262 | 263 | case SQL_INVALID_HANDLE: vt_report_error(0, "ODBC Error: %s failed with internal error SQL_INVALID_HANDLE", fn_name); break; 264 | 265 | case SQL_STILL_EXECUTING: vt_report_error(0, "ODBC Error: Synchronous function %s returned SQL_STILL_EXECUTING", fn_name); break; 266 | 267 | case SQL_NO_DATA: vt_report_error(0, "ODBC Error: %s returned SQL_NO_DATA. Were we cancelled remotely?", fn_name); break; 268 | 269 | case SQL_NEED_DATA: vt_report_error(0, "ODBC Error: %s returned SQL_NEED_DATA. Are we calling a stored procedure? 
We do not provide parameter values to remote databases; arguments must be hardcoded.", fn_name); break; 270 | 271 | // TODO: Apparently this isn't defined but is a valid return code sometimes? 272 | // case SQL_PARAM_DATA_AVAILABLE: vt_report_error(0, "ODBC Error: Returned SQL_PARAM_DATA_AVAILABLE. Remote server wants us to handle ODBC Parameters that we didn't set."); 273 | 274 | default: vt_report_error(0, 275 | "ODBC Error: Invalid return code from %s: %d. " \ 276 | "Expected values are %d (SQL_SUCCESS), %d (SQL_SUCCESS_WITH_INFO), %d (SQL_ERROR), " \ 277 | "%d (SQL_INVALID_HANDLE), %d (SQL_STILL_EXECUTING), %d (SQL_NO_DATA), or %d (SQL_NEED_DATA).", 278 | fn_name, r, SQL_SUCCESS, SQL_SUCCESS_WITH_INFO, SQL_ERROR, 279 | SQL_INVALID_HANDLE, SQL_STILL_EXECUTING, SQL_NO_DATA, SQL_NEED_DATA); 280 | } 281 | } 282 | 283 | public: 284 | 285 | virtual StreamState process(ServerInterface &srvInterface, DataBuffer &input, InputState input_state) { 286 | // Every so many iterations we want to 287 | // break out and check for Vertica cancel messages 288 | uint32 iter_counter = 0; 289 | 290 | SQLRETURN fetchRet; 291 | while (SQL_SUCCEEDED(fetchRet = SQLFetch(stmt))) { 292 | #if LOADER_DEBUG 293 | srvInterface.log("DEBUG Number of fetched rows/columns = %lu/%d", nfrows, numcols); 294 | #endif 295 | for (uint32 j = 0; j < (uint32)nfrows; j++) { // for each fetched row... 296 | for (SQLUSMALLINT i = 0; i < colInTable; i++) 297 | writer->setNull(i); // set all cols to NULL 298 | for (SQLUSMALLINT i = 0; i < numcols; i++) { // for each column... 299 | #if LOADER_DEBUG 300 | srvInterface.log("DEBUG nfrows=%u j=%u i=%d lenp[%d][%d]=%ld", (uint32)nfrows, j, i, i, j, lenp[i][j]); 301 | #endif 302 | 303 | // MF allocate & set Buf struct so we can re-use the original code in the Fetch loop... 
304 | Buf data ; 305 | 306 | // MF SQLPOINTER is a (void *) so it would generate an arithmetic warning if not casted 307 | data.buf = (SQLPOINTER)( (uint8_t *)resp[i] + stype[i] * j ) ; 308 | data.len = lenp[i][j] ; 309 | 310 | std::string rejectReason = "unrecognized syntax from remote database"; 311 | 312 | if ((int)data.len != (int)SQL_NULL_DATA ) { // (re)write NOT NULL cols 313 | switch (vtype[i]) { 314 | 315 | // Simple fixed-length types 316 | // Let C++ figure out how to convert from, ie., SQLBIGINT to vint. 317 | // (Both are native C++ types with appropriate meanings, so hopefully this will DTRT.) 318 | // (In most implementations they are probably the same type so this is a no-op.) 319 | case BoolOID: 320 | writer->setBool(vidx.at(i), (*(SQLCHAR*)data.buf == SQL_TRUE ? VTrue : VFalse)); 321 | break; 322 | case Int8OID: 323 | if (quirks != Oracle) { 324 | writer->setInt(vidx.at(i), *(SQLBIGINT*)data.buf); 325 | } else { 326 | // Oracle doesn't support int64 as a type. 327 | // So we get the data as a string and parse it to an int64. 
328 | if (data.len == SQL_NTS) { writer->setInt(vidx.at(i), vint_null); } 329 | else { writer->setInt(vidx.at(i), (vint)atoll((char*)data.buf)); } 330 | } 331 | break; 332 | case Float8OID: 333 | writer->setFloat(vidx.at(i), *(SQLDOUBLE*)data.buf); 334 | break; 335 | case CharOID: case BinaryOID: 336 | case VarcharOID: case VarbinaryOID: 337 | #ifndef NO_LONG_OIDS 338 | case LongVarcharOID: case LongVarbinaryOID: 339 | #endif 340 | if (data.len == SQL_NTS) { 341 | data.len = strnlen((char*)data.buf, getFieldSizeForCol(vidx.at(i))); 342 | } 343 | writer->getStringRef(vidx.at(i)).copy((char*)data.buf, data.len); 344 | break; 345 | 346 | // Date/Time functions that work in reasonably direct ways 347 | case DateOID: { 348 | SQL_DATE_STRUCT &s = *(SQL_DATE_STRUCT*)data.buf; 349 | struct tm d = {0,0,0,s.day,s.month-1,s.year-1900,0,0,-1}; 350 | time_t unixtime = mktime(&d); 351 | writer->setDate(vidx.at(i), getDateFromUnixTime(unixtime + d.tm_gmtoff)); 352 | break; 353 | } 354 | case TimeOID: { 355 | SQL_TIME_STRUCT &s = *(SQL_TIME_STRUCT*)data.buf; 356 | writer->setTime(vidx.at(i), getTimeFromHMS(s.hour, s.minute, s.second)); 357 | break; 358 | } 359 | case TimestampOID: { 360 | SQL_TIMESTAMP_STRUCT &s = *(SQL_TIMESTAMP_STRUCT*)data.buf; 361 | struct tm d = {s.second,s.minute,s.hour,s.day,s.month-1,s.year-1900,0,0,-1}; 362 | time_t unixtime = mktime(&d); 363 | // s.fraction is in nanoseconds; Vertica only does microsecond resolution 364 | // setTimestamp() wants time since epoch localtime. 
365 | writer->setTimestamp(vidx.at(i), getTimestampFromUnixTime(unixtime + d.tm_gmtoff) + s.fraction/1000); 366 | break; 367 | } 368 | 369 | // Date/Time functions that require string-parsing 370 | case TimeTzOID: { 371 | // Hacky workaround: Some databases (ie., us) send the empty string instead of NULL here 372 | if (((char*)data.buf)[0] == '\0') { writer->setNull(vidx.at(i)); break; } 373 | TimeADT t = 0; 374 | 375 | if (!parser.parseTimeTz((char*)data.buf, (size_t)data.len, i, t, getVerticaTypeOfCol(vidx.at(i)), rejectReason)) { 376 | vt_report_error(0, "Error parsing TimeTz: '%s' (%s)", (char*)data.buf, rejectReason.c_str()); // No rejected-rows for us! Die on failure. 377 | } 378 | writer->setTimeTz(vidx.at(i),t); 379 | break; 380 | } 381 | 382 | case TimestampTzOID: { 383 | // Hacky workaround: Some databases (ie., us) send the empty string instead of NULL here 384 | if (((char*)data.buf)[0] == '\0') { writer->setNull(vidx.at(i)); break; } 385 | TimestampTz t = 0; 386 | if (!parser.parseTimestampTz((char*)data.buf, (size_t)data.len, i, t, getVerticaTypeOfCol(vidx.at(i)), rejectReason)) { 387 | vt_report_error(0, "Error parsing TimestampTz: '%s' (%s)", (char*)data.buf, rejectReason.c_str()); // No rejected-rows for us! Die on failure. 
388 | } 389 | writer->setTimestampTz(vidx.at(i),t); 390 | break; 391 | } 392 | 393 | case IntervalOID: { 394 | SQL_INTERVAL_STRUCT &intv = *(SQL_INTERVAL_STRUCT*)data.buf; 395 | 396 | // Make sure we know what we're talking about 397 | if (intv.interval_type != SQL_IS_DAY_TO_SECOND) { 398 | vt_report_error(0, "Error parsing Interval: Is type %d; expecting type 10 (SQL_IS_HOUR_TO_SECOND)", (int)intv.interval_type); 399 | } 400 | 401 | // Vertica Intervals are stored as durations in microseconds 402 | Interval ret = ((intv.intval.day_second.day*usPerDay) 403 | + (intv.intval.day_second.hour*usPerHour) 404 | + (intv.intval.day_second.minute*usPerMinute) 405 | + (intv.intval.day_second.second*usPerSecond) 406 | + (intv.intval.day_second.fraction/1000)) // Fractions are in nanoseconds; we do microseconds 407 | * (intv.interval_sign == SQL_TRUE ? -1 : 1); // Apply the sign bit 408 | 409 | writer->setInterval(vidx.at(i), ret); 410 | break; 411 | } 412 | 413 | case IntervalYMOID: { 414 | SQL_INTERVAL_STRUCT &intv = *(SQL_INTERVAL_STRUCT*)data.buf; 415 | 416 | // Make sure we know what we're talking about 417 | if (intv.interval_type != SQL_IS_YEAR_TO_MONTH) { 418 | vt_report_error(0, "Error parsing Interval: Is type %d; expecting type 7 (SQL_IS_YEAR_TO_MONTH)", (int)intv.interval_type); 419 | } 420 | 421 | // Vertica Intervals are stored as durations in months 422 | Interval ret = ((intv.intval.year_month.year*MONTHS_PER_YEAR) 423 | + (intv.intval.year_month.month)) 424 | * (intv.interval_sign == SQL_TRUE ? -1 : 1); // Apply the sign bit 425 | 426 | writer->setInterval(vidx.at(i), ret); 427 | break; 428 | } 429 | 430 | // TODO: Sort out the binary ODBC Numeric format 431 | // and the abilities of various DB's to cast to/from it on demand; 432 | // make this use the native binary format and cast/convert as needed. 
433 |             case NumericOID: {
434 |               // Hacky workaround: Some databases may send the empty string instead of NULL here
435 |               if (((char*)data.buf)[0] == '\0') { writer->setNull(vidx.at(i)); break; }
436 |               if (!parser.parseNumeric((char*)data.buf, (size_t)data.len, i, writer->getNumericRef(vidx.at(i)), getVerticaTypeOfCol(vidx.at(i)), rejectReason)) {
437 |                   vt_report_error(0, "Error parsing Numeric: '%s' (%s)", (char*)data.buf, rejectReason.c_str()); // No rejected-rows for us! Die on failure.
438 |               }
439 |               break;
440 |             }
441 | 
442 |             default:
443 |                 vt_report_error(0, "Unrecognized Vertica type %s (OID %llu)",
444 |                     getVerticaTypeOfCol(vidx.at(i)).getTypeStr(),
445 |                     getVerticaTypeOfCol(vidx.at(i)).getTypeOid());
446 |             } // End SWITCH
447 |           } // End IF NOT NULL
448 |         } // End FOR EACH COLUMN
449 | 
450 |         writer->next(); // advance to the next output row (writes the current row and moves the cursor forward)
451 | 
452 |         if (++iter_counter == ROWS_PER_BREAK) {
453 |             // Periodically yield and let upstream do its thing
454 |             return KEEP_GOING;
455 |         }
456 |       } // End FOR EACH ROW
457 |     } // End FETCH LOOP
458 | 
459 |     // If SQLFetch() failed for some reason, report it
460 |     // But, SQLFetch() is allowed to return SQL_NO_DATA from time to time.
461 |     // TODO: Maybe be smarter if we're getting SQL_NO_DATA forever / apparently stuck?
462 | if (fetchRet != SQL_NO_DATA) { 463 | handleReturnCode(srvInterface, fetchRet, SQL_HANDLE_STMT, stmt, "SQLFetch()"); 464 | } 465 | 466 | return DONE; 467 | } // End PROCESS 468 | 469 | void setQuirksMode(ServerInterface &srvInterface, SQLHDBC &dbc) { 470 | // Set the quirks mode based on the DB name 471 | SQLSMALLINT len; 472 | char buf[32]; 473 | memset(&buf[0], 0, 32); 474 | 475 | SQLGetInfo(dbc, SQL_SERVER_NAME, buf, 476 | sizeof(buf) - 1 /* leave a byte for null-termination */, 477 | &len); 478 | srvInterface.log("ODBC Loader: Connecting to server of type '%s'", buf); 479 | 480 | std::string db_type(buf, len); 481 | if (db_type == "ORCL") { 482 | quirks = Oracle; 483 | } 484 | } 485 | 486 | virtual void setup(ServerInterface &srvInterface, SizedColumnTypes &returnType) { 487 | // Capture our column types 488 | colInfo = returnType; 489 | bool src_rfilter = true ; // Rows filtering flag 490 | bool src_cfilter = true ; // Column filtering flag 491 | bool oq_flag = false ; // Query Ovverride flag 492 | std::string connect = "" ; // Connect string 493 | std::string query = "" ; // Remote system query string 494 | std::string predicates = "" ; // Predicates 495 | 496 | // Read User defined Session parameters 497 | if (srvInterface.getUDSessionParamReader("library").containsParameter("src_rfilter")) { 498 | src_rfilter = ( srvInterface.getUDSessionParamReader("library").getStringRef("src_rfilter").str() == "f" ) ? 
false : true ; 499 | } else if (srvInterface.getParamReader().containsParameter("src_rfilter")) { 500 | src_rfilter = srvInterface.getParamReader().getBoolRef("src_rfilter") ; 501 | } 502 | if (srvInterface.getUDSessionParamReader("library").containsParameter("override_query")) { 503 | query = srvInterface.getUDSessionParamReader("library").getStringRef("override_query").str() ; 504 | } else { 505 | query = srvInterface.getParamReader().getStringRef("query").str(); 506 | } 507 | if (srvInterface.getUDSessionParamReader("library").containsParameter("src_cfilter")) { 508 | src_cfilter = ( srvInterface.getUDSessionParamReader("library").getStringRef("src_cfilter").str() == "f" ) ? false : true ; 509 | } else if (srvInterface.getParamReader().containsParameter("src_cfilter")) { 510 | src_cfilter = srvInterface.getParamReader().getBoolRef("src_cfilter") ; 511 | } 512 | connect = srvInterface.getParamReader().getStringRef("connect").str(); 513 | #if LOADER_DEBUG 514 | srvInterface.log("DEBUG Initial connect=<%s>", connect.c_str()); 515 | srvInterface.log("DEBUG Initial query=<%s>", query.c_str()); 516 | srvInterface.log("DEBUG SETUP src_rfilter is %s", src_rfilter ? "true" : "false" ); 517 | srvInterface.log("DEBUG SETUP src_cfilter is %s", src_cfilter ? "true" : "false" ); 518 | #endif 519 | 520 | // Check Connection string, Query and Rowset "public" parameters 521 | 522 | // Check "rowset" parameter 523 | if (srvInterface.getParamReader().containsParameter("rowset")) { 524 | vint rowset_param = srvInterface.getParamReader().getIntRef("rowset") ; 525 | if ( rowset_param < MIN_ROWSET || rowset_param > MAX_ROWSET ) 526 | vt_report_error(0, "Error: Invalid rowset=%zd. 
Permitted values between %d and %d", rowset_param, MIN_ROWSET, MAX_ROWSET); 527 | else 528 | rowset = (size_t) rowset_param ; 529 | } else { 530 | rowset = DEF_ROWSET ; // use default if not set 531 | } 532 | 533 | // Check "hidden" parameters __pred_#__ to filter out rows 534 | char pred[16] ; 535 | for ( unsigned int k = 0, l = 0 ; k < MAX_PRENUM ; k++ ) { 536 | snprintf(pred, sizeof(pred), "__pred_%u__", k ) ; 537 | if (srvInterface.getParamReader().containsParameter(pred)) { 538 | std::string mpred = srvInterface.getParamReader().getStringRef(pred).str() ; 539 | #if LOADER_DEBUG 540 | srvInterface.log("DEBUG predicate [%s] length=%zu, string=<%s>", pred, strlen(mpred.c_str()), mpred.c_str()); 541 | #endif 542 | if ( pcrecpp::RE(REG_QUERYP, pcrecpp::RE_Options(PCRE_DOTALL)).FullMatch(mpred) ) { 543 | pcrecpp::RE(REG_QUERYP, pcrecpp::RE_Options(PCRE_DOTALL)).GlobalReplace("\\1", &mpred) ; 544 | query = mpred ; 545 | oq_flag = true ; 546 | #if LOADER_DEBUG 547 | srvInterface.log("DEBUG new query length=%zu, new query string=<%s>",query.length(), query.c_str()); 548 | #endif 549 | } else if ( src_rfilter ) { 550 | pcrecpp::RE(REG_ANYMTC).GlobalReplace(REG_ANYREP, &mpred) ; // to replace ANY(ARRAY()) with IN() 551 | pcrecpp::RE(REG_TILDEM).GlobalReplace(REG_TILDER, &mpred) ; // to replace ~~ with LIKE 552 | if ( l++ ) 553 | predicates += " AND " + mpred ; 554 | else 555 | predicates += " WHERE " + mpred ; 556 | } 557 | } else { 558 | break ; 559 | } 560 | } 561 | 562 | // Remove ending semicolon from "query" (if any) 563 | pcrecpp::RE(REG_ENDSCO).GlobalReplace("", &query) ; 564 | 565 | // Check "hidden" parameters __query_col_name__ and __query_col_idx__ to filter out columns 566 | if ( src_cfilter ) { 567 | if (srvInterface.getParamReader().containsParameter("__query_col_name__")) { 568 | if (srvInterface.getParamReader().containsParameter("__query_col_idx__")) { 569 | colInTable = (int)colInfo.getColumnCount() ; 570 | #if LOADER_DEBUG 571 | srvInterface.log("DEBUG 
__query_col_name__=<%s>",srvInterface.getParamReader().getStringRef("__query_col_name__").str().c_str()); 572 | srvInterface.log("DEBUG __query_col_idx__=<%s>",srvInterface.getParamReader().getStringRef("__query_col_idx__").str().c_str()); 573 | srvInterface.log("-----> External Table Columns, colInTable=<%d>", colInTable); 574 | #endif 575 | std::string slist=srvInterface.getParamReader().getStringRef("__query_col_name__").str(); 576 | std::stringstream ss_idx(srvInterface.getParamReader().getStringRef("__query_col_idx__").str()); 577 | std::string tk_idx ; 578 | vidx.clear(); 579 | while (std::getline(ss_idx, tk_idx, ',')) { 580 | vidx.push_back(stoi(tk_idx)); 581 | } 582 | 583 | // MF to remove Vertica casts (::) 584 | pcrecpp::RE(REG_CASTRM).GlobalReplace("", &slist) ; 585 | query = "SELECT " + slist + " FROM ( " + query + " ) sq" ; 586 | } else { 587 | query = "SELECT " + 588 | srvInterface.getParamReader().getStringRef("__query_col_name__").str() + 589 | " FROM ( " + 590 | query + 591 | " ) sq" ; 592 | } 593 | } 594 | 595 | } else { 596 | query = oq_flag ? "SELECT '.' 
AS override_query, sq.* FROM ( " + query + " ) sq" : "SELECT * FROM ( " + query + " ) sq" ; 597 | } 598 | 599 | // Append predicates to outer SELECT 600 | if ( src_rfilter ) 601 | query += predicates ; 602 | 603 | SQLRETURN r; 604 | #if LOADER_DEBUG 605 | srvInterface.log("DEBUG query=%s", query.c_str()); 606 | srvInterface.log("DEBUG rowset=%zu", rowset); 607 | #endif 608 | 609 | // Establish an ODBC connection 610 | r = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env); 611 | handleReturnCode(srvInterface, r, SQL_HANDLE_ENV, env, "SQLAllocHandle()"); 612 | 613 | r = SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION, (void*)SQL_OV_ODBC3, 0); 614 | handleReturnCode(srvInterface, r, SQL_HANDLE_ENV, env, "SQLSetEnvAttr()"); 615 | 616 | r = SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc); 617 | handleReturnCode(srvInterface, r, SQL_HANDLE_DBC, dbc, "SQLAllocHandle(SQL_HANDLE_DBC)"); 618 | 619 | r = SQLDriverConnect(dbc, NULL, (SQLCHAR*)connect.c_str(), SQL_NTS, NULL, 0, NULL, SQL_DRIVER_COMPLETE); 620 | handleReturnCode(srvInterface, r, SQL_HANDLE_DBC, dbc, "SQLDriverConnect()"); 621 | 622 | // We have a connection; now we know enough to figure out 623 | // which DB we have to customize to 624 | setQuirksMode(srvInterface, dbc); 625 | 626 | r = SQLAllocHandle(SQL_HANDLE_STMT, dbc, &stmt); 627 | handleReturnCode(srvInterface, r, SQL_HANDLE_STMT, stmt, "SQLAllocHandle(SQL_HANDLE_STMT)"); 628 | 629 | // Set bind by column statement attribute: 630 | r = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_BIND_TYPE, (SQLPOINTER)SQL_BIND_BY_COLUMN, 0) ; 631 | handleReturnCode(srvInterface, r, SQL_HANDLE_STMT, stmt, "SQLSetStmtAttr(SQL_ATTR_ROW_BIND_TYPE)"); 632 | 633 | // Set ROW_ARRAY_SIZE statement attribute: 634 | r = SQLSetStmtAttr(stmt, SQL_ATTR_ROW_ARRAY_SIZE, (SQLPOINTER)rowset, 0) ; 635 | handleReturnCode(srvInterface, r, SQL_HANDLE_STMT, stmt, "SQLSetStmtAttr(SQL_ATTR_ROW_ARRAY_SIZE)"); 636 | 637 | // Set ROWS_FETCHED_PTR statement attribute: 638 | r = SQLSetStmtAttr(stmt, 
SQL_ATTR_ROWS_FETCHED_PTR, &nfrows, 0) ; 639 | handleReturnCode(srvInterface, r, SQL_HANDLE_STMT, stmt, "SQLSetStmtAttr(SQL_ATTR_ROWS_FETCHED_PTR)"); 640 | 641 | r = SQLExecDirect(stmt, (SQLCHAR*)query.c_str(), SQL_NTS); 642 | handleReturnCode(srvInterface, r, SQL_HANDLE_STMT, stmt, "SQLExecDirect()"); 643 | 644 | r = SQLNumResultCols(stmt, &numcols); 645 | handleReturnCode(srvInterface, r, SQL_HANDLE_STMT, stmt, "SQLNumResultCols()"); 646 | 647 | // Allocate space for result & length array pointers 648 | resp = (SQLPOINTER *)srvInterface.allocator->alloc(numcols * sizeof(SQLPOINTER)) ; 649 | lenp = (SQLLEN **)srvInterface.allocator->alloc(numcols * sizeof(SQLLEN *)) ; 650 | 651 | // Allocate space for Vertica data types OID and size 652 | vtype = (BaseDataOID *)srvInterface.allocator->alloc(numcols * sizeof(BaseDataOID)) ; 653 | stype = (uint32 *)srvInterface.allocator->alloc(numcols * sizeof(uint32)) ; 654 | 655 | // Set up column-data buffers 656 | // Bind to the columns in question 657 | for (SQLSMALLINT i = 0; i < numcols; i++) { 658 | vtype[i] = getVerticaTypeOfCol(vidx.at(i)).getTypeOid(); 659 | stype[i] = getFieldSizeForCol(vidx.at(i)) ; 660 | #if LOADER_DEBUG 661 | srvInterface.log("DEBUG i=%d rowset=%zu stype[i]=%d", i, rowset, stype[i]); 662 | #endif 663 | resp[i] = (SQLPOINTER)srvInterface.allocator->alloc(stype[i] * rowset); 664 | lenp[i] = (SQLLEN *)srvInterface.allocator->alloc(sizeof(SQLLEN) * rowset); 665 | 666 | r = SQLBindCol(stmt, i+1, getCTypeOfCol(vidx.at(i)), resp[i], stype[i], lenp[i]); 667 | handleReturnCode(srvInterface, r, SQL_HANDLE_STMT, stmt, "SQLBindCol()"); 668 | } 669 | } 670 | 671 | virtual void destroy(ServerInterface &srvInterface, SizedColumnTypes &returnType) { 672 | // Fix for Issue #1. Commit before calling SQLDisconnect to avoid HY010 error. 
673 | SQLRETURN r_end_tran = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_COMMIT); 674 | handleReturnCode(srvInterface, r_end_tran, SQL_HANDLE_DBC, dbc, "SQLEndTran()"); 675 | 676 | // Try to free even on error, to minimize the risk of memory leaks. 677 | // But do check for errors in the end. 678 | SQLRETURN r_disconnect = SQLDisconnect(dbc); 679 | SQLRETURN r_free_dbc = SQLFreeHandle(SQL_HANDLE_DBC, dbc); 680 | SQLRETURN r_free_env = SQLFreeHandle(SQL_HANDLE_ENV, env); 681 | 682 | handleReturnCode(srvInterface, r_disconnect, SQL_HANDLE_DBC, dbc, "SQLDisconnect()"); 683 | handleReturnCode(srvInterface, r_free_dbc, SQL_HANDLE_DBC, dbc, "SQLFreeHandle(SQL_HANDLE_DBC)"); 684 | handleReturnCode(srvInterface, r_free_env, SQL_HANDLE_ENV, env, "SQLFreeHandle(SQL_HANDLE_ENV)"); 685 | } 686 | }; 687 | 688 | class ODBCLoaderFactory : public ParserFactory { 689 | public: 690 | virtual void plan(ServerInterface &srvInterface, 691 | PerColumnParamReader &perColumnParamReader, 692 | PlanContext &planCtxt) { 693 | if (!srvInterface.getParamReader().containsParameter("connect")) { 694 | vt_report_error(0, "Error: ODBCConnect requires a 'connect' string containing ODBC connect information (at minimum, 'DSN=myDSN' for some myDSN in odbc.ini)"); 695 | } 696 | if (!srvInterface.getParamReader().containsParameter("query")) { 697 | vt_report_error(0, "Error: ODBCConnect requires a 'query' string, the query to execute on the remote system"); 698 | } 699 | } 700 | 701 | virtual UDParser* prepare(ServerInterface &srvInterface, 702 | PerColumnParamReader &perColumnParamReader, 703 | PlanContext &planCtxt, 704 | const SizedColumnTypes &returnType) 705 | { 706 | return vt_createFuncObj(srvInterface.allocator, ODBCLoader); 707 | } 708 | 709 | virtual void getParameterType(ServerInterface &srvInterface, 710 | SizedColumnTypes ¶meterTypes) { 711 | parameterTypes.addVarchar(65000, "connect"); 712 | parameterTypes.addVarchar(65000, "query"); 713 | parameterTypes.addVarchar(65000, "__query_col_name__"); 
714 | parameterTypes.addVarchar(65000, "__query_col_idx__"); 715 | char pred[16] ; 716 | for ( unsigned int k = 0 ; k < MAX_PRENUM ; k++ ) { 717 | snprintf(pred, sizeof(pred), "__pred_%u__", k ) ; 718 | parameterTypes.addVarchar(MAX_PRELEN, pred); 719 | } 720 | parameterTypes.addInt("rowset"); 721 | parameterTypes.addBool("src_rfilter"); 722 | parameterTypes.addBool("src_cfilter"); 723 | } 724 | }; 725 | 726 | RegisterFactory(ODBCLoaderFactory); 727 | 728 | 729 | // ODBCLoader does all the real work. 730 | // This is basically a stub that tells Vertica to run the query on the current node only. 731 | class ODBCSource : public UDSource { 732 | public: 733 | virtual StreamState process(ServerInterface &srvInterface, DataBuffer &output) { 734 | if (output.size < 1) return OUTPUT_NEEDED; 735 | output.offset = 1; 736 | return DONE; 737 | } 738 | }; 739 | 740 | class ODBCSourceFactory : public SourceFactory { 741 | public: 742 | 743 | virtual void plan(ServerInterface &srvInterface, 744 | NodeSpecifyingPlanContext &planCtxt) { 745 | // Make the query only run on the current node. 
746 | std::vector nodes; 747 | nodes.push_back(srvInterface.getCurrentNodeName()); 748 | planCtxt.setTargetNodes(nodes); 749 | } 750 | 751 | 752 | virtual std::vector prepareUDSources(ServerInterface &srvInterface, 753 | NodeSpecifyingPlanContext &planCtxt) { 754 | std::vector retVal; 755 | retVal.push_back(vt_createFuncObj(srvInterface.allocator, ODBCSource)); 756 | return retVal; 757 | } 758 | }; 759 | RegisterFactory(ODBCSourceFactory); 760 | 761 | // Library Metadata 762 | RegisterLibrary ( 763 | "Vertica Team", 764 | __DATE__, 765 | "0.10.6", 766 | "v11.x.x", 767 | "TBD", 768 | "With this loader Vertica can COPY and SELECT from any ODBC data source", 769 | "", 770 | "" 771 | ); 772 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![License](https://img.shields.io/badge/License-Apache%202.0-orange.svg)](https://opensource.org/licenses/Apache-2.0) 2 | [![test status](https://github.com/vertica/ODBC-Loader/actions/workflows/ci.yml/badge.svg)](https://github.com/vertica/ODBC-Loader/actions/workflows/ci.yml) 3 | 4 | ## INTRODUCTION 5 | This package contains two User-Defined Load functions, **ODBCSource()** and **ODBCLoader()**, that can be used to: 6 | - load data directly from a remote database 7 | - run queries against remote databases from Vertica (for example to join local Vertica-managed tables with MySQL, PostgreSQL, etc.) 8 | 9 | Data retrieved from external databases is neither converted into an intermediate file formats nor landed to disk; the data is fetched over the network via ODBC and copied directly into Vertica as it is received. When used with Vertica versions 10.1 and above *predicate pushdown* and *column filtering* is applied to the remote database data extraction process. 
10 | 
11 | ## INSTALLING / UNINSTALLING
12 | ### Prerequisites
13 | In order to install the ODBCLoader package you need to install on **all nodes of your Vertica cluster**:
14 | - an ODBC Driver Manager. This library has been tested with unixODBC. You need to install the development libraries (for example: ``yum install unixODBC-devel``)
15 | - the ODBC Drivers to interface the remote databases
16 | - Perl Compatible Regular Expression library (``yum install pcre-devel pcre-cpp``)
17 | 
18 | In order to compile the ODBCLoader you also have to setup a development environment as defined in the standard documentation ([Setting Up a Development Environment](https://www.vertica.com/docs/10.1.x/HTML/Content/Authoring/ExtendingVertica/UDx/DevEnvironment.htm)).
19 | 
20 | ### Building and installing the library
21 | This operation has to be executed as ``dbadmin`` from **one of the nodes of the cluster** (Vertica will *propagate* the libraries across the cluster):
22 | ```
23 | $ make
24 | $ make install
25 | ```
26 | Please check/modify ``ddl/install.sql`` to build the library "FENCED" or "UNFENCED"
27 | 
28 | ### Uninstalling the library
29 | To remove the ODBCLoader library from your cluster:
30 | ```
31 | $ make uninstall
32 | ```
33 | 
34 | ## Usage
35 | 
36 | The ODBCLoader can be used to load data from external databases or to query non-Vertica databases through external tables.
37 | 
38 | ### Data loading
39 | The general syntax to load data from external databases via ODBCLoader is:
40 | ```
41 | COPY myschema.myverticatable
42 | WITH SOURCE ODBCSource() PARSER ODBCLoader(
43 | connect='DSN=some_odbc_dsn;',
44 | query='select * from remote_table',
45 | [rowset=<number of rows>]
46 | )
47 | ;
48 | ```
49 | where ``rowset`` is an optional parameter to define the number of rows fetched from the remote database in each SQLFetch() call (default = 100). Increasing this parameter can improve the performance but will also increase memory usage.
50 | 51 | This will cause Vertica to connect to the remote database identified by the given "connect" string and execute the given query. It will then fetch the results of the query and load them into the table ``myschema.myverticatable``. 52 | 53 | ``myverticatable`` must have the same number of columns as ``remote_table``. The column types must also match up, or the ODBC driver for the remote database must be able to cast the column types to the Vertica types. If necessary, you can always explicitly cast on the remote side by modifying the query, or on the local side with a Vertica COPY expression. 54 | 55 | The ``query`` argument to ODBCLoader can be any valid SQL query, or any other statement that the remote database will recognize and that will cause it to return a rowset. 56 | 57 | The ``connect`` argument to ODBCLoader can be any valid ODBC connect string. It is common to configure /etc/odbc.ini and /etc/odbcinst.ini with all the necessary information, then simply reference the DSN listing in /etc/odbc.ini in each query. For help configuring these files, or for more information on valid 'connect' strings, please see the documentation that came with the ODBC driver for the remote database product that you are connecting to, as the format of the string is specified by the driver. 58 | 59 | ### Federated queries 60 | As we said we can use the ODBCLoader to run *federated queries* against other databases (for example to join Vertica tables with MySQL tables) taking advantage of both *predicate pushdown* and *column pruning* in order to move from the external database to Vertica only the data really needed. 61 | 62 | To use this feature we Use Vertica External Tables as a gateway. Let's use an example to illustrate the process. 
External Tables, differently from Vertica-managed tables, leave the data *outside Vertica* In the following example we define an External Table in Vertica (public.epeople) retrieving data from the ODBC Data Source "pmf" through the query ``SELECT * FROM public.people``: 63 | 64 | ```sql 65 | CREATE EXTERNAL TABLE public.epeople( 66 | id INTEGER, 67 | name VARCHAR(20) 68 | ) AS COPY WITH 69 | SOURCE ODBCSource() 70 | PARSER ODBCLoader( 71 | connect='DSN=pmf', 72 | query='SELECT * FROM public.people' 73 | ) ; 74 | ``` 75 | 76 | When you define the External Table **nothing** is retrieved from the external database; we just save - in Vertica - the information needed to extract the data from the external source. 77 | 78 | Now, if we run, in Vertica, a query like this: 79 | ```sql 80 | SELECT * FROM public.epeople WHERE id > 100; 81 | ``` 82 | The ODBCLoader will rewrite the original query defined in the previous External Table definition as follow: 83 | ```sql 84 | SELECT id, name FROM public.people WHERE id > 100; 85 | ``` 86 | As you can see this query will *pushdown* the predicate to the external database. 87 | 88 | The other important feature to limit the amount of data being moved from the external database to Vertica is **columns pruning**. This means we extract from the external database only the columns needed to run the query in Vertica. As we have to retrieve all columns defined in the External Table, the ones not needed in the Vertica query will be replaced by NULL. So, for example, if we run, in Vertica: 89 | ```sql 90 | SELECT id FROM public.epeople WHERE id > 100; 91 | ``` 92 | the following query will be executed against the external database: 93 | ```sql 94 | SELECT id, NULL FROM public.people WHERE id > 100; 95 | ``` 96 | 97 | ### Optional configuration switches 98 | The ODBCLoader accept, in the External Table definition, the following, optional, configuration switches. 
99 | 
100 | ``rowset``: Amount of rows retrieved during each SQLFetch() iteration from the external database. The **default value for this parameter is 100** and you can alter it in the ``CREATE EXTERNAL TABLE`` statement by defining a different ``rowset``. For example:
101 | 
102 | ```sql
103 | CREATE EXTERNAL TABLE public.epeople(
104 | id INTEGER,
105 | name VARCHAR(20)
106 | ) AS COPY WITH
107 | SOURCE ODBCSource()
108 | PARSER ODBCLoader(
109 | connect='DSN=pmf',
110 | query='SELECT * FROM public.people',
111 | rowset = 500
112 | ) ;
113 | ```
114 | Please consider that increasing this parameter will also increase the memory consumption of the ODBCLoader.
115 | 
116 | You can **switch predicate pushdown on/off** with the optional boolean parameter ``src_rfilter``. The default value is true (meaning we perform predicate pushdown). You can set ``src_rfilter`` either in the ``CREATE EXTERNAL TABLE`` statement or using a **SESSION PARAMETER** as follows:
117 | ```sql
118 | ALTER SESSION SET UDPARAMETER FOR ODBCLoaderLib src_rfilter = false ;
119 | ```
120 | 
121 | You can **switch columns filtering on/off** with the optional boolean parameter ``src_cfilter``. The default value is true (meaning we perform column filtering). You can set ``src_cfilter`` either in the ``CREATE EXTERNAL TABLE`` statement or using a **SESSION PARAMETER** as follows:
122 | ```sql
123 | ALTER SESSION SET UDPARAMETER FOR ODBCLoaderLib src_cfilter = false ;
124 | ```
125 | 
126 | You can override the query defined in the original ``CREATE EXTERNAL TABLE`` statement using the session parameter ``override_query`` as follows:
127 | ```sql
128 | ALTER SESSION SET UDPARAMETER FOR ODBCLoaderLib override_query = '
129 | SELECT * FROM people WHERE id % 2
130 | '
131 | ```
132 | The default length for session parameters is 2kB so, if your override_query is longer you might need to increase the session parameter max length before setting ``override_query``.
For example: 133 | ```sql 134 | ALTER SESSION SET MaxSessionUDParameterSize = 16384 ; 135 | ALTER SESSION SET UDPARAMETER FOR ODBCLoaderLib override_query = ' 136 | SELECT 00 AS id, name, gender,bdate FROM mmf.mypeople UNION ALL 137 | SELECT 01 AS id, name, gender,bdate FROM mmf.mypeople UNION ALL 138 | SELECT 02 AS id, name, gender,bdate FROM mmf.mypeople UNION ALL 139 | SELECT 03 AS id, name, gender,bdate FROM mmf.mypeople UNION ALL 140 | SELECT 04 AS id, name, gender,bdate FROM mmf.mypeople UNION ALL 141 | ... long query here ... 142 | ' 143 | ``` 144 | **Please note**: session parameters are - of course - *session-scoped*. Have a look to the standard Vertica documentation to learn how to set/clear/check [User-Defined Session Parameters](https://www.vertica.com/docs/10.1.x/HTML/Content/Authoring/ExtendingVertica/UDx/Parameters/UDSessionParameters.htm). 145 | 146 | ### Pushed-down predicates conversion 147 | When pushing predicates down to the external database, ODBCLoader performs the following changes: 148 | 1. Vertica specific data type casting ``::`` will be removed 149 | 2. ``~~`` will be converted to ``LIKE`` 150 | 3. ``ANY(ARRAY)`` will be converted to ``IN()``. For example ``ID = ANY(ARRAY[1,2,3])`` will be converted to ``ID IN(1,2,3)`` 151 | 152 | ### Database Specific Notes 153 | **ORACLE**. All integers in Vertica are 64-bit integers. Oracle doesn't support 64-bit integers; their ODBC driver can't even cast to them on request. This code contains a quirk/workaround for Oracle that retrieves integers as C strings and re-parses them. However, the quirk doesn't reliably detect Oracle database servers right now. You can force Oracle with an obvious modification to the setQuirksMode() function in ODBCLoader.cpp. If you know of a more-reliable way to detect Oracle, or a better workaround, patches welcome :-) 154 | 155 | **MYSQL**. The MySQL ODBC driver comes in both a thread-safe and thread-unsafe build and configuration. 
The thread-unsafe version is KNOWN TO CRASH VERTICA if used in multiple COPY statements concurrently! (Vertica is, after all, highly multithreaded). Linux distributions aren't consistently careful to package thread-safe defaults. So if you're connecting to MySQL, be very careful to set up a thread-safe configuration.
156 | 
157 | **VERTICA**. If you have to COPY data from one Vertica cluster to another, use Vertica's built-in IMPORT/EXPORT capabilities, which are dramatically faster.
158 | 
159 | ## DEBUGGING
160 | ### ODBC layer tracing
161 | The simplest way to check how the ODBCLoader rewrites the query sent to the external database is to enable ODBC traces in odbcinst.ini. For example:
162 | ```
163 | [ODBC]
164 | Trace=on
165 | Tracefile=/tmp/uodbc.trc
166 | ```
167 | And then grep the SQL from the trace file:
168 | ```
169 | $ tail -f /tmp/uodbc.trc | grep 'SQL = '
170 | ```
171 | Please remember to switch ODBC traces off at the end of your debug session because they will slow down everything and create huge log files...
172 | 
173 | ### ODBC_Loader DEBUG
174 | If the ODBC tracing was not enough you can (re)compile this library with LOADER_DEBUG flag set to 1 as shown here:
175 | ```
176 | $ rm -rf build && make install LOADER_DEBUG=1
177 | ```
178 | this will print extra messages in the Vertica log files (either ``UDxLogs/UDxFencedProcesses.log`` or ``vertica.log`` depending on whether the library was "FENCED" or "UNFENCED"). **Caution:** don't do this in production because it will flood your logs with debug messages and slow down everything.
179 | 
180 | ### PCRE Missing symbols
181 | 
182 | The following error has been reported, during the deployment phase, on a few Linux distributions:
183 | ```
184 | undefined symbol: _ZNK7pcrecpp2RE13GlobalReplaceERKNS_11StringPieceEPSs
185 | ```
186 | 
187 | #### To fix this issue you might want to...
The following error has been reported, during the deployment phase, on a few Linux distributions:
mftest2 243 | Driver = PSQLODBC 244 | Trace = No 245 | TraceFile = sql.log 246 | Database = pmf 247 | Servername = mftest2 248 | UserName = 249 | Password = 250 | Port = 5432 251 | SSLmode = allow 252 | ReadOnly = 0 253 | Protocol = 7.4-1 254 | FakeOidIndex = 0 255 | ShowOidColumn = 0 256 | RowVersioning = 0 257 | ShowSystemTables = 0 258 | ConnSettings = 259 | Fetch = 1000 260 | Socket = 4096 261 | UnknownSizes = 0 262 | MaxVarcharSize = 1024 263 | MaxLongVarcharSize = 8190 264 | Debug = 0 265 | CommLog = 0 266 | Optimizer = 0 267 | Ksqo = 0 268 | UseDeclareFetch = 0 269 | TextAsLongVarchar = 1 270 | UnknownsAsLongVarchar = 0 271 | BoolsAsChar = 1 272 | Parse = 0 273 | CancelAsFreeStmt = 0 274 | ExtraSysTablePrefixes = dd_ 275 | LFConversion = 0 276 | UpdatableCursors = 0 277 | DisallowPremature = 0 278 | TrueIsMinus1 = 0 279 | BI = 0 280 | ByteaAsLongVarBinary = 0 281 | LowerCaseIdentifier = 0 282 | GssAuthUseGSS = 0 283 | XaOpt = 1 284 | UseServerSidePrepare = 0 285 | 286 | [mmf] 287 | Description = MySQL mftest2 288 | Driver = MYODBC 289 | SERVER = mftest2 290 | PORT = 3306 291 | SQL-MODE = 'ANSI_QUOTES' 292 | 293 | $ cat /etc/odbcinst.ini 294 | [ODBC] 295 | Trace=off 296 | Tracefile=/tmp/uodbc.trc 297 | 298 | [PSQLODBC] 299 | Description=PostgreSQL ODBC Driver 300 | Driver64=/usr/lib64/psqlodbcw.so 301 | UsageCount=1 302 | 303 | [MYODBC] 304 | Driver=/usr/lib64/libmyodbc8w.so 305 | UsageCount=1 306 | 307 | [MySQL ODBC 8.0 ANSI Driver] 308 | Driver=/usr/lib64/libmyodbc8a.so 309 | UsageCount=1 310 | ``` 311 | 312 | ## License 313 | 314 | Apache 2.0 License, please see `LICENSE` for details. 
315 | 316 | -------------------------------------------------------------------------------- /bin/act: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vertica/ODBC-Loader/eae4c95e0515ae9068784fad9832f1c7e44dca49/bin/act -------------------------------------------------------------------------------- /ddl/install.sql: -------------------------------------------------------------------------------- 1 | \set libfile '\''`pwd`'/lib/ODBCLoader.so\''; 2 | CREATE OR REPLACE LIBRARY ODBCLoaderLib AS :libfile; 3 | CREATE OR REPLACE PARSER ODBCLoader AS LANGUAGE 'C++' NAME 'ODBCLoaderFactory' LIBRARY ODBCLoaderLib FENCED; 4 | CREATE OR REPLACE SOURCE ODBCSource AS LANGUAGE 'C++' NAME 'ODBCSourceFactory' LIBRARY ODBCLoaderLib FENCED; 5 | --CREATE OR REPLACE PARSER ODBCLoader AS LANGUAGE 'C++' NAME 'ODBCLoaderFactory' LIBRARY ODBCLoaderLib NOT FENCED; 6 | --CREATE OR REPLACE SOURCE ODBCSource AS LANGUAGE 'C++' NAME 'ODBCSourceFactory' LIBRARY ODBCLoaderLib NOT FENCED; 7 | GRANT EXECUTE ON SOURCE public.ODBCSource() TO public; 8 | GRANT EXECUTE ON PARSER public.ODBCLoader() TO public; 9 | -------------------------------------------------------------------------------- /ddl/uninstall.sql: -------------------------------------------------------------------------------- 1 | DROP LIBRARY ODBCLoaderLib CASCADE; -------------------------------------------------------------------------------- /examples/Tests.out: -------------------------------------------------------------------------------- 1 | SET 2 | CREATE TABLE 3 | Rows Loaded 4 | ------------- 5 | 10 6 | (1 row) 7 | 8 | i | b | f | v | c | lv | bn | vb | lvb | d | t | ts | tz | tsz | n 9 | ---+---+-----+--------+--------+--------+----------------+--------+--------+------------+----------+---------------------+-------------+------------------------+------------- 10 | | | | | | | | | | | | | | | 11 | 1 | t | 1.5 | test 1 | test 1 | test 1 | test 1\000\000 | test 1 
| test 1 | 1200-01-01 | 04:01:00 | 2038-01-01 03:14:07 | 01:21:00-05 | 1200-06-01 03:21:00-05 | 123456.7890 12 | 2 | t | 2.5 | test 2 | test 2 | test 2 | test 2\000\000 | test 2 | test 2 | 1300-01-02 | 04:02:00 | 2038-01-02 03:14:07 | 01:22:00-05 | 1300-06-01 03:22:00-05 | 123456.7890 13 | 3 | t | 3.5 | test 3 | test 3 | test 3 | test 3\000\000 | test 3 | test 3 | 1400-01-03 | 04:03:00 | 2038-01-03 03:14:07 | 01:23:00-05 | 1400-06-01 03:23:00-05 | 123456.7890 14 | 4 | t | 4.5 | test 4 | test 4 | test 4 | test 4\000\000 | test 4 | test 4 | 1500-01-04 | 04:04:00 | 2038-01-04 03:14:07 | 01:24:00-05 | 1500-06-01 03:24:00-05 | 123456.7890 15 | 5 | t | 5.5 | test 5 | test 5 | test 5 | test 5\000\000 | test 5 | test 5 | 1600-01-05 | 04:05:00 | 2038-01-05 03:14:07 | 01:25:00-05 | 1600-06-01 03:25:00-05 | 123456.7890 16 | 6 | t | 6.5 | test 6 | test 6 | test 6 | test 6\000\000 | test 6 | test 6 | 1700-01-06 | 04:06:00 | 2038-01-06 03:14:07 | 01:26:00-05 | 1700-06-01 03:26:00-05 | 123456.7890 17 | 7 | t | 7.5 | test 7 | test 7 | test 7 | test 7\000\000 | test 7 | test 7 | 1800-01-07 | 04:07:00 | 2038-01-07 03:14:07 | 01:27:00-05 | 1800-06-01 03:27:00-05 | 123456.7890 18 | 8 | t | 8.5 | test 8 | test 8 | test 8 | test 8\000\000 | test 8 | test 8 | 1900-01-08 | 04:08:00 | 2038-01-08 03:14:07 | 01:28:00-05 | 1900-06-01 03:28:00-05 | 123456.7890 19 | 9 | t | 9.5 | test 9 | test 9 | test 9 | test 9\000\000 | test 9 | test 9 | 2000-01-09 | 04:09:00 | 2038-01-09 03:14:07 | 01:29:00-05 | 2000-06-01 03:29:00-05 | 123456.7890 20 | (10 rows) 21 | 22 | i b f v c lv bn vb lvb d t ts tz tsz n 23 | NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 24 | 1 1 1.5 test 1 test 1 test 1 test 1\0\0 test 1 test 1 1200-01-01 04:01:00 2038-01-01 03:14:07 1:21:00 June 1, 1200 03:21 EST 123456.7890 25 | 2 1 2.5 test 2 test 2 test 2 test 2\0\0 test 2 test 2 1300-01-02 04:02:00 2038-01-02 03:14:07 1:22:00 June 1, 1300 03:22 EST 123456.7890 26 | 3 1 3.5 test 3 test 3 test 3 test 
3\0\0 test 3 test 3 1400-01-03 04:03:00 2038-01-03 03:14:07 1:23:00 June 1, 1400 03:23 EST 123456.7890 27 | 4 1 4.5 test 4 test 4 test 4 test 4\0\0 test 4 test 4 1500-01-04 04:04:00 2038-01-04 03:14:07 1:24:00 June 1, 1500 03:24 EST 123456.7890 28 | 5 1 5.5 test 5 test 5 test 5 test 5\0\0 test 5 test 5 1600-01-05 04:05:00 2038-01-05 03:14:07 1:25:00 June 1, 1600 03:25 EST 123456.7890 29 | 6 1 6.5 test 6 test 6 test 6 test 6\0\0 test 6 test 6 1700-01-06 04:06:00 2038-01-06 03:14:07 1:26:00 June 1, 1700 03:26 EST 123456.7890 30 | 7 1 7.5 test 7 test 7 test 7 test 7\0\0 test 7 test 7 1800-01-07 04:07:00 2038-01-07 03:14:07 1:27:00 June 1, 1800 03:27 EST 123456.7890 31 | 8 1 8.5 test 8 test 8 test 8 test 8\0\0 test 8 test 8 1900-01-08 04:08:00 2038-01-08 03:14:07 1:28:00 June 1, 1900 03:28 EST 123456.7890 32 | 9 1 9.5 test 9 test 9 test 9 test 9\0\0 test 9 test 9 2000-01-09 04:09:00 2038-01-09 03:14:07 1:29:00 June 1, 2000 03:29 EST 123456.7890 33 | CREATE TABLE 34 | Rows Loaded 35 | ------------- 36 | 10 37 | (1 row) 38 | 39 | i | b | f | v | c | lv | d | t | ts | tz | tsz | n 40 | ---+---+-----+--------+--------+--------+------------+----------+---------------------+-------------+------------------------+------------- 41 | | | | | | | | | | | | 42 | | | | | | | | | | | | 43 | 1 | t | 1.5 | test 1 | test 1 | test 1 | 1200-01-01 | 04:01:00 | 2038-01-01 03:14:07 | 01:21:00-05 | 1200-06-01 03:21:00-05 | 123456.7890 44 | 1 | t | 1.5 | test 1 | test 1 | test 1 | 1200-01-01 | 04:01:00 | 2038-01-01 03:14:07 | 01:21:00-05 | 1200-06-01 03:21:00-05 | 123456.7890 45 | 2 | t | 2.5 | test 2 | test 2 | test 2 | 1300-01-02 | 04:02:00 | 2038-01-02 03:14:07 | 01:22:00-05 | 1300-06-01 03:22:00-05 | 123456.7890 46 | 2 | t | 2.5 | test 2 | test 2 | test 2 | 1300-01-02 | 04:02:00 | 2038-01-02 03:14:07 | 01:22:00-05 | 1300-06-01 03:22:00-05 | 123456.7890 47 | 3 | t | 3.5 | test 3 | test 3 | test 3 | 1400-01-03 | 04:03:00 | 2038-01-03 03:14:07 | 01:23:00-05 | 1400-06-01 03:23:00-05 | 
123456.7890 48 | 3 | t | 3.5 | test 3 | test 3 | test 3 | 1400-01-03 | 04:03:00 | 2038-01-03 03:14:07 | 01:23:00-05 | 1400-06-01 03:23:00-05 | 123456.7890 49 | 4 | t | 4.5 | test 4 | test 4 | test 4 | 1500-01-04 | 04:04:00 | 2038-01-04 03:14:07 | 01:24:00-05 | 1500-06-01 03:24:00-05 | 123456.7890 50 | 4 | t | 4.5 | test 4 | test 4 | test 4 | 1500-01-04 | 04:04:00 | 2038-01-04 03:14:07 | 01:24:00-05 | 1500-06-01 03:24:00-05 | 123456.7890 51 | 5 | t | 5.5 | test 5 | test 5 | test 5 | 1600-01-05 | 04:05:00 | 2038-01-05 03:14:07 | 01:25:00-05 | 1600-06-01 03:25:00-05 | 123456.7890 52 | 5 | t | 5.5 | test 5 | test 5 | test 5 | 1600-01-05 | 04:05:00 | 2038-01-05 03:14:07 | 01:25:00-05 | 1600-06-01 03:25:00-05 | 123456.7890 53 | 6 | t | 6.5 | test 6 | test 6 | test 6 | 1700-01-06 | 04:06:00 | 2038-01-06 03:14:07 | 01:26:00-05 | 1700-06-01 03:26:00-05 | 123456.7890 54 | 6 | t | 6.5 | test 6 | test 6 | test 6 | 1700-01-06 | 04:06:00 | 2038-01-06 03:14:07 | 01:26:00-05 | 1700-06-01 03:26:00-05 | 123456.7890 55 | 7 | t | 7.5 | test 7 | test 7 | test 7 | 1800-01-07 | 04:07:00 | 2038-01-07 03:14:07 | 01:27:00-05 | 1800-06-01 03:27:00-05 | 123456.7890 56 | 7 | t | 7.5 | test 7 | test 7 | test 7 | 1800-01-07 | 04:07:00 | 2038-01-07 03:14:07 | 01:27:00-05 | 1800-06-01 03:27:00-05 | 123456.7890 57 | 8 | t | 8.5 | test 8 | test 8 | test 8 | 1900-01-08 | 04:08:00 | 2038-01-08 03:14:07 | 01:28:00-05 | 1900-06-01 03:28:00-05 | 123456.7890 58 | 8 | t | 8.5 | test 8 | test 8 | test 8 | 1900-01-08 | 04:08:00 | 2038-01-08 03:14:07 | 01:28:00-05 | 1900-06-01 03:28:00-05 | 123456.7890 59 | 9 | t | 9.5 | test 9 | test 9 | test 9 | 2000-01-09 | 04:09:00 | 2038-01-09 03:14:07 | 01:29:00-05 | 2000-06-01 03:29:00-05 | 123456.7890 60 | 9 | t | 9.5 | test 9 | test 9 | test 9 | 2000-01-09 | 04:09:00 | 2038-01-09 03:14:07 | 01:29:00-05 | 2000-06-01 03:29:00-05 | 123456.7890 61 | (20 rows) 62 | 63 | DROP TABLE 64 | vsql: ERROR 5861: Error calling process() in User Function UDParser at [...], error 
code: 0, message: Error parsing Numeric: '9999999999999999.9999' (...) 65 | vsql: ERROR 5861: Error calling process() in User Function UDParser at [...], error code: 0, message: Error parsing TimestampTz: 'June 1, 2000 03:2X EST' (...) 66 | vsql: ERROR 5861: Error calling process() in User Function UDParser at [...], error code: 0, message: Error parsing TimeTz: '1:2:A EST' (...) 67 | i | b | f | v | c | lv | bn | vb | lvb | d | t | ts | tz | tsz | n 68 | ---+---+---+---+---+----+----+----+-----+---+---+----+----+-----+--- 69 | (0 rows) 70 | 71 | vsql: ERROR 5861: Error calling setup() in User Function UDParser at [...], error code: 0, message: ODBC Error: SQLDriverConnect() failed with error code IM002, native code 0 [[unixODBC][Driver Manager]Data source name not found, and no default driver specified] 72 | vsql: ERROR 5861: Error calling setup() in User Function UDParser at [...], error code: 0, message: ODBC Error: SQLExecDirect() failed with error code 42000, native code 1064 [[MySQL][...][...]You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'hi!' at line 1] 73 | DROP TABLE 74 | -------------------------------------------------------------------------------- /examples/sample_usage.sql: -------------------------------------------------------------------------------- 1 | -- Try some stuff out 2 | -- Requires that you have a local MySQL instance with no root password, 3 | -- and ODBC properly installed and configured to connect to it under the "MySQL" DSN. 4 | \! echo "CREATE DATABASE testdb;" | mysql -u root 5 | 6 | -- Set output to a fixed timezone regardless of where this is being tested 7 | set time zone to 'EST'; 8 | 9 | -- Populate a table in MySQL 10 | -- Create the timezone-based entries as varchar's; MySQL doesn't do timezones at all AFAICT... 
11 | -- MySQL Timestamps automagically rewrite 'null' to CURRENT_TIMESTAMP() unless the column in question explicitly allows null values. (Older MySQL's have other nonstandard behavior re: timestamps.) 12 | \! echo "CREATE TABLE test_mysql (i integer, b boolean, f float, v varchar(32), c char(32), lv varchar(9999), bn binary(32), vb varbinary(32), lvb varbinary(9999), d date, t time, ts timestamp null, tz varchar(80), tsz varchar(80), n numeric(20,4));" | mysql -u root testdb 13 | \! (echo "INSERT INTO test_mysql VALUES (null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);"; for i in `seq 1 9`; do echo "INSERT INTO test_mysql VALUES ($i, 1, $i.5, 'test $i', 'test $i', 'test $i', 'test $i', 'test $i', 'test $i', '$(($i+11))00/1/$i', '4:0$i', '2038-01-0$i 03:14:07 EST', '1:2$i:00', 'June 1, $(($i+11))00 03:2$i EST', '123456.7890');"; done) | mysql -u root testdb 14 | 15 | -- Create the corresponding table in Vertica 16 | CREATE TABLE test_vertica (i integer, b boolean, f float, v varchar(32), c char(32), lv varchar(9999), bn binary(32), vb varbinary(32), lvb varbinary(999), d date, t time, ts timestamp, tz timetz, tsz timestamptz, n numeric(18,4)); 17 | 18 | -- Copy from MySQL into Vertica 19 | COPY test_vertica WITH SOURCE ODBCSource() PARSER ODBCLoader(connect='DSN=Default', query='SELECT * FROM testdb.test_mysql;'); 20 | 21 | -- Verify that Vertica and MySQL have the same contents 22 | SELECT i,b,f,v,trim(c::varchar) as c,lv,bn::binary(8) as bn,vb,lvb,d,t,ts,tz,tsz,n FROM test_vertica ORDER BY i,b,f,v; 23 | \! 
echo "SELECT i,b,f,v,trim(c) as c,lv,cast(bn as binary(8)) as bn,vb,lvb,d,t,ts,tz,tsz,n FROM test_mysql ORDER BY i,b,f,v;" | mysql -u root testdb 24 | 25 | -- Now try copying from ourselves into ourselves; see what happens 26 | CREATE TABLE test_vertica_no_bin AS SELECT i,b,f,v,c,lv,d,t,ts,tz,tsz,n FROM test_vertica; 27 | -- run this in a separate session so it uses the local time zone 28 | COPY test_vertica_no_bin WITH SOURCE ODBCSource() PARSER ODBCLoader(connect='DSN=VerticaDSN;ConnSettings=set+session+timezone+to+''EST''', query='SELECT * FROM test_vertica_no_bin;'); 29 | 30 | SELECT i,b,f,v,trim(c::varchar) as c,lv,d,t,ts,tz,tsz,n FROM test_vertica_no_bin ORDER BY i,b,f,v; 31 | DROP TABLE test_vertica_no_bin; 32 | 33 | -- try some failures 34 | \! echo "INSERT INTO test_mysql VALUES (11, null, null, null, null, null, null, null, null, null, null, null, null, null, '99999999999999999999');" | mysql -u root testdb 35 | \! echo "INSERT INTO test_mysql VALUES (12, null, null, null, null, null, null, null, null, null, null, null, null, 'June 1, 2000 03:2X EST', null);" | mysql -u root testdb 36 | \! 
echo "INSERT INTO test_mysql VALUES (13, null, null, null, null, null, null, null, null, null, null, null, '1:2:A EST', null, null);" | mysql -u root testdb 37 | COPY test_vertica WITH SOURCE ODBCSource() PARSER ODBCLoader(connect='DSN=Default', query='SELECT * FROM testdb.test_mysql where i=11;'); 38 | COPY test_vertica WITH SOURCE ODBCSource() PARSER ODBCLoader(connect='DSN=Default', query='SELECT * FROM testdb.test_mysql where i=12;'); 39 | COPY test_vertica WITH SOURCE ODBCSource() PARSER ODBCLoader(connect='DSN=Default', query='SELECT * FROM testdb.test_mysql where i=13;'); 40 | select * from test_vertica where i>10; 41 | 42 | -- Try some invalid commands; make sure they error out correctly 43 | COPY test_vertica WITH SOURCE ODBCSource() PARSER ODBCLoader(connect='DSN=InvalidDSN', query='SELECT * FROM testdb.test_mysql;'); 44 | COPY test_vertica WITH SOURCE ODBCSource() PARSER ODBCLoader(connect='DSN=Default', query='hi!'); 45 | 46 | -- Clean up after ourselves 47 | DROP TABLE test_vertica; 48 | \! 
echo "DROP DATABASE testdb;" | mysql -u root 49 | 50 | -------------------------------------------------------------------------------- /tests/config/odbc.ini: -------------------------------------------------------------------------------- 1 | [ODBC Data Sources] 2 | PSQLODBC = PostgreSQL ODBC 3 | MYODBC = MySQL ODBC 4 | 5 | [MySQL] 6 | Description = MySQL CI Test 7 | Driver = MYODBC 8 | SERVER = host.docker.internal 9 | PORT = 3306 10 | USER = root 11 | Database = testdb 12 | SQL-MODE = 'ANSI_QUOTES' 13 | 14 | [PostgreSQL] 15 | Description = PostgreSQL CI Test 16 | Driver = PSQLODBC 17 | Trace = No 18 | TraceFile = sql.log 19 | Database = testdb 20 | Servername = host.docker.internal 21 | UserName = 22 | Password = 23 | Port = 5433 24 | SSLmode = allow -------------------------------------------------------------------------------- /tests/config/odbcinst.ini: -------------------------------------------------------------------------------- 1 | [ODBC] 2 | Trace=off 3 | Tracefile=/tmp/uodbc.trc 4 | 5 | [PSQLODBC] 6 | Description=PostgreSQL ODBC Driver 7 | Driver64=/usr/lib64/psqlodbcw.so 8 | UsageCount=1 9 | 10 | [MYODBC] 11 | Driver=/usr/lib64/libmyodbc8w.so 12 | UsageCount=1 13 | 14 | [MySQL ODBC 8.0 ANSI Driver] 15 | Driver=/usr/lib64/libmyodbc8a.so 16 | UsageCount=1 -------------------------------------------------------------------------------- /tests/copy_test.sql: -------------------------------------------------------------------------------- 1 | \timing off 2 | 3 | -- Set output to a fixed timezone regardless of where this is being tested 4 | set time zone to 'EST'; 5 | 6 | -- Create the corresponding table in Vertica 7 | CREATE TABLE test_vertica (i integer, b boolean, f float, v varchar(32), c char(32), lv varchar(9999), bn binary(32), vb varbinary(32), lvb varbinary(999), d date, t time, ts timestamp, tz timetz, tsz timestamptz, n numeric(18,4)); 8 | 9 | -- Copy from MySQL into Vertica 10 | COPY test_vertica WITH SOURCE ODBCSource() PARSER 
ODBCLoader(connect='DSN=MySQL', query='SELECT * FROM testdb.test_source;'); 11 | 12 | -- Verify the output 13 | SELECT i,b,f,v,trim(c::varchar) as c,lv,bn::binary(8) as bn,vb,lvb,d,t,ts,tz,tsz,n FROM test_vertica ORDER BY i,b,f,v; 14 | 15 | -- Clean up 16 | DROP TABLE test_vertica; 17 | -------------------------------------------------------------------------------- /tests/expected/copy_test.out: -------------------------------------------------------------------------------- 1 | Timing is on. 2 | Pager usage is off. 3 | Timing is off. 4 | SET 5 | CREATE TABLE 6 | Rows Loaded 7 | ------------- 8 | 10 9 | (1 row) 10 | 11 | i | b | f | v | c | lv | bn | vb | lvb | d | t | ts | tz | tsz | n 12 | ---+---+-----+--------+--------+--------+----------------+--------+--------+------------+----------+---------------------+-------------+------------------------+------------- 13 | | | | | | | | | | | | | | | 14 | 1 | t | 1.5 | test 1 | test 1 | test 1 | test 1\000\000 | test 1 | test 1 | 1200-01-01 | 04:01:00 | 2038-01-01 03:14:07 | 01:21:00-05 | 1200-06-01 03:21:00-05 | 123456.7890 15 | 2 | t | 2.5 | test 2 | test 2 | test 2 | test 2\000\000 | test 2 | test 2 | 1300-01-02 | 04:02:00 | 2038-01-02 03:14:07 | 01:22:00-05 | 1300-06-01 03:22:00-05 | 123456.7890 16 | 3 | t | 3.5 | test 3 | test 3 | test 3 | test 3\000\000 | test 3 | test 3 | 1400-01-03 | 04:03:00 | 2038-01-03 03:14:07 | 01:23:00-05 | 1400-06-01 03:23:00-05 | 123456.7890 17 | 4 | t | 4.5 | test 4 | test 4 | test 4 | test 4\000\000 | test 4 | test 4 | 1500-01-04 | 04:04:00 | 2038-01-04 03:14:07 | 01:24:00-05 | 1500-06-01 03:24:00-05 | 123456.7890 18 | 5 | t | 5.5 | test 5 | test 5 | test 5 | test 5\000\000 | test 5 | test 5 | 1600-01-05 | 04:05:00 | 2038-01-05 03:14:07 | 01:25:00-05 | 1600-06-01 03:25:00-05 | 123456.7890 19 | 6 | t | 6.5 | test 6 | test 6 | test 6 | test 6\000\000 | test 6 | test 6 | 1700-01-06 | 04:06:00 | 2038-01-06 03:14:07 | 01:26:00-05 | 1700-06-01 03:26:00-05 | 123456.7890 20 | 7 | t | 7.5 | 
test 7 | test 7 | test 7 | test 7\000\000 | test 7 | test 7 | 1800-01-07 | 04:07:00 | 2038-01-07 03:14:07 | 01:27:00-05 | 1800-06-01 03:27:00-05 | 123456.7890 21 | 8 | t | 8.5 | test 8 | test 8 | test 8 | test 8\000\000 | test 8 | test 8 | 1900-01-08 | 04:08:00 | 2038-01-08 03:14:07 | 01:28:00-05 | 1900-06-01 03:28:00-05 | 123456.7890 22 | 9 | t | 9.5 | test 9 | test 9 | test 9 | test 9\000\000 | test 9 | test 9 | 2000-01-09 | 04:09:00 | 2038-01-09 03:14:07 | 01:29:00-05 | 2000-06-01 03:29:00-05 | 123456.7890 23 | (10 rows) 24 | 25 | DROP TABLE 26 | --------------------------------------------------------------------------------