├── .gitignore ├── LICENSE ├── README.md ├── adapter.go ├── adapter_test.go ├── go.mod ├── go.sum └── vendor ├── github.com ├── casbin │ └── casbin │ │ ├── LICENSE │ │ ├── config │ │ └── config.go │ │ ├── log │ │ ├── default_logger.go │ │ ├── log_util.go │ │ └── logger.go │ │ ├── model │ │ ├── assertion.go │ │ ├── function.go │ │ ├── model.go │ │ └── policy.go │ │ ├── rbac │ │ └── role_manager.go │ │ └── util │ │ ├── builtin_operators.go │ │ └── util.go └── lib │ └── pq │ ├── .gitignore │ ├── .travis.sh │ ├── .travis.yml │ ├── CONTRIBUTING.md │ ├── LICENSE.md │ ├── README.md │ ├── TESTS.md │ ├── array.go │ ├── buf.go │ ├── conn.go │ ├── conn_go18.go │ ├── connector.go │ ├── copy.go │ ├── doc.go │ ├── encode.go │ ├── error.go │ ├── go.mod │ ├── notify.go │ ├── oid │ ├── doc.go │ ├── gen.go │ └── types.go │ ├── rows.go │ ├── ssl.go │ ├── ssl_go1.7.go │ ├── ssl_permissions.go │ ├── ssl_renegotiation.go │ ├── ssl_windows.go │ ├── url.go │ ├── user_posix.go │ ├── user_windows.go │ └── uuid.go └── modules.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | 7 | # Test binary, build with `go test -c` 8 | *.test 9 | 10 | # Output of the go coverage tool, specifically when used with LiteIDE 11 | *.out 12 | 13 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 14 | .glide/ 15 | .idea/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # casbin-postgres-adapter 2 | PostgreSQL Adapter for Casbin 3 | -------------------------------------------------------------------------------- /adapter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package adapter
16 |
17 | import (
18 | "database/sql"
19 | "errors"
20 | "fmt"
21 | "strings"
22 |
23 | "github.com/casbin/casbin/model"
24 | _ "github.com/lib/pq" // Register the PostgreSQL driver with database/sql.
25 | )
26 |
27 | // Adapter represents the PostgreSQL adapter for policy storage.
28 | type Adapter struct {
29 | driverName string
30 | dataSourceName string
31 | db *sql.DB
32 | }
33 |
34 | // NewAdapter is the constructor for Adapter.
35 | func NewAdapter(driverName string, dataSourceName string) *Adapter {
36 | a := Adapter{}
37 | a.driverName = driverName
38 | a.dataSourceName = dataSourceName
39 | return &a
40 | }
41 |
42 | func (a *Adapter) createDatabase() error {
43 | db, err := sql.Open(a.driverName, a.dataSourceName)
44 | if err != nil {
45 | return err
46 | }
47 | defer db.Close()
48 | // PostgreSQL has no "CREATE DATABASE IF NOT EXISTS", so treat an "already exists" error as success on reruns.
49 | _, err = db.Exec("CREATE DATABASE casbin")
50 | if err != nil && strings.Contains(err.Error(), "already exists") { return nil }
51 | return err
52 | }
53 | func (a *Adapter) open() {
54 | if err := a.createDatabase(); err != nil {
55 | panic(err)
56 | }
57 |
58 | db, err := sql.Open(a.driverName, a.dataSourceName+"casbin") // the DSN is expected to end right before the database name, e.g. "postgres://user:password@127.0.0.1/"
59 | if err != nil {
60 | panic(err)
61 | }
62 |
63 | a.db = db
64 |
65 | a.createTable()
66 | }
67 |
68 | func (a *Adapter) close() {
69 | a.db.Close()
70 | }
71 |
72 | func (a *Adapter) createTable() {
73 | _, err := a.db.Exec("CREATE table IF NOT EXISTS policy (ptype VARCHAR(10), v0 VARCHAR(256), v1 VARCHAR(256), v2 VARCHAR(256), v3 VARCHAR(256), v4 VARCHAR(256), v5 VARCHAR(256))")
74 | if err != nil {
75 | panic(err)
76 | }
77 | }
78 |
79 | func (a *Adapter) dropTable() {
80 | _, err := a.db.Exec("DROP table policy")
81 | if err != nil {
82 | panic(err)
83 | }
84 | }
85 |
86 | func loadPolicyLine(line string, model model.Model) {
87 | if line == "" {
88 | return
89 | }
90 |
91 | tokens := strings.Split(line, ", ")
92 |
93 | key := tokens[0]
94 | sec := key[:1]
95 | model[sec][key].Policy = append(model[sec][key].Policy, tokens[1:])
96 | }
97 |
98 | // LoadPolicy loads policy from database.
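// Each row of the policy table is rebuilt into a CSV-style rule line and handed to
// loadPolicyLine; for example, a row ("p", "alice", "data1", "read", "", "", "")
// becomes the line "p, alice, data1, read".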
99 | func (a *Adapter) LoadPolicy(model model.Model) error {
100 | a.open()
101 | defer a.close()
102 |
103 | var (
104 | ptype string
105 | v0 string
106 | v1 string
107 | v2 string
108 | v3 string
109 | v4 string
110 | v5 string
111 | )
112 |
113 | rows, err := a.db.Query("select * from policy")
114 | if err != nil {
115 | return err
116 | }
117 | defer rows.Close()
118 | for rows.Next() {
119 | err := rows.Scan(&ptype, &v0, &v1, &v2, &v3, &v4, &v5) // scan all seven columns created by createTable
120 | if err != nil {
121 | return err
122 | }
123 |
124 | line := ptype
125 | if v0 != "" {
126 | line += ", " + v0
127 | }
128 | if v1 != "" {
129 | line += ", " + v1
130 | }
131 | if v2 != "" {
132 | line += ", " + v2
133 | }
134 | if v3 != "" {
135 | line += ", " + v3
136 | }
137 | if v4 != "" {
138 | line += ", " + v4
139 | }
140 | if v5 != "" {
141 | line += ", " + v5
142 | }
143 |
144 | loadPolicyLine(line, model)
145 | }
146 | err = rows.Err()
147 | return err
148 | }
149 |
150 | func (a *Adapter) writeTableLine(ptype string, rule []string) string {
151 | params := make([]string, 7)
152 | params[0] = ptype
153 | for i, v := range rule {
154 | params[i+1] = v
155 | }
156 | // Single-quote each value so the generated VALUES clause is valid SQL for the text columns.
157 | return fmt.Sprintf("('%s')", strings.Join(params, "', '"))
158 | }
159 |
160 | // SavePolicy saves policy to database.
161 | func (a *Adapter) SavePolicy(model model.Model) error {
162 | a.open()
163 | defer a.close()
164 |
165 | a.dropTable()
166 | a.createTable()
167 |
168 | var values []string
169 |
170 | for ptype, ast := range model["p"] {
171 | for _, rule := range ast.Policy {
172 | line := a.writeTableLine(ptype, rule)
173 | values = append(values, line)
174 | }
175 | }
176 |
177 | for ptype, ast := range model["g"] {
178 | for _, rule := range ast.Policy {
179 | line := a.writeTableLine(ptype, rule)
180 | values = append(values, line)
181 | }
182 | }
183 |
184 | if len(values) == 0 {
185 | return errors.New("policy is empty")
186 | }
187 |
188 | stmt := fmt.Sprintf(" INSERT INTO policy VALUES %s ;", strings.Join(values, ","))
189 | _, err := a.db.Exec(stmt)
190 |
191 | return err
192 | }
193 | // AddPolicy adds a policy rule to the storage (not implemented).
194 | func (a *Adapter) AddPolicy(sec string, ptype string, policy []string) error {
195 | return errors.New("not implemented")
196 | }
197 | // RemovePolicy removes a policy rule from the storage (not implemented).
198 | func (a *Adapter) RemovePolicy(sec string, ptype string, policy []string) error {
199 | return errors.New("not implemented")
200 | }
201 | // RemoveFilteredPolicy removes policy rules that match the filter from the storage (not implemented).
202 | func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error {
203 | return errors.New("not implemented")
204 | }
205 |
-------------------------------------------------------------------------------- /adapter_test.go: --------------------------------------------------------------------------------
1 | // Copyright 2017 The casbin Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 15 | package adapter 16 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/going/casbin-postgres-adapter 2 | 3 | require ( 4 | github.com/casbin/casbin v1.8.0 5 | github.com/lib/pq v1.0.0 6 | ) 7 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= 2 | github.com/casbin/casbin v1.8.0 h1:eEDIzfiSg6aR5lqeQQ+YUhVLccsxykq1zcpWFOI4Kxo= 3 | github.com/casbin/casbin v1.8.0/go.mod h1:z8uPsfBJGUsnkagrt3G8QvjgTKFMBJ32UP8HpZllfog= 4 | github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= 5 | github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 6 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/config/config.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package config 16 | 17 | import ( 18 | "bufio" 19 | "bytes" 20 | "errors" 21 | "fmt" 22 | "io" 23 | "os" 24 | "strconv" 25 | "strings" 26 | "sync" 27 | ) 28 | 29 | var ( 30 | // DEFAULT_SECTION specifies the name of a section if no name provided 31 | DEFAULT_SECTION = "default" 32 | // DEFAULT_COMMENT defines what character(s) indicate a comment `#` 33 | DEFAULT_COMMENT = []byte{'#'} 34 | // DEFAULT_COMMENT_SEM defines what alternate character(s) indicate a comment `;` 35 | DEFAULT_COMMENT_SEM = []byte{';'} 36 | // DEFAULT_MULTI_LINE_SEPARATOR defines what character indicates a multi-line content 37 | DEFAULT_MULTI_LINE_SEPARATOR = []byte{'\\'} 38 | ) 39 | 40 | // ConfigInterface defines the behavior of a Config implemenation 41 | type ConfigInterface interface { 42 | String(key string) string 43 | Strings(key string) []string 44 | Bool(key string) (bool, error) 45 | Int(key string) (int, error) 46 | Int64(key string) (int64, error) 47 | Float64(key string) (float64, error) 48 | Set(key string, value string) error 49 | } 50 | 51 | // Config represents an implementation of the ConfigInterface 52 | type Config struct { 53 | // map is not safe. 54 | sync.RWMutex 55 | // Section:key=value 56 | data map[string]map[string]string 57 | } 58 | 59 | // NewConfig create an empty configuration representation from file. 60 | func NewConfig(confName string) (ConfigInterface, error) { 61 | c := &Config{ 62 | data: make(map[string]map[string]string), 63 | } 64 | err := c.parse(confName) 65 | return c, err 66 | } 67 | 68 | // NewConfigFromText create an empty configuration representation from text. 69 | func NewConfigFromText(text string) (ConfigInterface, error) { 70 | c := &Config{ 71 | data: make(map[string]map[string]string), 72 | } 73 | err := c.parseBuffer(bufio.NewReader(strings.NewReader(text))) 74 | return c, err 75 | } 76 | 77 | // AddConfig adds a new section->key:value to the configuration. 
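// The returned bool reports whether the option is new to its section; an existing
// section/option pair is simply overwritten and false is returned in that case.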
78 | func (c *Config) AddConfig(section string, option string, value string) bool { 79 | if section == "" { 80 | section = DEFAULT_SECTION 81 | } 82 | 83 | if _, ok := c.data[section]; !ok { 84 | c.data[section] = make(map[string]string) 85 | } 86 | 87 | _, ok := c.data[section][option] 88 | c.data[section][option] = value 89 | 90 | return !ok 91 | } 92 | 93 | func (c *Config) parse(fname string) (err error) { 94 | c.Lock() 95 | f, err := os.Open(fname) 96 | if err != nil { 97 | return err 98 | } 99 | defer c.Unlock() 100 | defer f.Close() 101 | 102 | buf := bufio.NewReader(f) 103 | return c.parseBuffer(buf) 104 | } 105 | 106 | func (c *Config) parseBuffer(buf *bufio.Reader) error { 107 | var section string 108 | var lineNum int 109 | var buffer bytes.Buffer 110 | var canWrite bool 111 | for { 112 | if canWrite { 113 | if err := c.write(section, lineNum, &buffer); err != nil { 114 | return err 115 | } else { 116 | canWrite = false 117 | } 118 | } 119 | lineNum++ 120 | line, _, err := buf.ReadLine() 121 | if err == io.EOF { 122 | // force write when buffer is not flushed yet 123 | if buffer.Len() > 0 { 124 | if err := c.write(section, lineNum, &buffer); err != nil { 125 | return err 126 | } 127 | } 128 | break 129 | } else if err != nil { 130 | return err 131 | } 132 | 133 | line = bytes.TrimSpace(line) 134 | switch { 135 | case bytes.Equal(line, []byte{}), bytes.HasPrefix(line, DEFAULT_COMMENT_SEM), 136 | bytes.HasPrefix(line, DEFAULT_COMMENT): 137 | canWrite = true 138 | continue 139 | case bytes.HasPrefix(line, []byte{'['}) && bytes.HasSuffix(line, []byte{']'}): 140 | // force write when buffer is not flushed yet 141 | if buffer.Len() > 0 { 142 | if err := c.write(section, lineNum, &buffer); err != nil { 143 | return err 144 | } 145 | canWrite = false 146 | } 147 | section = string(line[1 : len(line)-1]) 148 | default: 149 | var p []byte 150 | if bytes.HasSuffix(line, DEFAULT_MULTI_LINE_SEPARATOR) { 151 | p = bytes.TrimSpace(line[:len(line)-1]) 152 | } else { 153 | p = line 154 | canWrite = true 155 | } 156 | 157 | if _, err := buffer.Write(p); err != nil { 158 | return err 159 | } 160 | } 161 | } 162 | 163 | return nil 164 | } 165 | 166 | func (c *Config) write(section string, lineNum int, b *bytes.Buffer) error { 167 | if b.Len() <= 0 { 168 | return nil 169 | } 170 | 171 | optionVal := bytes.SplitN(b.Bytes(), []byte{'='}, 2) 172 | if len(optionVal) != 2 { 173 | return fmt.Errorf("parse the content error : line %d , %s = ? 
", lineNum, optionVal[0]) 174 | } 175 | option := bytes.TrimSpace(optionVal[0]) 176 | value := bytes.TrimSpace(optionVal[1]) 177 | c.AddConfig(section, string(option), string(value)) 178 | 179 | // flush buffer after adding 180 | b.Reset() 181 | 182 | return nil 183 | } 184 | 185 | // Bool lookups up the value using the provided key and converts the value to a bool 186 | func (c *Config) Bool(key string) (bool, error) { 187 | return strconv.ParseBool(c.get(key)) 188 | } 189 | 190 | // Int lookups up the value using the provided key and converts the value to a int 191 | func (c *Config) Int(key string) (int, error) { 192 | return strconv.Atoi(c.get(key)) 193 | } 194 | 195 | // Int64 lookups up the value using the provided key and converts the value to a int64 196 | func (c *Config) Int64(key string) (int64, error) { 197 | return strconv.ParseInt(c.get(key), 10, 64) 198 | } 199 | 200 | // Float64 lookups up the value using the provided key and converts the value to a float64 201 | func (c *Config) Float64(key string) (float64, error) { 202 | return strconv.ParseFloat(c.get(key), 64) 203 | } 204 | 205 | // String lookups up the value using the provided key and converts the value to a string 206 | func (c *Config) String(key string) string { 207 | return c.get(key) 208 | } 209 | 210 | // Strings lookups up the value using the provided key and converts the value to an array of string 211 | // by splitting the string by comma 212 | func (c *Config) Strings(key string) []string { 213 | v := c.get(key) 214 | if v == "" { 215 | return nil 216 | } 217 | return strings.Split(v, ",") 218 | } 219 | 220 | // Set sets the value for the specific key in the Config 221 | func (c *Config) Set(key string, value string) error { 222 | c.Lock() 223 | defer c.Unlock() 224 | if len(key) == 0 { 225 | return errors.New("key is empty") 226 | } 227 | 228 | var ( 229 | section string 230 | option string 231 | ) 232 | 233 | keys := strings.Split(strings.ToLower(key), "::") 234 | if len(keys) >= 2 { 235 | section = keys[0] 236 | option = keys[1] 237 | } else { 238 | option = keys[0] 239 | } 240 | 241 | c.AddConfig(section, option, value) 242 | return nil 243 | } 244 | 245 | // section.key or key 246 | func (c *Config) get(key string) string { 247 | var ( 248 | section string 249 | option string 250 | ) 251 | 252 | keys := strings.Split(strings.ToLower(key), "::") 253 | if len(keys) >= 2 { 254 | section = keys[0] 255 | option = keys[1] 256 | } else { 257 | section = DEFAULT_SECTION 258 | option = keys[0] 259 | } 260 | 261 | if value, ok := c.data[section][option]; ok { 262 | return value 263 | } 264 | 265 | return "" 266 | } 267 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/log/default_logger.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package log 16 | 17 | import "log" 18 | 19 | // DefaultLogger is the implementation for a Logger using golang log. 20 | type DefaultLogger struct { 21 | enable bool 22 | } 23 | 24 | func (l *DefaultLogger) EnableLog(enable bool) { 25 | l.enable = enable 26 | } 27 | 28 | func (l *DefaultLogger) IsEnabled() bool { 29 | return l.enable 30 | } 31 | 32 | func (l *DefaultLogger) Print(v ...interface{}) { 33 | if l.enable { 34 | log.Print(v...) 35 | } 36 | } 37 | 38 | func (l *DefaultLogger) Printf(format string, v ...interface{}) { 39 | if l.enable { 40 | log.Printf(format, v...) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/log/log_util.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package log 16 | 17 | var logger Logger = &DefaultLogger{} 18 | 19 | // SetLogger sets the current logger. 20 | func SetLogger(l Logger) { 21 | logger = l 22 | } 23 | 24 | // GetLogger returns the current logger. 25 | func GetLogger() Logger { 26 | return logger 27 | } 28 | 29 | // LogPrint prints the log. 30 | func LogPrint(v ...interface{}) { 31 | logger.Print(v...) 32 | } 33 | 34 | // LogPrintf prints the log with the format. 35 | func LogPrintf(format string, v ...interface{}) { 36 | logger.Printf(format, v...) 37 | } 38 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/log/logger.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package log 16 | 17 | // Logger is the logging interface implementation. 18 | type Logger interface { 19 | //EnableLog controls whether print the message. 20 | EnableLog(bool) 21 | 22 | //IsEnabled returns if logger is enabled. 23 | IsEnabled() bool 24 | 25 | //Print formats using the default formats for its operands and logs the message. 26 | Print(...interface{}) 27 | 28 | //Printf formats according to a format specifier and logs the message. 
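//For example, Printf("%s: %v", key, value) formats the arguments like fmt.Sprintf before the message is logged.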
29 | Printf(string, ...interface{}) 30 | } 31 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/model/assertion.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package model 16 | 17 | import ( 18 | "errors" 19 | "strings" 20 | 21 | "github.com/casbin/casbin/log" 22 | "github.com/casbin/casbin/rbac" 23 | ) 24 | 25 | // Assertion represents an expression in a section of the model. 26 | // For example: r = sub, obj, act 27 | type Assertion struct { 28 | Key string 29 | Value string 30 | Tokens []string 31 | Policy [][]string 32 | RM rbac.RoleManager 33 | } 34 | 35 | func (ast *Assertion) buildRoleLinks(rm rbac.RoleManager) { 36 | ast.RM = rm 37 | count := strings.Count(ast.Value, "_") 38 | for _, rule := range ast.Policy { 39 | if count < 2 { 40 | panic(errors.New("the number of \"_\" in role definition should be at least 2")) 41 | } 42 | if len(rule) < count { 43 | panic(errors.New("grouping policy elements do not meet role definition")) 44 | } 45 | 46 | if count == 2 { 47 | // error intentionally ignored 48 | ast.RM.AddLink(rule[0], rule[1]) 49 | } else if count == 3 { 50 | // error intentionally ignored 51 | ast.RM.AddLink(rule[0], rule[1], rule[2]) 52 | } else if count == 4 { 53 | // error intentionally ignored 54 | ast.RM.AddLink(rule[0], rule[1], rule[2], rule[3]) 55 | } 56 | } 57 | 58 | log.LogPrint("Role links for: " + ast.Key) 59 | ast.RM.PrintRoles() 60 | } 61 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/model/function.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package model 16 | 17 | import "github.com/casbin/casbin/util" 18 | 19 | // FunctionMap represents the collection of Function. 20 | type FunctionMap map[string]func(args ...interface{}) (interface{}, error) 21 | 22 | // Function represents a function that is used in the matchers, used to get attributes in ABAC. 23 | type Function func(args ...interface{}) (interface{}, error) 24 | 25 | // AddFunction adds an expression function. 
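// For example, fm.AddFunction("keyMatch", util.KeyMatchFunc) makes keyMatch()
// callable from matcher expressions such as m = keyMatch(r.obj, p.obj).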
26 | func (fm FunctionMap) AddFunction(name string, function Function) { 27 | fm[name] = function 28 | } 29 | 30 | // LoadFunctionMap loads an initial function map. 31 | func LoadFunctionMap() FunctionMap { 32 | fm := make(FunctionMap) 33 | 34 | fm.AddFunction("keyMatch", util.KeyMatchFunc) 35 | fm.AddFunction("keyMatch2", util.KeyMatch2Func) 36 | fm.AddFunction("regexMatch", util.RegexMatchFunc) 37 | fm.AddFunction("ipMatch", util.IPMatchFunc) 38 | 39 | return fm 40 | } 41 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/model/model.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package model 16 | 17 | import ( 18 | "strconv" 19 | "strings" 20 | 21 | "github.com/casbin/casbin/config" 22 | "github.com/casbin/casbin/log" 23 | "github.com/casbin/casbin/util" 24 | ) 25 | 26 | // Model represents the whole access control model. 27 | type Model map[string]AssertionMap 28 | 29 | // AssertionMap is the collection of assertions, can be "r", "p", "g", "e", "m". 30 | type AssertionMap map[string]*Assertion 31 | 32 | var sectionNameMap = map[string]string{ 33 | "r": "request_definition", 34 | "p": "policy_definition", 35 | "g": "role_definition", 36 | "e": "policy_effect", 37 | "m": "matchers", 38 | } 39 | 40 | func loadAssertion(model Model, cfg config.ConfigInterface, sec string, key string) bool { 41 | value := cfg.String(sectionNameMap[sec] + "::" + key) 42 | return model.AddDef(sec, key, value) 43 | } 44 | 45 | // AddDef adds an assertion to the model. 46 | func (model Model) AddDef(sec string, key string, value string) bool { 47 | ast := Assertion{} 48 | ast.Key = key 49 | ast.Value = value 50 | 51 | if ast.Value == "" { 52 | return false 53 | } 54 | 55 | if sec == "r" || sec == "p" { 56 | ast.Tokens = strings.Split(ast.Value, ", ") 57 | for i := range ast.Tokens { 58 | ast.Tokens[i] = key + "_" + ast.Tokens[i] 59 | } 60 | } else { 61 | ast.Value = util.RemoveComments(util.EscapeAssertion(ast.Value)) 62 | } 63 | 64 | _, ok := model[sec] 65 | if !ok { 66 | model[sec] = make(AssertionMap) 67 | } 68 | 69 | model[sec][key] = &ast 70 | return true 71 | } 72 | 73 | func getKeySuffix(i int) string { 74 | if i == 1 { 75 | return "" 76 | } 77 | 78 | return strconv.Itoa(i) 79 | } 80 | 81 | func loadSection(model Model, cfg config.ConfigInterface, sec string) { 82 | i := 1 83 | for { 84 | if !loadAssertion(model, cfg, sec, sec+getKeySuffix(i)) { 85 | break 86 | } else { 87 | i++ 88 | } 89 | } 90 | } 91 | 92 | // LoadModel loads the model from model CONF file. 
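// A minimal model CONF (for illustration) provides the sections named in
// sectionNameMap above, for example:
//
//	[request_definition]
//	r = sub, obj, act
//
//	[policy_definition]
//	p = sub, obj, act
//
//	[policy_effect]
//	e = some(where (p.eft == allow))
//
//	[matchers]
//	m = r.sub == p.sub && r.obj == p.obj && r.act == p.act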
93 | func (model Model) LoadModel(path string) { 94 | cfg, err := config.NewConfig(path) 95 | if err != nil { 96 | panic(err) 97 | } 98 | 99 | loadSection(model, cfg, "r") 100 | loadSection(model, cfg, "p") 101 | loadSection(model, cfg, "e") 102 | loadSection(model, cfg, "m") 103 | 104 | loadSection(model, cfg, "g") 105 | } 106 | 107 | // LoadModelFromText loads the model from the text. 108 | func (model Model) LoadModelFromText(text string) { 109 | cfg, err := config.NewConfigFromText(text) 110 | if err != nil { 111 | panic(err) 112 | } 113 | 114 | loadSection(model, cfg, "r") 115 | loadSection(model, cfg, "p") 116 | loadSection(model, cfg, "e") 117 | loadSection(model, cfg, "m") 118 | 119 | loadSection(model, cfg, "g") 120 | } 121 | 122 | // PrintModel prints the model to the log. 123 | func (model Model) PrintModel() { 124 | log.LogPrint("Model:") 125 | for k, v := range model { 126 | for i, j := range v { 127 | log.LogPrintf("%s.%s: %s", k, i, j.Value) 128 | } 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/model/policy.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package model 16 | 17 | import ( 18 | "github.com/casbin/casbin/log" 19 | "github.com/casbin/casbin/rbac" 20 | "github.com/casbin/casbin/util" 21 | ) 22 | 23 | // BuildRoleLinks initializes the roles in RBAC. 24 | func (model Model) BuildRoleLinks(rm rbac.RoleManager) { 25 | for _, ast := range model["g"] { 26 | ast.buildRoleLinks(rm) 27 | } 28 | } 29 | 30 | // PrintPolicy prints the policy to log. 31 | func (model Model) PrintPolicy() { 32 | log.LogPrint("Policy:") 33 | for key, ast := range model["p"] { 34 | log.LogPrint(key, ": ", ast.Value, ": ", ast.Policy) 35 | } 36 | 37 | for key, ast := range model["g"] { 38 | log.LogPrint(key, ": ", ast.Value, ": ", ast.Policy) 39 | } 40 | } 41 | 42 | // ClearPolicy clears all current policy. 43 | func (model Model) ClearPolicy() { 44 | for _, ast := range model["p"] { 45 | ast.Policy = nil 46 | } 47 | 48 | for _, ast := range model["g"] { 49 | ast.Policy = nil 50 | } 51 | } 52 | 53 | // GetPolicy gets all rules in a policy. 54 | func (model Model) GetPolicy(sec string, ptype string) [][]string { 55 | return model[sec][ptype].Policy 56 | } 57 | 58 | // GetFilteredPolicy gets rules based on field filters from a policy. 
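// For example, GetFilteredPolicy("p", "p", 0, "alice") returns every p rule whose
// first field is "alice"; an empty string in fieldValues matches any value.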
59 | func (model Model) GetFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) [][]string { 60 | res := [][]string{} 61 | 62 | for _, rule := range model[sec][ptype].Policy { 63 | matched := true 64 | for i, fieldValue := range fieldValues { 65 | if fieldValue != "" && rule[fieldIndex+i] != fieldValue { 66 | matched = false 67 | break 68 | } 69 | } 70 | 71 | if matched { 72 | res = append(res, rule) 73 | } 74 | } 75 | 76 | return res 77 | } 78 | 79 | // HasPolicy determines whether a model has the specified policy rule. 80 | func (model Model) HasPolicy(sec string, ptype string, rule []string) bool { 81 | for _, r := range model[sec][ptype].Policy { 82 | if util.ArrayEquals(rule, r) { 83 | return true 84 | } 85 | } 86 | 87 | return false 88 | } 89 | 90 | // AddPolicy adds a policy rule to the model. 91 | func (model Model) AddPolicy(sec string, ptype string, rule []string) bool { 92 | if !model.HasPolicy(sec, ptype, rule) { 93 | model[sec][ptype].Policy = append(model[sec][ptype].Policy, rule) 94 | return true 95 | } 96 | return false 97 | } 98 | 99 | // RemovePolicy removes a policy rule from the model. 100 | func (model Model) RemovePolicy(sec string, ptype string, rule []string) bool { 101 | for i, r := range model[sec][ptype].Policy { 102 | if util.ArrayEquals(rule, r) { 103 | model[sec][ptype].Policy = append(model[sec][ptype].Policy[:i], model[sec][ptype].Policy[i+1:]...) 104 | return true 105 | } 106 | } 107 | 108 | return false 109 | } 110 | 111 | // RemoveFilteredPolicy removes policy rules based on field filters from the model. 112 | func (model Model) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) bool { 113 | tmp := [][]string{} 114 | res := false 115 | for _, rule := range model[sec][ptype].Policy { 116 | matched := true 117 | for i, fieldValue := range fieldValues { 118 | if fieldValue != "" && rule[fieldIndex+i] != fieldValue { 119 | matched = false 120 | break 121 | } 122 | } 123 | 124 | if matched { 125 | res = true 126 | } else { 127 | tmp = append(tmp, rule) 128 | } 129 | } 130 | 131 | model[sec][ptype].Policy = tmp 132 | return res 133 | } 134 | 135 | // GetValuesForFieldInPolicy gets all values for a field for all rules in a policy, duplicated values are removed. 136 | func (model Model) GetValuesForFieldInPolicy(sec string, ptype string, fieldIndex int) []string { 137 | values := []string{} 138 | 139 | for _, rule := range model[sec][ptype].Policy { 140 | values = append(values, rule[fieldIndex]) 141 | } 142 | 143 | util.ArrayRemoveDuplicates(&values) 144 | 145 | return values 146 | } 147 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/rbac/role_manager.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package rbac 16 | 17 | // RoleManager provides interface to define the operations for managing roles. 18 | type RoleManager interface { 19 | // Clear clears all stored data and resets the role manager to the initial state. 20 | Clear() error 21 | // AddLink adds the inheritance link between two roles. role: name1 and role: name2. 22 | // domain is a prefix to the roles (can be used for other purposes). 23 | AddLink(name1 string, name2 string, domain ...string) error 24 | // DeleteLink deletes the inheritance link between two roles. role: name1 and role: name2. 25 | // domain is a prefix to the roles (can be used for other purposes). 26 | DeleteLink(name1 string, name2 string, domain ...string) error 27 | // HasLink determines whether a link exists between two roles. role: name1 inherits role: name2. 28 | // domain is a prefix to the roles (can be used for other purposes). 29 | HasLink(name1 string, name2 string, domain ...string) (bool, error) 30 | // GetRoles gets the roles that a user inherits. 31 | // domain is a prefix to the roles (can be used for other purposes). 32 | GetRoles(name string, domain ...string) ([]string, error) 33 | // GetUsers gets the users that inherits a role. 34 | // domain is a prefix to the users (can be used for other purposes). 35 | GetUsers(name string, domain ...string) ([]string, error) 36 | // PrintRoles prints all the roles to log. 37 | PrintRoles() error 38 | } 39 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/util/builtin_operators.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package util 16 | 17 | import ( 18 | "net" 19 | "regexp" 20 | "strings" 21 | 22 | "github.com/casbin/casbin/rbac" 23 | ) 24 | 25 | // KeyMatch determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. 26 | // For example, "/foo/bar" matches "/foo/*" 27 | func KeyMatch(key1 string, key2 string) bool { 28 | i := strings.Index(key2, "*") 29 | if i == -1 { 30 | return key1 == key2 31 | } 32 | 33 | if len(key1) > i { 34 | return key1[:i] == key2[:i] 35 | } 36 | return key1 == key2[:i] 37 | } 38 | 39 | // KeyMatchFunc is the wrapper for KeyMatch. 40 | func KeyMatchFunc(args ...interface{}) (interface{}, error) { 41 | name1 := args[0].(string) 42 | name2 := args[1].(string) 43 | 44 | return bool(KeyMatch(name1, name2)), nil 45 | } 46 | 47 | // KeyMatch2 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. 
48 | // For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/:resource" 49 | func KeyMatch2(key1 string, key2 string) bool { 50 | key2 = strings.Replace(key2, "/*", "/.*", -1) 51 | 52 | re := regexp.MustCompile(`(.*):[^/]+(.*)`) 53 | for { 54 | if !strings.Contains(key2, "/:") { 55 | break 56 | } 57 | 58 | key2 = "^" + re.ReplaceAllString(key2, "$1[^/]+$2") + "$" 59 | } 60 | 61 | return RegexMatch(key1, key2) 62 | } 63 | 64 | // KeyMatch2Func is the wrapper for KeyMatch2. 65 | func KeyMatch2Func(args ...interface{}) (interface{}, error) { 66 | name1 := args[0].(string) 67 | name2 := args[1].(string) 68 | 69 | return bool(KeyMatch2(name1, name2)), nil 70 | } 71 | 72 | // KeyMatch3 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. 73 | // For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/{resource}" 74 | func KeyMatch3(key1 string, key2 string) bool { 75 | key2 = strings.Replace(key2, "/*", "/.*", -1) 76 | 77 | re := regexp.MustCompile(`(.*)\{[^/]+\}(.*)`) 78 | for { 79 | if !strings.Contains(key2, "/{") { 80 | break 81 | } 82 | 83 | key2 = re.ReplaceAllString(key2, "$1[^/]+$2") 84 | } 85 | 86 | return RegexMatch(key1, key2) 87 | } 88 | 89 | // KeyMatch3Func is the wrapper for KeyMatch3. 90 | func KeyMatch3Func(args ...interface{}) (interface{}, error) { 91 | name1 := args[0].(string) 92 | name2 := args[1].(string) 93 | 94 | return bool(KeyMatch3(name1, name2)), nil 95 | } 96 | 97 | // RegexMatch determines whether key1 matches the pattern of key2 in regular expression. 98 | func RegexMatch(key1 string, key2 string) bool { 99 | res, err := regexp.MatchString(key2, key1) 100 | if err != nil { 101 | panic(err) 102 | } 103 | return res 104 | } 105 | 106 | // RegexMatchFunc is the wrapper for RegexMatch. 107 | func RegexMatchFunc(args ...interface{}) (interface{}, error) { 108 | name1 := args[0].(string) 109 | name2 := args[1].(string) 110 | 111 | return bool(RegexMatch(name1, name2)), nil 112 | } 113 | 114 | // IPMatch determines whether IP address ip1 matches the pattern of IP address ip2, ip2 can be an IP address or a CIDR pattern. 115 | // For example, "192.168.2.123" matches "192.168.2.0/24" 116 | func IPMatch(ip1 string, ip2 string) bool { 117 | objIP1 := net.ParseIP(ip1) 118 | if objIP1 == nil { 119 | panic("invalid argument: ip1 in IPMatch() function is not an IP address.") 120 | } 121 | 122 | _, cidr, err := net.ParseCIDR(ip2) 123 | if err != nil { 124 | objIP2 := net.ParseIP(ip2) 125 | if objIP2 == nil { 126 | panic("invalid argument: ip2 in IPMatch() function is neither an IP address nor a CIDR.") 127 | } 128 | 129 | return objIP1.Equal(objIP2) 130 | } 131 | 132 | return cidr.Contains(objIP1) 133 | } 134 | 135 | // IPMatchFunc is the wrapper for IPMatch. 136 | func IPMatchFunc(args ...interface{}) (interface{}, error) { 137 | ip1 := args[0].(string) 138 | ip2 := args[1].(string) 139 | 140 | return bool(IPMatch(ip1, ip2)), nil 141 | } 142 | 143 | // GenerateGFunction is the factory method of the g(_, _) function. 
144 | func GenerateGFunction(rm rbac.RoleManager) func(args ...interface{}) (interface{}, error) { 145 | return func(args ...interface{}) (interface{}, error) { 146 | name1 := args[0].(string) 147 | name2 := args[1].(string) 148 | 149 | if rm == nil { 150 | return name1 == name2, nil 151 | } else if len(args) == 2 { 152 | res, _ := rm.HasLink(name1, name2) 153 | return res, nil 154 | } else { 155 | domain := args[2].(string) 156 | res, _ := rm.HasLink(name1, name2, domain) 157 | return res, nil 158 | } 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /vendor/github.com/casbin/casbin/util/util.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The casbin Authors. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package util 16 | 17 | import ( 18 | "sort" 19 | "strings" 20 | ) 21 | 22 | // EscapeAssertion escapes the dots in the assertion, because the expression evaluation doesn't support such variable names. 23 | func EscapeAssertion(s string) string { 24 | s = strings.Replace(s, "r.", "r_", -1) 25 | s = strings.Replace(s, "p.", "p_", -1) 26 | return s 27 | } 28 | 29 | // RemoveComments removes the comments starting with # in the text. 30 | func RemoveComments(s string) string { 31 | pos := strings.Index(s, "#") 32 | if pos == -1 { 33 | return s 34 | } 35 | return strings.TrimSpace(s[0:pos]) 36 | } 37 | 38 | // ArrayEquals determines whether two string arrays are identical. 39 | func ArrayEquals(a []string, b []string) bool { 40 | if len(a) != len(b) { 41 | return false 42 | } 43 | 44 | for i, v := range a { 45 | if v != b[i] { 46 | return false 47 | } 48 | } 49 | return true 50 | } 51 | 52 | // Array2DEquals determines whether two 2-dimensional string arrays are identical. 53 | func Array2DEquals(a [][]string, b [][]string) bool { 54 | if len(a) != len(b) { 55 | return false 56 | } 57 | 58 | for i, v := range a { 59 | if !ArrayEquals(v, b[i]) { 60 | return false 61 | } 62 | } 63 | return true 64 | } 65 | 66 | // ArrayRemoveDuplicates removes any duplicated elements in a string array. 67 | func ArrayRemoveDuplicates(s *[]string) { 68 | found := make(map[string]bool) 69 | j := 0 70 | for i, x := range *s { 71 | if !found[x] { 72 | found[x] = true 73 | (*s)[j] = (*s)[i] 74 | j++ 75 | } 76 | } 77 | *s = (*s)[:j] 78 | } 79 | 80 | // ArrayToString gets a printable string for a string array. 81 | func ArrayToString(s []string) string { 82 | return strings.Join(s, ", ") 83 | } 84 | 85 | // ParamsToString gets a printable string for variable number of parameters. 86 | func ParamsToString(s ...string) string { 87 | return strings.Join(s, ", ") 88 | } 89 | 90 | // SetEquals determines whether two string sets are identical. 
91 | func SetEquals(a []string, b []string) bool { 92 | if len(a) != len(b) { 93 | return false 94 | } 95 | 96 | sort.Strings(a) 97 | sort.Strings(b) 98 | 99 | for i, v := range a { 100 | if v != b[i] { 101 | return false 102 | } 103 | } 104 | return true 105 | } 106 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/.gitignore: -------------------------------------------------------------------------------- 1 | .db 2 | *.test 3 | *~ 4 | *.swp 5 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/.travis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | client_configure() { 6 | sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key 7 | } 8 | 9 | pgdg_repository() { 10 | local sourcelist='sources.list.d/postgresql.list' 11 | 12 | curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add - 13 | echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist" 14 | sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update 15 | } 16 | 17 | postgresql_configure() { 18 | sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config 19 | local all all trust 20 | hostnossl all pqgossltest 127.0.0.1/32 reject 21 | hostnossl all pqgosslcert 127.0.0.1/32 reject 22 | hostssl all pqgossltest 127.0.0.1/32 trust 23 | hostssl all pqgosslcert 127.0.0.1/32 cert 24 | host all all 127.0.0.1/32 trust 25 | hostnossl all pqgossltest ::1/128 reject 26 | hostnossl all pqgosslcert ::1/128 reject 27 | hostssl all pqgossltest ::1/128 trust 28 | hostssl all pqgosslcert ::1/128 cert 29 | host all all ::1/128 trust 30 | config 31 | 32 | xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates 33 | certs/root.crt 34 | certs/server.crt 35 | certs/server.key 36 | certificates 37 | 38 | sort -VCu <<-versions || 39 | $PGVERSION 40 | 9.2 41 | versions 42 | sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config 43 | ssl_ca_file = 'root.crt' 44 | ssl_cert_file = 'server.crt' 45 | ssl_key_file = 'server.key' 46 | config 47 | 48 | echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null 49 | 50 | sudo service postgresql restart 51 | } 52 | 53 | postgresql_install() { 54 | xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages 55 | postgresql-$PGVERSION 56 | postgresql-server-dev-$PGVERSION 57 | postgresql-contrib-$PGVERSION 58 | packages 59 | } 60 | 61 | postgresql_uninstall() { 62 | sudo service postgresql stop 63 | xargs sudo apt-get -y --purge remove <<-packages 64 | libpq-dev 65 | libpq5 66 | postgresql 67 | postgresql-client-common 68 | postgresql-common 69 | packages 70 | sudo rm -rf /var/lib/postgresql 71 | } 72 | 73 | megacheck_install() { 74 | # Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous 75 | # new error messages in old code. 76 | go get -d honnef.co/go/tools/... 
77 | git -C $GOPATH/src/honnef.co/go/tools/ checkout $MEGACHECK_VERSION 78 | go install honnef.co/go/tools/cmd/megacheck 79 | megacheck --version 80 | } 81 | 82 | golint_install() { 83 | go get github.com/golang/lint/golint 84 | } 85 | 86 | $1 87 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.8.x 5 | - 1.9.x 6 | - 1.10.x 7 | - master 8 | 9 | sudo: true 10 | 11 | env: 12 | global: 13 | - PGUSER=postgres 14 | - PQGOSSLTESTS=1 15 | - PQSSLCERTTEST_PATH=$PWD/certs 16 | - PGHOST=127.0.0.1 17 | - MEGACHECK_VERSION=2017.2.2 18 | matrix: 19 | - PGVERSION=10 20 | - PGVERSION=9.6 21 | - PGVERSION=9.5 22 | - PGVERSION=9.4 23 | - PGVERSION=9.3 24 | - PGVERSION=9.2 25 | - PGVERSION=9.1 26 | - PGVERSION=9.0 27 | 28 | before_install: 29 | - ./.travis.sh postgresql_uninstall 30 | - ./.travis.sh pgdg_repository 31 | - ./.travis.sh postgresql_install 32 | - ./.travis.sh postgresql_configure 33 | - ./.travis.sh client_configure 34 | - ./.travis.sh megacheck_install 35 | - ./.travis.sh golint_install 36 | - go get golang.org/x/tools/cmd/goimports 37 | 38 | before_script: 39 | - createdb pqgotest 40 | - createuser -DRS pqgossltest 41 | - createuser -DRS pqgosslcert 42 | 43 | script: 44 | - > 45 | goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' 46 | - go vet ./... 47 | - megacheck -go 1.8 ./... 48 | - golint ./... 49 | - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... 50 | - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... 51 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contributing to pq 2 | 3 | `pq` has a backlog of pull requests, but contributions are still very 4 | much welcome. You can help with patch review, submitting bug reports, 5 | or adding new functionality. There is no formal style guide, but 6 | please conform to the style of existing code and general Go formatting 7 | conventions when submitting patches. 8 | 9 | ### Patch review 10 | 11 | Help review existing open pull requests by commenting on the code or 12 | proposed functionality. 13 | 14 | ### Bug reports 15 | 16 | We appreciate any bug reports, but especially ones with self-contained 17 | (doesn't depend on code outside of pq), minimal (can't be simplified 18 | further) test cases. It's especially helpful if you can submit a pull 19 | request with just the failing test case (you'll probably want to 20 | pattern it after the tests in 21 | [conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)). 22 | 23 | ### New functionality 24 | 25 | There are a number of pending patches for new functionality, so 26 | additional feature patches will take a while to merge. Still, patches 27 | are generally reviewed based on usefulness and complexity in addition 28 | to time-in-queue, so if you have a knockout idea, take a shot. Feel 29 | free to open an issue discussing your proposed patch beforehand.
30 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011-2013, 'pq' Contributors 2 | Portions Copyright (C) 2011 Blake Mizerany 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 7 | 8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 9 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/README.md: -------------------------------------------------------------------------------- 1 | # pq - A pure Go postgres driver for Go's database/sql package 2 | 3 | [![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq) 4 | [![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq) 5 | 6 | ## Install 7 | 8 | go get github.com/lib/pq 9 | 10 | ## Docs 11 | 12 | For detailed documentation and basic usage examples, please see the package 13 | documentation at <https://godoc.org/github.com/lib/pq>. 14 | 15 | ## Tests 16 | 17 | `go test` is used for testing. See [TESTS.md](TESTS.md) for more details. 18 | 19 | ## Features 20 | 21 | * SSL 22 | * Handles bad connections for `database/sql` 23 | * Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) 24 | * Scan binary blobs correctly (i.e. `bytea`) 25 | * Package for `hstore` support 26 | * COPY FROM support 27 | * pq.ParseURL for converting urls to connection strings for sql.Open. 28 | * Many libpq compatible environment variables 29 | * Unix socket support 30 | * Notifications: `LISTEN`/`NOTIFY` 31 | * pgpass support 32 | 33 | ## Future / Things you can help with 34 | 35 | * Better COPY FROM / COPY TO (see discussion in #181) 36 | 37 | ## Thank you (alphabetical) 38 | 39 | Some of these contributors are from the original library `bmizerany/pq.go` whose 40 | code still exists in here.
41 | 42 | * Andy Balholm (andybalholm) 43 | * Ben Berkert (benburkert) 44 | * Benjamin Heatwole (bheatwole) 45 | * Bill Mill (llimllib) 46 | * Bjørn Madsen (aeons) 47 | * Blake Gentry (bgentry) 48 | * Brad Fitzpatrick (bradfitz) 49 | * Charlie Melbye (cmelbye) 50 | * Chris Bandy (cbandy) 51 | * Chris Gilling (cgilling) 52 | * Chris Walsh (cwds) 53 | * Dan Sosedoff (sosedoff) 54 | * Daniel Farina (fdr) 55 | * Eric Chlebek (echlebek) 56 | * Eric Garrido (minusnine) 57 | * Eric Urban (hydrogen18) 58 | * Everyone at The Go Team 59 | * Evan Shaw (edsrzf) 60 | * Ewan Chou (coocood) 61 | * Fazal Majid (fazalmajid) 62 | * Federico Romero (federomero) 63 | * Fumin (fumin) 64 | * Gary Burd (garyburd) 65 | * Heroku (heroku) 66 | * James Pozdena (jpoz) 67 | * Jason McVetta (jmcvetta) 68 | * Jeremy Jay (pbnjay) 69 | * Joakim Sernbrant (serbaut) 70 | * John Gallagher (jgallagher) 71 | * Jonathan Rudenberg (titanous) 72 | * Joël Stemmer (jstemmer) 73 | * Kamil Kisiel (kisielk) 74 | * Kelly Dunn (kellydunn) 75 | * Keith Rarick (kr) 76 | * Kir Shatrov (kirs) 77 | * Lann Martin (lann) 78 | * Maciek Sakrejda (uhoh-itsmaciek) 79 | * Marc Brinkmann (mbr) 80 | * Marko Tiikkaja (johto) 81 | * Matt Newberry (MattNewberry) 82 | * Matt Robenolt (mattrobenolt) 83 | * Martin Olsen (martinolsen) 84 | * Mike Lewis (mikelikespie) 85 | * Nicolas Patry (Narsil) 86 | * Oliver Tonnhofer (olt) 87 | * Patrick Hayes (phayes) 88 | * Paul Hammond (paulhammond) 89 | * Ryan Smith (ryandotsmith) 90 | * Samuel Stauffer (samuel) 91 | * Timothée Peignier (cyberdelia) 92 | * Travis Cline (tmc) 93 | * TruongSinh Tran-Nguyen (truongsinh) 94 | * Yaismel Miranda (ympons) 95 | * notedit (notedit) 96 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/TESTS.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | ## Running Tests 4 | 5 | `go test` is used for testing. A running PostgreSQL 6 | server is required, with the ability to log in. The 7 | database to connect to test with is "pqgotest," on 8 | "localhost" but these can be overridden using [environment 9 | variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). 10 | 11 | Example: 12 | 13 | PGHOST=/run/postgresql go test 14 | 15 | ## Benchmarks 16 | 17 | A benchmark suite can be run as part of the tests: 18 | 19 | go test -bench . 20 | 21 | ## Example setup (Docker) 22 | 23 | Run a postgres container: 24 | 25 | ``` 26 | docker run --expose 5432:5432 postgres 27 | ``` 28 | 29 | Run tests: 30 | 31 | ``` 32 | PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test 33 | ``` 34 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/array.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "bytes" 5 | "database/sql" 6 | "database/sql/driver" 7 | "encoding/hex" 8 | "fmt" 9 | "reflect" 10 | "strconv" 11 | "strings" 12 | ) 13 | 14 | var typeByteSlice = reflect.TypeOf([]byte{}) 15 | var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() 16 | var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() 17 | 18 | // Array returns the optimal driver.Valuer and sql.Scanner for an array or 19 | // slice of any dimension. 
20 | // 21 | // For example: 22 | // db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) 23 | // 24 | // var x []sql.NullInt64 25 | // db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x)) 26 | // 27 | // Scanning multi-dimensional arrays is not supported. Arrays where the lower 28 | // bound is not one (such as `[0:0]={1}') are not supported. 29 | func Array(a interface{}) interface { 30 | driver.Valuer 31 | sql.Scanner 32 | } { 33 | switch a := a.(type) { 34 | case []bool: 35 | return (*BoolArray)(&a) 36 | case []float64: 37 | return (*Float64Array)(&a) 38 | case []int64: 39 | return (*Int64Array)(&a) 40 | case []string: 41 | return (*StringArray)(&a) 42 | 43 | case *[]bool: 44 | return (*BoolArray)(a) 45 | case *[]float64: 46 | return (*Float64Array)(a) 47 | case *[]int64: 48 | return (*Int64Array)(a) 49 | case *[]string: 50 | return (*StringArray)(a) 51 | } 52 | 53 | return GenericArray{a} 54 | } 55 | 56 | // ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner 57 | // to override the array delimiter used by GenericArray. 58 | type ArrayDelimiter interface { 59 | // ArrayDelimiter returns the delimiter character(s) for this element's type. 60 | ArrayDelimiter() string 61 | } 62 | 63 | // BoolArray represents a one-dimensional array of the PostgreSQL boolean type. 64 | type BoolArray []bool 65 | 66 | // Scan implements the sql.Scanner interface. 67 | func (a *BoolArray) Scan(src interface{}) error { 68 | switch src := src.(type) { 69 | case []byte: 70 | return a.scanBytes(src) 71 | case string: 72 | return a.scanBytes([]byte(src)) 73 | case nil: 74 | *a = nil 75 | return nil 76 | } 77 | 78 | return fmt.Errorf("pq: cannot convert %T to BoolArray", src) 79 | } 80 | 81 | func (a *BoolArray) scanBytes(src []byte) error { 82 | elems, err := scanLinearArray(src, []byte{','}, "BoolArray") 83 | if err != nil { 84 | return err 85 | } 86 | if *a != nil && len(elems) == 0 { 87 | *a = (*a)[:0] 88 | } else { 89 | b := make(BoolArray, len(elems)) 90 | for i, v := range elems { 91 | if len(v) != 1 { 92 | return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) 93 | } 94 | switch v[0] { 95 | case 't': 96 | b[i] = true 97 | case 'f': 98 | b[i] = false 99 | default: 100 | return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) 101 | } 102 | } 103 | *a = b 104 | } 105 | return nil 106 | } 107 | 108 | // Value implements the driver.Valuer interface. 109 | func (a BoolArray) Value() (driver.Value, error) { 110 | if a == nil { 111 | return nil, nil 112 | } 113 | 114 | if n := len(a); n > 0 { 115 | // There will be exactly two curly brackets, N bytes of values, 116 | // and N-1 bytes of delimiters. 117 | b := make([]byte, 1+2*n) 118 | 119 | for i := 0; i < n; i++ { 120 | b[2*i] = ',' 121 | if a[i] { 122 | b[1+2*i] = 't' 123 | } else { 124 | b[1+2*i] = 'f' 125 | } 126 | } 127 | 128 | b[0] = '{' 129 | b[2*n] = '}' 130 | 131 | return string(b), nil 132 | } 133 | 134 | return "{}", nil 135 | } 136 | 137 | // ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. 138 | type ByteaArray [][]byte 139 | 140 | // Scan implements the sql.Scanner interface. 
141 | func (a *ByteaArray) Scan(src interface{}) error { 142 | switch src := src.(type) { 143 | case []byte: 144 | return a.scanBytes(src) 145 | case string: 146 | return a.scanBytes([]byte(src)) 147 | case nil: 148 | *a = nil 149 | return nil 150 | } 151 | 152 | return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) 153 | } 154 | 155 | func (a *ByteaArray) scanBytes(src []byte) error { 156 | elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") 157 | if err != nil { 158 | return err 159 | } 160 | if *a != nil && len(elems) == 0 { 161 | *a = (*a)[:0] 162 | } else { 163 | b := make(ByteaArray, len(elems)) 164 | for i, v := range elems { 165 | b[i], err = parseBytea(v) 166 | if err != nil { 167 | return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) 168 | } 169 | } 170 | *a = b 171 | } 172 | return nil 173 | } 174 | 175 | // Value implements the driver.Valuer interface. It uses the "hex" format which 176 | // is only supported on PostgreSQL 9.0 or newer. 177 | func (a ByteaArray) Value() (driver.Value, error) { 178 | if a == nil { 179 | return nil, nil 180 | } 181 | 182 | if n := len(a); n > 0 { 183 | // There will be at least two curly brackets, 2*N bytes of quotes, 184 | // 3*N bytes of hex formatting, and N-1 bytes of delimiters. 185 | size := 1 + 6*n 186 | for _, x := range a { 187 | size += hex.EncodedLen(len(x)) 188 | } 189 | 190 | b := make([]byte, size) 191 | 192 | for i, s := 0, b; i < n; i++ { 193 | o := copy(s, `,"\\x`) 194 | o += hex.Encode(s[o:], a[i]) 195 | s[o] = '"' 196 | s = s[o+1:] 197 | } 198 | 199 | b[0] = '{' 200 | b[size-1] = '}' 201 | 202 | return string(b), nil 203 | } 204 | 205 | return "{}", nil 206 | } 207 | 208 | // Float64Array represents a one-dimensional array of the PostgreSQL double 209 | // precision type. 210 | type Float64Array []float64 211 | 212 | // Scan implements the sql.Scanner interface. 213 | func (a *Float64Array) Scan(src interface{}) error { 214 | switch src := src.(type) { 215 | case []byte: 216 | return a.scanBytes(src) 217 | case string: 218 | return a.scanBytes([]byte(src)) 219 | case nil: 220 | *a = nil 221 | return nil 222 | } 223 | 224 | return fmt.Errorf("pq: cannot convert %T to Float64Array", src) 225 | } 226 | 227 | func (a *Float64Array) scanBytes(src []byte) error { 228 | elems, err := scanLinearArray(src, []byte{','}, "Float64Array") 229 | if err != nil { 230 | return err 231 | } 232 | if *a != nil && len(elems) == 0 { 233 | *a = (*a)[:0] 234 | } else { 235 | b := make(Float64Array, len(elems)) 236 | for i, v := range elems { 237 | if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { 238 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 239 | } 240 | } 241 | *a = b 242 | } 243 | return nil 244 | } 245 | 246 | // Value implements the driver.Valuer interface. 247 | func (a Float64Array) Value() (driver.Value, error) { 248 | if a == nil { 249 | return nil, nil 250 | } 251 | 252 | if n := len(a); n > 0 { 253 | // There will be at least two curly brackets, N bytes of values, 254 | // and N-1 bytes of delimiters. 255 | b := make([]byte, 1, 1+2*n) 256 | b[0] = '{' 257 | 258 | b = strconv.AppendFloat(b, a[0], 'f', -1, 64) 259 | for i := 1; i < n; i++ { 260 | b = append(b, ',') 261 | b = strconv.AppendFloat(b, a[i], 'f', -1, 64) 262 | } 263 | 264 | return string(append(b, '}')), nil 265 | } 266 | 267 | return "{}", nil 268 | } 269 | 270 | // GenericArray implements the driver.Valuer and sql.Scanner interfaces for 271 | // an array or slice of any dimension. 
272 | type GenericArray struct{ A interface{} } 273 | 274 | func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { 275 | var assign func([]byte, reflect.Value) error 276 | var del = "," 277 | 278 | // TODO calculate the assign function for other types 279 | // TODO repeat this section on the element type of arrays or slices (multidimensional) 280 | { 281 | if reflect.PtrTo(rt).Implements(typeSQLScanner) { 282 | // dest is always addressable because it is an element of a slice. 283 | assign = func(src []byte, dest reflect.Value) (err error) { 284 | ss := dest.Addr().Interface().(sql.Scanner) 285 | if src == nil { 286 | err = ss.Scan(nil) 287 | } else { 288 | err = ss.Scan(src) 289 | } 290 | return 291 | } 292 | goto FoundType 293 | } 294 | 295 | assign = func([]byte, reflect.Value) error { 296 | return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) 297 | } 298 | } 299 | 300 | FoundType: 301 | 302 | if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { 303 | del = ad.ArrayDelimiter() 304 | } 305 | 306 | return rt, assign, del 307 | } 308 | 309 | // Scan implements the sql.Scanner interface. 310 | func (a GenericArray) Scan(src interface{}) error { 311 | dpv := reflect.ValueOf(a.A) 312 | switch { 313 | case dpv.Kind() != reflect.Ptr: 314 | return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) 315 | case dpv.IsNil(): 316 | return fmt.Errorf("pq: destination %T is nil", a.A) 317 | } 318 | 319 | dv := dpv.Elem() 320 | switch dv.Kind() { 321 | case reflect.Slice: 322 | case reflect.Array: 323 | default: 324 | return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) 325 | } 326 | 327 | switch src := src.(type) { 328 | case []byte: 329 | return a.scanBytes(src, dv) 330 | case string: 331 | return a.scanBytes([]byte(src), dv) 332 | case nil: 333 | if dv.Kind() == reflect.Slice { 334 | dv.Set(reflect.Zero(dv.Type())) 335 | return nil 336 | } 337 | } 338 | 339 | return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) 340 | } 341 | 342 | func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { 343 | dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) 344 | dims, elems, err := parseArray(src, []byte(del)) 345 | if err != nil { 346 | return err 347 | } 348 | 349 | // TODO allow multidimensional 350 | 351 | if len(dims) > 1 { 352 | return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", 353 | strings.Replace(fmt.Sprint(dims), " ", "][", -1)) 354 | } 355 | 356 | // Treat a zero-dimensional array like an array with a single dimension of zero. 
357 | if len(dims) == 0 { 358 | dims = append(dims, 0) 359 | } 360 | 361 | for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { 362 | switch rt.Kind() { 363 | case reflect.Slice: 364 | case reflect.Array: 365 | if rt.Len() != dims[i] { 366 | return fmt.Errorf("pq: cannot convert ARRAY%s to %s", 367 | strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) 368 | } 369 | default: 370 | // TODO handle multidimensional 371 | } 372 | } 373 | 374 | values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) 375 | for i, e := range elems { 376 | if err := assign(e, values.Index(i)); err != nil { 377 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 378 | } 379 | } 380 | 381 | // TODO handle multidimensional 382 | 383 | switch dv.Kind() { 384 | case reflect.Slice: 385 | dv.Set(values.Slice(0, dims[0])) 386 | case reflect.Array: 387 | for i := 0; i < dims[0]; i++ { 388 | dv.Index(i).Set(values.Index(i)) 389 | } 390 | } 391 | 392 | return nil 393 | } 394 | 395 | // Value implements the driver.Valuer interface. 396 | func (a GenericArray) Value() (driver.Value, error) { 397 | if a.A == nil { 398 | return nil, nil 399 | } 400 | 401 | rv := reflect.ValueOf(a.A) 402 | 403 | switch rv.Kind() { 404 | case reflect.Slice: 405 | if rv.IsNil() { 406 | return nil, nil 407 | } 408 | case reflect.Array: 409 | default: 410 | return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) 411 | } 412 | 413 | if n := rv.Len(); n > 0 { 414 | // There will be at least two curly brackets, N bytes of values, 415 | // and N-1 bytes of delimiters. 416 | b := make([]byte, 0, 1+2*n) 417 | 418 | b, _, err := appendArray(b, rv, n) 419 | return string(b), err 420 | } 421 | 422 | return "{}", nil 423 | } 424 | 425 | // Int64Array represents a one-dimensional array of the PostgreSQL integer types. 426 | type Int64Array []int64 427 | 428 | // Scan implements the sql.Scanner interface. 429 | func (a *Int64Array) Scan(src interface{}) error { 430 | switch src := src.(type) { 431 | case []byte: 432 | return a.scanBytes(src) 433 | case string: 434 | return a.scanBytes([]byte(src)) 435 | case nil: 436 | *a = nil 437 | return nil 438 | } 439 | 440 | return fmt.Errorf("pq: cannot convert %T to Int64Array", src) 441 | } 442 | 443 | func (a *Int64Array) scanBytes(src []byte) error { 444 | elems, err := scanLinearArray(src, []byte{','}, "Int64Array") 445 | if err != nil { 446 | return err 447 | } 448 | if *a != nil && len(elems) == 0 { 449 | *a = (*a)[:0] 450 | } else { 451 | b := make(Int64Array, len(elems)) 452 | for i, v := range elems { 453 | if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { 454 | return fmt.Errorf("pq: parsing array element index %d: %v", i, err) 455 | } 456 | } 457 | *a = b 458 | } 459 | return nil 460 | } 461 | 462 | // Value implements the driver.Valuer interface. 463 | func (a Int64Array) Value() (driver.Value, error) { 464 | if a == nil { 465 | return nil, nil 466 | } 467 | 468 | if n := len(a); n > 0 { 469 | // There will be at least two curly brackets, N bytes of values, 470 | // and N-1 bytes of delimiters. 471 | b := make([]byte, 1, 1+2*n) 472 | b[0] = '{' 473 | 474 | b = strconv.AppendInt(b, a[0], 10) 475 | for i := 1; i < n; i++ { 476 | b = append(b, ',') 477 | b = strconv.AppendInt(b, a[i], 10) 478 | } 479 | 480 | return string(append(b, '}')), nil 481 | } 482 | 483 | return "{}", nil 484 | } 485 | 486 | // StringArray represents a one-dimensional array of the PostgreSQL character types. 
487 | type StringArray []string 488 | 489 | // Scan implements the sql.Scanner interface. 490 | func (a *StringArray) Scan(src interface{}) error { 491 | switch src := src.(type) { 492 | case []byte: 493 | return a.scanBytes(src) 494 | case string: 495 | return a.scanBytes([]byte(src)) 496 | case nil: 497 | *a = nil 498 | return nil 499 | } 500 | 501 | return fmt.Errorf("pq: cannot convert %T to StringArray", src) 502 | } 503 | 504 | func (a *StringArray) scanBytes(src []byte) error { 505 | elems, err := scanLinearArray(src, []byte{','}, "StringArray") 506 | if err != nil { 507 | return err 508 | } 509 | if *a != nil && len(elems) == 0 { 510 | *a = (*a)[:0] 511 | } else { 512 | b := make(StringArray, len(elems)) 513 | for i, v := range elems { 514 | if b[i] = string(v); v == nil { 515 | return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) 516 | } 517 | } 518 | *a = b 519 | } 520 | return nil 521 | } 522 | 523 | // Value implements the driver.Valuer interface. 524 | func (a StringArray) Value() (driver.Value, error) { 525 | if a == nil { 526 | return nil, nil 527 | } 528 | 529 | if n := len(a); n > 0 { 530 | // There will be at least two curly brackets, 2*N bytes of quotes, 531 | // and N-1 bytes of delimiters. 532 | b := make([]byte, 1, 1+3*n) 533 | b[0] = '{' 534 | 535 | b = appendArrayQuotedBytes(b, []byte(a[0])) 536 | for i := 1; i < n; i++ { 537 | b = append(b, ',') 538 | b = appendArrayQuotedBytes(b, []byte(a[i])) 539 | } 540 | 541 | return string(append(b, '}')), nil 542 | } 543 | 544 | return "{}", nil 545 | } 546 | 547 | // appendArray appends rv to the buffer, returning the extended buffer and 548 | // the delimiter used between elements. 549 | // 550 | // It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. 551 | func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { 552 | var del string 553 | var err error 554 | 555 | b = append(b, '{') 556 | 557 | if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { 558 | return b, del, err 559 | } 560 | 561 | for i := 1; i < n; i++ { 562 | b = append(b, del...) 563 | if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { 564 | return b, del, err 565 | } 566 | } 567 | 568 | return append(b, '}'), del, nil 569 | } 570 | 571 | // appendArrayElement appends rv to the buffer, returning the extended buffer 572 | // and the delimiter to use before the next element. 573 | // 574 | // When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted 575 | // using driver.DefaultParameterConverter and the resulting []byte or string 576 | // is double-quoted. 
577 | // 578 | // See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO 579 | func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { 580 | if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { 581 | if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { 582 | if n := rv.Len(); n > 0 { 583 | return appendArray(b, rv, n) 584 | } 585 | 586 | return b, "", nil 587 | } 588 | } 589 | 590 | var del = "," 591 | var err error 592 | var iv interface{} = rv.Interface() 593 | 594 | if ad, ok := iv.(ArrayDelimiter); ok { 595 | del = ad.ArrayDelimiter() 596 | } 597 | 598 | if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { 599 | return b, del, err 600 | } 601 | 602 | switch v := iv.(type) { 603 | case nil: 604 | return append(b, "NULL"...), del, nil 605 | case []byte: 606 | return appendArrayQuotedBytes(b, v), del, nil 607 | case string: 608 | return appendArrayQuotedBytes(b, []byte(v)), del, nil 609 | } 610 | 611 | b, err = appendValue(b, iv) 612 | return b, del, err 613 | } 614 | 615 | func appendArrayQuotedBytes(b, v []byte) []byte { 616 | b = append(b, '"') 617 | for { 618 | i := bytes.IndexAny(v, `"\`) 619 | if i < 0 { 620 | b = append(b, v...) 621 | break 622 | } 623 | if i > 0 { 624 | b = append(b, v[:i]...) 625 | } 626 | b = append(b, '\\', v[i]) 627 | v = v[i+1:] 628 | } 629 | return append(b, '"') 630 | } 631 | 632 | func appendValue(b []byte, v driver.Value) ([]byte, error) { 633 | return append(b, encode(nil, v, 0)...), nil 634 | } 635 | 636 | // parseArray extracts the dimensions and elements of an array represented in 637 | // text format. Only representations emitted by the backend are supported. 638 | // Notably, whitespace around brackets and delimiters is significant, and NULL 639 | // is case-sensitive. 
640 | // 641 | // See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO 642 | func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { 643 | var depth, i int 644 | 645 | if len(src) < 1 || src[0] != '{' { 646 | return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) 647 | } 648 | 649 | Open: 650 | for i < len(src) { 651 | switch src[i] { 652 | case '{': 653 | depth++ 654 | i++ 655 | case '}': 656 | elems = make([][]byte, 0) 657 | goto Close 658 | default: 659 | break Open 660 | } 661 | } 662 | dims = make([]int, i) 663 | 664 | Element: 665 | for i < len(src) { 666 | switch src[i] { 667 | case '{': 668 | if depth == len(dims) { 669 | break Element 670 | } 671 | depth++ 672 | dims[depth-1] = 0 673 | i++ 674 | case '"': 675 | var elem = []byte{} 676 | var escape bool 677 | for i++; i < len(src); i++ { 678 | if escape { 679 | elem = append(elem, src[i]) 680 | escape = false 681 | } else { 682 | switch src[i] { 683 | default: 684 | elem = append(elem, src[i]) 685 | case '\\': 686 | escape = true 687 | case '"': 688 | elems = append(elems, elem) 689 | i++ 690 | break Element 691 | } 692 | } 693 | } 694 | default: 695 | for start := i; i < len(src); i++ { 696 | if bytes.HasPrefix(src[i:], del) || src[i] == '}' { 697 | elem := src[start:i] 698 | if len(elem) == 0 { 699 | return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) 700 | } 701 | if bytes.Equal(elem, []byte("NULL")) { 702 | elem = nil 703 | } 704 | elems = append(elems, elem) 705 | break Element 706 | } 707 | } 708 | } 709 | } 710 | 711 | for i < len(src) { 712 | if bytes.HasPrefix(src[i:], del) && depth > 0 { 713 | dims[depth-1]++ 714 | i += len(del) 715 | goto Element 716 | } else if src[i] == '}' && depth > 0 { 717 | dims[depth-1]++ 718 | depth-- 719 | i++ 720 | } else { 721 | return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) 722 | } 723 | } 724 | 725 | Close: 726 | for i < len(src) { 727 | if src[i] == '}' && depth > 0 { 728 | depth-- 729 | i++ 730 | } else { 731 | return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) 732 | } 733 | } 734 | if depth > 0 { 735 | err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) 736 | } 737 | if err == nil { 738 | for _, d := range dims { 739 | if (len(elems) % d) != 0 { 740 | err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") 741 | } 742 | } 743 | } 744 | return 745 | } 746 | 747 | func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { 748 | dims, elems, err := parseArray(src, del) 749 | if err != nil { 750 | return nil, err 751 | } 752 | if len(dims) > 1 { 753 | return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) 754 | } 755 | return elems, err 756 | } 757 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/buf.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | 7 | "github.com/lib/pq/oid" 8 | ) 9 | 10 | type readBuf []byte 11 | 12 | func (b *readBuf) int32() (n int) { 13 | n = int(int32(binary.BigEndian.Uint32(*b))) 14 | *b = (*b)[4:] 15 | return 16 | } 17 | 18 | func (b *readBuf) oid() (n oid.Oid) { 19 | n = oid.Oid(binary.BigEndian.Uint32(*b)) 20 | *b = (*b)[4:] 21 | return 22 | } 23 | 24 | 
// N.B: this is actually an unsigned 16-bit integer, unlike int32 25 | func (b *readBuf) int16() (n int) { 26 | n = int(binary.BigEndian.Uint16(*b)) 27 | *b = (*b)[2:] 28 | return 29 | } 30 | 31 | func (b *readBuf) string() string { 32 | i := bytes.IndexByte(*b, 0) 33 | if i < 0 { 34 | errorf("invalid message format; expected string terminator") 35 | } 36 | s := (*b)[:i] 37 | *b = (*b)[i+1:] 38 | return string(s) 39 | } 40 | 41 | func (b *readBuf) next(n int) (v []byte) { 42 | v = (*b)[:n] 43 | *b = (*b)[n:] 44 | return 45 | } 46 | 47 | func (b *readBuf) byte() byte { 48 | return b.next(1)[0] 49 | } 50 | 51 | type writeBuf struct { 52 | buf []byte 53 | pos int 54 | } 55 | 56 | func (b *writeBuf) int32(n int) { 57 | x := make([]byte, 4) 58 | binary.BigEndian.PutUint32(x, uint32(n)) 59 | b.buf = append(b.buf, x...) 60 | } 61 | 62 | func (b *writeBuf) int16(n int) { 63 | x := make([]byte, 2) 64 | binary.BigEndian.PutUint16(x, uint16(n)) 65 | b.buf = append(b.buf, x...) 66 | } 67 | 68 | func (b *writeBuf) string(s string) { 69 | b.buf = append(b.buf, (s + "\000")...) 70 | } 71 | 72 | func (b *writeBuf) byte(c byte) { 73 | b.buf = append(b.buf, c) 74 | } 75 | 76 | func (b *writeBuf) bytes(v []byte) { 77 | b.buf = append(b.buf, v...) 78 | } 79 | 80 | func (b *writeBuf) wrap() []byte { 81 | p := b.buf[b.pos:] 82 | binary.BigEndian.PutUint32(p, uint32(len(p))) 83 | return b.buf 84 | } 85 | 86 | func (b *writeBuf) next(c byte) { 87 | p := b.buf[b.pos:] 88 | binary.BigEndian.PutUint32(p, uint32(len(p))) 89 | b.pos = len(b.buf) + 1 90 | b.buf = append(b.buf, c, 0, 0, 0, 0) 91 | } 92 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/conn_go18.go: -------------------------------------------------------------------------------- 1 | // +build go1.8 2 | 3 | package pq 4 | 5 | import ( 6 | "context" 7 | "database/sql" 8 | "database/sql/driver" 9 | "fmt" 10 | "io" 11 | "io/ioutil" 12 | ) 13 | 14 | // Implement the "QueryerContext" interface 15 | func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { 16 | list := make([]driver.Value, len(args)) 17 | for i, nv := range args { 18 | list[i] = nv.Value 19 | } 20 | finish := cn.watchCancel(ctx) 21 | r, err := cn.query(query, list) 22 | if err != nil { 23 | if finish != nil { 24 | finish() 25 | } 26 | return nil, err 27 | } 28 | r.finish = finish 29 | return r, nil 30 | } 31 | 32 | // Implement the "ExecerContext" interface 33 | func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { 34 | list := make([]driver.Value, len(args)) 35 | for i, nv := range args { 36 | list[i] = nv.Value 37 | } 38 | 39 | if finish := cn.watchCancel(ctx); finish != nil { 40 | defer finish() 41 | } 42 | 43 | return cn.Exec(query, list) 44 | } 45 | 46 | // Implement the "ConnBeginTx" interface 47 | func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { 48 | var mode string 49 | 50 | switch sql.IsolationLevel(opts.Isolation) { 51 | case sql.LevelDefault: 52 | // Don't touch mode: use the server's default 53 | case sql.LevelReadUncommitted: 54 | mode = " ISOLATION LEVEL READ UNCOMMITTED" 55 | case sql.LevelReadCommitted: 56 | mode = " ISOLATION LEVEL READ COMMITTED" 57 | case sql.LevelRepeatableRead: 58 | mode = " ISOLATION LEVEL REPEATABLE READ" 59 | case sql.LevelSerializable: 60 | mode = " ISOLATION LEVEL SERIALIZABLE" 61 | default: 62 | return nil, fmt.Errorf("pq: isolation level 
not supported: %d", opts.Isolation) 63 | } 64 | 65 | if opts.ReadOnly { 66 | mode += " READ ONLY" 67 | } else { 68 | mode += " READ WRITE" 69 | } 70 | 71 | tx, err := cn.begin(mode) 72 | if err != nil { 73 | return nil, err 74 | } 75 | cn.txnFinish = cn.watchCancel(ctx) 76 | return tx, nil 77 | } 78 | 79 | func (cn *conn) watchCancel(ctx context.Context) func() { 80 | if done := ctx.Done(); done != nil { 81 | finished := make(chan struct{}) 82 | go func() { 83 | select { 84 | case <-done: 85 | _ = cn.cancel() 86 | finished <- struct{}{} 87 | case <-finished: 88 | } 89 | }() 90 | return func() { 91 | select { 92 | case <-finished: 93 | case finished <- struct{}{}: 94 | } 95 | } 96 | } 97 | return nil 98 | } 99 | 100 | func (cn *conn) cancel() error { 101 | c, err := dial(cn.dialer, cn.opts) 102 | if err != nil { 103 | return err 104 | } 105 | defer c.Close() 106 | 107 | { 108 | can := conn{ 109 | c: c, 110 | } 111 | err = can.ssl(cn.opts) 112 | if err != nil { 113 | return err 114 | } 115 | 116 | w := can.writeBuf(0) 117 | w.int32(80877102) // cancel request code 118 | w.int32(cn.processID) 119 | w.int32(cn.secretKey) 120 | 121 | if err := can.sendStartupPacket(w); err != nil { 122 | return err 123 | } 124 | } 125 | 126 | // Read until EOF to ensure that the server received the cancel. 127 | { 128 | _, err := io.Copy(ioutil.Discard, c) 129 | return err 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/connector.go: -------------------------------------------------------------------------------- 1 | // +build go1.10 2 | 3 | package pq 4 | 5 | import ( 6 | "context" 7 | "database/sql/driver" 8 | ) 9 | 10 | // Connector represents a fixed configuration for the pq driver with a given 11 | // name. Connector satisfies the database/sql/driver Connector interface and 12 | // can be used to create any number of DB Conn's via the database/sql OpenDB 13 | // function. 14 | // 15 | // See https://golang.org/pkg/database/sql/driver/#Connector. 16 | // See https://golang.org/pkg/database/sql/#OpenDB. 17 | type connector struct { 18 | name string 19 | } 20 | 21 | // Connect returns a connection to the database using the fixed configuration 22 | // of this Connector. Context is not used. 23 | func (c *connector) Connect(_ context.Context) (driver.Conn, error) { 24 | return (&Driver{}).Open(c.name) 25 | } 26 | 27 | // Driver returns the underlying driver of this Connector. 28 | func (c *connector) Driver() driver.Driver { 29 | return &Driver{} 30 | } 31 | 32 | var _ driver.Connector = &connector{} 33 | 34 | // NewConnector returns a connector for the pq driver in a fixed configuration 35 | // with the given name. The returned connector can be used to create any number 36 | // of equivalent Conn's. The returned connector is intended to be used with 37 | // database/sql.OpenDB. 38 | // 39 | // See https://golang.org/pkg/database/sql/driver/#Connector. 40 | // See https://golang.org/pkg/database/sql/#OpenDB.
41 | func NewConnector(name string) (driver.Connector, error) { 42 | return &connector{name: name}, nil 43 | } 44 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/copy.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "database/sql/driver" 5 | "encoding/binary" 6 | "errors" 7 | "fmt" 8 | "sync" 9 | ) 10 | 11 | var ( 12 | errCopyInClosed = errors.New("pq: copyin statement has already been closed") 13 | errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") 14 | errCopyToNotSupported = errors.New("pq: COPY TO is not supported") 15 | errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") 16 | errCopyInProgress = errors.New("pq: COPY in progress") 17 | ) 18 | 19 | // CopyIn creates a COPY FROM statement which can be prepared with 20 | // Tx.Prepare(). The target table should be visible in search_path. 21 | func CopyIn(table string, columns ...string) string { 22 | stmt := "COPY " + QuoteIdentifier(table) + " (" 23 | for i, col := range columns { 24 | if i != 0 { 25 | stmt += ", " 26 | } 27 | stmt += QuoteIdentifier(col) 28 | } 29 | stmt += ") FROM STDIN" 30 | return stmt 31 | } 32 | 33 | // CopyInSchema creates a COPY FROM statement which can be prepared with 34 | // Tx.Prepare(). 35 | func CopyInSchema(schema, table string, columns ...string) string { 36 | stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " (" 37 | for i, col := range columns { 38 | if i != 0 { 39 | stmt += ", " 40 | } 41 | stmt += QuoteIdentifier(col) 42 | } 43 | stmt += ") FROM STDIN" 44 | return stmt 45 | } 46 | 47 | type copyin struct { 48 | cn *conn 49 | buffer []byte 50 | rowData chan []byte 51 | done chan bool 52 | 53 | closed bool 54 | 55 | sync.Mutex // guards err 56 | err error 57 | } 58 | 59 | const ciBufferSize = 64 * 1024 60 | 61 | // flush buffer before the buffer is filled up and needs reallocation 62 | const ciBufferFlushSize = 63 * 1024 63 | 64 | func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { 65 | if !cn.isInTransaction() { 66 | return nil, errCopyNotSupportedOutsideTxn 67 | } 68 | 69 | ci := &copyin{ 70 | cn: cn, 71 | buffer: make([]byte, 0, ciBufferSize), 72 | rowData: make(chan []byte), 73 | done: make(chan bool, 1), 74 | } 75 | // add CopyData identifier + 4 bytes for message length 76 | ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) 77 | 78 | b := cn.writeBuf('Q') 79 | b.string(q) 80 | cn.send(b) 81 | 82 | awaitCopyInResponse: 83 | for { 84 | t, r := cn.recv1() 85 | switch t { 86 | case 'G': 87 | if r.byte() != 0 { 88 | err = errBinaryCopyNotSupported 89 | break awaitCopyInResponse 90 | } 91 | go ci.resploop() 92 | return ci, nil 93 | case 'H': 94 | err = errCopyToNotSupported 95 | break awaitCopyInResponse 96 | case 'E': 97 | err = parseError(r) 98 | case 'Z': 99 | if err == nil { 100 | ci.setBad() 101 | errorf("unexpected ReadyForQuery in response to COPY") 102 | } 103 | cn.processReadyForQuery(r) 104 | return nil, err 105 | default: 106 | ci.setBad() 107 | errorf("unknown response for copy query: %q", t) 108 | } 109 | } 110 | 111 | // something went wrong, abort COPY before we return 112 | b = cn.writeBuf('f') 113 | b.string(err.Error()) 114 | cn.send(b) 115 | 116 | for { 117 | t, r := cn.recv1() 118 | switch t { 119 | case 'c', 'C', 'E': 120 | case 'Z': 121 | // correctly aborted, we're done 122 | cn.processReadyForQuery(r) 123 | return nil, err 124 | default: 125 | 
ci.setBad() 126 | errorf("unknown response for CopyFail: %q", t) 127 | } 128 | } 129 | } 130 | 131 | func (ci *copyin) flush(buf []byte) { 132 | // set message length (without message identifier) 133 | binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) 134 | 135 | _, err := ci.cn.c.Write(buf) 136 | if err != nil { 137 | panic(err) 138 | } 139 | } 140 | 141 | func (ci *copyin) resploop() { 142 | for { 143 | var r readBuf 144 | t, err := ci.cn.recvMessage(&r) 145 | if err != nil { 146 | ci.setBad() 147 | ci.setError(err) 148 | ci.done <- true 149 | return 150 | } 151 | switch t { 152 | case 'C': 153 | // complete 154 | case 'N': 155 | // NoticeResponse 156 | case 'Z': 157 | ci.cn.processReadyForQuery(&r) 158 | ci.done <- true 159 | return 160 | case 'E': 161 | err := parseError(&r) 162 | ci.setError(err) 163 | default: 164 | ci.setBad() 165 | ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) 166 | ci.done <- true 167 | return 168 | } 169 | } 170 | } 171 | 172 | func (ci *copyin) setBad() { 173 | ci.Lock() 174 | ci.cn.bad = true 175 | ci.Unlock() 176 | } 177 | 178 | func (ci *copyin) isBad() bool { 179 | ci.Lock() 180 | b := ci.cn.bad 181 | ci.Unlock() 182 | return b 183 | } 184 | 185 | func (ci *copyin) isErrorSet() bool { 186 | ci.Lock() 187 | isSet := (ci.err != nil) 188 | ci.Unlock() 189 | return isSet 190 | } 191 | 192 | // setError() sets ci.err if one has not been set already. Caller must not be 193 | // holding ci.Mutex. 194 | func (ci *copyin) setError(err error) { 195 | ci.Lock() 196 | if ci.err == nil { 197 | ci.err = err 198 | } 199 | ci.Unlock() 200 | } 201 | 202 | func (ci *copyin) NumInput() int { 203 | return -1 204 | } 205 | 206 | func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { 207 | return nil, ErrNotSupported 208 | } 209 | 210 | // Exec inserts values into the COPY stream. The insert is asynchronous 211 | // and Exec can return errors from previous Exec calls to the same 212 | // COPY stmt. 213 | // 214 | // You need to call Exec(nil) to sync the COPY stream and to get any 215 | // errors from pending data, since Stmt.Close() doesn't return errors 216 | // to the user. 217 | func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { 218 | if ci.closed { 219 | return nil, errCopyInClosed 220 | } 221 | 222 | if ci.isBad() { 223 | return nil, driver.ErrBadConn 224 | } 225 | defer ci.cn.errRecover(&err) 226 | 227 | if ci.isErrorSet() { 228 | return nil, ci.err 229 | } 230 | 231 | if len(v) == 0 { 232 | return nil, ci.Close() 233 | } 234 | 235 | numValues := len(v) 236 | for i, value := range v { 237 | ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) 238 | if i < numValues-1 { 239 | ci.buffer = append(ci.buffer, '\t') 240 | } 241 | } 242 | 243 | ci.buffer = append(ci.buffer, '\n') 244 | 245 | if len(ci.buffer) > ciBufferFlushSize { 246 | ci.flush(ci.buffer) 247 | // reset buffer, keep bytes for message identifier and length 248 | ci.buffer = ci.buffer[:5] 249 | } 250 | 251 | return driver.RowsAffected(0), nil 252 | } 253 | 254 | func (ci *copyin) Close() (err error) { 255 | if ci.closed { // Don't do anything, we're already closed 256 | return nil 257 | } 258 | ci.closed = true 259 | 260 | if ci.isBad() { 261 | return driver.ErrBadConn 262 | } 263 | defer ci.cn.errRecover(&err) 264 | 265 | if len(ci.buffer) > 0 { 266 | ci.flush(ci.buffer) 267 | } 268 | // Avoid touching the scratch buffer as resploop could be using it. 
269 | err = ci.cn.sendSimpleMessage('c') 270 | if err != nil { 271 | return err 272 | } 273 | 274 | <-ci.done 275 | ci.cn.inCopy = false 276 | 277 | if ci.isErrorSet() { 278 | err = ci.err 279 | return err 280 | } 281 | return nil 282 | } 283 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package pq is a pure Go Postgres driver for the database/sql package. 3 | 4 | In most cases clients will use the database/sql package instead of 5 | using this package directly. For example: 6 | 7 | import ( 8 | "database/sql" 9 | 10 | _ "github.com/lib/pq" 11 | ) 12 | 13 | func main() { 14 | connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" 15 | db, err := sql.Open("postgres", connStr) 16 | if err != nil { 17 | log.Fatal(err) 18 | } 19 | 20 | age := 21 21 | rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) 22 | … 23 | } 24 | 25 | You can also connect to a database using a URL. For example: 26 | 27 | connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" 28 | db, err := sql.Open("postgres", connStr) 29 | 30 | 31 | Connection String Parameters 32 | 33 | 34 | Similarly to libpq, when establishing a connection using pq you are expected to 35 | supply a connection string containing zero or more parameters. 36 | A subset of the connection parameters supported by libpq are also supported by pq. 37 | Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) 38 | directly in the connection string. This is different from libpq, which does not allow 39 | run-time parameters in the connection string, instead requiring you to supply 40 | them in the options parameter. 41 | 42 | For compatibility with libpq, the following special connection parameters are 43 | supported: 44 | 45 | * dbname - The name of the database to connect to 46 | * user - The user to sign in as 47 | * password - The user's password 48 | * host - The host to connect to. Values that start with / are for unix 49 | domain sockets. (default is localhost) 50 | * port - The port to bind to. (default is 5432) 51 | * sslmode - Whether or not to use SSL (default is require, this is not 52 | the default for libpq) 53 | * fallback_application_name - An application_name to fall back to if one isn't provided. 54 | * connect_timeout - Maximum wait for connection, in seconds. Zero or 55 | not specified means wait indefinitely. 56 | * sslcert - Cert file location. The file must contain PEM encoded data. 57 | * sslkey - Key file location. The file must contain PEM encoded data. 58 | * sslrootcert - The location of the root certificate file. The file 59 | must contain PEM encoded data. 60 | 61 | Valid values for sslmode are: 62 | 63 | * disable - No SSL 64 | * require - Always SSL (skip verification) 65 | * verify-ca - Always SSL (verify that the certificate presented by the 66 | server was signed by a trusted CA) 67 | * verify-full - Always SSL (verify that the certification presented by 68 | the server was signed by a trusted CA and the server host name 69 | matches the one in the certificate) 70 | 71 | See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING 72 | for more information about connection string parameters. 
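As an illustrative sketch, a key/value connection string combining several of the parameters above might look like the following (the host, credentials, and database name are made up for the example):

    connStr := "host=db.internal port=5432 user=app password=secret dbname=appdb sslmode=verify-full connect_timeout=10"
    db, err := sql.Open("postgres", connStr)
    if err != nil {
        log.Fatal(err)
    }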
73 | 74 | Use single quotes for values that contain whitespace: 75 | 76 | "user=pqgotest password='with spaces'" 77 | 78 | A backslash will escape the next character in values: 79 | 80 | "user=space\ man password='it\'s valid'" 81 | 82 | Note that the connection parameter client_encoding (which sets the 83 | text encoding for the connection) may be set but must be "UTF8", 84 | matching with the same rules as Postgres. It is an error to provide 85 | any other value. 86 | 87 | In addition to the parameters listed above, any run-time parameter that can be 88 | set at backend start time can be set in the connection string. For more 89 | information, see 90 | http://www.postgresql.org/docs/current/static/runtime-config.html. 91 | 92 | Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html 93 | supported by libpq are also supported by pq. If any of the environment 94 | variables not supported by pq are set, pq will panic during connection 95 | establishment. Environment variables have a lower precedence than explicitly 96 | provided connection parameters. 97 | 98 | The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html 99 | is supported, but on Windows PGPASSFILE must be specified explicitly. 100 | 101 | 102 | Queries 103 | 104 | 105 | database/sql does not dictate any specific format for parameter 106 | markers in query strings, and pq uses the Postgres-native ordinal markers, 107 | as shown above. The same marker can be reused for the same parameter: 108 | 109 | rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 110 | OR age BETWEEN $2 AND $2 + 3`, "orange", 64) 111 | 112 | pq does not support the LastInsertId() method of the Result type in database/sql. 113 | To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres 114 | RETURNING clause with a standard Query or QueryRow call: 115 | 116 | var userid int 117 | err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) 118 | VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) 119 | 120 | For more details on RETURNING, see the Postgres documentation: 121 | 122 | http://www.postgresql.org/docs/current/static/sql-insert.html 123 | http://www.postgresql.org/docs/current/static/sql-update.html 124 | http://www.postgresql.org/docs/current/static/sql-delete.html 125 | 126 | For additional instructions on querying see the documentation for the database/sql package. 127 | 128 | 129 | Data Types 130 | 131 | 132 | Parameters pass through driver.DefaultParameterConverter before they are handled 133 | by this package. When the binary_parameters connection option is enabled, 134 | []byte values are sent directly to the backend as data in binary format. 135 | 136 | This package returns the following types for values from the PostgreSQL backend: 137 | 138 | - integer types smallint, integer, and bigint are returned as int64 139 | - floating-point types real and double precision are returned as float64 140 | - character types char, varchar, and text are returned as string 141 | - temporal types date, time, timetz, timestamp, and timestamptz are 142 | returned as time.Time 143 | - the boolean type is returned as bool 144 | - the bytea type is returned as []byte 145 | 146 | All other types are returned directly from the backend as []byte values in text format. 
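For example (the table and column names here are hypothetical), these mappings
mean a row can be scanned directly into matching Go variables:

	var id int64
	var name string
	var created time.Time
	err := db.QueryRow("SELECT id, name, created_at FROM users WHERE id = $1", 1).
		Scan(&id, &name, &created)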
147 | 148 | 149 | Errors 150 | 151 | 152 | pq may return errors of type *pq.Error which can be interrogated for error details: 153 | 154 | if err, ok := err.(*pq.Error); ok { 155 | fmt.Println("pq error:", err.Code.Name()) 156 | } 157 | 158 | See the pq.Error type for details. 159 | 160 | 161 | Bulk imports 162 | 163 | You can perform bulk imports by preparing a statement returned by pq.CopyIn (or 164 | pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement 165 | handle can then be repeatedly "executed" to copy data into the target table. 166 | After all data has been processed you should call Exec() once with no arguments 167 | to flush all buffered data. Any call to Exec() might return an error which 168 | should be handled appropriately, but because of the internal buffering an error 169 | returned by Exec() might not be related to the data passed in the call that 170 | failed. 171 | 172 | CopyIn uses COPY FROM internally. It is not possible to COPY outside of an 173 | explicit transaction in pq. 174 | 175 | Usage example: 176 | 177 | txn, err := db.Begin() 178 | if err != nil { 179 | log.Fatal(err) 180 | } 181 | 182 | stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) 183 | if err != nil { 184 | log.Fatal(err) 185 | } 186 | 187 | for _, user := range users { 188 | _, err = stmt.Exec(user.Name, int64(user.Age)) 189 | if err != nil { 190 | log.Fatal(err) 191 | } 192 | } 193 | 194 | _, err = stmt.Exec() 195 | if err != nil { 196 | log.Fatal(err) 197 | } 198 | 199 | err = stmt.Close() 200 | if err != nil { 201 | log.Fatal(err) 202 | } 203 | 204 | err = txn.Commit() 205 | if err != nil { 206 | log.Fatal(err) 207 | } 208 | 209 | 210 | Notifications 211 | 212 | 213 | PostgreSQL supports a simple publish/subscribe model over database 214 | connections. See http://www.postgresql.org/docs/current/static/sql-notify.html 215 | for more information about the general mechanism. 216 | 217 | To start listening for notifications, you first have to open a new connection 218 | to the database by calling NewListener. This connection can not be used for 219 | anything other than LISTEN / NOTIFY. Calling Listen will open a "notification 220 | channel"; once a notification channel is open, a notification generated on that 221 | channel will effect a send on the Listener.Notify channel. A notification 222 | channel will remain open until Unlisten is called, though connection loss might 223 | result in some notifications being lost. To solve this problem, Listener sends 224 | a nil pointer over the Notify channel any time the connection is re-established 225 | following a connection loss. The application can get information about the 226 | state of the underlying connection by setting an event callback in the call to 227 | NewListener. 228 | 229 | A single Listener can safely be used from concurrent goroutines, which means 230 | that there is often no need to create more than one Listener in your 231 | application. However, a Listener is always connected to a single database, so 232 | you will need to create a new Listener instance for every database you want to 233 | receive notifications in. 234 | 235 | The channel name in both Listen and Unlisten is case sensitive, and can contain 236 | any characters legal in an identifier (see 237 | http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS 238 | for more information). Note that the channel name will be truncated to 63 239 | bytes by the PostgreSQL server. 
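A minimal sketch of Listener usage (the connection string, channel name, and
reconnect intervals below are arbitrary examples):

	listener := pq.NewListener(connStr, 10*time.Second, time.Minute, nil)
	if err := listener.Listen("events"); err != nil {
		log.Fatal(err)
	}
	for n := range listener.Notify {
		if n == nil {
			// a nil Notification is sent after the connection is re-established
			continue
		}
		fmt.Println("notification on", n.Channel, "payload:", n.Extra)
	}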
240 | 241 | You can find a complete, working example of Listener usage at 242 | http://godoc.org/github.com/lib/pq/example/listen. 243 | 244 | */ 245 | package pq 246 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/encode.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "bytes" 5 | "database/sql/driver" 6 | "encoding/binary" 7 | "encoding/hex" 8 | "errors" 9 | "fmt" 10 | "math" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | "time" 15 | 16 | "github.com/lib/pq/oid" 17 | ) 18 | 19 | func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { 20 | switch v := x.(type) { 21 | case []byte: 22 | return v 23 | default: 24 | return encode(parameterStatus, x, oid.T_unknown) 25 | } 26 | } 27 | 28 | func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { 29 | switch v := x.(type) { 30 | case int64: 31 | return strconv.AppendInt(nil, v, 10) 32 | case float64: 33 | return strconv.AppendFloat(nil, v, 'f', -1, 64) 34 | case []byte: 35 | if pgtypOid == oid.T_bytea { 36 | return encodeBytea(parameterStatus.serverVersion, v) 37 | } 38 | 39 | return v 40 | case string: 41 | if pgtypOid == oid.T_bytea { 42 | return encodeBytea(parameterStatus.serverVersion, []byte(v)) 43 | } 44 | 45 | return []byte(v) 46 | case bool: 47 | return strconv.AppendBool(nil, v) 48 | case time.Time: 49 | return formatTs(v) 50 | 51 | default: 52 | errorf("encode: unknown type for %T", v) 53 | } 54 | 55 | panic("not reached") 56 | } 57 | 58 | func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { 59 | switch f { 60 | case formatBinary: 61 | return binaryDecode(parameterStatus, s, typ) 62 | case formatText: 63 | return textDecode(parameterStatus, s, typ) 64 | default: 65 | panic("not reached") 66 | } 67 | } 68 | 69 | func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { 70 | switch typ { 71 | case oid.T_bytea: 72 | return s 73 | case oid.T_int8: 74 | return int64(binary.BigEndian.Uint64(s)) 75 | case oid.T_int4: 76 | return int64(int32(binary.BigEndian.Uint32(s))) 77 | case oid.T_int2: 78 | return int64(int16(binary.BigEndian.Uint16(s))) 79 | case oid.T_uuid: 80 | b, err := decodeUUIDBinary(s) 81 | if err != nil { 82 | panic(err) 83 | } 84 | return b 85 | 86 | default: 87 | errorf("don't know how to decode binary parameter of type %d", uint32(typ)) 88 | } 89 | 90 | panic("not reached") 91 | } 92 | 93 | func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { 94 | switch typ { 95 | case oid.T_char, oid.T_varchar, oid.T_text: 96 | return string(s) 97 | case oid.T_bytea: 98 | b, err := parseBytea(s) 99 | if err != nil { 100 | errorf("%s", err) 101 | } 102 | return b 103 | case oid.T_timestamptz: 104 | return parseTs(parameterStatus.currentLocation, string(s)) 105 | case oid.T_timestamp, oid.T_date: 106 | return parseTs(nil, string(s)) 107 | case oid.T_time: 108 | return mustParse("15:04:05", typ, s) 109 | case oid.T_timetz: 110 | return mustParse("15:04:05-07", typ, s) 111 | case oid.T_bool: 112 | return s[0] == 't' 113 | case oid.T_int8, oid.T_int4, oid.T_int2: 114 | i, err := strconv.ParseInt(string(s), 10, 64) 115 | if err != nil { 116 | errorf("%s", err) 117 | } 118 | return i 119 | case oid.T_float4, oid.T_float8: 120 | bits := 64 121 | if typ == oid.T_float4 { 122 | bits = 32 123 | } 124 | f, err := strconv.ParseFloat(string(s), bits) 125 | if err != nil 
{ 126 | errorf("%s", err) 127 | } 128 | return f 129 | } 130 | 131 | return s 132 | } 133 | 134 | // appendEncodedText encodes item in text format as required by COPY 135 | // and appends to buf 136 | func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { 137 | switch v := x.(type) { 138 | case int64: 139 | return strconv.AppendInt(buf, v, 10) 140 | case float64: 141 | return strconv.AppendFloat(buf, v, 'f', -1, 64) 142 | case []byte: 143 | encodedBytea := encodeBytea(parameterStatus.serverVersion, v) 144 | return appendEscapedText(buf, string(encodedBytea)) 145 | case string: 146 | return appendEscapedText(buf, v) 147 | case bool: 148 | return strconv.AppendBool(buf, v) 149 | case time.Time: 150 | return append(buf, formatTs(v)...) 151 | case nil: 152 | return append(buf, "\\N"...) 153 | default: 154 | errorf("encode: unknown type for %T", v) 155 | } 156 | 157 | panic("not reached") 158 | } 159 | 160 | func appendEscapedText(buf []byte, text string) []byte { 161 | escapeNeeded := false 162 | startPos := 0 163 | var c byte 164 | 165 | // check if we need to escape 166 | for i := 0; i < len(text); i++ { 167 | c = text[i] 168 | if c == '\\' || c == '\n' || c == '\r' || c == '\t' { 169 | escapeNeeded = true 170 | startPos = i 171 | break 172 | } 173 | } 174 | if !escapeNeeded { 175 | return append(buf, text...) 176 | } 177 | 178 | // copy till first char to escape, iterate the rest 179 | result := append(buf, text[:startPos]...) 180 | for i := startPos; i < len(text); i++ { 181 | c = text[i] 182 | switch c { 183 | case '\\': 184 | result = append(result, '\\', '\\') 185 | case '\n': 186 | result = append(result, '\\', 'n') 187 | case '\r': 188 | result = append(result, '\\', 'r') 189 | case '\t': 190 | result = append(result, '\\', 't') 191 | default: 192 | result = append(result, c) 193 | } 194 | } 195 | return result 196 | } 197 | 198 | func mustParse(f string, typ oid.Oid, s []byte) time.Time { 199 | str := string(s) 200 | 201 | // check for a 30-minute-offset timezone 202 | if (typ == oid.T_timestamptz || typ == oid.T_timetz) && 203 | str[len(str)-3] == ':' { 204 | f += ":00" 205 | } 206 | t, err := time.Parse(f, str) 207 | if err != nil { 208 | errorf("decode: %s", err) 209 | } 210 | return t 211 | } 212 | 213 | var errInvalidTimestamp = errors.New("invalid timestamp") 214 | 215 | type timestampParser struct { 216 | err error 217 | } 218 | 219 | func (p *timestampParser) expect(str string, char byte, pos int) { 220 | if p.err != nil { 221 | return 222 | } 223 | if pos+1 > len(str) { 224 | p.err = errInvalidTimestamp 225 | return 226 | } 227 | if c := str[pos]; c != char && p.err == nil { 228 | p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) 229 | } 230 | } 231 | 232 | func (p *timestampParser) mustAtoi(str string, begin int, end int) int { 233 | if p.err != nil { 234 | return 0 235 | } 236 | if begin < 0 || end < 0 || begin > end || end > len(str) { 237 | p.err = errInvalidTimestamp 238 | return 0 239 | } 240 | result, err := strconv.Atoi(str[begin:end]) 241 | if err != nil { 242 | if p.err == nil { 243 | p.err = fmt.Errorf("expected number; got '%v'", str) 244 | } 245 | return 0 246 | } 247 | return result 248 | } 249 | 250 | // The location cache caches the time zones typically used by the client. 251 | type locationCache struct { 252 | cache map[int]*time.Location 253 | lock sync.Mutex 254 | } 255 | 256 | // All connections share the same list of timezones. 
Benchmarking shows that 257 | // about 5% speed could be gained by putting the cache in the connection and 258 | // losing the mutex, at the cost of a small amount of memory and a somewhat 259 | // significant increase in code complexity. 260 | var globalLocationCache = newLocationCache() 261 | 262 | func newLocationCache() *locationCache { 263 | return &locationCache{cache: make(map[int]*time.Location)} 264 | } 265 | 266 | // Returns the cached timezone for the specified offset, creating and caching 267 | // it if necessary. 268 | func (c *locationCache) getLocation(offset int) *time.Location { 269 | c.lock.Lock() 270 | defer c.lock.Unlock() 271 | 272 | location, ok := c.cache[offset] 273 | if !ok { 274 | location = time.FixedZone("", offset) 275 | c.cache[offset] = location 276 | } 277 | 278 | return location 279 | } 280 | 281 | var infinityTsEnabled = false 282 | var infinityTsNegative time.Time 283 | var infinityTsPositive time.Time 284 | 285 | const ( 286 | infinityTsEnabledAlready = "pq: infinity timestamp enabled already" 287 | infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" 288 | ) 289 | 290 | // EnableInfinityTs controls the handling of Postgres' "-infinity" and 291 | // "infinity" "timestamp"s. 292 | // 293 | // If EnableInfinityTs is not called, "-infinity" and "infinity" will return 294 | // []byte("-infinity") and []byte("infinity") respectively, and potentially 295 | // cause error "sql: Scan error on column index 0: unsupported driver -> Scan 296 | // pair: []uint8 -> *time.Time", when scanning into a time.Time value. 297 | // 298 | // Once EnableInfinityTs has been called, all connections created using this 299 | // driver will decode Postgres' "-infinity" and "infinity" for "timestamp", 300 | // "timestamp with time zone" and "date" types to the predefined minimum and 301 | // maximum times, respectively. When encoding time.Time values, any time which 302 | // equals or precedes the predefined minimum time will be encoded to 303 | // "-infinity". Any values at or past the maximum time will similarly be 304 | // encoded to "infinity". 305 | // 306 | // If EnableInfinityTs is called with negative >= positive, it will panic. 307 | // Calling EnableInfinityTs after a connection has been established results in 308 | // undefined behavior. If EnableInfinityTs is called more than once, it will 309 | // panic. 310 | func EnableInfinityTs(negative time.Time, positive time.Time) { 311 | if infinityTsEnabled { 312 | panic(infinityTsEnabledAlready) 313 | } 314 | if !negative.Before(positive) { 315 | panic(infinityTsNegativeMustBeSmaller) 316 | } 317 | infinityTsEnabled = true 318 | infinityTsNegative = negative 319 | infinityTsPositive = positive 320 | } 321 | 322 | /* 323 | * Testing might want to toggle infinityTsEnabled 324 | */ 325 | func disableInfinityTs() { 326 | infinityTsEnabled = false 327 | } 328 | 329 | // This is a time function specific to the Postgres default DateStyle 330 | // setting ("ISO, MDY"), the only one we currently support. This 331 | // accounts for the discrepancies between the parsing available with 332 | // time.Parse and the Postgres date formatting quirks. 
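// For reference, with this DateStyle the server sends values such as
// "2006-01-02 15:04:05.999999-07" for timestamptz, "2006-01-02 15:04:05" for
// timestamp, "2006-01-02" for date, and a trailing " BC" suffix for BC dates.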
333 | func parseTs(currentLocation *time.Location, str string) interface{} { 334 | switch str { 335 | case "-infinity": 336 | if infinityTsEnabled { 337 | return infinityTsNegative 338 | } 339 | return []byte(str) 340 | case "infinity": 341 | if infinityTsEnabled { 342 | return infinityTsPositive 343 | } 344 | return []byte(str) 345 | } 346 | t, err := ParseTimestamp(currentLocation, str) 347 | if err != nil { 348 | panic(err) 349 | } 350 | return t 351 | } 352 | 353 | // ParseTimestamp parses Postgres' text format. It returns a time.Time in 354 | // currentLocation iff that time's offset agrees with the offset sent from the 355 | // Postgres server. Otherwise, ParseTimestamp returns a time.Time with the 356 | // fixed offset offset provided by the Postgres server. 357 | func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { 358 | p := timestampParser{} 359 | 360 | monSep := strings.IndexRune(str, '-') 361 | // this is Gregorian year, not ISO Year 362 | // In Gregorian system, the year 1 BC is followed by AD 1 363 | year := p.mustAtoi(str, 0, monSep) 364 | daySep := monSep + 3 365 | month := p.mustAtoi(str, monSep+1, daySep) 366 | p.expect(str, '-', daySep) 367 | timeSep := daySep + 3 368 | day := p.mustAtoi(str, daySep+1, timeSep) 369 | 370 | minLen := monSep + len("01-01") + 1 371 | 372 | isBC := strings.HasSuffix(str, " BC") 373 | if isBC { 374 | minLen += 3 375 | } 376 | 377 | var hour, minute, second int 378 | if len(str) > minLen { 379 | p.expect(str, ' ', timeSep) 380 | minSep := timeSep + 3 381 | p.expect(str, ':', minSep) 382 | hour = p.mustAtoi(str, timeSep+1, minSep) 383 | secSep := minSep + 3 384 | p.expect(str, ':', secSep) 385 | minute = p.mustAtoi(str, minSep+1, secSep) 386 | secEnd := secSep + 3 387 | second = p.mustAtoi(str, secSep+1, secEnd) 388 | } 389 | remainderIdx := monSep + len("01-01 00:00:00") + 1 390 | // Three optional (but ordered) sections follow: the 391 | // fractional seconds, the time zone offset, and the BC 392 | // designation. We set them up here and adjust the other 393 | // offsets if the preceding sections exist. 394 | 395 | nanoSec := 0 396 | tzOff := 0 397 | 398 | if remainderIdx < len(str) && str[remainderIdx] == '.' 
{ 399 | fracStart := remainderIdx + 1 400 | fracOff := strings.IndexAny(str[fracStart:], "-+ ") 401 | if fracOff < 0 { 402 | fracOff = len(str) - fracStart 403 | } 404 | fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) 405 | nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) 406 | 407 | remainderIdx += fracOff + 1 408 | } 409 | if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { 410 | // time zone separator is always '-' or '+' (UTC is +00) 411 | var tzSign int 412 | switch c := str[tzStart]; c { 413 | case '-': 414 | tzSign = -1 415 | case '+': 416 | tzSign = +1 417 | default: 418 | return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) 419 | } 420 | tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) 421 | remainderIdx += 3 422 | var tzMin, tzSec int 423 | if remainderIdx < len(str) && str[remainderIdx] == ':' { 424 | tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) 425 | remainderIdx += 3 426 | } 427 | if remainderIdx < len(str) && str[remainderIdx] == ':' { 428 | tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) 429 | remainderIdx += 3 430 | } 431 | tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) 432 | } 433 | var isoYear int 434 | 435 | if isBC { 436 | isoYear = 1 - year 437 | remainderIdx += 3 438 | } else { 439 | isoYear = year 440 | } 441 | if remainderIdx < len(str) { 442 | return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) 443 | } 444 | t := time.Date(isoYear, time.Month(month), day, 445 | hour, minute, second, nanoSec, 446 | globalLocationCache.getLocation(tzOff)) 447 | 448 | if currentLocation != nil { 449 | // Set the location of the returned Time based on the session's 450 | // TimeZone value, but only if the local time zone database agrees with 451 | // the remote database on the offset. 452 | lt := t.In(currentLocation) 453 | _, newOff := lt.Zone() 454 | if newOff == tzOff { 455 | t = lt 456 | } 457 | } 458 | 459 | return t, p.err 460 | } 461 | 462 | // formatTs formats t into a format postgres understands. 463 | func formatTs(t time.Time) []byte { 464 | if infinityTsEnabled { 465 | // t <= -infinity : ! (t > -infinity) 466 | if !t.After(infinityTsNegative) { 467 | return []byte("-infinity") 468 | } 469 | // t >= infinity : ! (!t < infinity) 470 | if !t.Before(infinityTsPositive) { 471 | return []byte("infinity") 472 | } 473 | } 474 | return FormatTimestamp(t) 475 | } 476 | 477 | // FormatTimestamp formats t into Postgres' text format for timestamps. 478 | func FormatTimestamp(t time.Time) []byte { 479 | // Need to send dates before 0001 A.D. with " BC" suffix, instead of the 480 | // minus sign preferred by Go. 481 | // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on 482 | bc := false 483 | if t.Year() <= 0 { 484 | // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" 485 | t = t.AddDate((-t.Year())*2+1, 0, 0) 486 | bc = true 487 | } 488 | b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) 489 | 490 | _, offset := t.Zone() 491 | offset = offset % 60 492 | if offset != 0 { 493 | // RFC3339Nano already printed the minus sign 494 | if offset < 0 { 495 | offset = -offset 496 | } 497 | 498 | b = append(b, ':') 499 | if offset < 10 { 500 | b = append(b, '0') 501 | } 502 | b = strconv.AppendInt(b, int64(offset), 10) 503 | } 504 | 505 | if bc { 506 | b = append(b, " BC"...) 507 | } 508 | return b 509 | } 510 | 511 | // Parse a bytea value received from the server. 
Both "hex" and the legacy 512 | // "escape" format are supported. 513 | func parseBytea(s []byte) (result []byte, err error) { 514 | if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { 515 | // bytea_output = hex 516 | s = s[2:] // trim off leading "\\x" 517 | result = make([]byte, hex.DecodedLen(len(s))) 518 | _, err := hex.Decode(result, s) 519 | if err != nil { 520 | return nil, err 521 | } 522 | } else { 523 | // bytea_output = escape 524 | for len(s) > 0 { 525 | if s[0] == '\\' { 526 | // escaped '\\' 527 | if len(s) >= 2 && s[1] == '\\' { 528 | result = append(result, '\\') 529 | s = s[2:] 530 | continue 531 | } 532 | 533 | // '\\' followed by an octal number 534 | if len(s) < 4 { 535 | return nil, fmt.Errorf("invalid bytea sequence %v", s) 536 | } 537 | r, err := strconv.ParseInt(string(s[1:4]), 8, 9) 538 | if err != nil { 539 | return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) 540 | } 541 | result = append(result, byte(r)) 542 | s = s[4:] 543 | } else { 544 | // We hit an unescaped, raw byte. Try to read in as many as 545 | // possible in one go. 546 | i := bytes.IndexByte(s, '\\') 547 | if i == -1 { 548 | result = append(result, s...) 549 | break 550 | } 551 | result = append(result, s[:i]...) 552 | s = s[i:] 553 | } 554 | } 555 | } 556 | 557 | return result, nil 558 | } 559 | 560 | func encodeBytea(serverVersion int, v []byte) (result []byte) { 561 | if serverVersion >= 90000 { 562 | // Use the hex format if we know that the server supports it 563 | result = make([]byte, 2+hex.EncodedLen(len(v))) 564 | result[0] = '\\' 565 | result[1] = 'x' 566 | hex.Encode(result[2:], v) 567 | } else { 568 | // .. or resort to "escape" 569 | for _, b := range v { 570 | if b == '\\' { 571 | result = append(result, '\\', '\\') 572 | } else if b < 0x20 || b > 0x7e { 573 | result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) 574 | } else { 575 | result = append(result, b) 576 | } 577 | } 578 | } 579 | 580 | return result 581 | } 582 | 583 | // NullTime represents a time.Time that may be null. NullTime implements the 584 | // sql.Scanner interface so it can be used as a scan destination, similar to 585 | // sql.NullString. 586 | type NullTime struct { 587 | Time time.Time 588 | Valid bool // Valid is true if Time is not NULL 589 | } 590 | 591 | // Scan implements the Scanner interface. 592 | func (nt *NullTime) Scan(value interface{}) error { 593 | nt.Time, nt.Valid = value.(time.Time) 594 | return nil 595 | } 596 | 597 | // Value implements the driver Valuer interface. 598 | func (nt NullTime) Value() (driver.Value, error) { 599 | if !nt.Valid { 600 | return nil, nil 601 | } 602 | return nt.Time, nil 603 | } 604 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/error.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "database/sql/driver" 5 | "fmt" 6 | "io" 7 | "net" 8 | "runtime" 9 | ) 10 | 11 | // Error severities 12 | const ( 13 | Efatal = "FATAL" 14 | Epanic = "PANIC" 15 | Ewarning = "WARNING" 16 | Enotice = "NOTICE" 17 | Edebug = "DEBUG" 18 | Einfo = "INFO" 19 | Elog = "LOG" 20 | ) 21 | 22 | // Error represents an error communicating with the server. 
23 | // 24 | // See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields 25 | type Error struct { 26 | Severity string 27 | Code ErrorCode 28 | Message string 29 | Detail string 30 | Hint string 31 | Position string 32 | InternalPosition string 33 | InternalQuery string 34 | Where string 35 | Schema string 36 | Table string 37 | Column string 38 | DataTypeName string 39 | Constraint string 40 | File string 41 | Line string 42 | Routine string 43 | } 44 | 45 | // ErrorCode is a five-character error code. 46 | type ErrorCode string 47 | 48 | // Name returns a more human friendly rendering of the error code, namely the 49 | // "condition name". 50 | // 51 | // See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for 52 | // details. 53 | func (ec ErrorCode) Name() string { 54 | return errorCodeNames[ec] 55 | } 56 | 57 | // ErrorClass is only the class part of an error code. 58 | type ErrorClass string 59 | 60 | // Name returns the condition name of an error class. It is equivalent to the 61 | // condition name of the "standard" error code (i.e. the one having the last 62 | // three characters "000"). 63 | func (ec ErrorClass) Name() string { 64 | return errorCodeNames[ErrorCode(ec+"000")] 65 | } 66 | 67 | // Class returns the error class, e.g. "28". 68 | // 69 | // See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for 70 | // details. 71 | func (ec ErrorCode) Class() ErrorClass { 72 | return ErrorClass(ec[0:2]) 73 | } 74 | 75 | // errorCodeNames is a mapping between the five-character error codes and the 76 | // human readable "condition names". It is derived from the list at 77 | // http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html 78 | var errorCodeNames = map[ErrorCode]string{ 79 | // Class 00 - Successful Completion 80 | "00000": "successful_completion", 81 | // Class 01 - Warning 82 | "01000": "warning", 83 | "0100C": "dynamic_result_sets_returned", 84 | "01008": "implicit_zero_bit_padding", 85 | "01003": "null_value_eliminated_in_set_function", 86 | "01007": "privilege_not_granted", 87 | "01006": "privilege_not_revoked", 88 | "01004": "string_data_right_truncation", 89 | "01P01": "deprecated_feature", 90 | // Class 02 - No Data (this is also a warning class per the SQL standard) 91 | "02000": "no_data", 92 | "02001": "no_additional_dynamic_result_sets_returned", 93 | // Class 03 - SQL Statement Not Yet Complete 94 | "03000": "sql_statement_not_yet_complete", 95 | // Class 08 - Connection Exception 96 | "08000": "connection_exception", 97 | "08003": "connection_does_not_exist", 98 | "08006": "connection_failure", 99 | "08001": "sqlclient_unable_to_establish_sqlconnection", 100 | "08004": "sqlserver_rejected_establishment_of_sqlconnection", 101 | "08007": "transaction_resolution_unknown", 102 | "08P01": "protocol_violation", 103 | // Class 09 - Triggered Action Exception 104 | "09000": "triggered_action_exception", 105 | // Class 0A - Feature Not Supported 106 | "0A000": "feature_not_supported", 107 | // Class 0B - Invalid Transaction Initiation 108 | "0B000": "invalid_transaction_initiation", 109 | // Class 0F - Locator Exception 110 | "0F000": "locator_exception", 111 | "0F001": "invalid_locator_specification", 112 | // Class 0L - Invalid Grantor 113 | "0L000": "invalid_grantor", 114 | "0LP01": "invalid_grant_operation", 115 | // Class 0P - Invalid Role Specification 116 | "0P000": "invalid_role_specification", 117 | // Class 0Z - Diagnostics Exception 118 | "0Z000": "diagnostics_exception", 
119 | "0Z002": "stacked_diagnostics_accessed_without_active_handler", 120 | // Class 20 - Case Not Found 121 | "20000": "case_not_found", 122 | // Class 21 - Cardinality Violation 123 | "21000": "cardinality_violation", 124 | // Class 22 - Data Exception 125 | "22000": "data_exception", 126 | "2202E": "array_subscript_error", 127 | "22021": "character_not_in_repertoire", 128 | "22008": "datetime_field_overflow", 129 | "22012": "division_by_zero", 130 | "22005": "error_in_assignment", 131 | "2200B": "escape_character_conflict", 132 | "22022": "indicator_overflow", 133 | "22015": "interval_field_overflow", 134 | "2201E": "invalid_argument_for_logarithm", 135 | "22014": "invalid_argument_for_ntile_function", 136 | "22016": "invalid_argument_for_nth_value_function", 137 | "2201F": "invalid_argument_for_power_function", 138 | "2201G": "invalid_argument_for_width_bucket_function", 139 | "22018": "invalid_character_value_for_cast", 140 | "22007": "invalid_datetime_format", 141 | "22019": "invalid_escape_character", 142 | "2200D": "invalid_escape_octet", 143 | "22025": "invalid_escape_sequence", 144 | "22P06": "nonstandard_use_of_escape_character", 145 | "22010": "invalid_indicator_parameter_value", 146 | "22023": "invalid_parameter_value", 147 | "2201B": "invalid_regular_expression", 148 | "2201W": "invalid_row_count_in_limit_clause", 149 | "2201X": "invalid_row_count_in_result_offset_clause", 150 | "22009": "invalid_time_zone_displacement_value", 151 | "2200C": "invalid_use_of_escape_character", 152 | "2200G": "most_specific_type_mismatch", 153 | "22004": "null_value_not_allowed", 154 | "22002": "null_value_no_indicator_parameter", 155 | "22003": "numeric_value_out_of_range", 156 | "2200H": "sequence_generator_limit_exceeded", 157 | "22026": "string_data_length_mismatch", 158 | "22001": "string_data_right_truncation", 159 | "22011": "substring_error", 160 | "22027": "trim_error", 161 | "22024": "unterminated_c_string", 162 | "2200F": "zero_length_character_string", 163 | "22P01": "floating_point_exception", 164 | "22P02": "invalid_text_representation", 165 | "22P03": "invalid_binary_representation", 166 | "22P04": "bad_copy_file_format", 167 | "22P05": "untranslatable_character", 168 | "2200L": "not_an_xml_document", 169 | "2200M": "invalid_xml_document", 170 | "2200N": "invalid_xml_content", 171 | "2200S": "invalid_xml_comment", 172 | "2200T": "invalid_xml_processing_instruction", 173 | // Class 23 - Integrity Constraint Violation 174 | "23000": "integrity_constraint_violation", 175 | "23001": "restrict_violation", 176 | "23502": "not_null_violation", 177 | "23503": "foreign_key_violation", 178 | "23505": "unique_violation", 179 | "23514": "check_violation", 180 | "23P01": "exclusion_violation", 181 | // Class 24 - Invalid Cursor State 182 | "24000": "invalid_cursor_state", 183 | // Class 25 - Invalid Transaction State 184 | "25000": "invalid_transaction_state", 185 | "25001": "active_sql_transaction", 186 | "25002": "branch_transaction_already_active", 187 | "25008": "held_cursor_requires_same_isolation_level", 188 | "25003": "inappropriate_access_mode_for_branch_transaction", 189 | "25004": "inappropriate_isolation_level_for_branch_transaction", 190 | "25005": "no_active_sql_transaction_for_branch_transaction", 191 | "25006": "read_only_sql_transaction", 192 | "25007": "schema_and_data_statement_mixing_not_supported", 193 | "25P01": "no_active_sql_transaction", 194 | "25P02": "in_failed_sql_transaction", 195 | // Class 26 - Invalid SQL Statement Name 196 | "26000": 
"invalid_sql_statement_name", 197 | // Class 27 - Triggered Data Change Violation 198 | "27000": "triggered_data_change_violation", 199 | // Class 28 - Invalid Authorization Specification 200 | "28000": "invalid_authorization_specification", 201 | "28P01": "invalid_password", 202 | // Class 2B - Dependent Privilege Descriptors Still Exist 203 | "2B000": "dependent_privilege_descriptors_still_exist", 204 | "2BP01": "dependent_objects_still_exist", 205 | // Class 2D - Invalid Transaction Termination 206 | "2D000": "invalid_transaction_termination", 207 | // Class 2F - SQL Routine Exception 208 | "2F000": "sql_routine_exception", 209 | "2F005": "function_executed_no_return_statement", 210 | "2F002": "modifying_sql_data_not_permitted", 211 | "2F003": "prohibited_sql_statement_attempted", 212 | "2F004": "reading_sql_data_not_permitted", 213 | // Class 34 - Invalid Cursor Name 214 | "34000": "invalid_cursor_name", 215 | // Class 38 - External Routine Exception 216 | "38000": "external_routine_exception", 217 | "38001": "containing_sql_not_permitted", 218 | "38002": "modifying_sql_data_not_permitted", 219 | "38003": "prohibited_sql_statement_attempted", 220 | "38004": "reading_sql_data_not_permitted", 221 | // Class 39 - External Routine Invocation Exception 222 | "39000": "external_routine_invocation_exception", 223 | "39001": "invalid_sqlstate_returned", 224 | "39004": "null_value_not_allowed", 225 | "39P01": "trigger_protocol_violated", 226 | "39P02": "srf_protocol_violated", 227 | // Class 3B - Savepoint Exception 228 | "3B000": "savepoint_exception", 229 | "3B001": "invalid_savepoint_specification", 230 | // Class 3D - Invalid Catalog Name 231 | "3D000": "invalid_catalog_name", 232 | // Class 3F - Invalid Schema Name 233 | "3F000": "invalid_schema_name", 234 | // Class 40 - Transaction Rollback 235 | "40000": "transaction_rollback", 236 | "40002": "transaction_integrity_constraint_violation", 237 | "40001": "serialization_failure", 238 | "40003": "statement_completion_unknown", 239 | "40P01": "deadlock_detected", 240 | // Class 42 - Syntax Error or Access Rule Violation 241 | "42000": "syntax_error_or_access_rule_violation", 242 | "42601": "syntax_error", 243 | "42501": "insufficient_privilege", 244 | "42846": "cannot_coerce", 245 | "42803": "grouping_error", 246 | "42P20": "windowing_error", 247 | "42P19": "invalid_recursion", 248 | "42830": "invalid_foreign_key", 249 | "42602": "invalid_name", 250 | "42622": "name_too_long", 251 | "42939": "reserved_name", 252 | "42804": "datatype_mismatch", 253 | "42P18": "indeterminate_datatype", 254 | "42P21": "collation_mismatch", 255 | "42P22": "indeterminate_collation", 256 | "42809": "wrong_object_type", 257 | "42703": "undefined_column", 258 | "42883": "undefined_function", 259 | "42P01": "undefined_table", 260 | "42P02": "undefined_parameter", 261 | "42704": "undefined_object", 262 | "42701": "duplicate_column", 263 | "42P03": "duplicate_cursor", 264 | "42P04": "duplicate_database", 265 | "42723": "duplicate_function", 266 | "42P05": "duplicate_prepared_statement", 267 | "42P06": "duplicate_schema", 268 | "42P07": "duplicate_table", 269 | "42712": "duplicate_alias", 270 | "42710": "duplicate_object", 271 | "42702": "ambiguous_column", 272 | "42725": "ambiguous_function", 273 | "42P08": "ambiguous_parameter", 274 | "42P09": "ambiguous_alias", 275 | "42P10": "invalid_column_reference", 276 | "42611": "invalid_column_definition", 277 | "42P11": "invalid_cursor_definition", 278 | "42P12": "invalid_database_definition", 279 | "42P13": 
"invalid_function_definition", 280 | "42P14": "invalid_prepared_statement_definition", 281 | "42P15": "invalid_schema_definition", 282 | "42P16": "invalid_table_definition", 283 | "42P17": "invalid_object_definition", 284 | // Class 44 - WITH CHECK OPTION Violation 285 | "44000": "with_check_option_violation", 286 | // Class 53 - Insufficient Resources 287 | "53000": "insufficient_resources", 288 | "53100": "disk_full", 289 | "53200": "out_of_memory", 290 | "53300": "too_many_connections", 291 | "53400": "configuration_limit_exceeded", 292 | // Class 54 - Program Limit Exceeded 293 | "54000": "program_limit_exceeded", 294 | "54001": "statement_too_complex", 295 | "54011": "too_many_columns", 296 | "54023": "too_many_arguments", 297 | // Class 55 - Object Not In Prerequisite State 298 | "55000": "object_not_in_prerequisite_state", 299 | "55006": "object_in_use", 300 | "55P02": "cant_change_runtime_param", 301 | "55P03": "lock_not_available", 302 | // Class 57 - Operator Intervention 303 | "57000": "operator_intervention", 304 | "57014": "query_canceled", 305 | "57P01": "admin_shutdown", 306 | "57P02": "crash_shutdown", 307 | "57P03": "cannot_connect_now", 308 | "57P04": "database_dropped", 309 | // Class 58 - System Error (errors external to PostgreSQL itself) 310 | "58000": "system_error", 311 | "58030": "io_error", 312 | "58P01": "undefined_file", 313 | "58P02": "duplicate_file", 314 | // Class F0 - Configuration File Error 315 | "F0000": "config_file_error", 316 | "F0001": "lock_file_exists", 317 | // Class HV - Foreign Data Wrapper Error (SQL/MED) 318 | "HV000": "fdw_error", 319 | "HV005": "fdw_column_name_not_found", 320 | "HV002": "fdw_dynamic_parameter_value_needed", 321 | "HV010": "fdw_function_sequence_error", 322 | "HV021": "fdw_inconsistent_descriptor_information", 323 | "HV024": "fdw_invalid_attribute_value", 324 | "HV007": "fdw_invalid_column_name", 325 | "HV008": "fdw_invalid_column_number", 326 | "HV004": "fdw_invalid_data_type", 327 | "HV006": "fdw_invalid_data_type_descriptors", 328 | "HV091": "fdw_invalid_descriptor_field_identifier", 329 | "HV00B": "fdw_invalid_handle", 330 | "HV00C": "fdw_invalid_option_index", 331 | "HV00D": "fdw_invalid_option_name", 332 | "HV090": "fdw_invalid_string_length_or_buffer_length", 333 | "HV00A": "fdw_invalid_string_format", 334 | "HV009": "fdw_invalid_use_of_null_pointer", 335 | "HV014": "fdw_too_many_handles", 336 | "HV001": "fdw_out_of_memory", 337 | "HV00P": "fdw_no_schemas", 338 | "HV00J": "fdw_option_name_not_found", 339 | "HV00K": "fdw_reply_handle", 340 | "HV00Q": "fdw_schema_not_found", 341 | "HV00R": "fdw_table_not_found", 342 | "HV00L": "fdw_unable_to_create_execution", 343 | "HV00M": "fdw_unable_to_create_reply", 344 | "HV00N": "fdw_unable_to_establish_connection", 345 | // Class P0 - PL/pgSQL Error 346 | "P0000": "plpgsql_error", 347 | "P0001": "raise_exception", 348 | "P0002": "no_data_found", 349 | "P0003": "too_many_rows", 350 | // Class XX - Internal Error 351 | "XX000": "internal_error", 352 | "XX001": "data_corrupted", 353 | "XX002": "index_corrupted", 354 | } 355 | 356 | func parseError(r *readBuf) *Error { 357 | err := new(Error) 358 | for t := r.byte(); t != 0; t = r.byte() { 359 | msg := r.string() 360 | switch t { 361 | case 'S': 362 | err.Severity = msg 363 | case 'C': 364 | err.Code = ErrorCode(msg) 365 | case 'M': 366 | err.Message = msg 367 | case 'D': 368 | err.Detail = msg 369 | case 'H': 370 | err.Hint = msg 371 | case 'P': 372 | err.Position = msg 373 | case 'p': 374 | err.InternalPosition = msg 375 | case 
'q': 376 | err.InternalQuery = msg 377 | case 'W': 378 | err.Where = msg 379 | case 's': 380 | err.Schema = msg 381 | case 't': 382 | err.Table = msg 383 | case 'c': 384 | err.Column = msg 385 | case 'd': 386 | err.DataTypeName = msg 387 | case 'n': 388 | err.Constraint = msg 389 | case 'F': 390 | err.File = msg 391 | case 'L': 392 | err.Line = msg 393 | case 'R': 394 | err.Routine = msg 395 | } 396 | } 397 | return err 398 | } 399 | 400 | // Fatal returns true if the Error Severity is fatal. 401 | func (err *Error) Fatal() bool { 402 | return err.Severity == Efatal 403 | } 404 | 405 | // Get implements the legacy PGError interface. New code should use the fields 406 | // of the Error struct directly. 407 | func (err *Error) Get(k byte) (v string) { 408 | switch k { 409 | case 'S': 410 | return err.Severity 411 | case 'C': 412 | return string(err.Code) 413 | case 'M': 414 | return err.Message 415 | case 'D': 416 | return err.Detail 417 | case 'H': 418 | return err.Hint 419 | case 'P': 420 | return err.Position 421 | case 'p': 422 | return err.InternalPosition 423 | case 'q': 424 | return err.InternalQuery 425 | case 'W': 426 | return err.Where 427 | case 's': 428 | return err.Schema 429 | case 't': 430 | return err.Table 431 | case 'c': 432 | return err.Column 433 | case 'd': 434 | return err.DataTypeName 435 | case 'n': 436 | return err.Constraint 437 | case 'F': 438 | return err.File 439 | case 'L': 440 | return err.Line 441 | case 'R': 442 | return err.Routine 443 | } 444 | return "" 445 | } 446 | 447 | func (err Error) Error() string { 448 | return "pq: " + err.Message 449 | } 450 | 451 | // PGError is an interface used by previous versions of pq. It is provided 452 | // only to support legacy code. New code should use the Error type. 453 | type PGError interface { 454 | Error() string 455 | Fatal() bool 456 | Get(k byte) (v string) 457 | } 458 | 459 | func errorf(s string, args ...interface{}) { 460 | panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) 461 | } 462 | 463 | // TODO(ainar-g) Rename to errorf after removing panics. 464 | func fmterrorf(s string, args ...interface{}) error { 465 | return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) 466 | } 467 | 468 | func errRecoverNoErrBadConn(err *error) { 469 | e := recover() 470 | if e == nil { 471 | // Do nothing 472 | return 473 | } 474 | var ok bool 475 | *err, ok = e.(error) 476 | if !ok { 477 | *err = fmt.Errorf("pq: unexpected error: %#v", e) 478 | } 479 | } 480 | 481 | func (c *conn) errRecover(err *error) { 482 | e := recover() 483 | switch v := e.(type) { 484 | case nil: 485 | // Do nothing 486 | case runtime.Error: 487 | c.bad = true 488 | panic(v) 489 | case *Error: 490 | if v.Fatal() { 491 | *err = driver.ErrBadConn 492 | } else { 493 | *err = v 494 | } 495 | case *net.OpError: 496 | c.bad = true 497 | *err = v 498 | case error: 499 | if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { 500 | *err = driver.ErrBadConn 501 | } else { 502 | *err = v 503 | } 504 | 505 | default: 506 | c.bad = true 507 | panic(fmt.Sprintf("unknown error: %#v", e)) 508 | } 509 | 510 | // Any time we return ErrBadConn, we need to remember it since *Tx doesn't 511 | // mark the connection bad in database/sql. 
512 | if *err == driver.ErrBadConn { 513 | c.bad = true 514 | } 515 | } 516 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/lib/pq 2 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/notify.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | // Package pq is a pure Go Postgres driver for the database/sql package. 4 | // This module contains support for Postgres LISTEN/NOTIFY. 5 | 6 | import ( 7 | "errors" 8 | "fmt" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | // Notification represents a single notification from the database. 15 | type Notification struct { 16 | // Process ID (PID) of the notifying postgres backend. 17 | BePid int 18 | // Name of the channel the notification was sent on. 19 | Channel string 20 | // Payload, or the empty string if unspecified. 21 | Extra string 22 | } 23 | 24 | func recvNotification(r *readBuf) *Notification { 25 | bePid := r.int32() 26 | channel := r.string() 27 | extra := r.string() 28 | 29 | return &Notification{bePid, channel, extra} 30 | } 31 | 32 | const ( 33 | connStateIdle int32 = iota 34 | connStateExpectResponse 35 | connStateExpectReadyForQuery 36 | ) 37 | 38 | type message struct { 39 | typ byte 40 | err error 41 | } 42 | 43 | var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") 44 | 45 | // ListenerConn is a low-level interface for waiting for notifications. You 46 | // should use Listener instead. 47 | type ListenerConn struct { 48 | // guards cn and err 49 | connectionLock sync.Mutex 50 | cn *conn 51 | err error 52 | 53 | connState int32 54 | 55 | // the sending goroutine will be holding this lock 56 | senderLock sync.Mutex 57 | 58 | notificationChan chan<- *Notification 59 | 60 | replyChan chan message 61 | } 62 | 63 | // NewListenerConn creates a new ListenerConn. Use NewListener instead. 64 | func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { 65 | return newDialListenerConn(defaultDialer{}, name, notificationChan) 66 | } 67 | 68 | func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { 69 | cn, err := DialOpen(d, name) 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | l := &ListenerConn{ 75 | cn: cn.(*conn), 76 | notificationChan: c, 77 | connState: connStateIdle, 78 | replyChan: make(chan message, 2), 79 | } 80 | 81 | go l.listenerConnMain() 82 | 83 | return l, nil 84 | } 85 | 86 | // We can only allow one goroutine at a time to be running a query on the 87 | // connection for various reasons, so the goroutine sending on the connection 88 | // must be holding senderLock. 89 | // 90 | // Returns an error if an unrecoverable error has occurred and the ListenerConn 91 | // should be abandoned. 92 | func (l *ListenerConn) acquireSenderLock() error { 93 | // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery 94 | l.senderLock.Lock() 95 | 96 | l.connectionLock.Lock() 97 | err := l.err 98 | l.connectionLock.Unlock() 99 | if err != nil { 100 | l.senderLock.Unlock() 101 | return err 102 | } 103 | return nil 104 | } 105 | 106 | func (l *ListenerConn) releaseSenderLock() { 107 | l.senderLock.Unlock() 108 | } 109 | 110 | // setState advances the protocol state to newState. 
Returns false if moving 111 | // to that state from the current state is not allowed. 112 | func (l *ListenerConn) setState(newState int32) bool { 113 | var expectedState int32 114 | 115 | switch newState { 116 | case connStateIdle: 117 | expectedState = connStateExpectReadyForQuery 118 | case connStateExpectResponse: 119 | expectedState = connStateIdle 120 | case connStateExpectReadyForQuery: 121 | expectedState = connStateExpectResponse 122 | default: 123 | panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) 124 | } 125 | 126 | return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) 127 | } 128 | 129 | // Main logic is here: receive messages from the postgres backend, forward 130 | // notifications and query replies and keep the internal state in sync with the 131 | // protocol state. Returns when the connection has been lost, is about to go 132 | // away or should be discarded because we couldn't agree on the state with the 133 | // server backend. 134 | func (l *ListenerConn) listenerConnLoop() (err error) { 135 | defer errRecoverNoErrBadConn(&err) 136 | 137 | r := &readBuf{} 138 | for { 139 | t, err := l.cn.recvMessage(r) 140 | if err != nil { 141 | return err 142 | } 143 | 144 | switch t { 145 | case 'A': 146 | // recvNotification copies all the data so we don't need to worry 147 | // about the scratch buffer being overwritten. 148 | l.notificationChan <- recvNotification(r) 149 | 150 | case 'T', 'D': 151 | // only used by tests; ignore 152 | 153 | case 'E': 154 | // We might receive an ErrorResponse even when not in a query; it 155 | // is expected that the server will close the connection after 156 | // that, but we should make sure that the error we display is the 157 | // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. 158 | if !l.setState(connStateExpectReadyForQuery) { 159 | return parseError(r) 160 | } 161 | l.replyChan <- message{t, parseError(r)} 162 | 163 | case 'C', 'I': 164 | if !l.setState(connStateExpectReadyForQuery) { 165 | // protocol out of sync 166 | return fmt.Errorf("unexpected CommandComplete") 167 | } 168 | // ExecSimpleQuery doesn't need to know about this message 169 | 170 | case 'Z': 171 | if !l.setState(connStateIdle) { 172 | // protocol out of sync 173 | return fmt.Errorf("unexpected ReadyForQuery") 174 | } 175 | l.replyChan <- message{t, nil} 176 | 177 | case 'N', 'S': 178 | // ignore 179 | default: 180 | return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) 181 | } 182 | } 183 | } 184 | 185 | // This is the main routine for the goroutine receiving on the database 186 | // connection. Most of the main logic is in listenerConnLoop. 187 | func (l *ListenerConn) listenerConnMain() { 188 | err := l.listenerConnLoop() 189 | 190 | // listenerConnLoop terminated; we're done, but we still have to clean up. 191 | // Make sure nobody tries to start any new queries by making sure the err 192 | // pointer is set. It is important that we do not overwrite its value; a 193 | // connection could be closed by either this goroutine or one sending on 194 | // the connection -- whoever closes the connection is assumed to have the 195 | // more meaningful error message (as the other one will probably get 196 | // net.errClosed), so that goroutine sets the error we expose while the 197 | // other error is discarded. If the connection is lost while two 198 | // goroutines are operating on the socket, it probably doesn't matter which 199 | // error we expose so we don't try to do anything more complex. 
200 | l.connectionLock.Lock() 201 | if l.err == nil { 202 | l.err = err 203 | } 204 | l.cn.Close() 205 | l.connectionLock.Unlock() 206 | 207 | // There might be a query in-flight; make sure nobody's waiting for a 208 | // response to it, since there's not going to be one. 209 | close(l.replyChan) 210 | 211 | // let the listener know we're done 212 | close(l.notificationChan) 213 | 214 | // this ListenerConn is done 215 | } 216 | 217 | // Listen sends a LISTEN query to the server. See ExecSimpleQuery. 218 | func (l *ListenerConn) Listen(channel string) (bool, error) { 219 | return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) 220 | } 221 | 222 | // Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. 223 | func (l *ListenerConn) Unlisten(channel string) (bool, error) { 224 | return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) 225 | } 226 | 227 | // UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. 228 | func (l *ListenerConn) UnlistenAll() (bool, error) { 229 | return l.ExecSimpleQuery("UNLISTEN *") 230 | } 231 | 232 | // Ping the remote server to make sure it's alive. Non-nil error means the 233 | // connection has failed and should be abandoned. 234 | func (l *ListenerConn) Ping() error { 235 | sent, err := l.ExecSimpleQuery("") 236 | if !sent { 237 | return err 238 | } 239 | if err != nil { 240 | // shouldn't happen 241 | panic(err) 242 | } 243 | return nil 244 | } 245 | 246 | // Attempt to send a query on the connection. Returns an error if sending the 247 | // query failed, and the caller should initiate closure of this connection. 248 | // The caller must be holding senderLock (see acquireSenderLock and 249 | // releaseSenderLock). 250 | func (l *ListenerConn) sendSimpleQuery(q string) (err error) { 251 | defer errRecoverNoErrBadConn(&err) 252 | 253 | // must set connection state before sending the query 254 | if !l.setState(connStateExpectResponse) { 255 | panic("two queries running at the same time") 256 | } 257 | 258 | // Can't use l.cn.writeBuf here because it uses the scratch buffer which 259 | // might get overwritten by listenerConnLoop. 260 | b := &writeBuf{ 261 | buf: []byte("Q\x00\x00\x00\x00"), 262 | pos: 1, 263 | } 264 | b.string(q) 265 | l.cn.send(b) 266 | 267 | return nil 268 | } 269 | 270 | // ExecSimpleQuery executes a "simple query" (i.e. one with no bindable 271 | // parameters) on the connection. The possible return values are: 272 | // 1) "executed" is true; the query was executed to completion on the 273 | // database server. If the query failed, err will be set to the error 274 | // returned by the database, otherwise err will be nil. 275 | // 2) If "executed" is false, the query could not be executed on the remote 276 | // server. err will be non-nil. 277 | // 278 | // After a call to ExecSimpleQuery has returned an executed=false value, the 279 | // connection has either been closed or will be closed shortly thereafter, and 280 | // all subsequently executed queries will return an error. 281 | func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { 282 | if err = l.acquireSenderLock(); err != nil { 283 | return false, err 284 | } 285 | defer l.releaseSenderLock() 286 | 287 | err = l.sendSimpleQuery(q) 288 | if err != nil { 289 | // We can't know what state the protocol is in, so we need to abandon 290 | // this connection. 291 | l.connectionLock.Lock() 292 | // Set the error pointer if it hasn't been set already; see 293 | // listenerConnMain. 
294 | if l.err == nil { 295 | l.err = err 296 | } 297 | l.connectionLock.Unlock() 298 | l.cn.c.Close() 299 | return false, err 300 | } 301 | 302 | // now we just wait for a reply.. 303 | for { 304 | m, ok := <-l.replyChan 305 | if !ok { 306 | // We lost the connection to server, don't bother waiting for a 307 | // a response. err should have been set already. 308 | l.connectionLock.Lock() 309 | err := l.err 310 | l.connectionLock.Unlock() 311 | return false, err 312 | } 313 | switch m.typ { 314 | case 'Z': 315 | // sanity check 316 | if m.err != nil { 317 | panic("m.err != nil") 318 | } 319 | // done; err might or might not be set 320 | return true, err 321 | 322 | case 'E': 323 | // sanity check 324 | if m.err == nil { 325 | panic("m.err == nil") 326 | } 327 | // server responded with an error; ReadyForQuery to follow 328 | err = m.err 329 | 330 | default: 331 | return false, fmt.Errorf("unknown response for simple query: %q", m.typ) 332 | } 333 | } 334 | } 335 | 336 | // Close closes the connection. 337 | func (l *ListenerConn) Close() error { 338 | l.connectionLock.Lock() 339 | if l.err != nil { 340 | l.connectionLock.Unlock() 341 | return errListenerConnClosed 342 | } 343 | l.err = errListenerConnClosed 344 | l.connectionLock.Unlock() 345 | // We can't send anything on the connection without holding senderLock. 346 | // Simply close the net.Conn to wake up everyone operating on it. 347 | return l.cn.c.Close() 348 | } 349 | 350 | // Err returns the reason the connection was closed. It is not safe to call 351 | // this function until l.Notify has been closed. 352 | func (l *ListenerConn) Err() error { 353 | return l.err 354 | } 355 | 356 | var errListenerClosed = errors.New("pq: Listener has been closed") 357 | 358 | // ErrChannelAlreadyOpen is returned from Listen when a channel is already 359 | // open. 360 | var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") 361 | 362 | // ErrChannelNotOpen is returned from Unlisten when a channel is not open. 363 | var ErrChannelNotOpen = errors.New("pq: channel is not open") 364 | 365 | // ListenerEventType is an enumeration of listener event types. 366 | type ListenerEventType int 367 | 368 | const ( 369 | // ListenerEventConnected is emitted only when the database connection 370 | // has been initially initialized. The err argument of the callback 371 | // will always be nil. 372 | ListenerEventConnected ListenerEventType = iota 373 | 374 | // ListenerEventDisconnected is emitted after a database connection has 375 | // been lost, either because of an error or because Close has been 376 | // called. The err argument will be set to the reason the database 377 | // connection was lost. 378 | ListenerEventDisconnected 379 | 380 | // ListenerEventReconnected is emitted after a database connection has 381 | // been re-established after connection loss. The err argument of the 382 | // callback will always be nil. After this event has been emitted, a 383 | // nil pq.Notification is sent on the Listener.Notify channel. 384 | ListenerEventReconnected 385 | 386 | // ListenerEventConnectionAttemptFailed is emitted after a connection 387 | // to the database was attempted, but failed. The err argument will be 388 | // set to an error describing why the connection attempt did not 389 | // succeed. 390 | ListenerEventConnectionAttemptFailed 391 | ) 392 | 393 | // EventCallbackType is the event callback type. See also ListenerEventType 394 | // constants' documentation. 
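// For example, a callback passed to NewListener might simply log state
// changes:
//
//	func(ev pq.ListenerEventType, err error) {
//		log.Printf("listener event %v: %v", ev, err)
//	}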
395 | type EventCallbackType func(event ListenerEventType, err error) 396 | 397 | // Listener provides an interface for listening to notifications from a 398 | // PostgreSQL database. For general usage information, see section 399 | // "Notifications". 400 | // 401 | // Listener can safely be used from concurrently running goroutines. 402 | type Listener struct { 403 | // Channel for receiving notifications from the database. In some cases a 404 | // nil value will be sent. See section "Notifications" above. 405 | Notify chan *Notification 406 | 407 | name string 408 | minReconnectInterval time.Duration 409 | maxReconnectInterval time.Duration 410 | dialer Dialer 411 | eventCallback EventCallbackType 412 | 413 | lock sync.Mutex 414 | isClosed bool 415 | reconnectCond *sync.Cond 416 | cn *ListenerConn 417 | connNotificationChan <-chan *Notification 418 | channels map[string]struct{} 419 | } 420 | 421 | // NewListener creates a new database connection dedicated to LISTEN / NOTIFY. 422 | // 423 | // name should be set to a connection string to be used to establish the 424 | // database connection (see section "Connection String Parameters" above). 425 | // 426 | // minReconnectInterval controls the duration to wait before trying to 427 | // re-establish the database connection after connection loss. After each 428 | // consecutive failure this interval is doubled, until maxReconnectInterval is 429 | // reached. Successfully completing the connection establishment procedure 430 | // resets the interval back to minReconnectInterval. 431 | // 432 | // The last parameter eventCallback can be set to a function which will be 433 | // called by the Listener when the state of the underlying database connection 434 | // changes. This callback will be called by the goroutine which dispatches the 435 | // notifications over the Notify channel, so you should try to avoid doing 436 | // potentially time-consuming operations from the callback. 437 | func NewListener(name string, 438 | minReconnectInterval time.Duration, 439 | maxReconnectInterval time.Duration, 440 | eventCallback EventCallbackType) *Listener { 441 | return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback) 442 | } 443 | 444 | // NewDialListener is like NewListener but it takes a Dialer. 445 | func NewDialListener(d Dialer, 446 | name string, 447 | minReconnectInterval time.Duration, 448 | maxReconnectInterval time.Duration, 449 | eventCallback EventCallbackType) *Listener { 450 | 451 | l := &Listener{ 452 | name: name, 453 | minReconnectInterval: minReconnectInterval, 454 | maxReconnectInterval: maxReconnectInterval, 455 | dialer: d, 456 | eventCallback: eventCallback, 457 | 458 | channels: make(map[string]struct{}), 459 | 460 | Notify: make(chan *Notification, 32), 461 | } 462 | l.reconnectCond = sync.NewCond(&l.lock) 463 | 464 | go l.listenerMain() 465 | 466 | return l 467 | } 468 | 469 | // NotificationChannel returns the notification channel for this listener. 470 | // This is the same channel as Notify, and will not be recreated during the 471 | // life time of the Listener. 472 | func (l *Listener) NotificationChannel() <-chan *Notification { 473 | return l.Notify 474 | } 475 | 476 | // Listen starts listening for notifications on a channel. Calls to this 477 | // function will block until an acknowledgement has been received from the 478 | // server. 
Note that Listener automatically re-establishes the connection 479 | // after connection loss, so this function may block indefinitely if the 480 | // connection can not be re-established. 481 | // 482 | // Listen will only fail in three conditions: 483 | // 1) The channel is already open. The returned error will be 484 | // ErrChannelAlreadyOpen. 485 | // 2) The query was executed on the remote server, but PostgreSQL returned an 486 | // error message in response to the query. The returned error will be a 487 | // pq.Error containing the information the server supplied. 488 | // 3) Close is called on the Listener before the request could be completed. 489 | // 490 | // The channel name is case-sensitive. 491 | func (l *Listener) Listen(channel string) error { 492 | l.lock.Lock() 493 | defer l.lock.Unlock() 494 | 495 | if l.isClosed { 496 | return errListenerClosed 497 | } 498 | 499 | // The server allows you to issue a LISTEN on a channel which is already 500 | // open, but it seems useful to be able to detect this case to spot for 501 | // mistakes in application logic. If the application genuinely does't 502 | // care, it can check the exported error and ignore it. 503 | _, exists := l.channels[channel] 504 | if exists { 505 | return ErrChannelAlreadyOpen 506 | } 507 | 508 | if l.cn != nil { 509 | // If gotResponse is true but error is set, the query was executed on 510 | // the remote server, but resulted in an error. This should be 511 | // relatively rare, so it's fine if we just pass the error to our 512 | // caller. However, if gotResponse is false, we could not complete the 513 | // query on the remote server and our underlying connection is about 514 | // to go away, so we only add relname to l.channels, and wait for 515 | // resync() to take care of the rest. 516 | gotResponse, err := l.cn.Listen(channel) 517 | if gotResponse && err != nil { 518 | return err 519 | } 520 | } 521 | 522 | l.channels[channel] = struct{}{} 523 | for l.cn == nil { 524 | l.reconnectCond.Wait() 525 | // we let go of the mutex for a while 526 | if l.isClosed { 527 | return errListenerClosed 528 | } 529 | } 530 | 531 | return nil 532 | } 533 | 534 | // Unlisten removes a channel from the Listener's channel list. Returns 535 | // ErrChannelNotOpen if the Listener is not listening on the specified channel. 536 | // Returns immediately with no error if there is no connection. Note that you 537 | // might still get notifications for this channel even after Unlisten has 538 | // returned. 539 | // 540 | // The channel name is case-sensitive. 541 | func (l *Listener) Unlisten(channel string) error { 542 | l.lock.Lock() 543 | defer l.lock.Unlock() 544 | 545 | if l.isClosed { 546 | return errListenerClosed 547 | } 548 | 549 | // Similarly to LISTEN, this is not an error in Postgres, but it seems 550 | // useful to distinguish from the normal conditions. 551 | _, exists := l.channels[channel] 552 | if !exists { 553 | return ErrChannelNotOpen 554 | } 555 | 556 | if l.cn != nil { 557 | // Similarly to Listen (see comment in that function), the caller 558 | // should only be bothered with an error if it came from the backend as 559 | // a response to our query. 560 | gotResponse, err := l.cn.Unlisten(channel) 561 | if gotResponse && err != nil { 562 | return err 563 | } 564 | } 565 | 566 | // Don't bother waiting for resync if there's no connection. 567 | delete(l.channels, channel) 568 | return nil 569 | } 570 | 571 | // UnlistenAll removes all channels from the Listener's channel list. 
Returns 572 | // immediately with no error if there is no connection. Note that you might 573 | // still get notifications for any of the deleted channels even after 574 | // UnlistenAll has returned. 575 | func (l *Listener) UnlistenAll() error { 576 | l.lock.Lock() 577 | defer l.lock.Unlock() 578 | 579 | if l.isClosed { 580 | return errListenerClosed 581 | } 582 | 583 | if l.cn != nil { 584 | // Similarly to Listen (see comment in that function), the caller 585 | // should only be bothered with an error if it came from the backend as 586 | // a response to our query. 587 | gotResponse, err := l.cn.UnlistenAll() 588 | if gotResponse && err != nil { 589 | return err 590 | } 591 | } 592 | 593 | // Don't bother waiting for resync if there's no connection. 594 | l.channels = make(map[string]struct{}) 595 | return nil 596 | } 597 | 598 | // Ping the remote server to make sure it's alive. Non-nil return value means 599 | // that there is no active connection. 600 | func (l *Listener) Ping() error { 601 | l.lock.Lock() 602 | defer l.lock.Unlock() 603 | 604 | if l.isClosed { 605 | return errListenerClosed 606 | } 607 | if l.cn == nil { 608 | return errors.New("no connection") 609 | } 610 | 611 | return l.cn.Ping() 612 | } 613 | 614 | // Clean up after losing the server connection. Returns l.cn.Err(), which 615 | // should have the reason the connection was lost. 616 | func (l *Listener) disconnectCleanup() error { 617 | l.lock.Lock() 618 | defer l.lock.Unlock() 619 | 620 | // sanity check; can't look at Err() until the channel has been closed 621 | select { 622 | case _, ok := <-l.connNotificationChan: 623 | if ok { 624 | panic("connNotificationChan not closed") 625 | } 626 | default: 627 | panic("connNotificationChan not closed") 628 | } 629 | 630 | err := l.cn.Err() 631 | l.cn.Close() 632 | l.cn = nil 633 | return err 634 | } 635 | 636 | // Synchronize the list of channels we want to be listening on with the server 637 | // after the connection has been established. 638 | func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { 639 | doneChan := make(chan error) 640 | go func(notificationChan <-chan *Notification) { 641 | for channel := range l.channels { 642 | // If we got a response, return that error to our caller as it's 643 | // going to be more descriptive than cn.Err(). 644 | gotResponse, err := cn.Listen(channel) 645 | if gotResponse && err != nil { 646 | doneChan <- err 647 | return 648 | } 649 | 650 | // If we couldn't reach the server, wait for notificationChan to 651 | // close and then return the error message from the connection, as 652 | // per ListenerConn's interface. 653 | if err != nil { 654 | for range notificationChan { 655 | } 656 | doneChan <- cn.Err() 657 | return 658 | } 659 | } 660 | doneChan <- nil 661 | }(notificationChan) 662 | 663 | // Ignore notifications while synchronization is going on to avoid 664 | // deadlocks. We have to send a nil notification over Notify anyway as 665 | // we can't possibly know which notifications (if any) were lost while 666 | // the connection was down, so there's no reason to try and process 667 | // these messages at all. 
668 | for { 669 | select { 670 | case _, ok := <-notificationChan: 671 | if !ok { 672 | notificationChan = nil 673 | } 674 | 675 | case err := <-doneChan: 676 | return err 677 | } 678 | } 679 | } 680 | 681 | // caller should NOT be holding l.lock 682 | func (l *Listener) closed() bool { 683 | l.lock.Lock() 684 | defer l.lock.Unlock() 685 | 686 | return l.isClosed 687 | } 688 | 689 | func (l *Listener) connect() error { 690 | notificationChan := make(chan *Notification, 32) 691 | cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) 692 | if err != nil { 693 | return err 694 | } 695 | 696 | l.lock.Lock() 697 | defer l.lock.Unlock() 698 | 699 | err = l.resync(cn, notificationChan) 700 | if err != nil { 701 | cn.Close() 702 | return err 703 | } 704 | 705 | l.cn = cn 706 | l.connNotificationChan = notificationChan 707 | l.reconnectCond.Broadcast() 708 | 709 | return nil 710 | } 711 | 712 | // Close disconnects the Listener from the database and shuts it down. 713 | // Subsequent calls to its methods will return an error. Close returns an 714 | // error if the connection has already been closed. 715 | func (l *Listener) Close() error { 716 | l.lock.Lock() 717 | defer l.lock.Unlock() 718 | 719 | if l.isClosed { 720 | return errListenerClosed 721 | } 722 | 723 | if l.cn != nil { 724 | l.cn.Close() 725 | } 726 | l.isClosed = true 727 | 728 | // Unblock calls to Listen() 729 | l.reconnectCond.Broadcast() 730 | 731 | return nil 732 | } 733 | 734 | func (l *Listener) emitEvent(event ListenerEventType, err error) { 735 | if l.eventCallback != nil { 736 | l.eventCallback(event, err) 737 | } 738 | } 739 | 740 | // Main logic here: maintain a connection to the server when possible, wait 741 | // for notifications and emit events. 742 | func (l *Listener) listenerConnLoop() { 743 | var nextReconnect time.Time 744 | 745 | reconnectInterval := l.minReconnectInterval 746 | for { 747 | for { 748 | err := l.connect() 749 | if err == nil { 750 | break 751 | } 752 | 753 | if l.closed() { 754 | return 755 | } 756 | l.emitEvent(ListenerEventConnectionAttemptFailed, err) 757 | 758 | time.Sleep(reconnectInterval) 759 | reconnectInterval *= 2 760 | if reconnectInterval > l.maxReconnectInterval { 761 | reconnectInterval = l.maxReconnectInterval 762 | } 763 | } 764 | 765 | if nextReconnect.IsZero() { 766 | l.emitEvent(ListenerEventConnected, nil) 767 | } else { 768 | l.emitEvent(ListenerEventReconnected, nil) 769 | l.Notify <- nil 770 | } 771 | 772 | reconnectInterval = l.minReconnectInterval 773 | nextReconnect = time.Now().Add(reconnectInterval) 774 | 775 | for { 776 | notification, ok := <-l.connNotificationChan 777 | if !ok { 778 | // lost connection, loop again 779 | break 780 | } 781 | l.Notify <- notification 782 | } 783 | 784 | err := l.disconnectCleanup() 785 | if l.closed() { 786 | return 787 | } 788 | l.emitEvent(ListenerEventDisconnected, err) 789 | 790 | time.Sleep(time.Until(nextReconnect)) 791 | } 792 | } 793 | 794 | func (l *Listener) listenerMain() { 795 | l.listenerConnLoop() 796 | close(l.Notify) 797 | } 798 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/oid/doc.go: -------------------------------------------------------------------------------- 1 | // Package oid contains OID constants 2 | // as defined by the Postgres server. 3 | package oid 4 | 5 | // Oid is a Postgres Object ID. 
6 | type Oid uint32 7 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/oid/gen.go: -------------------------------------------------------------------------------- 1 | // +build ignore 2 | 3 | // Generate the table of OID values 4 | // Run with 'go run gen.go'. 5 | package main 6 | 7 | import ( 8 | "database/sql" 9 | "fmt" 10 | "log" 11 | "os" 12 | "os/exec" 13 | "strings" 14 | 15 | _ "github.com/lib/pq" 16 | ) 17 | 18 | // OID represent a postgres Object Identifier Type. 19 | type OID struct { 20 | ID int 21 | Type string 22 | } 23 | 24 | // Name returns an upper case version of the oid type. 25 | func (o OID) Name() string { 26 | return strings.ToUpper(o.Type) 27 | } 28 | 29 | func main() { 30 | datname := os.Getenv("PGDATABASE") 31 | sslmode := os.Getenv("PGSSLMODE") 32 | 33 | if datname == "" { 34 | os.Setenv("PGDATABASE", "pqgotest") 35 | } 36 | 37 | if sslmode == "" { 38 | os.Setenv("PGSSLMODE", "disable") 39 | } 40 | 41 | db, err := sql.Open("postgres", "") 42 | if err != nil { 43 | log.Fatal(err) 44 | } 45 | rows, err := db.Query(` 46 | SELECT typname, oid 47 | FROM pg_type WHERE oid < 10000 48 | ORDER BY oid; 49 | `) 50 | if err != nil { 51 | log.Fatal(err) 52 | } 53 | oids := make([]*OID, 0) 54 | for rows.Next() { 55 | var oid OID 56 | if err = rows.Scan(&oid.Type, &oid.ID); err != nil { 57 | log.Fatal(err) 58 | } 59 | oids = append(oids, &oid) 60 | } 61 | if err = rows.Err(); err != nil { 62 | log.Fatal(err) 63 | } 64 | cmd := exec.Command("gofmt") 65 | cmd.Stderr = os.Stderr 66 | w, err := cmd.StdinPipe() 67 | if err != nil { 68 | log.Fatal(err) 69 | } 70 | f, err := os.Create("types.go") 71 | if err != nil { 72 | log.Fatal(err) 73 | } 74 | cmd.Stdout = f 75 | err = cmd.Start() 76 | if err != nil { 77 | log.Fatal(err) 78 | } 79 | fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.") 80 | fmt.Fprintln(w, "\npackage oid") 81 | fmt.Fprintln(w, "const (") 82 | for _, oid := range oids { 83 | fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID) 84 | } 85 | fmt.Fprintln(w, ")") 86 | fmt.Fprintln(w, "var TypeName = map[Oid]string{") 87 | for _, oid := range oids { 88 | fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name()) 89 | } 90 | fmt.Fprintln(w, "}") 91 | w.Close() 92 | cmd.Wait() 93 | } 94 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/oid/types.go: -------------------------------------------------------------------------------- 1 | // Code generated by gen.go. DO NOT EDIT. 
2 | 3 | package oid 4 | 5 | const ( 6 | T_bool Oid = 16 7 | T_bytea Oid = 17 8 | T_char Oid = 18 9 | T_name Oid = 19 10 | T_int8 Oid = 20 11 | T_int2 Oid = 21 12 | T_int2vector Oid = 22 13 | T_int4 Oid = 23 14 | T_regproc Oid = 24 15 | T_text Oid = 25 16 | T_oid Oid = 26 17 | T_tid Oid = 27 18 | T_xid Oid = 28 19 | T_cid Oid = 29 20 | T_oidvector Oid = 30 21 | T_pg_ddl_command Oid = 32 22 | T_pg_type Oid = 71 23 | T_pg_attribute Oid = 75 24 | T_pg_proc Oid = 81 25 | T_pg_class Oid = 83 26 | T_json Oid = 114 27 | T_xml Oid = 142 28 | T__xml Oid = 143 29 | T_pg_node_tree Oid = 194 30 | T__json Oid = 199 31 | T_smgr Oid = 210 32 | T_index_am_handler Oid = 325 33 | T_point Oid = 600 34 | T_lseg Oid = 601 35 | T_path Oid = 602 36 | T_box Oid = 603 37 | T_polygon Oid = 604 38 | T_line Oid = 628 39 | T__line Oid = 629 40 | T_cidr Oid = 650 41 | T__cidr Oid = 651 42 | T_float4 Oid = 700 43 | T_float8 Oid = 701 44 | T_abstime Oid = 702 45 | T_reltime Oid = 703 46 | T_tinterval Oid = 704 47 | T_unknown Oid = 705 48 | T_circle Oid = 718 49 | T__circle Oid = 719 50 | T_money Oid = 790 51 | T__money Oid = 791 52 | T_macaddr Oid = 829 53 | T_inet Oid = 869 54 | T__bool Oid = 1000 55 | T__bytea Oid = 1001 56 | T__char Oid = 1002 57 | T__name Oid = 1003 58 | T__int2 Oid = 1005 59 | T__int2vector Oid = 1006 60 | T__int4 Oid = 1007 61 | T__regproc Oid = 1008 62 | T__text Oid = 1009 63 | T__tid Oid = 1010 64 | T__xid Oid = 1011 65 | T__cid Oid = 1012 66 | T__oidvector Oid = 1013 67 | T__bpchar Oid = 1014 68 | T__varchar Oid = 1015 69 | T__int8 Oid = 1016 70 | T__point Oid = 1017 71 | T__lseg Oid = 1018 72 | T__path Oid = 1019 73 | T__box Oid = 1020 74 | T__float4 Oid = 1021 75 | T__float8 Oid = 1022 76 | T__abstime Oid = 1023 77 | T__reltime Oid = 1024 78 | T__tinterval Oid = 1025 79 | T__polygon Oid = 1027 80 | T__oid Oid = 1028 81 | T_aclitem Oid = 1033 82 | T__aclitem Oid = 1034 83 | T__macaddr Oid = 1040 84 | T__inet Oid = 1041 85 | T_bpchar Oid = 1042 86 | T_varchar Oid = 1043 87 | T_date Oid = 1082 88 | T_time Oid = 1083 89 | T_timestamp Oid = 1114 90 | T__timestamp Oid = 1115 91 | T__date Oid = 1182 92 | T__time Oid = 1183 93 | T_timestamptz Oid = 1184 94 | T__timestamptz Oid = 1185 95 | T_interval Oid = 1186 96 | T__interval Oid = 1187 97 | T__numeric Oid = 1231 98 | T_pg_database Oid = 1248 99 | T__cstring Oid = 1263 100 | T_timetz Oid = 1266 101 | T__timetz Oid = 1270 102 | T_bit Oid = 1560 103 | T__bit Oid = 1561 104 | T_varbit Oid = 1562 105 | T__varbit Oid = 1563 106 | T_numeric Oid = 1700 107 | T_refcursor Oid = 1790 108 | T__refcursor Oid = 2201 109 | T_regprocedure Oid = 2202 110 | T_regoper Oid = 2203 111 | T_regoperator Oid = 2204 112 | T_regclass Oid = 2205 113 | T_regtype Oid = 2206 114 | T__regprocedure Oid = 2207 115 | T__regoper Oid = 2208 116 | T__regoperator Oid = 2209 117 | T__regclass Oid = 2210 118 | T__regtype Oid = 2211 119 | T_record Oid = 2249 120 | T_cstring Oid = 2275 121 | T_any Oid = 2276 122 | T_anyarray Oid = 2277 123 | T_void Oid = 2278 124 | T_trigger Oid = 2279 125 | T_language_handler Oid = 2280 126 | T_internal Oid = 2281 127 | T_opaque Oid = 2282 128 | T_anyelement Oid = 2283 129 | T__record Oid = 2287 130 | T_anynonarray Oid = 2776 131 | T_pg_authid Oid = 2842 132 | T_pg_auth_members Oid = 2843 133 | T__txid_snapshot Oid = 2949 134 | T_uuid Oid = 2950 135 | T__uuid Oid = 2951 136 | T_txid_snapshot Oid = 2970 137 | T_fdw_handler Oid = 3115 138 | T_pg_lsn Oid = 3220 139 | T__pg_lsn Oid = 3221 140 | T_tsm_handler Oid = 3310 141 | T_anyenum Oid = 3500 142 | T_tsvector 
Oid = 3614 143 | T_tsquery Oid = 3615 144 | T_gtsvector Oid = 3642 145 | T__tsvector Oid = 3643 146 | T__gtsvector Oid = 3644 147 | T__tsquery Oid = 3645 148 | T_regconfig Oid = 3734 149 | T__regconfig Oid = 3735 150 | T_regdictionary Oid = 3769 151 | T__regdictionary Oid = 3770 152 | T_jsonb Oid = 3802 153 | T__jsonb Oid = 3807 154 | T_anyrange Oid = 3831 155 | T_event_trigger Oid = 3838 156 | T_int4range Oid = 3904 157 | T__int4range Oid = 3905 158 | T_numrange Oid = 3906 159 | T__numrange Oid = 3907 160 | T_tsrange Oid = 3908 161 | T__tsrange Oid = 3909 162 | T_tstzrange Oid = 3910 163 | T__tstzrange Oid = 3911 164 | T_daterange Oid = 3912 165 | T__daterange Oid = 3913 166 | T_int8range Oid = 3926 167 | T__int8range Oid = 3927 168 | T_pg_shseclabel Oid = 4066 169 | T_regnamespace Oid = 4089 170 | T__regnamespace Oid = 4090 171 | T_regrole Oid = 4096 172 | T__regrole Oid = 4097 173 | ) 174 | 175 | var TypeName = map[Oid]string{ 176 | T_bool: "BOOL", 177 | T_bytea: "BYTEA", 178 | T_char: "CHAR", 179 | T_name: "NAME", 180 | T_int8: "INT8", 181 | T_int2: "INT2", 182 | T_int2vector: "INT2VECTOR", 183 | T_int4: "INT4", 184 | T_regproc: "REGPROC", 185 | T_text: "TEXT", 186 | T_oid: "OID", 187 | T_tid: "TID", 188 | T_xid: "XID", 189 | T_cid: "CID", 190 | T_oidvector: "OIDVECTOR", 191 | T_pg_ddl_command: "PG_DDL_COMMAND", 192 | T_pg_type: "PG_TYPE", 193 | T_pg_attribute: "PG_ATTRIBUTE", 194 | T_pg_proc: "PG_PROC", 195 | T_pg_class: "PG_CLASS", 196 | T_json: "JSON", 197 | T_xml: "XML", 198 | T__xml: "_XML", 199 | T_pg_node_tree: "PG_NODE_TREE", 200 | T__json: "_JSON", 201 | T_smgr: "SMGR", 202 | T_index_am_handler: "INDEX_AM_HANDLER", 203 | T_point: "POINT", 204 | T_lseg: "LSEG", 205 | T_path: "PATH", 206 | T_box: "BOX", 207 | T_polygon: "POLYGON", 208 | T_line: "LINE", 209 | T__line: "_LINE", 210 | T_cidr: "CIDR", 211 | T__cidr: "_CIDR", 212 | T_float4: "FLOAT4", 213 | T_float8: "FLOAT8", 214 | T_abstime: "ABSTIME", 215 | T_reltime: "RELTIME", 216 | T_tinterval: "TINTERVAL", 217 | T_unknown: "UNKNOWN", 218 | T_circle: "CIRCLE", 219 | T__circle: "_CIRCLE", 220 | T_money: "MONEY", 221 | T__money: "_MONEY", 222 | T_macaddr: "MACADDR", 223 | T_inet: "INET", 224 | T__bool: "_BOOL", 225 | T__bytea: "_BYTEA", 226 | T__char: "_CHAR", 227 | T__name: "_NAME", 228 | T__int2: "_INT2", 229 | T__int2vector: "_INT2VECTOR", 230 | T__int4: "_INT4", 231 | T__regproc: "_REGPROC", 232 | T__text: "_TEXT", 233 | T__tid: "_TID", 234 | T__xid: "_XID", 235 | T__cid: "_CID", 236 | T__oidvector: "_OIDVECTOR", 237 | T__bpchar: "_BPCHAR", 238 | T__varchar: "_VARCHAR", 239 | T__int8: "_INT8", 240 | T__point: "_POINT", 241 | T__lseg: "_LSEG", 242 | T__path: "_PATH", 243 | T__box: "_BOX", 244 | T__float4: "_FLOAT4", 245 | T__float8: "_FLOAT8", 246 | T__abstime: "_ABSTIME", 247 | T__reltime: "_RELTIME", 248 | T__tinterval: "_TINTERVAL", 249 | T__polygon: "_POLYGON", 250 | T__oid: "_OID", 251 | T_aclitem: "ACLITEM", 252 | T__aclitem: "_ACLITEM", 253 | T__macaddr: "_MACADDR", 254 | T__inet: "_INET", 255 | T_bpchar: "BPCHAR", 256 | T_varchar: "VARCHAR", 257 | T_date: "DATE", 258 | T_time: "TIME", 259 | T_timestamp: "TIMESTAMP", 260 | T__timestamp: "_TIMESTAMP", 261 | T__date: "_DATE", 262 | T__time: "_TIME", 263 | T_timestamptz: "TIMESTAMPTZ", 264 | T__timestamptz: "_TIMESTAMPTZ", 265 | T_interval: "INTERVAL", 266 | T__interval: "_INTERVAL", 267 | T__numeric: "_NUMERIC", 268 | T_pg_database: "PG_DATABASE", 269 | T__cstring: "_CSTRING", 270 | T_timetz: "TIMETZ", 271 | T__timetz: "_TIMETZ", 272 | T_bit: "BIT", 273 | T__bit: "_BIT", 
274 | T_varbit: "VARBIT", 275 | T__varbit: "_VARBIT", 276 | T_numeric: "NUMERIC", 277 | T_refcursor: "REFCURSOR", 278 | T__refcursor: "_REFCURSOR", 279 | T_regprocedure: "REGPROCEDURE", 280 | T_regoper: "REGOPER", 281 | T_regoperator: "REGOPERATOR", 282 | T_regclass: "REGCLASS", 283 | T_regtype: "REGTYPE", 284 | T__regprocedure: "_REGPROCEDURE", 285 | T__regoper: "_REGOPER", 286 | T__regoperator: "_REGOPERATOR", 287 | T__regclass: "_REGCLASS", 288 | T__regtype: "_REGTYPE", 289 | T_record: "RECORD", 290 | T_cstring: "CSTRING", 291 | T_any: "ANY", 292 | T_anyarray: "ANYARRAY", 293 | T_void: "VOID", 294 | T_trigger: "TRIGGER", 295 | T_language_handler: "LANGUAGE_HANDLER", 296 | T_internal: "INTERNAL", 297 | T_opaque: "OPAQUE", 298 | T_anyelement: "ANYELEMENT", 299 | T__record: "_RECORD", 300 | T_anynonarray: "ANYNONARRAY", 301 | T_pg_authid: "PG_AUTHID", 302 | T_pg_auth_members: "PG_AUTH_MEMBERS", 303 | T__txid_snapshot: "_TXID_SNAPSHOT", 304 | T_uuid: "UUID", 305 | T__uuid: "_UUID", 306 | T_txid_snapshot: "TXID_SNAPSHOT", 307 | T_fdw_handler: "FDW_HANDLER", 308 | T_pg_lsn: "PG_LSN", 309 | T__pg_lsn: "_PG_LSN", 310 | T_tsm_handler: "TSM_HANDLER", 311 | T_anyenum: "ANYENUM", 312 | T_tsvector: "TSVECTOR", 313 | T_tsquery: "TSQUERY", 314 | T_gtsvector: "GTSVECTOR", 315 | T__tsvector: "_TSVECTOR", 316 | T__gtsvector: "_GTSVECTOR", 317 | T__tsquery: "_TSQUERY", 318 | T_regconfig: "REGCONFIG", 319 | T__regconfig: "_REGCONFIG", 320 | T_regdictionary: "REGDICTIONARY", 321 | T__regdictionary: "_REGDICTIONARY", 322 | T_jsonb: "JSONB", 323 | T__jsonb: "_JSONB", 324 | T_anyrange: "ANYRANGE", 325 | T_event_trigger: "EVENT_TRIGGER", 326 | T_int4range: "INT4RANGE", 327 | T__int4range: "_INT4RANGE", 328 | T_numrange: "NUMRANGE", 329 | T__numrange: "_NUMRANGE", 330 | T_tsrange: "TSRANGE", 331 | T__tsrange: "_TSRANGE", 332 | T_tstzrange: "TSTZRANGE", 333 | T__tstzrange: "_TSTZRANGE", 334 | T_daterange: "DATERANGE", 335 | T__daterange: "_DATERANGE", 336 | T_int8range: "INT8RANGE", 337 | T__int8range: "_INT8RANGE", 338 | T_pg_shseclabel: "PG_SHSECLABEL", 339 | T_regnamespace: "REGNAMESPACE", 340 | T__regnamespace: "_REGNAMESPACE", 341 | T_regrole: "REGROLE", 342 | T__regrole: "_REGROLE", 343 | } 344 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/rows.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "math" 5 | "reflect" 6 | "time" 7 | 8 | "github.com/lib/pq/oid" 9 | ) 10 | 11 | const headerSize = 4 12 | 13 | type fieldDesc struct { 14 | // The object ID of the data type. 15 | OID oid.Oid 16 | // The data type size (see pg_type.typlen). 17 | // Note that negative values denote variable-width types. 18 | Len int 19 | // The type modifier (see pg_attribute.atttypmod). 20 | // The meaning of the modifier is type-specific. 
21 | Mod int 22 | } 23 | 24 | func (fd fieldDesc) Type() reflect.Type { 25 | switch fd.OID { 26 | case oid.T_int8: 27 | return reflect.TypeOf(int64(0)) 28 | case oid.T_int4: 29 | return reflect.TypeOf(int32(0)) 30 | case oid.T_int2: 31 | return reflect.TypeOf(int16(0)) 32 | case oid.T_varchar, oid.T_text: 33 | return reflect.TypeOf("") 34 | case oid.T_bool: 35 | return reflect.TypeOf(false) 36 | case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: 37 | return reflect.TypeOf(time.Time{}) 38 | case oid.T_bytea: 39 | return reflect.TypeOf([]byte(nil)) 40 | default: 41 | return reflect.TypeOf(new(interface{})).Elem() 42 | } 43 | } 44 | 45 | func (fd fieldDesc) Name() string { 46 | return oid.TypeName[fd.OID] 47 | } 48 | 49 | func (fd fieldDesc) Length() (length int64, ok bool) { 50 | switch fd.OID { 51 | case oid.T_text, oid.T_bytea: 52 | return math.MaxInt64, true 53 | case oid.T_varchar, oid.T_bpchar: 54 | return int64(fd.Mod - headerSize), true 55 | default: 56 | return 0, false 57 | } 58 | } 59 | 60 | func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { 61 | switch fd.OID { 62 | case oid.T_numeric, oid.T__numeric: 63 | mod := fd.Mod - headerSize 64 | precision = int64((mod >> 16) & 0xffff) 65 | scale = int64(mod & 0xffff) 66 | return precision, scale, true 67 | default: 68 | return 0, 0, false 69 | } 70 | } 71 | 72 | // ColumnTypeScanType returns the value type that can be used to scan types into. 73 | func (rs *rows) ColumnTypeScanType(index int) reflect.Type { 74 | return rs.colTyps[index].Type() 75 | } 76 | 77 | // ColumnTypeDatabaseTypeName return the database system type name. 78 | func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { 79 | return rs.colTyps[index].Name() 80 | } 81 | 82 | // ColumnTypeLength returns the length of the column type if the column is a 83 | // variable length type. If the column is not a variable length type ok 84 | // should return false. 85 | func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { 86 | return rs.colTyps[index].Length() 87 | } 88 | 89 | // ColumnTypePrecisionScale should return the precision and scale for decimal 90 | // types. If not applicable, ok should be false. 91 | func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { 92 | return rs.colTyps[index].PrecisionScale() 93 | } 94 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "io/ioutil" 7 | "net" 8 | "os" 9 | "os/user" 10 | "path/filepath" 11 | ) 12 | 13 | // ssl generates a function to upgrade a net.Conn based on the "sslmode" and 14 | // related settings. The function is nil when no upgrade should take place. 15 | func ssl(o values) (func(net.Conn) (net.Conn, error), error) { 16 | verifyCaOnly := false 17 | tlsConf := tls.Config{} 18 | switch mode := o["sslmode"]; mode { 19 | // "require" is the default. 20 | case "", "require": 21 | // We must skip TLS's own verification since it requires full 22 | // verification since Go 1.3. 
23 | tlsConf.InsecureSkipVerify = true 24 | 25 | // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: 26 | // 27 | // Note: For backwards compatibility with earlier versions of 28 | // PostgreSQL, if a root CA file exists, the behavior of 29 | // sslmode=require will be the same as that of verify-ca, meaning the 30 | // server certificate is validated against the CA. Relying on this 31 | // behavior is discouraged, and applications that need certificate 32 | // validation should always use verify-ca or verify-full. 33 | if sslrootcert, ok := o["sslrootcert"]; ok { 34 | if _, err := os.Stat(sslrootcert); err == nil { 35 | verifyCaOnly = true 36 | } else { 37 | delete(o, "sslrootcert") 38 | } 39 | } 40 | case "verify-ca": 41 | // We must skip TLS's own verification since it requires full 42 | // verification since Go 1.3. 43 | tlsConf.InsecureSkipVerify = true 44 | verifyCaOnly = true 45 | case "verify-full": 46 | tlsConf.ServerName = o["host"] 47 | case "disable": 48 | return nil, nil 49 | default: 50 | return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) 51 | } 52 | 53 | err := sslClientCertificates(&tlsConf, o) 54 | if err != nil { 55 | return nil, err 56 | } 57 | err = sslCertificateAuthority(&tlsConf, o) 58 | if err != nil { 59 | return nil, err 60 | } 61 | sslRenegotiation(&tlsConf) 62 | 63 | return func(conn net.Conn) (net.Conn, error) { 64 | client := tls.Client(conn, &tlsConf) 65 | if verifyCaOnly { 66 | err := sslVerifyCertificateAuthority(client, &tlsConf) 67 | if err != nil { 68 | return nil, err 69 | } 70 | } 71 | return client, nil 72 | }, nil 73 | } 74 | 75 | // sslClientCertificates adds the certificate specified in the "sslcert" and 76 | // "sslkey" settings, or if they aren't set, from the .postgresql directory 77 | // in the user's home directory. The configured files must exist and have 78 | // the correct permissions. 79 | func sslClientCertificates(tlsConf *tls.Config, o values) error { 80 | // user.Current() might fail when cross-compiling. We have to ignore the 81 | // error and continue without home directory defaults, since we wouldn't 82 | // know from where to load them. 83 | user, _ := user.Current() 84 | 85 | // In libpq, the client certificate is only loaded if the setting is not blank. 86 | // 87 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 88 | sslcert := o["sslcert"] 89 | if len(sslcert) == 0 && user != nil { 90 | sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") 91 | } 92 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 93 | if len(sslcert) == 0 { 94 | return nil 95 | } 96 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 97 | if _, err := os.Stat(sslcert); os.IsNotExist(err) { 98 | return nil 99 | } else if err != nil { 100 | return err 101 | } 102 | 103 | // In libpq, the ssl key is only loaded if the setting is not blank. 
104 | // 105 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 106 | sslkey := o["sslkey"] 107 | if len(sslkey) == 0 && user != nil { 108 | sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") 109 | } 110 | 111 | if len(sslkey) > 0 { 112 | if err := sslKeyPermissions(sslkey); err != nil { 113 | return err 114 | } 115 | } 116 | 117 | cert, err := tls.LoadX509KeyPair(sslcert, sslkey) 118 | if err != nil { 119 | return err 120 | } 121 | 122 | tlsConf.Certificates = []tls.Certificate{cert} 123 | return nil 124 | } 125 | 126 | // sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. 127 | func sslCertificateAuthority(tlsConf *tls.Config, o values) error { 128 | // In libpq, the root certificate is only loaded if the setting is not blank. 129 | // 130 | // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 131 | if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { 132 | tlsConf.RootCAs = x509.NewCertPool() 133 | 134 | cert, err := ioutil.ReadFile(sslrootcert) 135 | if err != nil { 136 | return err 137 | } 138 | 139 | if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { 140 | return fmterrorf("couldn't parse pem in sslrootcert") 141 | } 142 | } 143 | 144 | return nil 145 | } 146 | 147 | // sslVerifyCertificateAuthority carries out a TLS handshake to the server and 148 | // verifies the presented certificate against the CA, i.e. the one specified in 149 | // sslrootcert or the system CA if sslrootcert was not specified. 150 | func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { 151 | err := client.Handshake() 152 | if err != nil { 153 | return err 154 | } 155 | certs := client.ConnectionState().PeerCertificates 156 | opts := x509.VerifyOptions{ 157 | DNSName: client.ConnectionState().ServerName, 158 | Intermediates: x509.NewCertPool(), 159 | Roots: tlsConf.RootCAs, 160 | } 161 | for i, cert := range certs { 162 | if i == 0 { 163 | continue 164 | } 165 | opts.Intermediates.AddCert(cert) 166 | } 167 | _, err = certs[0].Verify(opts) 168 | return err 169 | } 170 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl_go1.7.go: -------------------------------------------------------------------------------- 1 | // +build go1.7 2 | 3 | package pq 4 | 5 | import "crypto/tls" 6 | 7 | // Accept renegotiation requests initiated by the backend. 8 | // 9 | // Renegotiation was deprecated then removed from PostgreSQL 9.5, but 10 | // the default configuration of older versions has it enabled. Redshift 11 | // also initiates renegotiations and cannot be reconfigured. 12 | func sslRenegotiation(conf *tls.Config) { 13 | conf.Renegotiation = tls.RenegotiateFreelyAsClient 14 | } 15 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl_permissions.go: -------------------------------------------------------------------------------- 1 | // +build !windows 2 | 3 | package pq 4 | 5 | import "os" 6 | 7 | // sslKeyPermissions checks the permissions on user-supplied ssl key files. 8 | // The key file should have very little access. 9 | // 10 | // libpq does not check key file permissions on Windows. 
11 | func sslKeyPermissions(sslkey string) error { 12 | info, err := os.Stat(sslkey) 13 | if err != nil { 14 | return err 15 | } 16 | if info.Mode().Perm()&0077 != 0 { 17 | return ErrSSLKeyHasWorldPermissions 18 | } 19 | return nil 20 | } 21 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl_renegotiation.go: -------------------------------------------------------------------------------- 1 | // +build !go1.7 2 | 3 | package pq 4 | 5 | import "crypto/tls" 6 | 7 | // Renegotiation is not supported by crypto/tls until Go 1.7. 8 | func sslRenegotiation(*tls.Config) {} 9 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/ssl_windows.go: -------------------------------------------------------------------------------- 1 | // +build windows 2 | 3 | package pq 4 | 5 | // sslKeyPermissions checks the permissions on user-supplied ssl key files. 6 | // The key file should have very little access. 7 | // 8 | // libpq does not check key file permissions on Windows. 9 | func sslKeyPermissions(string) error { return nil } 10 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/url.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | nurl "net/url" 7 | "sort" 8 | "strings" 9 | ) 10 | 11 | // ParseURL no longer needs to be used by clients of this library since supplying a URL as a 12 | // connection string to sql.Open() is now supported: 13 | // 14 | // sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") 15 | // 16 | // It remains exported here for backwards-compatibility. 17 | // 18 | // ParseURL converts a url to a connection string for driver.Open. 
19 | // Example: 20 | // 21 | // "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" 22 | // 23 | // converts to: 24 | // 25 | // "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" 26 | // 27 | // A minimal example: 28 | // 29 | // "postgres://" 30 | // 31 | // This will be blank, causing driver.Open to use all of the defaults 32 | func ParseURL(url string) (string, error) { 33 | u, err := nurl.Parse(url) 34 | if err != nil { 35 | return "", err 36 | } 37 | 38 | if u.Scheme != "postgres" && u.Scheme != "postgresql" { 39 | return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) 40 | } 41 | 42 | var kvs []string 43 | escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) 44 | accrue := func(k, v string) { 45 | if v != "" { 46 | kvs = append(kvs, k+"="+escaper.Replace(v)) 47 | } 48 | } 49 | 50 | if u.User != nil { 51 | v := u.User.Username() 52 | accrue("user", v) 53 | 54 | v, _ = u.User.Password() 55 | accrue("password", v) 56 | } 57 | 58 | if host, port, err := net.SplitHostPort(u.Host); err != nil { 59 | accrue("host", u.Host) 60 | } else { 61 | accrue("host", host) 62 | accrue("port", port) 63 | } 64 | 65 | if u.Path != "" { 66 | accrue("dbname", u.Path[1:]) 67 | } 68 | 69 | q := u.Query() 70 | for k := range q { 71 | accrue(k, q.Get(k)) 72 | } 73 | 74 | sort.Strings(kvs) // Makes testing easier (not a performance concern) 75 | return strings.Join(kvs, " "), nil 76 | } 77 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/user_posix.go: -------------------------------------------------------------------------------- 1 | // Package pq is a pure Go Postgres driver for the database/sql package. 2 | 3 | // +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun 4 | 5 | package pq 6 | 7 | import ( 8 | "os" 9 | "os/user" 10 | ) 11 | 12 | func userCurrent() (string, error) { 13 | u, err := user.Current() 14 | if err == nil { 15 | return u.Username, nil 16 | } 17 | 18 | name := os.Getenv("USER") 19 | if name != "" { 20 | return name, nil 21 | } 22 | 23 | return "", ErrCouldNotDetectUsername 24 | } 25 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/user_windows.go: -------------------------------------------------------------------------------- 1 | // Package pq is a pure Go Postgres driver for the database/sql package. 2 | package pq 3 | 4 | import ( 5 | "path/filepath" 6 | "syscall" 7 | ) 8 | 9 | // Perform Windows user name lookup identically to libpq. 10 | // 11 | // The PostgreSQL code makes use of the legacy Win32 function 12 | // GetUserName, and that function has not been imported into stock Go. 13 | // GetUserNameEx is available though, the difference being that a 14 | // wider range of names are available. To get the output to be the 15 | // same as GetUserName, only the base (or last) component of the 16 | // result is returned. 
17 | func userCurrent() (string, error) { 18 | pw_name := make([]uint16, 128) 19 | pwname_size := uint32(len(pw_name)) - 1 20 | err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) 21 | if err != nil { 22 | return "", ErrCouldNotDetectUsername 23 | } 24 | s := syscall.UTF16ToString(pw_name) 25 | u := filepath.Base(s) 26 | return u, nil 27 | } 28 | -------------------------------------------------------------------------------- /vendor/github.com/lib/pq/uuid.go: -------------------------------------------------------------------------------- 1 | package pq 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | ) 7 | 8 | // decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. 9 | func decodeUUIDBinary(src []byte) ([]byte, error) { 10 | if len(src) != 16 { 11 | return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) 12 | } 13 | 14 | dst := make([]byte, 36) 15 | dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' 16 | hex.Encode(dst[0:], src[0:4]) 17 | hex.Encode(dst[9:], src[4:6]) 18 | hex.Encode(dst[14:], src[6:8]) 19 | hex.Encode(dst[19:], src[8:10]) 20 | hex.Encode(dst[24:], src[10:16]) 21 | 22 | return dst, nil 23 | } 24 | -------------------------------------------------------------------------------- /vendor/modules.txt: -------------------------------------------------------------------------------- 1 | # github.com/casbin/casbin v1.8.0 2 | github.com/casbin/casbin/model 3 | github.com/casbin/casbin/config 4 | github.com/casbin/casbin/log 5 | github.com/casbin/casbin/rbac 6 | github.com/casbin/casbin/util 7 | # github.com/lib/pq v1.0.0 8 | github.com/lib/pq 9 | github.com/lib/pq/oid 10 | --------------------------------------------------------------------------------
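A minimal LISTEN/NOTIFY consumer built on the vendored lib/pq Listener might look like the sketch below. It is illustrative only and not part of this repository or of lib/pq itself; the connection string and the "events" channel name are placeholders. It uses the NewListener signature shown in notify.go: the Listener dials its own dedicated connection, Listen registers a channel, and ranging over Notify delivers notifications, with a nil value signalling that the connection was re-established and notifications may have been missed (see ListenerEventReconnected above).

package main

import (
	"fmt"
	"time"

	"github.com/lib/pq"
)

func main() {
	// Placeholder DSN; any connection string accepted by lib/pq works here.
	conninfo := "postgres://user:secret@localhost:5432/mydb?sslmode=disable"

	// The callback runs on the goroutine that dispatches notifications,
	// so it should return quickly.
	logEvent := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			fmt.Println("listener event:", ev, err)
		}
	}

	// Wait 10s before reconnecting after a failure, doubling the interval
	// on consecutive failures up to a 1 minute cap.
	listener := pq.NewListener(conninfo, 10*time.Second, time.Minute, logEvent)
	defer listener.Close()

	if err := listener.Listen("events"); err != nil {
		// Possible errors: ErrChannelAlreadyOpen, a pq.Error returned by
		// the server, or an error because the Listener was closed.
		fmt.Println("listen failed:", err)
		return
	}

	for n := range listener.Notify {
		if n == nil {
			// Sent after a reconnect; notifications may have been lost
			// while the connection was down.
			fmt.Println("reconnected")
			continue
		}
		fmt.Printf("notification on %q: %s\n", n.Channel, n.Extra)
	}
}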