├── .classpath ├── .gitignore ├── .project ├── LICENSE ├── README.md ├── build.properties ├── build.xml ├── docs └── installation.txt ├── ivy.xml ├── ivy └── ivysettings.xml ├── solr-conf ├── solr.xml.patch └── solrconfig.xml.patch ├── src ├── main │ └── java │ │ ├── com │ │ └── couchbase │ │ │ └── capi │ │ │ ├── CAPIBehavior.java │ │ │ ├── CAPIServer.java │ │ │ ├── CouchbaseBehavior.java │ │ │ └── servlet │ │ │ ├── BucketMapServlet.java │ │ │ ├── CAPIServlet.java │ │ │ ├── ClusterMapServlet.java │ │ │ └── StatsServlet.java │ │ └── org │ │ └── apache │ │ └── solr │ │ └── couchbase │ │ ├── Bucket.java │ │ ├── CommonConstants.java │ │ ├── CouchbaseRecordHandler.java │ │ ├── CouchbaseRequestHandler.java │ │ ├── CouchbaseUtils.java │ │ ├── Counter.java │ │ ├── DefaultTypeSelector.java │ │ ├── RegexTypeSelector.java │ │ ├── Settings.java │ │ ├── SolrCAPIBehaviour.java │ │ ├── SolrCouchbaseBehaviour.java │ │ ├── SolrUtils.java │ │ ├── TypeSelector.java │ │ └── Utils.java └── test │ └── java │ └── org │ └── apache │ └── solr │ └── couchbase │ └── SolrUtilsTest.java └── zk-conf └── zoo.cfg /.classpath: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Directories 2 | /bin/ 3 | /build/ 4 | build-lib/ 5 | lib/ 6 | 7 | -------------------------------------------------------------------------------- /.project: -------------------------------------------------------------------------------- 1 | 2 | 3 | solr-couchbase-plugin 4 | 5 | 6 | 7 | 8 | 9 | org.eclipse.jdt.core.javabuilder 10 | 11 | 12 | 13 | 14 | 15 | org.eclipse.jdt.core.javanature 16 | org.apache.ivyde.eclipse.ivynature 17 | 18 | 19 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | solr-couchbase-plugin 2 | ===================== 3 | 4 | This plugin allows to import CouchBase data to Solr. It uses the Cross-datacenter Replication (XDCR) feature of Couchbase Server 2.0 to transfer data continuously. 5 | 6 | # Plugin configuration 7 | 8 | 9 | ## Dependencies 10 | 11 | * Copy this plugin from `solr-shared-libs/` to the directory: 12 | ``` 13 | /solr/lib-couchbase/ 14 | ``` 15 | * Add dependencies from `solr-war-libs/` directory to solr.war and remove the older **commons-io** dependency version (2.3). Dependencies should be located in the war file under: 16 | ``` 17 | /WEB-INF/lib/ 18 | ``` 19 | 20 | 21 | ## solr.xml 22 | 23 | An additional line to solr.xml should be added, to inform Solr about new dependencies which should be included in the Solr's classpath. 
Add the following line to solr.xml file: 24 | 25 | ``` 26 | ${sharedLib:lib-couchbase} 27 | ``` 28 | 29 | The whole solr.xml file should look as follows: 30 | 31 | ``` 32 | 33 | ${sharedLib:lib-couchbase} 34 | 35 | 36 | ${host:} 37 | ${jetty.port:8983} 38 | ${hostContext:solr} 39 | ${zkClientTimeout:30000} 40 | ${genericCoreNodeNames:true} 41 | 42 | 43 | 45 | ${socketTimeout:0} 46 | ${connTimeout:0} 47 | 48 | 49 | 50 | ``` 51 | 52 | 53 | ## solrconfig.xml 54 | 55 | It is required to configure Couchbase buckets to index data from in the solrconfig.xml file under */couchbase* RequestHandler. Whole RequestHandler configuration should look as following: 56 | 57 | ``` 58 | 59 | 60 | admin 61 | admin123 62 | 9876 63 | 1024 64 | false 65 | false 66 | 67 | 68 | 69 | 127.0.0.1 70 | Administrator 71 | password 72 | Solr 73 | 74 | 75 | test 76 | default 77 | 78 | 79 | 80 | program 81 | default 82 | 83 | 84 | 85 | 86 | 87 | default 88 | / 89 | 90 | name:/name 91 | city_s:/city 92 | code_s:/code 93 | country_s:/country 94 | phone_s:/phone 95 | url:/website 96 | type_s:/type 97 | last_modified:/updated 98 | description:/description 99 | address_s:/address 100 | geo_s:/geo 101 | 102 | 103 | 104 | ``` 105 | 106 | * params - a list of params required to configure this plugin. 107 | - username - A valid Couchbase server username 108 | - password - A valid Couchbase server password 109 | - port - A port number on which this plugin will register itself as a Couchbase replica. 110 | - numVBuckets - A number of VBuckets used by this Couchbase replica. Couchbase Server on Mac OS X uses 64 vBuckets as opposed to the 1024 vBuckets used by other platforms. **Couchbase clusters with mixed platforms are not supported.** It is required that numVBuckets is identical on the Couchbase server and Solr plugin. 111 | - commitAfterBatch - A flag specifying whether this plugin should commit documents to Solr after every batch of documents or when all the documents are retrieved from Couchbase. 
112 | 113 | - couchbaseServer - Optional, a list with attributes that are required for creating Couchbase XDCR remote cluster and/or XDCR replication. 114 | - ipAddress - The IP address where this plugin is running. Required if you have couchbaseServer field. 115 | - couchbaseUsername - The username of target Couchbase instance. With this field the plugin can get authorized by Couchbase. Required if you have couchbaseServer field. 116 | - couchbasePassword - The password of target Couchbase instance. With this field the plugin can get authorized by Couchbase. Required if you have couchbaseServer field. 117 | - clusterName - The cluster name that you want to create Couchbase XDCR remote cluster with. Required if you have couchbaseServer field. 118 | - bucketInfo - Optional, a list with attributes that are required for creating Couchbase XDCR replication. 119 | - fromBucketName - The Couchbase bucket name where you want to pull data from. Required if you have bucketInfo field. 120 | - toBucketName - The destination bucket name where you want to push data to. Basically you have to use the names that are specified in "bucket" list. Required if you have bucketInfo field. 121 | 122 | * bucket - a list with bucket parameters required to perform a synchronisation with Couchbase. Multiple lists of this type are allowed. 123 | - name - Bucket name - must be unique 124 | - splitpath - a list with paths to the fields in JSON Object on which the original Couchbase JSON document will be split up to extract embedded documents. This is a single String where paths are separated with "|". 125 | - fieldmappings - a list with field names mapping for Couchbase documents, before indexing them into Solr. List element's name must be unique. At least one field mapping must be provided. Value should be `solr_field_name:couchbase_field_path`. The ‘json-path’ is a required part. 'target-field-name' is the name of the field in the input Solr document. 
It is optional and it is automatically derived from the input json. 126 | - More informations about splitpath and fieldmappings usage can be found here: https://lucidworks.com/blog/indexing-custom-json-data/ 127 | - Wildcards - Instead of specifying all the field names in *fieldmappings* explicitly , it is possible to specify a wildcard '\*' or a wildwildcard '\*\*' to map fields automatically. The constraint is that wild cards can be only used in the end of the json-path. The split path cannot use wildcards. The following are example wildcard path mappings: 128 | 129 | Example Wildcards: 130 | ``` 131 | f=/docs/* : maps all the fields under docs and in the name as given in json 132 | f=/docs/** : maps all the fields under docs and its children in the name as given in json 133 | f=searchField:/docs/* : maps all fields under /docs to a single field called ‘searchField’ 134 | f=searchField:/docs/** : maps all fields under /docs and its children to searchField 135 | ``` 136 | 137 | 138 | Example Bucket Configuration: 139 | ``` 140 | 141 | default 142 | / 143 | 144 | name:/name 145 | city_s:/city 146 | code_s:/code 147 | country_s:/country 148 | phone_s:/phone 149 | url:/website 150 | type_s:/type 151 | last_modified:/updated 152 | description:/description 153 | address_s:/address 154 | geo_s:/geo 155 | 156 | 157 | ``` 158 | 159 | 160 | Example JSON: 161 | ``` 162 | { 163 | "person" : { 164 | "name" : "John", 165 | "age" : 22 166 | }, 167 | "value" : 100 168 | } 169 | ``` 170 | 171 | Example field mappings for above JSON: 172 | ``` 173 | 174 | person:/person 175 | name:/person/name 176 | age_i:/person/age 177 | value_i:/value 178 | 179 | ``` 180 | 181 | 182 | ## Couchbase XDCR 183 | 184 | In Couchbase admin panel, under XDCR tab following settings should be configured: 185 | 186 | ### Remote Cluster 187 | 188 | This Solr plugin should be configured as Couchbase's Remote Cluster. Click on 'Create Cluster Reference' button and fill in cluster data. 
189 | 190 | * 'Cluster Name' can be any name. 191 | * 'IP/hostname' should be `:port` and the port number is the port on which this request handler will register itself as a Couchbase Replica. It is specified in CouchbaseRequestHandler's **params** list as 'port'. 192 | * Username and password should be the same as those provided in solrconfig.xml file, as the replica starts with the same credentials as the Couchbase server. 193 | 194 | ### Replication 195 | 196 | Click on 'Create Replication' button. 197 | 198 | * Select a Couchbase Cluster and a Bucket to replicate from. 199 | * Select configured Solr's Plugin Remote Cluster and add a Bucket name. This Bucket name must match the name specified in the bucket **name** parameter in the solrconfig.xml file. 200 | * Under *Advanced settings* **XDCR Protocol** should be set to 'Version 1' value. 201 | 202 | 203 | # Multiple collections 204 | 205 | To synchronize documents from Couchbase to multiple Solr collections, each collection needs to have this plugin configured in its solrconfig.xml file and also a replication in Couchbase created. Each collection plugin must be started manually. If this is done, there will be one Couchbase replica running for each Solr collection. 206 | 207 | # Running Solr Couchbase Plugin 208 | 209 | To run this plugin, simply perform all actions described in Configuration section of this instruction, and run Solr. To run single instance of Solr, execute `ant run-solr` command in the main directory. To run Solr in Cloud mode, execute `ant solr-cloud` command in the main directory. When solr is started execute GET request to URL 210 | 211 | ``` 212 | http:///solr/collection1/couchbase?action=start 213 | ``` 214 | 215 | This will start the plugin, which will register in Couchbase as its replica and the data synchronisation as well as indexing into Solr should start. 
It assumes that Couchbase server is running and Cross-Datacenter Replication is set as described in *Couchbase XDCR* section of this document. 216 | 217 | To stop the plugin, exegute GET request to the URL 218 | ``` 219 | http:///solr/collection1/couchbase?action=stop 220 | ``` 221 | 222 | In a case, when all configured Solr instances with this plugin are restarted, Couchbase XDCR must be configured again. This is because every time a Couchbase replica is started, it acquires new pool UUID which is used in communications with Couchbase server. 223 | -------------------------------------------------------------------------------- /build.properties: -------------------------------------------------------------------------------- 1 | # Directories 2 | basedir=${ant.file.solr-couchbase-plugin} 3 | build.dir=${basedir}/build 4 | build-lib.dir=${basedir}/build-lib 5 | lib.dir=${basedir}/lib 6 | classes.dir=${build.dir}/classes 7 | test.classes.dir=${build.dir}/test-classes 8 | sources.dir=${basedir}/src/main/java 9 | tests.dir=${build.dir}/tests 10 | test.reports=${build.dir}/test-reports 11 | ivy.dir=${basedir}/ivy 12 | ant.dir=${build-lib.dir}/ant 13 | solr.dir=${build.dir}/solr-${solr.server.version} 14 | solr.conf.dir=${basedir}/src/main/resources/conf 15 | solr.sharedlib.dir=solr/lib-couchbase 16 | solr.conf.overrides.dir=${basedir}/solr-conf 17 | solr.cloud.dir.1=${solr.dir}-solrcloud-1 18 | solr.cloud.dir.2=${solr.dir}-solrcloud-2 19 | solr.cloud.dir.3=${solr.dir}-solrcloud-3 20 | solr.extra.libs=${build.dir}/solr-lib 21 | zk.dir=${build.dir}/zookeeper-${zk.version} 22 | zk.conf.dir=${basedir}/zk-conf 23 | dist.dir=${build.dir}/dist 24 | 25 | # Versions 26 | solr.couchbase.plugin.version=0.5.2 27 | couchbase-capi-server.version=1.3.0 28 | # solr server version to run externally 29 | solr.server.version=4.10.0 30 | ivy.version=2.3.0 31 | jetty.version=8.1.8.v20121106 32 | # aggregator servlet version 33 | servlet.version=3.0.0.v201112011016 34 | zk.version=3.4.6 35 
| 36 | # Parameters 37 | heap.size=1024M 38 | javac.target=1.7 39 | javac.source=1.7 40 | solr.port=8890 41 | solr.stop.port=7890 42 | debug=on 43 | debug.port=5005 44 | solr.cloud.port.1=8890 45 | solr.cloud.port.2=8891 46 | solr.cloud.port.3=8892 47 | solr.cloud.stop.port.1=7890 48 | solr.cloud.stop.port.2=7891 49 | solr.cloud.stop.port.3=7892 50 | solr.cloud.zk.port=2181 51 | solr.cloud.num.shards=2 52 | -------------------------------------------------------------------------------- /build.xml: -------------------------------------------------------------------------------- 1 | 4 | 5 | Solr Couchbase plugin 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 40 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | Resolving ivy conf: @{conf} for lib.dir: @{lib.dir} in module ${ant.project.name} 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | ${basedir} 113 | 114 | Sources: ${sources.dir} 115 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | solrzip.exists=${solrzip.exists} 148 | 149 | 150 | 151 | 153 | 154 | 155 | 156 | 157 | zk.tar.exists=${zk.tar.exists} 158 | 159 | 160 | 161 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | Unpacking Solr @{solr.version} into @{solr.dir} from module ${ant.project.name} 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 
| 228 | 229 | 230 | 231 | 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 267 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | 307 | 308 | 309 | 310 | 311 | 312 | 313 | 314 | 315 | 316 | 317 | 318 | 319 | 320 | 321 | 322 | 323 | 324 | 325 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 333 | 334 | 335 | 336 | 337 | 338 | 339 | 340 | 341 | 342 | 343 | 344 | 345 | 346 | 347 | 348 | 349 | 350 | 351 | 352 | 353 | 354 | 355 | 356 | 357 | 358 | 359 | 360 | 361 | 362 | 363 | 364 | 365 | 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | 377 | 378 | 379 | 380 | 381 | 382 | 383 | 384 | 385 | 386 | 387 | 388 | 389 | 390 | 391 | 392 | 393 | 394 | 395 | 396 | 397 | 398 | 399 | 400 | 401 | 402 | 403 | 404 | 405 | 406 | 407 | 408 | 409 | 410 | 411 | 412 | 413 | 414 | 415 | 416 | 417 | 418 | 419 | 420 | 421 | 422 | 423 | 424 | ZooKeeper failed to start. 425 | See the log for details; this is usually caused by a port conflict, often when the previous instance of ZooKeeper had not yet released the port. 
426 | 427 | 428 | 429 | 430 | 431 | 432 | 433 | 434 | 435 | 436 | 437 | 438 | 439 | 440 | 441 | 442 | 443 | 444 | 445 | 446 | 447 | 448 | 449 | 450 | 451 | 452 | 453 | 454 | 455 | 456 | 457 | 458 | 459 | 460 | 461 | 462 | 463 | 464 | 465 | 466 | 467 | 468 | 469 | 470 | 471 | 472 | 473 | 474 | 475 | 476 | 477 | 478 | 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | 487 | 488 | 489 | 490 | 491 | 492 | 493 | 494 | 495 | 498 | 499 | 500 | 501 | 502 | 503 | 504 | -------------------------------------------------------------------------------- /docs/installation.txt: -------------------------------------------------------------------------------- 1 | solr-couchbase-plugin 2 | ===================== 3 | 4 | This plugin allows to import CouchBase data to Solr. It uses the Cross-datacenter Replication (XDCR) feature of Couchbase Server 2.0 to transfer data continuously. It is confirmed to run with Solr 4.10 and Couchbase 2.5.1 releases. 5 | 6 | # Plugin configuration 7 | 8 | 9 | ## Dependencies 10 | 11 | * Copy this plugin and its dependencies from `lib/` directory to solr.war and remove the older **commons-io** dependency version (2.3). All libraries should be copied to the war file under: 12 | ``` 13 | /WEB-INF/lib/ 14 | ``` 15 | 16 | solr.war file can be updated by performing this steps: 17 | 1. extract original solr.war file 18 | 2. in the `/WEB-INF/lib/` directory put this plugin library and its dependencies 19 | 3. remove older commons-io dependency (optional) 20 | 4. make new war file by executing this command in the extracted solr.war directory `jar -cvf solr.war .` 21 | 5. copy new solr.war file to the `/webapps/` directory. 22 | 23 | 24 | ## solrconfig.xml 25 | 26 | It is required to configure Couchbase buckets to index data from in the solrconfig.xml file under */couchbase* RequestHandler. 
Whole RequestHandler configuration should look as following: 27 | 28 | ``` 29 | 30 | 31 | admin 32 | admin123 33 | 9876 34 | 1024 35 | false 36 | false 37 | 38 | 39 | default 40 | / 41 | 42 | name:/name 43 | city_s:/city 44 | code_s:/code 45 | country_s:/country 46 | phone_s:/phone 47 | url:/website 48 | type_s:/type 49 | last_modified:/updated 50 | description:/description 51 | address_s:/address 52 | geo_s:/geo 53 | 54 | 55 | 56 | ``` 57 | 58 | * params - a list of params required to configure this plugin. 59 | - username - A valid Couchbase server username 60 | - password - A valid Couchbase server password 61 | - port - A port number on which this plugin will register itself as a Couchbase replica. 62 | - numVBuckets - A number of VBuckets used by this Couchbase replica. Couchbase Server on Mac OS X uses 64 vBuckets as opposed to the 1024 vBuckets used by other platforms. **Couchbase clusters with mixed platforms are not supported.** It is required that numVBuckets is identical on the Couchbase server and Solr plugin. 63 | - commitAfterBatch - A flag specifying whether this plugin should commit documents to Solr after every batch of documents or when all the documents are retrieved from Couchbase. 64 | 65 | * bucket - a list with bucket parameters required to perform a synchronisation with Couchbase. Multiple lists of this type are allowed. 66 | - name - Bucket name - must be unique 67 | - splitpath - a list with paths to the fields in JSON Object on which the original Couchbase JSON document will be split up to extract embedded documents. This is a single String where paths are saparated with "|". 68 | - fieldmappings - a list with field names mapping for Couchbase documents, before indexing them into Solr. List element's name must be unique. At least one field mapping must be provided. Value should be `solr_field_name:couchbase_field_path`. The ‘json-path’ is a required part. 'target-field-name' is the name of the field in the input Solr document. 
It is optional and it is automatically derived from the input json. 69 | - More informations about splitpath and fieldmappings usage can be found here: https://lucidworks.com/blog/indexing-custom-json-data/ 70 | - Wildcards - Instead of specifying all the field names in *fieldmappings* explicitly , it is possible to specify a wildcard '\*' or a wildwildcard '\*\*' to map fields automatically. The constraint is that wild cards can be only used in the end of the json-path. The split path cannot use wildcards. The following are example wildcard path mappings: 71 | 72 | Example Wildcards: 73 | ``` 74 | f=/docs/* : maps all the fields under docs and in the name as given in json 75 | f=/docs/** : maps all the fields under docs and its children in the name as given in json 76 | f=searchField:/docs/* : maps all fields under /docs to a single field called ‘searchField’ 77 | f=searchField:/docs/** : maps all fields under /docs and its children to searchField 78 | ``` 79 | 80 | 81 | Example Bucket Configuration: 82 | ``` 83 | 84 | default 85 | / 86 | 87 | name:/name 88 | city_s:/city 89 | code_s:/code 90 | country_s:/country 91 | phone_s:/phone 92 | url:/website 93 | type_s:/type 94 | last_modified:/updated 95 | description:/description 96 | address_s:/address 97 | geo_s:/geo 98 | 99 | 100 | ``` 101 | 102 | 103 | Example JSON: 104 | ``` 105 | { 106 | "person" : { 107 | "name" : "John", 108 | "age" : 22 109 | }, 110 | "value" : 100 111 | } 112 | ``` 113 | 114 | Example field mappings for above JSON: 115 | ``` 116 | 117 | person:/person 118 | name:/person/name 119 | age_i:/person/age 120 | value_i:/value 121 | 122 | ``` 123 | 124 | 125 | ## Couchbase XDCR 126 | 127 | In Couchbase admin panel, under XDCR tab following settings should be configured: 128 | 129 | ### Remote Cluster 130 | 131 | This Solr plugin should be configured as Couchbase's Remote Cluster. Click on 'Create Cluster Reference' button and fill in cluster data. 132 | 133 | * 'Cluster Name' can be any name. 
134 | * 'IP/hostname' should be `:port` and the port number is the port on which this request handler will register itself as a Couchbase Replica. It is specified in CouchbaseRequestHandler's **params** list as 'port'. 135 | * Username and password should be the same as those provided in solrconfig.xml file, as the replica starts with the same credentials as the Couchbase server. 136 | 137 | ### Replication 138 | 139 | Click on 'Create Replication' button. 140 | 141 | * Select a Couchbase Cluster and a Bucket to replicate from. 142 | * Select configured Solr's Plugin Remote Cluster and add a Bucket name. This Bucket name must match the name specified in the bucket **name** parameter in the solrconfig.xml file. 143 | * Under *Advanced settings* **XDCR Protocol** should be set to 'Version 1' value. 144 | 145 | 146 | # Multiple collections 147 | 148 | To synchronize documents from Couchbase to multiple Solr collections, each collection needs to have this plugin configured in its solrconfig.xml file and also a replication in Couchbase created. Each collection plugin must be started manually. If this is done, there will be one Couchbase replica running for each Solr collection. 149 | 150 | 151 | # Running Solr Couchbase Plugin 152 | 153 | To run this plugin, simply perform all actions described in the Configuration section of this instruction, and run Solr. When Solr is started, execute a GET request to the URL 154 | 155 | ``` 156 | http:///solr/collection1/couchbase?action=start 157 | ``` 158 | 159 | This will start the plugin, which will register in Couchbase as its replica and the data synchronisation as well as indexing into Solr should start. It assumes that Couchbase server is running and Cross-Datacenter Replication is set as described in the *Couchbase XDCR* section of this document.
160 | 161 | To stop the plugin, exegute GET request to the URL 162 | ``` 163 | http:///solr/collection1/couchbase?action=stop 164 | ``` 165 | 166 | In a case, when all configured Solr instances with this plugin are restarted, Couchbase XDCR must be configured again. This is because every time a Couchbase replica is started, it acquires new pool UUID which is used in communications with Couchbase server. -------------------------------------------------------------------------------- /ivy.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | -------------------------------------------------------------------------------- /ivy/ivysettings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /solr-conf/solr.xml.patch: -------------------------------------------------------------------------------- 1 | --- solr.orig.xml 2014-05-14 19:40:34.000000000 -0700 2 | +++ solr.xml 2014-08-27 14:00:49.470374283 -0700 3 | @@ -27,6 +27,7 @@ 4 | --> 5 | 6 | 7 | + ${sharedLib:lib-couchbase} 8 | 9 | 10 | ${host:} 11 | -------------------------------------------------------------------------------- /solr-conf/solrconfig.xml.patch: -------------------------------------------------------------------------------- 1 | --- solrconfig.orig.xml 2014-05-14 19:40:34.000000000 -0700 2 | +++ solrconfig.xml 2014-08-27 14:02:32.046369709 -0700 3 | @@ -1868,4 +1868,46 @@ 4 | *:* 5 | 6 | 7 | + 8 | + 9 | + admin 10 | + admin123 
11 | + 9876 12 | + 1024 13 | + true 14 | + 15 | + 127.0.0.1:8091 16 | + 127.0.0.1:9898 17 | + 18 | + solr 19 | + 20 | + 21 | + beer-sample 22 | + / 23 | + 24 | + address_ss:/address 25 | + /* 26 | + 27 | + 28 | + 29 | + gamesim-sample 30 | + / 31 | + 32 | + /* 33 | + 34 | + 35 | + 36 | + test 37 | + /exams 38 | + 39 | + first_s:/first 40 | + last_s:/last 41 | + grade_i:/grade 42 | + subject_s:/exams/subject 43 | + test_s:/exams/test 44 | + marks_i:/exams/marks 45 | + /* 46 | + 47 | + 48 | + 49 | 50 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/capi/CAPIBehavior.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2012 Couchbase, Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 5 | * except in compliance with the License. You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software distributed under the 10 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 11 | * either express or implied. See the License for the specific language governing permissions 12 | * and limitations under the License. 
13 | */ 14 | package com.couchbase.capi; 15 | 16 | import java.io.InputStream; 17 | import java.util.List; 18 | import java.util.Map; 19 | 20 | import javax.servlet.UnavailableException; 21 | 22 | public interface CAPIBehavior { 23 | 24 | /** Database Operations **/ 25 | 26 | Map welcome(); 27 | 28 | String databaseExists(String database); 29 | 30 | Map getDatabaseDetails(String database); 31 | 32 | boolean createDatabase(String database); 33 | 34 | boolean deleteDatabase(String database); 35 | 36 | boolean ensureFullCommit(String database); 37 | 38 | Map revsDiff(String database, Map revs) throws UnavailableException; 39 | 40 | List bulkDocs(String database, List> docs) throws UnavailableException; 41 | 42 | /** Document Operations **/ 43 | 44 | Map getDocument(String database, String docId); 45 | 46 | Map getLocalDocument(String database, String docId); 47 | 48 | String storeDocument(String database, String docId, Map document); 49 | 50 | String storeLocalDocument(String database, String docId, Map document); 51 | 52 | /** Attachment Operations **/ 53 | 54 | InputStream getAttachment(String database, String docId, String attachmentName); 55 | 56 | String storeAttachment(String database, String docId, String attachmentName, String contentType, InputStream input); 57 | 58 | InputStream getLocalAttachment(String databsae, String docId, String attachmentName); 59 | 60 | String storeLocalAttachment(String database, String docId, String attachmentName, String contentType, InputStream input); 61 | 62 | Map getStats(); 63 | 64 | String getVBucketUUID(String pool, String bucket, int vbucket); 65 | 66 | String getBucketUUID(String pool, String bucket); 67 | } 68 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/capi/CAPIServer.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2012 Couchbase, Inc. All rights reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 5 | * except in compliance with the License. You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software distributed under the 10 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 11 | * either express or implied. See the License for the specific language governing permissions 12 | * and limitations under the License. 13 | */ 14 | package com.couchbase.capi; 15 | 16 | import java.net.Inet4Address; 17 | import java.net.InetAddress; 18 | import java.net.InetSocketAddress; 19 | import java.net.NetworkInterface; 20 | import java.net.URI; 21 | import java.net.URISyntaxException; 22 | import java.util.Enumeration; 23 | 24 | import org.eclipse.jetty.security.ConstraintMapping; 25 | import org.eclipse.jetty.security.ConstraintSecurityHandler; 26 | import org.eclipse.jetty.security.HashLoginService; 27 | import org.eclipse.jetty.security.SecurityHandler; 28 | import org.eclipse.jetty.security.authentication.BasicAuthenticator; 29 | import org.eclipse.jetty.server.Connector; 30 | import org.eclipse.jetty.server.Server; 31 | import org.eclipse.jetty.server.nio.SelectChannelConnector; 32 | import org.eclipse.jetty.servlet.ServletContextHandler; 33 | import org.eclipse.jetty.servlet.ServletHolder; 34 | import org.eclipse.jetty.util.security.Constraint; 35 | import org.eclipse.jetty.util.security.Credential; 36 | 37 | import com.couchbase.capi.servlet.BucketMapServlet; 38 | import com.couchbase.capi.servlet.CAPIServlet; 39 | import com.couchbase.capi.servlet.ClusterMapServlet; 40 | import com.couchbase.capi.servlet.StatsServlet; 41 | 42 | public class CAPIServer extends Server { 43 | 44 | private InetAddress publishAddress; 45 | private InetSocketAddress bindAddress; 46 | 47 | public CAPIServer(CAPIBehavior capiBehavior, 
CouchbaseBehavior couchbaseBehavior, String username, String password) { 48 | this(capiBehavior, couchbaseBehavior, 0, username, password); 49 | } 50 | 51 | public CAPIServer(CAPIBehavior capiBehavior, CouchbaseBehavior couchbaseBehavior, int port, String username, String password) { 52 | this(capiBehavior, couchbaseBehavior, new InetSocketAddress("0.0.0.0", port), username, password); 53 | } 54 | 55 | public CAPIServer(CAPIBehavior capiBehavior, CouchbaseBehavior couchbaseBehavior, InetSocketAddress bindAddress, String username, String password) { 56 | this(capiBehavior, couchbaseBehavior, bindAddress, username, password, 1024); 57 | } 58 | 59 | public CAPIServer(CAPIBehavior capiBehavior, CouchbaseBehavior couchbaseBehavior, InetSocketAddress bindAddress, String username, String password, int numVbuckets) { 60 | super(bindAddress); 61 | 62 | SelectChannelConnector connector0 = new SelectChannelConnector(); 63 | connector0.setHost(bindAddress.getHostName()); 64 | connector0.setPort(bindAddress.getPort()); 65 | connector0.setRequestBufferSize(32 * 1024); 66 | 67 | setConnectors(new Connector[]{ connector0 }); 68 | 69 | this.bindAddress = bindAddress; 70 | 71 | ServletContextHandler context = new ServletContextHandler( 72 | ServletContextHandler.SESSIONS); 73 | context.setContextPath("/"); 74 | context.setSecurityHandler(basicAuth(username, password, "Couchbase Server Admin / REST")); 75 | setHandler(context); 76 | 77 | context.addServlet(new ServletHolder(new StatsServlet(couchbaseBehavior, capiBehavior)), "/_stats"); 78 | context.addServlet(new ServletHolder(new ClusterMapServlet(couchbaseBehavior)), 79 | "/pools/*"); 80 | context.addServlet(new ServletHolder(new BucketMapServlet( 81 | couchbaseBehavior, numVbuckets)), "/pools/default/buckets/*"); 82 | context.addServlet( 83 | new ServletHolder(new CAPIServlet(capiBehavior)), "/*"); 84 | 85 | } 86 | 87 | public int getPort() { 88 | Connector[] connectors = getConnectors(); 89 | if(connectors.length < 1) { 90 | 
throw new IllegalStateException("Cannot get port, there are no connectors"); 91 | } 92 | Connector connector = connectors[0]; 93 | return connector.getLocalPort(); 94 | } 95 | 96 | /** 97 | * Returns the first IPv4 address we find 98 | * 99 | * @return 100 | */ 101 | protected InetAddress guessPublishAddress() { 102 | NetworkInterface ni; 103 | try { 104 | ni = NetworkInterface.getByInetAddress(InetAddress.getLocalHost()); 105 | } catch (Exception e) { 106 | return null; 107 | } 108 | 109 | Enumeration ia = ni.getInetAddresses(); 110 | while (ia.hasMoreElements()) { 111 | InetAddress elem = (InetAddress) ia.nextElement(); 112 | if (elem instanceof Inet4Address) { 113 | return elem; 114 | } 115 | } 116 | return null; 117 | } 118 | 119 | public URI getCAPIAddress() { 120 | if(publishAddress == null) { 121 | publishAddress = guessPublishAddress(); 122 | } 123 | try { 124 | return new URI(String.format("http://%s:%d/", publishAddress, 125 | getPort())); 126 | } catch (URISyntaxException e) { 127 | throw new IllegalArgumentException(e); 128 | } 129 | } 130 | 131 | public InetAddress getPublishAddress() { 132 | return publishAddress; 133 | } 134 | 135 | public void setPublishAddress(InetAddress publishAddress) { 136 | this.publishAddress = publishAddress; 137 | } 138 | 139 | private static final SecurityHandler basicAuth(String username, String password, String realm) { 140 | 141 | HashLoginService l = new HashLoginService(); 142 | l.putUser(username, Credential.getCredential(password), new String[] {"user"}); 143 | l.setName(realm); 144 | 145 | Constraint constraint = new Constraint(); 146 | constraint.setName(Constraint.__BASIC_AUTH); 147 | constraint.setRoles(new String[]{"user"}); 148 | constraint.setAuthenticate(true); 149 | 150 | ConstraintMapping cm = new ConstraintMapping(); 151 | cm.setConstraint(constraint); 152 | cm.setPathSpec("/*"); 153 | 154 | ConstraintSecurityHandler csh = new ConstraintSecurityHandler(); 155 | csh.setAuthenticator(new 
BasicAuthenticator()); 156 | csh.setRealmName(realm); 157 | csh.addConstraintMapping(cm); 158 | csh.setLoginService(l); 159 | 160 | return csh; 161 | 162 | } 163 | 164 | public InetSocketAddress getBindAddress() { 165 | return bindAddress; 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/capi/CouchbaseBehavior.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2012 Couchbase, Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 5 | * except in compliance with the License. You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software distributed under the 10 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 11 | * either express or implied. See the License for the specific language governing permissions 12 | * and limitations under the License. 13 | */ 14 | package com.couchbase.capi; 15 | 16 | import java.util.List; 17 | import java.util.Map; 18 | 19 | public interface CouchbaseBehavior { 20 | 21 | List getPools(); 22 | 23 | String getPoolUUID(String pool); 24 | 25 | Map getPoolDetails(String pool); 26 | 27 | List getBucketsInPool(String pool); 28 | 29 | String getBucketUUID(String pool, String bucket); 30 | 31 | List getNodesServingPool(String pool); 32 | 33 | Map getStats(); 34 | } 35 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/capi/servlet/BucketMapServlet.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2012 Couchbase, Inc. All rights reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 5 | * except in compliance with the License. You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software distributed under the 10 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 11 | * either express or implied. See the License for the specific language governing permissions 12 | * and limitations under the License. 13 | */ 14 | package com.couchbase.capi.servlet; 15 | 16 | import java.io.IOException; 17 | import java.io.OutputStream; 18 | import java.util.ArrayList; 19 | import java.util.HashMap; 20 | import java.util.List; 21 | import java.util.Map; 22 | 23 | import javax.servlet.ServletException; 24 | import javax.servlet.http.HttpServlet; 25 | import javax.servlet.http.HttpServletRequest; 26 | import javax.servlet.http.HttpServletResponse; 27 | 28 | import org.codehaus.jackson.JsonGenerationException; 29 | import org.codehaus.jackson.map.JsonMappingException; 30 | import org.codehaus.jackson.map.ObjectMapper; 31 | import org.slf4j.Logger; 32 | import org.slf4j.LoggerFactory; 33 | 34 | import com.couchbase.capi.CouchbaseBehavior; 35 | 36 | /** 37 | * This servlet is responsible for providing the bucket list and bucket details. 
38 | * 39 | * Requests like: 40 | * 41 | * /.../buckets 42 | * AND 43 | * /.../buckets/default 44 | * 45 | * @author mschoch 46 | * 47 | */ 48 | @SuppressWarnings("serial") 49 | public class BucketMapServlet extends HttpServlet { 50 | 51 | protected int numVbuckets = 1024; 52 | 53 | private static final Logger logger = LoggerFactory.getLogger(BucketMapServlet.class); 54 | protected ObjectMapper mapper = new ObjectMapper(); 55 | 56 | protected CouchbaseBehavior couchbaseBehavior; 57 | 58 | public BucketMapServlet(CouchbaseBehavior couchbaseBehavior) { 59 | this.couchbaseBehavior = couchbaseBehavior; 60 | } 61 | 62 | public BucketMapServlet(CouchbaseBehavior couchbaseBehavior, int numVbuckets) { 63 | this.couchbaseBehavior = couchbaseBehavior; 64 | this.numVbuckets = numVbuckets; 65 | } 66 | 67 | /** 68 | * Handle get requests for the matching URLs and direct to the right handler method. 69 | */ 70 | @Override 71 | protected void doGet(HttpServletRequest req, HttpServletResponse resp) 72 | throws ServletException, IOException { 73 | 74 | String bucket = req.getPathInfo(); 75 | OutputStream os = resp.getOutputStream(); 76 | 77 | if (bucket == null || bucket.equals("/")) { 78 | String uuid = req.getParameter("uuid"); 79 | executeBucketsRequest(resp, os, "default", uuid); 80 | } else { 81 | String bucketUUID = req.getParameter("bucket_uuid"); 82 | bucket = getDatabaseNameFromPath(removePathSuffix(bucket, "/")); 83 | executeBucketRequest(resp, os, "default", bucket, bucketUUID); 84 | } 85 | } 86 | 87 | /** 88 | * Using the connection manager, find the client ids of the active connections 89 | * and return this as a list of a valid buckets. 
90 | * 91 | * @param os 92 | * @throws IOException 93 | */ 94 | protected void executeBucketsRequest(HttpServletResponse resp, OutputStream os, String pool, String uuid) throws IOException { 95 | logger.trace("asked for bucket list"); 96 | 97 | List buckets = new ArrayList(); 98 | 99 | List bucketNames = couchbaseBehavior.getBucketsInPool(pool); 100 | 101 | if(uuid != null) { 102 | //if a uuid was provided make sure it matches for this pool 103 | String poolUUID = couchbaseBehavior.getPoolUUID(pool); 104 | if(!uuid.equals(poolUUID)) { 105 | resp.setStatus(404); 106 | os.write("Cluster uuid does not match the requested.".getBytes()); 107 | os.close(); 108 | } else { 109 | formatBuckets(resp, os, pool, buckets, bucketNames); 110 | } 111 | } else { 112 | formatBuckets(resp, os, pool, buckets, bucketNames); 113 | } 114 | } 115 | 116 | protected void formatBuckets(HttpServletResponse resp, OutputStream os, String pool, 117 | List buckets, List bucketNames) throws IOException, 118 | JsonGenerationException, JsonMappingException { 119 | if(bucketNames != null) { 120 | for (String bucketName : bucketNames) { 121 | String actualBucketUUID = couchbaseBehavior.getBucketUUID(pool, bucketName); 122 | List nodes = couchbaseBehavior.getNodesServingPool(pool); 123 | Map bucket = buildBucketDetailsMap(bucketName, nodes, actualBucketUUID); 124 | buckets.add(bucket); 125 | } 126 | mapper.writeValue(os, buckets); 127 | } else { 128 | resp.setStatus(404); 129 | } 130 | } 131 | 132 | /** 133 | * Return a fake bucket map for the requested bucket. 
134 | * 135 | * @param req 136 | * @param os 137 | * @param bucket 138 | * @throws IOException 139 | */ 140 | protected void executeBucketRequest(HttpServletResponse resp, final OutputStream os, 141 | final String pool, final String bucket, String bucketUUID) throws IOException { 142 | 143 | String actualBucketUUID = couchbaseBehavior.getBucketUUID(pool, bucket); 144 | if(actualBucketUUID == null) { 145 | resp.setStatus(404); 146 | return; 147 | } 148 | 149 | List nodes = couchbaseBehavior.getNodesServingPool(pool); 150 | 151 | if(bucketUUID != null) { 152 | //if a bucket uuid is provided, make sure it matches the buckets uuid 153 | if(!bucketUUID.equals(actualBucketUUID)) { 154 | resp.setStatus(404); 155 | os.write("Bucket uuid does not match the requested.".getBytes()); 156 | os.close(); 157 | } else { 158 | formatBucket(resp, os, bucket, nodes, actualBucketUUID); 159 | } 160 | } else { 161 | formatBucket(resp, os, bucket, nodes, actualBucketUUID); 162 | } 163 | } 164 | 165 | protected void formatBucket(HttpServletResponse resp, final OutputStream os, final String bucket, 166 | List nodes, String actualBucketUUID) throws IOException, JsonGenerationException, 167 | JsonMappingException { 168 | 169 | if(nodes != null) { 170 | Map responseMap = buildBucketDetailsMap(bucket, 171 | nodes, actualBucketUUID); 172 | 173 | mapper.writeValue(os, responseMap); 174 | } else { 175 | resp.setStatus(404); 176 | } 177 | } 178 | 179 | protected Map buildBucketDetailsMap(final String bucket, 180 | List nodes, String actualBucketUUID) { 181 | List serverList = new ArrayList(); 182 | for (Object node : nodes) { 183 | Map nodeObj = (Map)node; 184 | serverList.add(nodeObj.get("hostname")); 185 | //add the bucket name to the node's couchApiBase 186 | String couchApiBase = (String)nodeObj.get("couchApiBase"); 187 | nodeObj.put("couchApiBase", couchApiBase + bucket); 188 | } 189 | 190 | 191 | List vBucketMap = new ArrayList(); 192 | for(int i=0; i < numVbuckets; i++) { 193 | List vbucket 
= new ArrayList(); 194 | vbucket.add(i%serverList.size()); 195 | vbucket.add(-1); 196 | vBucketMap.add(vbucket); 197 | } 198 | 199 | Map vbucketServerMap = new HashMap(); 200 | vbucketServerMap.put("serverList", serverList); 201 | vbucketServerMap.put("vBucketMap", vBucketMap); 202 | 203 | Map responseMap = new HashMap(); 204 | responseMap.put("nodes", nodes); 205 | responseMap.put("vBucketServerMap", vbucketServerMap); 206 | responseMap.put("name", bucket); 207 | responseMap.put("uri", String.format("/pools/default/buckets/%s?bucket_uuid=%s", bucket, actualBucketUUID)); 208 | responseMap.put("uuid", actualBucketUUID); 209 | responseMap.put("bucketType", "membase"); 210 | responseMap.put("saslPassword", ""); 211 | 212 | List bucketCapabilities = new ArrayList(); 213 | bucketCapabilities.add("couchapi"); 214 | responseMap.put("bucketCapabilities", bucketCapabilities); 215 | return responseMap; 216 | } 217 | 218 | protected String removePathSuffix(String path, String suffix) { 219 | if (path.endsWith(suffix)) { 220 | path = path.substring(0, path.length() - suffix.length()); 221 | } 222 | return path; 223 | } 224 | 225 | protected String getDatabaseNameFromPath(String path) { 226 | String database = null; 227 | if(path.startsWith("/")) { 228 | database = path.substring(1); 229 | } 230 | return database; 231 | } 232 | 233 | } 234 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/capi/servlet/CAPIServlet.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2012 Couchbase, Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 5 | * except in compliance with the License. 
You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software distributed under the 10 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 11 | * either express or implied. See the License for the specific language governing permissions 12 | * and limitations under the License. 13 | */ 14 | package com.couchbase.capi.servlet; 15 | 16 | import java.io.IOException; 17 | import java.io.InputStream; 18 | import java.io.OutputStream; 19 | import java.io.UnsupportedEncodingException; 20 | import java.net.URLDecoder; 21 | import java.util.ArrayList; 22 | import java.util.HashMap; 23 | import java.util.List; 24 | import java.util.Map; 25 | 26 | import javax.servlet.ServletException; 27 | import javax.servlet.UnavailableException; 28 | import javax.servlet.http.HttpServlet; 29 | import javax.servlet.http.HttpServletRequest; 30 | import javax.servlet.http.HttpServletResponse; 31 | 32 | import org.apache.commons.io.IOUtils; 33 | import org.codehaus.jackson.JsonGenerationException; 34 | import org.codehaus.jackson.map.JsonMappingException; 35 | import org.codehaus.jackson.map.ObjectMapper; 36 | import org.slf4j.Logger; 37 | import org.slf4j.LoggerFactory; 38 | 39 | import com.couchbase.capi.CAPIBehavior; 40 | 41 | /** 42 | * This servlet implements the Couch API (CAPI) 43 | * 44 | * This is not a fully-functional implementation, rather it is a bare-minimum implementation to support 45 | * receiving a push replication from another CouchDB instance. 
46 | * 47 | * @author mschoch 48 | * 49 | */ 50 | @SuppressWarnings("serial") 51 | public class CAPIServlet extends HttpServlet { 52 | 53 | private static final Logger logger = LoggerFactory.getLogger(CAPIServlet.class); 54 | protected ObjectMapper mapper = new ObjectMapper(); 55 | 56 | protected CAPIBehavior capiBehavior; 57 | 58 | public CAPIServlet(CAPIBehavior capiBehavior) { 59 | this.capiBehavior = capiBehavior; 60 | } 61 | 62 | /** 63 | * Takes a look at the structure of the URL requested and dispatch to the right handler method 64 | */ 65 | @Override 66 | protected void service(HttpServletRequest req, HttpServletResponse resp) 67 | throws ServletException, IOException { 68 | 69 | String uri = req.getRequestURI(); 70 | String[] splitUri = getUriPieces(uri); 71 | 72 | if((splitUri.length == 1) && splitUri[0].equals("")) { 73 | handleWelcome(req, resp); 74 | } 75 | else if ((splitUri.length == 1 && splitUri[0].startsWith("_"))) { 76 | handleRootSpecial(req, resp, splitUri[0]); 77 | } 78 | else if (splitUri.length == 1) { 79 | handleDatabase(req, resp, unescapeName(splitUri[0])); 80 | } else if (splitUri.length == 2) { 81 | // make sure database is valid 82 | String doesNotExistReason = capiBehavior.databaseExists(unescapeName(splitUri[0])); 83 | if(doesNotExistReason != null) { 84 | sendNotFoundResponse(resp, doesNotExistReason); 85 | return; 86 | } 87 | 88 | if (splitUri[1].equals("_bulk_docs")) { 89 | handleBulkDocs(req, resp, unescapeName(splitUri[0])); 90 | } else if (splitUri[1].equals("_revs_diff")) { 91 | handleRevsDiff(req, resp, unescapeName(splitUri[0])); 92 | } else if (splitUri[1].equals("_ensure_full_commit")) { 93 | handleEnsureFullCommit(req, resp, 94 | unescapeName(splitUri[0])); 95 | } else if (splitUri[1].startsWith("_")) { 96 | logger.debug("Unsupported special operation {}", splitUri[1]); 97 | } else { 98 | // this must be a document id 99 | handleDocument(req, resp, unescapeName(splitUri[0]), 100 | unescapeName(splitUri[1])); 101 | } 102 | 
} else if (splitUri.length == 3) { 103 | // make sure database is valid 104 | String doesNotExistReason = capiBehavior.databaseExists(unescapeName(splitUri[0])); 105 | if(doesNotExistReason != null) { 106 | sendNotFoundResponse(resp, doesNotExistReason); 107 | return; 108 | } 109 | 110 | if (splitUri[1].equals("_local")) { 111 | handleLocalDocument(req, resp, 112 | unescapeName(splitUri[0]), "_local/" 113 | + unescapeName(splitUri[2])); 114 | } else { 115 | // attachment request 116 | handleAttachment(req, resp, unescapeName(splitUri[0]), 117 | splitUri[1], splitUri[2]); 118 | } 119 | } else { 120 | // make sure database is valid 121 | String doesNotExistReason = capiBehavior.databaseExists(unescapeName(splitUri[0])); 122 | if(doesNotExistReason != null) { 123 | sendNotFoundResponse(resp, doesNotExistReason); 124 | return; 125 | } 126 | 127 | if (splitUri[1].equals("_local")) { 128 | handleLocalAttachment(req, resp, 129 | unescapeName(splitUri[0]), splitUri[2], 130 | splitUri[3]); 131 | } else { 132 | logger.debug("I don't know how to handle {}", uri); 133 | } 134 | } 135 | 136 | } 137 | 138 | /** 139 | * Handle special operations at the root level /_... 
140 | * @param req 141 | * @param resp 142 | * @param special 143 | * @throws ServletException 144 | * @throws IOException 145 | */ 146 | protected void handleRootSpecial(HttpServletRequest req, 147 | HttpServletResponse resp, String special) throws ServletException, 148 | IOException { 149 | 150 | if(special.equals("_pre_replicate")) { 151 | logger.debug("got _pre_replicate: {}", req.getRequestURI()); 152 | handlePreReplicate(req, resp); 153 | return; 154 | } else if(special.equals("_commit_for_checkpoint")) { 155 | logger.debug("got _commit_for_checkpoint: {}", req.getRequestURI()); 156 | handleCommitForCheckpoint(req, resp); 157 | return; 158 | } else { 159 | logger.debug("got unknown special: {}", req.getRequestURI()); 160 | } 161 | 162 | InputStream is = req.getInputStream(); 163 | int requestLength = req.getContentLength(); 164 | byte[] buffer = new byte[requestLength]; 165 | IOUtils.readFully(is, buffer, 0, requestLength); 166 | 167 | logger.trace("root special request body was: '{}'", new String(buffer)); 168 | 169 | sendNotFoundResponse(resp, "missing"); 170 | } 171 | 172 | protected void handlePreReplicate(HttpServletRequest req, 173 | HttpServletResponse resp) throws ServletException, IOException { 174 | 175 | // read the request 176 | InputStream is = req.getInputStream(); 177 | int requestLength = req.getContentLength(); 178 | byte[] buffer = new byte[requestLength]; 179 | IOUtils.readFully(is, buffer, 0, requestLength); 180 | 181 | @SuppressWarnings("unchecked") 182 | Map parsedValue = (Map) mapper 183 | .readValue(buffer, Map.class); 184 | logger.trace("pre replicate parsed value is " + parsedValue); 185 | 186 | int vbucket = (Integer)parsedValue.get("vb"); 187 | String bucket = (String)parsedValue.get("bucket"); 188 | String bucketUUID = (String)parsedValue.get("bucketUUID"); 189 | String vbopaque = (String)parsedValue.get("vbopaque"); 190 | String commitopaque = (String)parsedValue.get("commitopaque"); 191 | 192 | String vbucketUUID = 
capiBehavior.getVBucketUUID("default", bucket, vbucket); 193 | 194 | if((vbopaque != null) && (!vbopaque.equals(vbucketUUID))) { 195 | logger.debug("returning 400"); 196 | resp.setStatus(HttpServletResponse.SC_BAD_REQUEST); 197 | } 198 | if((commitopaque != null) && (!commitopaque.equals(vbucketUUID))) { 199 | logger.debug("returning 400"); 200 | resp.setStatus(HttpServletResponse.SC_BAD_REQUEST); 201 | } 202 | 203 | OutputStream os = resp.getOutputStream(); 204 | resp.setContentType("application/json"); 205 | Map responseMap = new HashMap(); 206 | responseMap.put("vbopaque", vbucketUUID); 207 | mapper.writeValue(os, responseMap); 208 | } 209 | 210 | protected void handleCommitForCheckpoint(HttpServletRequest req, 211 | HttpServletResponse resp) throws ServletException, IOException { 212 | 213 | // read the request 214 | InputStream is = req.getInputStream(); 215 | int requestLength = req.getContentLength(); 216 | byte[] buffer = new byte[requestLength]; 217 | IOUtils.readFully(is, buffer, 0, requestLength); 218 | 219 | @SuppressWarnings("unchecked") 220 | Map parsedValue = (Map) mapper 221 | .readValue(buffer, Map.class); 222 | logger.trace("commit for checkpoint parsed value is " + parsedValue); 223 | 224 | int vbucket = (Integer)parsedValue.get("vb"); 225 | String bucket = (String)parsedValue.get("bucket"); 226 | String bucketUUID = (String)parsedValue.get("bucketUUID"); 227 | String vbopaque = (String)parsedValue.get("vbopaque"); 228 | 229 | String vbucketUUID = capiBehavior.getVBucketUUID("default", bucket, vbucket); 230 | Map responseMap = new HashMap(); 231 | responseMap.put("vbopaque", vbucketUUID); 232 | 233 | if((vbopaque != null) && (!vbopaque.equals(vbucketUUID))) { 234 | logger.debug("returning 400"); 235 | resp.setStatus(HttpServletResponse.SC_BAD_REQUEST); 236 | } else { 237 | // add the commit opaque 238 | responseMap.put("commitopaque", vbucketUUID); 239 | } 240 | 241 | OutputStream os = resp.getOutputStream(); 242 | 
resp.setContentType("application/json"); 243 | mapper.writeValue(os, responseMap); 244 | } 245 | 246 | /** 247 | * Handle GET requests to the root URL 248 | * @param req 249 | * @param resp 250 | * @throws ServletException 251 | * @throws IOException 252 | */ 253 | protected void handleWelcome(HttpServletRequest req, 254 | HttpServletResponse resp) throws ServletException, 255 | IOException { 256 | 257 | if (!req.getMethod().equals("GET")) { 258 | throw new UnsupportedOperationException( 259 | "Only GET operations on / are supported at this time"); 260 | } 261 | 262 | logger.trace("Got " + req.getMethod() + " request for /"); 263 | OutputStream os = resp.getOutputStream(); 264 | resp.setContentType("application/json"); 265 | Map responseMap = capiBehavior.welcome(); 266 | mapper.writeValue(os, responseMap); 267 | } 268 | 269 | /** 270 | * Handle GET/HEAD requests to the database URL 271 | * 272 | * @param req 273 | * @param resp 274 | * @param database 275 | * @throws ServletException 276 | * @throws IOException 277 | */ 278 | protected void handleDatabase(HttpServletRequest req, 279 | HttpServletResponse resp, String database) throws ServletException, 280 | IOException { 281 | 282 | if (!(req.getMethod().equals("GET") || req.getMethod().equals("HEAD"))) { 283 | throw new UnsupportedOperationException( 284 | "Only GET/HEAD operations on database are supported at this time"); 285 | } 286 | 287 | logger.trace("Got " + req.getMethod() + " request for " + database); 288 | 289 | OutputStream os = resp.getOutputStream(); 290 | 291 | String doesNotExistReason = capiBehavior.databaseExists(database); 292 | if(doesNotExistReason == null) { 293 | if (req.getMethod().equals("GET")) { 294 | resp.setContentType("application/json"); 295 | 296 | Map responseMap = capiBehavior.getDatabaseDetails(database); 297 | mapper.writeValue(os, responseMap); 298 | } 299 | } else { 300 | sendNotFoundResponse(resp, doesNotExistReason); 301 | } 302 | 303 | } 304 | 305 | /** 306 | * Handle 
_revs_diff by claiming we don't have any of these revisions 307 | * 308 | * @param req 309 | * @param resp 310 | * @param database 311 | * @throws ServletException 312 | * @throws IOException 313 | */ 314 | protected void handleRevsDiff(HttpServletRequest req, 315 | HttpServletResponse resp, String database) throws ServletException, 316 | IOException { 317 | 318 | if (!req.getMethod().equals("POST")) { 319 | throw new UnsupportedOperationException("_revs_diff must be POST"); 320 | } 321 | 322 | logger.trace("Got revs diff request for " + database); 323 | 324 | OutputStream os = resp.getOutputStream(); 325 | InputStream is = req.getInputStream(); 326 | 327 | int requestLength = req.getContentLength(); 328 | byte[] buffer = new byte[requestLength]; 329 | IOUtils.readFully(is, buffer, 0, requestLength); 330 | 331 | logger.trace("revs diff request body was {}", new String(buffer)); 332 | 333 | @SuppressWarnings("unchecked") 334 | Map parsedValue = (Map) mapper 335 | .readValue(buffer, Map.class); 336 | 337 | logger.trace("revs diff parsed value is " + parsedValue); 338 | 339 | try { 340 | Map responseMap = capiBehavior.revsDiff(database, parsedValue); 341 | 342 | if(responseMap != null) { 343 | mapper.writeValue(os, responseMap); 344 | } else { 345 | sendNotFoundResponse(resp, "missing"); 346 | } 347 | } catch (UnavailableException e) { 348 | sendServiceUnavailableResponse(resp, "too many concurrent requests"); 349 | } 350 | } 351 | 352 | protected void handleEnsureFullCommit(HttpServletRequest req, 353 | HttpServletResponse resp, String database) throws ServletException, 354 | IOException { 355 | 356 | if (!req.getMethod().equals("POST")) { 357 | throw new UnsupportedOperationException( 358 | "_ensure_full_commit must be POST"); 359 | } 360 | 361 | logger.trace("Got ensure full commitf request for " + database); 362 | 363 | resp.setStatus(HttpServletResponse.SC_CREATED); 364 | resp.setContentType("application/json"); 365 | 366 | 
if(capiBehavior.ensureFullCommit(database)) { 367 | 368 | Map responseMap = new HashMap(); 369 | responseMap.put("ok", true); 370 | 371 | OutputStream os = resp.getOutputStream(); 372 | mapper.writeValue(os, responseMap); 373 | } else { 374 | sendNotFoundResponse(resp, "missing"); 375 | } 376 | } 377 | 378 | protected void handleAttachment(HttpServletRequest req, 379 | HttpServletResponse resp, String databaseName, String documentId, 380 | String attachmentName) { 381 | throw new UnsupportedOperationException( 382 | "Document attachments are not supported at this time"); 383 | } 384 | 385 | protected void handleLocalAttachment(HttpServletRequest req, 386 | HttpServletResponse resp, String databaseName, String documentId, 387 | String attachemntName) { 388 | throw new UnsupportedOperationException( 389 | "Local Document attachments are not supported at this time"); 390 | } 391 | 392 | protected void handleDocument(HttpServletRequest req, 393 | HttpServletResponse resp, String databaseName, String documentId) 394 | throws IOException, ServletException { 395 | handleDocumentInternal(req, resp, databaseName, documentId, "document"); 396 | } 397 | 398 | protected void handleLocalDocument(HttpServletRequest req, 399 | HttpServletResponse resp, String databaseName, String documentId) 400 | throws IOException, ServletException { 401 | handleDocumentInternal(req, resp, databaseName, documentId, "_local"); 402 | } 403 | 404 | protected void handleDocumentInternal(HttpServletRequest req, 405 | HttpServletResponse resp, String databaseName, String documentId, 406 | String documentType) throws IOException, ServletException { 407 | 408 | logger.trace(String.format( 409 | "Got document request in database %s document %s type %s", 410 | databaseName, documentId, documentType)); 411 | 412 | if (!(req.getMethod().equals("GET") || req.getMethod().equals("HEAD") || req 413 | .getMethod().equals("PUT"))) { 414 | throw new UnsupportedOperationException( 415 | "Only GET/HEAD/PUT 
operations on documents are supported at this time"); 416 | } 417 | 418 | 419 | if (req.getMethod().equals("GET") || req.getMethod().equals("HEAD")) { 420 | 421 | Map doc = null; 422 | if (documentType.equals("_local")) { 423 | doc = capiBehavior.getLocalDocument(databaseName, documentId); 424 | } else { 425 | doc = capiBehavior.getDocument(databaseName, documentId); 426 | } 427 | 428 | if(doc != null) { 429 | resp.setStatus(HttpServletResponse.SC_OK); 430 | resp.setContentType("application/json"); 431 | OutputStream os = resp.getOutputStream(); 432 | mapper.writeValue(os, doc); 433 | } else { 434 | sendNotFoundResponse(resp, "missing"); 435 | return; 436 | } 437 | 438 | } else if (req.getMethod().equals("PUT")) { 439 | 440 | String rev = null; 441 | 442 | //read the document 443 | InputStream is = req.getInputStream(); 444 | 445 | int requestLength = req.getContentLength(); 446 | byte[] buffer = new byte[requestLength]; 447 | IOUtils.readFully(is, buffer, 0, requestLength); 448 | 449 | @SuppressWarnings("unchecked") 450 | Map parsedValue = (Map) mapper 451 | .readValue(buffer, Map.class); 452 | 453 | if(documentType.equals("_local)")) { 454 | rev = capiBehavior.storeLocalDocument(databaseName, documentId, parsedValue); 455 | } else { 456 | rev = capiBehavior.storeDocument(databaseName, documentId, parsedValue); 457 | } 458 | 459 | if(rev == null) { 460 | throw new ServletException("Storing document did not result in valid revision"); 461 | } 462 | 463 | resp.setStatus(HttpServletResponse.SC_CREATED); 464 | resp.setContentType("application/json"); 465 | OutputStream os = resp.getOutputStream(); 466 | 467 | Map responseMap = new HashMap(); 468 | responseMap.put("ok", true); 469 | responseMap.put("id", documentId); 470 | responseMap.put("rev", rev); 471 | mapper.writeValue(os, responseMap); 472 | } 473 | 474 | } 475 | 476 | private void sendNotFoundResponse(HttpServletResponse resp, String doesNotExistReason) 477 | throws IOException, JsonGenerationException, 
JsonMappingException { 478 | resp.setStatus(HttpServletResponse.SC_NOT_FOUND); 479 | resp.setContentType("application/json"); 480 | OutputStream os = resp.getOutputStream(); 481 | 482 | Map responseMap = new HashMap(); 483 | responseMap.put("error", "not_found"); 484 | responseMap.put("reason", doesNotExistReason); 485 | mapper.writeValue(os, responseMap); 486 | } 487 | 488 | private void sendServiceUnavailableResponse(HttpServletResponse resp, String reason) 489 | throws IOException, JsonGenerationException, JsonMappingException { 490 | resp.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE); 491 | resp.setContentType("application/json"); 492 | OutputStream os = resp.getOutputStream(); 493 | 494 | Map responseMap = new HashMap(); 495 | responseMap.put("error", "service_unavailable"); 496 | responseMap.put("reason", reason); 497 | mapper.writeValue(os, responseMap); 498 | } 499 | 500 | protected void handleBulkDocs(HttpServletRequest req, 501 | HttpServletResponse resp, String database) throws ServletException, 502 | IOException { 503 | 504 | if (!req.getMethod().equals("POST")) { 505 | throw new UnsupportedOperationException("_bulk_docs must be POST"); 506 | } 507 | 508 | 509 | 510 | logger.trace("Got bulk docs request for " + database); 511 | 512 | resp.setStatus(HttpServletResponse.SC_CREATED); 513 | resp.setContentType("application/json"); 514 | 515 | OutputStream os = resp.getOutputStream(); 516 | InputStream is = req.getInputStream(); 517 | 518 | int requestLength = req.getContentLength(); 519 | byte[] buffer = new byte[requestLength]; 520 | IOUtils.readFully(is, buffer, 0, requestLength); 521 | 522 | @SuppressWarnings("unchecked") 523 | Map parsedValue = (Map) mapper 524 | .readValue(buffer, Map.class); 525 | 526 | logger.trace("parsed value is " + parsedValue); 527 | 528 | try { 529 | List responseList = capiBehavior.bulkDocs(database, (ArrayList>) parsedValue.get("docs")); 530 | if(responseList == null) { 531 | sendNotFoundResponse(resp, "missing"); 532 
| return; 533 | } 534 | mapper.writeValue(os, responseList); 535 | } catch (UnavailableException e) { 536 | sendServiceUnavailableResponse(resp, "too many concurrent requests"); 537 | } 538 | } 539 | 540 | String[] getUriPieces(String uri) { 541 | // remove initial / 542 | if (uri.startsWith("/")) { 543 | uri = uri.substring(1); 544 | } 545 | String[] result = uri.split("/"); 546 | return result; 547 | } 548 | 549 | String unescapeName(String name) throws UnsupportedEncodingException { 550 | return URLDecoder.decode(name, "UTF-8"); 551 | } 552 | 553 | } 554 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/capi/servlet/ClusterMapServlet.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2012 Couchbase, Inc. All rights reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 5 | * except in compliance with the License. You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software distributed under the 10 | * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 11 | * either express or implied. See the License for the specific language governing permissions 12 | * and limitations under the License. 
13 | */ 14 | package com.couchbase.capi.servlet; 15 | 16 | import java.io.IOException; 17 | import java.io.OutputStream; 18 | import java.util.ArrayList; 19 | import java.util.HashMap; 20 | import java.util.List; 21 | import java.util.Map; 22 | 23 | import javax.servlet.ServletException; 24 | import javax.servlet.http.HttpServlet; 25 | import javax.servlet.http.HttpServletRequest; 26 | import javax.servlet.http.HttpServletResponse; 27 | 28 | import org.codehaus.jackson.map.ObjectMapper; 29 | import org.slf4j.Logger; 30 | import org.slf4j.LoggerFactory; 31 | 32 | import com.couchbase.capi.CouchbaseBehavior; 33 | 34 | /** 35 | * This servlet is responsible for providing the cluster list and cluster details. 36 | * 37 | * Requests like: 38 | * 39 | * /pools 40 | * AND 41 | * /pools/default 42 | * 43 | * @author mschoch 44 | * 45 | */ 46 | @SuppressWarnings("serial") 47 | public class ClusterMapServlet extends HttpServlet { 48 | 49 | private static final Logger logger = LoggerFactory.getLogger(ClusterMapServlet.class); 50 | protected ObjectMapper mapper = new ObjectMapper(); 51 | 52 | private CouchbaseBehavior couchbaseBehavior; 53 | 54 | public ClusterMapServlet(CouchbaseBehavior couchbaseBehavior) { 55 | this.couchbaseBehavior = couchbaseBehavior; 56 | } 57 | 58 | /** 59 | * Handle get requests for the matching URLs and direct to the right handler method. 
60 | */ 61 | @Override 62 | protected void doGet(HttpServletRequest req, HttpServletResponse resp) 63 | throws ServletException, IOException { 64 | 65 | String pool = req.getPathInfo(); 66 | OutputStream os = resp.getOutputStream(); 67 | 68 | if (pool == null || pool.equals("/")) { 69 | executePoolsRequest(os); 70 | } else { 71 | // trim off slash 72 | if (pool.startsWith("/")) { 73 | pool = pool.substring(1); 74 | } 75 | String uuid = req.getParameter("uuid"); 76 | executePoolRequest(resp, os, pool, uuid); 77 | } 78 | } 79 | 80 | /** 81 | * Returns a single pool named "default" 82 | * 83 | * @param os 84 | * @throws IOException 85 | */ 86 | protected void executePoolsRequest(OutputStream os) throws IOException { 87 | logger.trace("asked for pools"); 88 | List pools = new ArrayList(); 89 | 90 | List poolNames = couchbaseBehavior.getPools(); 91 | 92 | for (String poolName : poolNames) { 93 | Map pool = new HashMap(); 94 | pool.put("name", poolName); 95 | pool.put("uri", "/pools/" + poolName + "?uuid=" + couchbaseBehavior.getPoolUUID(poolName)); 96 | pools.add(pool); 97 | } 98 | 99 | Map responseMap = new HashMap(); 100 | responseMap.put("pools", pools); 101 | responseMap.put("uuid", couchbaseBehavior.getPoolUUID("default")); 102 | 103 | mapper.writeValue(os, responseMap); 104 | } 105 | 106 | /** 107 | * When asked about the details of a pool, returns a pointer to the bucket list 108 | * 109 | * @param os 110 | * @param pool 111 | * @throws IOException 112 | */ 113 | protected void executePoolRequest(HttpServletResponse resp, OutputStream os, String pool, String uuid) 114 | throws IOException { 115 | logger.trace("asked for pool " + pool); 116 | 117 | Map responseMap = couchbaseBehavior.getPoolDetails(pool); 118 | if(responseMap != null) { 119 | // if the request contained a UUID, make sure it matches 120 | if(uuid != null) { 121 | String poolUUID = couchbaseBehavior.getPoolUUID(pool); 122 | if(!uuid.equals(poolUUID)) { 123 | resp.setStatus(404); 124 | 
os.write("Cluster uuid does not match the requested.".getBytes()); 125 | os.close(); 126 | } else { 127 | mapper.writeValue(os, responseMap); 128 | } 129 | } else { 130 | mapper.writeValue(os, responseMap); 131 | } 132 | } else { 133 | resp.setStatus(404); 134 | } 135 | } 136 | 137 | } 138 | -------------------------------------------------------------------------------- /src/main/java/com/couchbase/capi/servlet/StatsServlet.java: -------------------------------------------------------------------------------- 1 | package com.couchbase.capi.servlet; 2 | 3 | import java.io.IOException; 4 | import java.io.OutputStream; 5 | import java.util.HashMap; 6 | import java.util.Map; 7 | 8 | import javax.servlet.ServletException; 9 | import javax.servlet.http.HttpServlet; 10 | import javax.servlet.http.HttpServletRequest; 11 | import javax.servlet.http.HttpServletResponse; 12 | 13 | import org.codehaus.jackson.map.ObjectMapper; 14 | 15 | import com.couchbase.capi.CAPIBehavior; 16 | import com.couchbase.capi.CouchbaseBehavior; 17 | 18 | @SuppressWarnings("serial") 19 | public class StatsServlet extends HttpServlet { 20 | 21 | protected ObjectMapper mapper = new ObjectMapper(); 22 | protected CouchbaseBehavior couchbaseBehavior; 23 | protected CAPIBehavior capiBehavior; 24 | 25 | public StatsServlet(CouchbaseBehavior couchbaseBehavior, CAPIBehavior capiBehavior) { 26 | this.couchbaseBehavior = couchbaseBehavior; 27 | this.capiBehavior = capiBehavior; 28 | } 29 | 30 | @Override 31 | protected void doGet(HttpServletRequest req, HttpServletResponse resp) 32 | throws ServletException, IOException { 33 | 34 | Map couchbaseStats = couchbaseBehavior.getStats(); 35 | Map capiStats = capiBehavior.getStats(); 36 | 37 | Map resultMap = new HashMap(); 38 | resultMap.put("couchbase", couchbaseStats); 39 | resultMap.put("capi", capiStats); 40 | 41 | OutputStream os = resp.getOutputStream(); 42 | mapper.writeValue(os, resultMap); 43 | } 44 | 45 | } 46 | 
/**
 * Configuration holder for a single Couchbase bucket handled by the plugin:
 * the bucket name, the splitpath used when splitting incoming JSON
 * documents, and the field mapping applied to them.
 */
public class Bucket {

  String name;
  String splitpath;
  /** A field mapping for documents from Couchbase. Reversed mapping: target -> source*/
  Map fieldmapping;

  /**
   * @param name bucket name
   * @param splitpath JSON path used to split documents
   * @param fieldmapping reversed (target -> source) field mapping
   */
  public Bucket(String name, String splitpath, Map fieldmapping) {
    this.name = name;
    this.splitpath = splitpath;
    this.fieldmapping = fieldmapping;
  }

  /** @return the reversed field mapping */
  public Map getFieldmapping() {
    return fieldmapping;
  }

  /** @return the bucket name */
  public String getName() {
    return name;
  }

  /** @return the configured splitpath */
  public String getSplitpath() {
    return splitpath;
  }
}
static final String FIELD_MAPPING_FIELD = "fieldmappings"; 19 | public static final String SPLITPATH_FIELD = "splitpath"; 20 | public static final String USERNAME_FIELD = "username"; 21 | public static final String PASSWORD_FIELD = "password"; 22 | public static final String NUM_VBUCKETS_FIELD = "numVBuckets"; 23 | public static final String HOST_IP = "hostIP"; 24 | public static final String PORT_FIELD = "port"; 25 | public static final String NAME_FIELD = "name"; 26 | public static final String COMMIT_AFTER_BATCH_FIELD = "commitAfterBatch"; 27 | /** Select the update processor chain to use. A RequestHandler may or may not respect this parameter */ 28 | public static final String UPDATE_CHAIN = "update.chain"; 29 | 30 | /** Couchbase document fields */ 31 | public static final String ID_FIELD = "id"; 32 | public static final String REVISION_FIELD = "revision_s"; 33 | public static final String JSON_FIELD = "content"; 34 | public static final String METADATA_FIELD = "metadata_s"; 35 | public static final String TTL_FIELD = "ttl_l"; 36 | public static final String DELETED_FIELD = "deleted_b"; 37 | public static final String PARENT_FIELD = "parent_s"; 38 | public static final String ROUTING_FIELD = "routing_s"; 39 | } 40 | -------------------------------------------------------------------------------- /src/main/java/org/apache/solr/couchbase/CouchbaseRecordHandler.java: -------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | 3 | import java.text.DateFormat; 4 | import java.text.ParseException; 5 | import java.text.SimpleDateFormat; 6 | import java.util.ArrayList; 7 | import java.util.Date; 8 | import java.util.HashMap; 9 | import java.util.List; 10 | import java.util.Map; 11 | 12 | import org.apache.solr.common.SolrInputDocument; 13 | import org.apache.solr.common.util.JsonRecordReader.Handler; 14 | import org.apache.solr.request.SolrQueryRequest; 15 | import org.slf4j.Logger; 16 | import 
org.slf4j.LoggerFactory; 17 | 18 | public class CouchbaseRecordHandler implements Handler{ 19 | 20 | private static final Logger LOG = LoggerFactory.getLogger(CouchbaseRequestHandler.class); 21 | 22 | List bulkDocsResult = new ArrayList(); 23 | Map revisions = new HashMap(); 24 | SolrCAPIBehaviour capiBehaviour; 25 | SolrInputDocument doc; 26 | SolrQueryRequest req; 27 | int seq = 1; 28 | 29 | public CouchbaseRecordHandler(SolrCAPIBehaviour capiBehaviour, SolrQueryRequest req, SolrInputDocument doc, List bulkDocsResult) { 30 | this.capiBehaviour = capiBehaviour; 31 | this.doc = doc; 32 | this.req = req; 33 | this.bulkDocsResult = bulkDocsResult; 34 | } 35 | 36 | @Override 37 | public void handle(Map record, String path) { 38 | SolrInputDocument solrDoc = doc.deepCopy(); 39 | Map mapping = SolrUtils.mapToSolrDynamicFields(record); 40 | if(path != null && !path.equals("/")) { 41 | solrDoc.setField(CommonConstants.ID_FIELD, (String)doc.getFieldValue(CommonConstants.ID_FIELD) + "-" + seq); 42 | seq++; 43 | } 44 | for(Map.Entry entry : record.entrySet()) { 45 | String key = entry.getKey(); 46 | if(mapping.containsKey(key)) { 47 | key = mapping.get(key); 48 | } 49 | if(entry.getKey().equals("last_modified")) { 50 | DateFormat formatter = new SimpleDateFormat("yyyy-mm-dd HH:mm:ss"); 51 | Date date = null; 52 | try { 53 | date = (Date)formatter.parse((String) entry.getValue()); 54 | } catch (ParseException e) { 55 | LOG.error("Solr Couchbase plugin could not parse date", e); 56 | } 57 | solrDoc.setField(key, date); 58 | } else { 59 | solrDoc.addField(key, entry.getValue()); 60 | } 61 | } 62 | 63 | boolean success = false; 64 | success = capiBehaviour.addDoc(solrDoc, req); 65 | 66 | String itemId = (String) doc.getFieldValue(CommonConstants.ID_FIELD); 67 | String itemRev = (String) doc.getFieldValue(CommonConstants.REVISION_FIELD); 68 | Map itemResponse = new HashMap(); 69 | itemResponse.put("id", itemId); 70 | itemResponse.put("rev", itemRev); 71 | 72 | if(success) { 73 | 
if(!itemRev.equals(revisions.get(itemId))) { 74 | revisions.put(itemId, itemRev); 75 | bulkDocsResult.add(itemResponse); 76 | } 77 | } 78 | } 79 | 80 | } 81 | -------------------------------------------------------------------------------- /src/main/java/org/apache/solr/couchbase/CouchbaseRequestHandler.java: -------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | import java.io.IOException; 3 | import java.net.InetSocketAddress; 4 | import java.net.ServerSocket; 5 | import java.util.ArrayList; 6 | import java.util.Collections; 7 | import java.util.HashMap; 8 | import java.util.Iterator; 9 | import java.util.List; 10 | import java.util.Map; 11 | 12 | import org.apache.http.auth.AuthScope; 13 | import org.apache.http.auth.UsernamePasswordCredentials; 14 | import org.apache.http.client.CredentialsProvider; 15 | import org.apache.http.client.protocol.HttpClientContext; 16 | import org.apache.http.impl.client.BasicCredentialsProvider; 17 | import org.apache.http.impl.client.CloseableHttpClient; 18 | import org.apache.http.impl.client.HttpClients; 19 | import org.apache.solr.common.cloud.Replica; 20 | import org.apache.solr.common.cloud.Slice; 21 | import org.apache.solr.common.cloud.SolrZkClient; 22 | import org.apache.solr.common.cloud.ZkNodeProps; 23 | import org.apache.solr.common.cloud.ZkStateReader; 24 | import org.apache.solr.common.params.CommonParams; 25 | import org.apache.solr.common.params.SolrParams; 26 | import org.apache.solr.common.util.NamedList; 27 | import org.apache.solr.core.CoreContainer; 28 | import org.apache.solr.core.SolrCore; 29 | import org.apache.solr.handler.RequestHandlerBase; 30 | import org.apache.solr.request.SolrQueryRequest; 31 | import org.apache.solr.request.SolrQueryRequestBase; 32 | import org.apache.solr.response.SolrQueryResponse; 33 | import org.apache.solr.update.processor.UpdateRequestProcessorChain; 34 | import org.apache.solr.util.plugin.SolrCoreAware; 35 | 
import org.apache.zookeeper.CreateMode; 36 | import org.apache.zookeeper.KeeperException; 37 | import org.apache.zookeeper.WatchedEvent; 38 | import org.apache.zookeeper.Watcher; 39 | import org.apache.zookeeper.Watcher.Event.EventType; 40 | import org.codehaus.jackson.map.ObjectMapper; 41 | import org.slf4j.Logger; 42 | import org.slf4j.LoggerFactory; 43 | 44 | import com.couchbase.capi.CAPIBehavior; 45 | import com.couchbase.capi.CAPIServer; 46 | import com.couchbase.capi.CouchbaseBehavior; 47 | 48 | 49 | public class CouchbaseRequestHandler extends RequestHandlerBase implements SolrCoreAware { 50 | 51 | private static final Logger LOG = LoggerFactory.getLogger(CouchbaseRequestHandler.class); 52 | private static final String CLUSTER_NAME = "solr-"; 53 | private static final String CLUSTERS_URI = "/pools/default/remoteClusters"; 54 | private static final String CAPISERVER_PATH = "/capi_servers"; 55 | 56 | private CouchbaseBehavior couchbaseBehaviour; 57 | private CAPIBehavior capiBehaviour; 58 | private CAPIServer server; 59 | private String host; 60 | private int port = -1; 61 | private String username; 62 | private String password; 63 | private int numVBuckets; 64 | private TypeSelector typeSelector; 65 | private Settings settings; 66 | private SolrCore core; 67 | private UpdateRequestProcessorChain processorChain; 68 | private SolrZkClient zkClient; 69 | private ZkStateReader zkStateReader; 70 | private boolean commitAfterBatch; 71 | private ElectionWatcher electionWatcher; 72 | private String clusterName; 73 | private CloseableHttpClient httpClient; 74 | private HttpClientContext httpContext; 75 | private ObjectMapper mapper = new ObjectMapper(); 76 | private ArrayList clusterNames; 77 | private String collection; 78 | private boolean isRunning = false; 79 | 80 | private Map documentTypeParentFields; 81 | private Map documentTypeRoutingFields; 82 | private Map buckets = new HashMap(); 83 | 84 | private NamedList couchbaseServer = null; 85 | 86 | @Override 87 | 
/**
 * SolrCoreAware callback, invoked once the core is fully initialized.
 * Captures the core, resolves the default update processor chain, and
 * caches the ZooKeeper client and collection name for later use.
 *
 * NOTE(review): the locally built req/rsp pair is never used afterwards —
 * presumably leftover scaffolding; confirm before removing.
 */
@Override
public void inform(SolrCore core) {
    this.core = core;
    // Anonymous SolrParams with stubbed accessors; only present to satisfy
    // the SolrQueryRequestBase constructor.
    SolrQueryRequest req = new SolrQueryRequestBase(getCore(), new SolrParams() {

        @Override
        public String[] getParams(String param) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public Iterator getParameterNamesIterator() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public String get(String param) {
            // TODO Auto-generated method stub
            return null;
        }
    }) {};
    SolrQueryResponse rsp = new SolrQueryResponse();
    // "" selects the default (unnamed) update request processor chain.
    processorChain =
            getCore().getUpdateProcessingChain("");
    // Null when not running in SolrCloud mode (see getZkClient()).
    zkClient = getZkClient();
    collection = getCore().getName();
}
"127.0.0.1" : params.get(CommonConstants.HOST_IP).toString(); 124 | username = params.get(CommonConstants.USERNAME_FIELD).toString(); 125 | password = params.get(CommonConstants.PASSWORD_FIELD).toString(); 126 | numVBuckets = (int) params.get(CommonConstants.NUM_VBUCKETS_FIELD); 127 | port = (int)params.get(CommonConstants.PORT_FIELD); 128 | commitAfterBatch = (boolean)params.get(CommonConstants.COMMIT_AFTER_BATCH_FIELD); 129 | 130 | List> bucketslist = args.getAll(CommonConstants.BUCKET_MARK); 131 | for(NamedList bucket : bucketslist) { 132 | String name = (String)bucket.get(CommonConstants.NAME_FIELD); 133 | String splitpath = (String)bucket.get(CommonConstants.SPLITPATH_FIELD); 134 | NamedList mappingslist = (NamedList) bucket.get(CommonConstants.FIELD_MAPPING_FIELD); 135 | Map fieldmappings = SolrParams.toMap(mappingslist); 136 | Bucket b = new Bucket(name, splitpath, fieldmappings); 137 | buckets.put(name, b); 138 | } 139 | 140 | couchbaseServer = (NamedList)params.get(CommonConstants.COUCHBASE_SERVER_FIELD); 141 | if(couchbaseServer == null) { 142 | LOG.info("No Couchbase server configured!"); 143 | } 144 | else if(couchbaseServer != null && couchbaseServer.size() <= 0) { 145 | LOG.error("Missing content for Couchbase server!"); 146 | } 147 | } 148 | 149 | @Override 150 | public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) 151 | throws Exception { 152 | SolrParams params = req.getParams(); 153 | String action = params.get(CommonParams.ACTION); 154 | 155 | action = action.toLowerCase(); 156 | action = action.trim(); 157 | 158 | switch(action) { 159 | case "start" : 160 | handleStart(); 161 | break; 162 | case "stop" : 163 | handleStop(); 164 | break; 165 | } 166 | } 167 | 168 | @Override 169 | public String getDescription() { 170 | return "Couchbase plugin"; 171 | } 172 | 173 | public SolrZkClient getZkClient() { 174 | SolrZkClient client = null; 175 | if(getCore() != null) { 176 | CoreContainer container = 
getCore().getCoreDescriptor().getCoreContainer(); 177 | if(container.isZooKeeperAware()) { 178 | client = container.getZkController().getZkClient(); 179 | } 180 | } 181 | return client; 182 | } 183 | public boolean isSolrCloud() { 184 | return getCore().getCoreDescriptor().getCoreContainer().isZooKeeperAware(); 185 | } 186 | 187 | public void handleStart() { 188 | if(!isRunning) { 189 | //Check if in SolrCloud mode 190 | if(isSolrCloud()) { 191 | checkIfIamLeader(); 192 | } else { 193 | startCouchbaseReplica(); 194 | 195 | // Create Couchbase XDCR replication after the plugin is activated 196 | if(couchbaseServer != null && couchbaseServer.size() > 0) { 197 | CouchbaseUtils.createCouchbaseXDCRReplications(couchbaseServer, host, String.valueOf(port), username, password); 198 | } 199 | } 200 | } else { 201 | LOG.info("CAPIServer already running."); 202 | } 203 | } 204 | 205 | public void handleStop() { 206 | if(isRunning) { 207 | stopCouchbaseReplica(); 208 | } else { 209 | LOG.info("CAPIServer is not running."); 210 | } 211 | } 212 | 213 | public void checkIfIamLeader() { 214 | //at first, check if I am the first shard leader, which shoudld start Couchbase replica 215 | zkStateReader = new ZkStateReader(getZkClient()); 216 | try { 217 | zkStateReader.updateClusterState(true); 218 | } catch (KeeperException | InterruptedException e1) { 219 | LOG.error("Error while updating Cluster State!", e1); 220 | } 221 | String collection = getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName(); 222 | Map slices = zkStateReader.getClusterState().getActiveSlicesMap(collection); 223 | List sliceNames = new ArrayList(slices.keySet()); 224 | Collections.sort(sliceNames); 225 | String shard = sliceNames.get(0); 226 | Replica replica = null; 227 | try { 228 | replica = zkStateReader.getLeaderRetry(collection, shard); 229 | } catch (InterruptedException e) { 230 | LOG.error("Could not get leader!", e); 231 | } 232 | final String coreNodeName = 
getCore().getCoreDescriptor().getCloudDescriptor().getCoreNodeName(); 233 | if(replica != null && coreNodeName != null && replica.getName().equals(coreNodeName)) { // I am the leader 234 | startCouchbaseReplica(); 235 | } else { // I'm not the leader, watch leader. 236 | String watchedNode = "/collections/" + collection + "/leaders/shard1"; 237 | electionWatcher = new ElectionWatcher(coreNodeName, watchedNode); 238 | try { 239 | zkClient.getData(watchedNode, electionWatcher, null, true); 240 | } catch (KeeperException e) { 241 | LOG.warn("Failed setting watch", e); 242 | // we couldn't set our watch - the node before us may already be down? 243 | // we need to check if we are the leader again 244 | checkIfIamLeader(); 245 | } catch (InterruptedException e) { 246 | LOG.warn("Failed setting watch", e); 247 | } 248 | } 249 | } 250 | 251 | public static int checkPort(int port) { 252 | ServerSocket s = null; 253 | int result = -1; 254 | for(;;) { 255 | try { 256 | s = new ServerSocket(port); 257 | result = port; 258 | s.close(); 259 | break; 260 | } catch (IOException e) { 261 | //port occupied, find another one 262 | try { 263 | s = new ServerSocket(0); 264 | result = s.getLocalPort(); 265 | s.close(); 266 | } catch (IOException e1) { 267 | LOG.error("Could not find a free port!", e); 268 | } 269 | } 270 | } 271 | 272 | return result; 273 | } 274 | 275 | public void createZKCapiServer() { 276 | Map properties = new HashMap(); 277 | properties.put("core", collection); 278 | properties.put("node_name", "127.0.0.1:" + port + "_capiserver"); 279 | properties.put("host", "localhost"); 280 | properties.put("port", port); 281 | 282 | ZkNodeProps nodeProps = new ZkNodeProps(properties); 283 | try { 284 | zkClient.makePath(CAPISERVER_PATH + "/" + properties.get("node_name"), ZkStateReader.toJSON(nodeProps), 285 | CreateMode.EPHEMERAL, true); 286 | } catch (KeeperException | InterruptedException e) { 287 | LOG.error("CAPIServer could not create ephemeral node in ZooKeeper!", e); 
288 | } 289 | } 290 | 291 | public void startCouchbaseReplica() { 292 | settings = new Settings(); 293 | this.documentTypeParentFields = settings.getByPrefix("couchbase.documentTypeParentFields."); 294 | for (String key: documentTypeParentFields.keySet()) { 295 | String parentField = documentTypeParentFields.get(key); 296 | LOG.info("Using field {} as parent for type {}", parentField, key); 297 | } 298 | 299 | this.documentTypeRoutingFields = settings.getByPrefix("couchbase.documentTypeRoutingFields."); 300 | for (String key: documentTypeRoutingFields.keySet()) { 301 | String routingField = documentTypeRoutingFields.get(key); 302 | LOG.info("Using field {} as routing for type {}", routingField, key); 303 | } 304 | typeSelector = new DefaultTypeSelector(); 305 | typeSelector.configure(settings); 306 | couchbaseBehaviour = new SolrCouchbaseBehaviour(this); 307 | port = checkPort(port); 308 | capiBehaviour = new SolrCAPIBehaviour(this, typeSelector, documentTypeParentFields, documentTypeRoutingFields, commitAfterBatch); 309 | server = new CAPIServer(capiBehaviour, couchbaseBehaviour, new InetSocketAddress("0.0.0.0", port), username, password, numVBuckets); 310 | try{ 311 | server.start(); 312 | port = server.getPort(); 313 | LOG.info(String.format("CAPIServer started on port %d", port)); 314 | // configureXDCR(); 315 | if(zkClient != null) { 316 | createZKCapiServer(); 317 | } 318 | isRunning = true; 319 | } catch (Exception e) { 320 | LOG.error("Could not start CAPIServer!", e); 321 | } 322 | } 323 | 324 | public void stopCouchbaseReplica() { 325 | if(server != null) { 326 | try { 327 | server.stop(); 328 | isRunning = false; 329 | } catch (Exception e) { 330 | LOG.error("Error while stopping Couchbase server.", e); 331 | } 332 | } 333 | } 334 | 335 | public UpdateRequestProcessorChain getProcessorChain() { 336 | return this.processorChain; 337 | } 338 | 339 | public SolrCore getCore() { 340 | return this.core; 341 | } 342 | 343 | public Map getBuckets() { 344 | 
return buckets; 345 | } 346 | 347 | public Bucket getBucket(String name) { 348 | return buckets.get(name); 349 | } 350 | 351 | public String getHost() { 352 | return host; 353 | } 354 | 355 | public int getPort() { 356 | return port; 357 | } 358 | 359 | public String getUsername() { 360 | return username; 361 | } 362 | 363 | public String getPassword() { 364 | return password; 365 | } 366 | 367 | /** Create a Map<String,Object> from a NamedList given no keys are repeated */ 368 | public static Map toMap(NamedList params) { 369 | HashMap map = new HashMap<>(); 370 | for (int i=0; i getCollectionsLeaders() { 377 | List capiservers = new ArrayList(); 378 | Map capiserversProps = new HashMap(); 379 | try { 380 | zkStateReader.updateClusterState(true); 381 | capiservers = zkClient.getChildren(CAPISERVER_PATH, null, true); 382 | for(String serverName : capiservers) { 383 | ZkNodeProps props = ZkNodeProps.load(zkClient.getData( 384 | CAPISERVER_PATH + "/" + serverName, null, null, true)); 385 | capiserversProps.put(serverName, props); 386 | } 387 | } catch (KeeperException | InterruptedException e1) { 388 | LOG.error("Error while updating Cluster State!", e1); 389 | } 390 | // ClusterState state = zkStateReader.getClusterState(); 391 | // Set collections = state.getCollections(); 392 | // for(String collection : collections) { 393 | // List activeSlices = new ArrayList(state.getActiveSlices(collection)); 394 | // for(Slice slice : activeSlices) { 395 | // Replica replica = slice.getLeader(); 396 | // Map properties = replica.getProperties(); 397 | // String baseUrl = (String) properties.get("base_url"); 398 | // String[] splits = baseUrl.split(":"); 399 | // String host = splits[1].substring(splits[1].lastIndexOf("/")+1, splits[1].length()); 400 | // String port = splits[2].substring(0, splits[2].indexOf("/")); 401 | // leaders.put(host, port); 402 | // } 403 | // } 404 | return capiserversProps; 405 | } 406 | 407 | private class ElectionWatcher implements Watcher { 408 | 
409 | final String myNode,watchedNode; 410 | private boolean canceled = false; 411 | 412 | private ElectionWatcher(String myNode, String watchedNode) { 413 | this.myNode = myNode; 414 | this.watchedNode = watchedNode; 415 | } 416 | 417 | void cancel(String leaderSeqPath){ 418 | canceled = true; 419 | } 420 | 421 | @Override 422 | public void process(WatchedEvent event) { 423 | if (EventType.None.equals(event.getType())) { 424 | return; 425 | } 426 | if (canceled) { 427 | LOG.info("This watcher is not active anymore {}", myNode); 428 | try { 429 | zkClient.delete(myNode, -1, true); 430 | } catch (KeeperException.NoNodeException nne) { 431 | // expected . don't do anything 432 | } catch (Exception e) { 433 | LOG.warn("My watched node still exists and can't remove " + myNode, e); 434 | } 435 | return; 436 | } 437 | try { 438 | // am I the next leader? 439 | checkIfIamLeader(); 440 | } catch (Exception e) { 441 | LOG.warn("", e); 442 | } 443 | } 444 | 445 | } 446 | } 447 | -------------------------------------------------------------------------------- /src/main/java/org/apache/solr/couchbase/CouchbaseUtils.java: -------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | 3 | import java.util.ArrayList; 4 | import java.util.List; 5 | 6 | import javax.ws.rs.core.MediaType; 7 | import javax.ws.rs.core.MultivaluedMap; 8 | import javax.ws.rs.core.UriBuilder; 9 | 10 | import org.codehaus.jettison.json.*; 11 | 12 | import org.apache.solr.common.util.NamedList; 13 | import org.slf4j.Logger; 14 | import org.slf4j.LoggerFactory; 15 | 16 | import com.sun.jersey.api.client.Client; 17 | import com.sun.jersey.api.client.ClientResponse; 18 | import com.sun.jersey.api.client.WebResource; 19 | import com.sun.jersey.api.client.config.ClientConfig; 20 | import com.sun.jersey.api.client.config.DefaultClientConfig; 21 | import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter; 22 | import 
com.sun.jersey.core.util.MultivaluedMapImpl; 23 | 24 | public class CouchbaseUtils { 25 | private static final Logger LOG = LoggerFactory.getLogger(CouchbaseUtils.class); 26 | 27 | private static String couchbaseIp = null; 28 | private static String couchbaseUsername = null; 29 | private static String couchbasePassword = null; 30 | private static String clusterName = null; 31 | private static List fromBuckets = new ArrayList<>(); 32 | private static List toBuckets = new ArrayList<>(); 33 | 34 | /** 35 | * Method creating Couchbase XDCR remote cluster and XDCR replication(s). 36 | * @param NamedList couchbaseServer representing the configuration for Couchbase XDCR setting. 37 | * @param cbClientHost representing the host IP for Couchbase client. 38 | * @param cbClientPort representing the port for Couchbase client. 39 | * @param cbClientUsername representing the username for Couchbase client. 40 | * @param cbClientPassword representing the password for Couchbase client. 41 | * See README.md for sample format. 42 | */ 43 | public static void createCouchbaseXDCRReplications(NamedList couchbaseServer, 44 | String cbClientHost, String cbClientPort, 45 | String cbClientUsername, String cbClientPassword) { 46 | boolean initSuc = init(couchbaseServer); 47 | String uuid = null; 48 | if(initSuc) { 49 | uuid = createRemoteClusters(cbClientUsername, cbClientPassword, cbClientHost, cbClientPort); 50 | } 51 | if(uuid != null) { 52 | createReplication(uuid); 53 | } 54 | } 55 | 56 | /** 57 | * Method parsing NamedList which passed from CouchbaseRequestHandler. 58 | * @param NamedList couchbaseServer representing the configuration for Couchbase XDCR setting. 59 | * See README.md for sample format. 
60 | */ 61 | private static boolean init(NamedList couchbaseServer) { 62 | // parse basic information for creating Couchbase remote cluster 63 | couchbaseIp = (String)couchbaseServer.get(CommonConstants.COUCHBASE_HOST_IP_FIELD); 64 | // TODO: May want to consider to encrypt username and password in solrconfig.xml 65 | couchbaseUsername = (String)couchbaseServer.get(CommonConstants.COUCHBASE_USERNAME_FIELD); 66 | couchbasePassword = (String)couchbaseServer.get(CommonConstants.COUCHBASE_PASSWORD_FIELD); 67 | clusterName = (String)couchbaseServer.get(CommonConstants.COUCHBASE_CLUSTER_NAME_FIELD); 68 | 69 | if(couchbaseIp == null || couchbaseUsername == null || couchbasePassword == null || clusterName == null) { 70 | LOG.warn("couchbaseServer configuration is missing ipAddress/couchbaseUsername/couchbasePassword/clusterName attribute(s)"); 71 | return false; 72 | } 73 | 74 | // parse buckets information 75 | List> bucketsList = couchbaseServer.getAll(CommonConstants.COUCHBASE_FROM_BUCKET_INFO_FIELD); 76 | for(NamedList bucketInfo : bucketsList) { 77 | String fromBucket = (String)bucketInfo.get(CommonConstants.COUCHBASE_FROM_BUCKET_NAME_FIELD); 78 | String toBucket = (String)bucketInfo.get(CommonConstants.COUCHBASE_TO_BUCKET_NAME_FIELD); 79 | 80 | if(fromBucket != null && toBucket != null) { 81 | fromBuckets.add(fromBucket); 82 | toBuckets.add(toBucket); 83 | } 84 | else { 85 | LOG.warn("couchbaseServer bucketInfo configuration is missing fromBucketName/toBucketName attribute(s)"); 86 | } 87 | } 88 | return true; 89 | } 90 | 91 | // Creating destination cluster reference 92 | private static String createRemoteClusters(String cbClientUsername, String cbClientPassword, String host, String port) { 93 | String uuid = getRemoteClusterRef(cbClientUsername, cbClientPassword, host, port); 94 | 95 | if(uuid != null) { 96 | ClientConfig config = new DefaultClientConfig(); 97 | Client client = Client.create(config); 98 | client.addFilter(new HTTPBasicAuthFilter(couchbaseUsername, 
couchbasePassword)); 99 | 100 | String url = "http://" + couchbaseIp + ":" + "8091"; 101 | WebResource couchbaseService = client.resource(UriBuilder.fromUri(url).build()); 102 | 103 | MultivaluedMap formData = new MultivaluedMapImpl(); 104 | formData.add("uuid", uuid); 105 | formData.add("name", clusterName); 106 | formData.add("hostname", host + ":" + port); 107 | formData.add("username", cbClientUsername); 108 | formData.add("password", cbClientPassword); 109 | 110 | ClientResponse response = couchbaseService.path("pools").path("default").path("remoteClusters").type(MediaType.APPLICATION_FORM_URLENCODED_TYPE).post(ClientResponse.class, formData); 111 | if(response.getStatus() == 200) { 112 | LOG.debug("CouchbaseUtils.createRemoteClusters : " + "Remote cluter = " + clusterName + " created."); 113 | return uuid; 114 | } 115 | else { 116 | LOG.warn("CouchbaseUtils.createRemoteClusters : " + "Remote cluter = " + clusterName + " creation failed with msg = " + response.getEntity(String.class)); 117 | } 118 | } 119 | return null; 120 | } 121 | 122 | // Getting destination cluster reference. 
Notice that the destination/remote cluster here is this plugin itself 123 | private static String getRemoteClusterRef(String cbClientUsername, String cbClientPassword, String host, String port) { 124 | ClientConfig remoteConfig = new DefaultClientConfig(); 125 | Client remoteClient = Client.create(remoteConfig); 126 | remoteClient.addFilter(new HTTPBasicAuthFilter(cbClientUsername, cbClientPassword)); 127 | 128 | String url = "http://" + host + ":" + port; 129 | WebResource remoteServerService = remoteClient.resource(UriBuilder.fromUri(url).build()); 130 | 131 | String uuid = null; 132 | 133 | try { 134 | JSONObject obj = new JSONObject(remoteServerService.path("pools").path("default").path("remoteCluster").accept(MediaType.APPLICATION_JSON).get(String.class)); 135 | obj = obj.getJSONObject("buckets"); 136 | String uri = obj.getString("uri"); 137 | uuid = uri.substring(uri.indexOf("uuid=")+"uuid=".length()); 138 | } 139 | catch(Exception e) { 140 | LOG.warn("CouchbaseUtils.getRemoteClusterRef : " + e.getMessage()); 141 | } 142 | return uuid; 143 | } 144 | 145 | // Create XDCR replications 146 | public static void createReplication(String uuid) { 147 | ClientConfig config = new DefaultClientConfig(); 148 | Client client = Client.create(config); 149 | client.addFilter(new HTTPBasicAuthFilter(couchbaseUsername, couchbasePassword)); 150 | 151 | String url = "http://" + couchbaseIp + ":" + "8091"; 152 | WebResource couchbaseService = client.resource(UriBuilder.fromUri(url).build()); 153 | 154 | for(int i=0; i documentTypePatternStrings; 15 | private Map documentTypePatterns; 16 | 17 | @Override 18 | public void configure(Settings settings) { 19 | this.defaultDocumentType = settings.get("couchbase.defaultDocumentType", DefaultTypeSelector.DEFAULT_DOCUMENT_TYPE_DOCUMENT); 20 | this.documentTypePatterns = new HashMap(); 21 | this.documentTypePatternStrings = settings.getByPrefix("couchbase.documentTypes."); 22 | for (String key : documentTypePatternStrings.keySet()) { 23 | 
/**
 * Flat string-to-string configuration map with convenience accessors for
 * defaulted lookups and prefix-scoped sub-views.
 *
 * <p>Generic parameters restored: the raw {@code extends HashMap} form cannot
 * assign {@code this.get(key)} to a {@code String} without a cast.</p>
 */
public class Settings extends HashMap<String, String> {

    private static final long serialVersionUID = 1657776653494453826L;

    /**
     * Returns the value mapped to {@code key}, or {@code defaultValue} when
     * the key is absent (or mapped to null).
     */
    public String get(String key, String defaultValue) {
        String value = this.get(key);
        return value != null ? value : defaultValue;
    }

    /**
     * Returns a new map containing every entry whose key starts with
     * {@code prefix}. Keys keep their full (prefixed) form.
     */
    public Map<String, String> getByPrefix(String prefix) {
        Map<String, String> results = new HashMap<>();
        for (Map.Entry<String, String> entry : this.entrySet()) {
            if (entry.getKey().startsWith(prefix)) {
                results.put(entry.getKey(), entry.getValue());
            }
        }
        return results;
    }
}
org.apache.commons.codec.binary.Base64; 13 | import org.apache.lucene.index.Term; 14 | import org.apache.lucene.queryparser.xml.builders.BooleanQueryBuilder; 15 | import org.apache.lucene.search.BooleanClause.Occur; 16 | import org.apache.lucene.search.BooleanQuery; 17 | import org.apache.lucene.search.PrefixQuery; 18 | import org.apache.lucene.search.TermQuery; 19 | import org.apache.solr.common.SolrInputDocument; 20 | import org.apache.solr.common.params.SolrParams; 21 | import org.apache.solr.common.util.JsonRecordReader; 22 | import org.apache.solr.common.util.JsonRecordReader.Handler; 23 | import org.apache.solr.request.SolrQueryRequest; 24 | import org.apache.solr.request.SolrQueryRequestBase; 25 | import org.apache.solr.response.SolrQueryResponse; 26 | import org.apache.solr.search.DocIterator; 27 | import org.apache.solr.search.DocSet; 28 | import org.apache.solr.search.SolrIndexSearcher; 29 | import org.apache.solr.update.AddUpdateCommand; 30 | import org.apache.solr.update.CommitUpdateCommand; 31 | import org.apache.solr.update.DeleteUpdateCommand; 32 | import org.apache.solr.update.processor.UpdateRequestProcessor; 33 | import org.apache.solr.util.RefCounted; 34 | import org.codehaus.jackson.map.ObjectMapper; 35 | import org.noggit.JSONParser; 36 | import org.noggit.JSONUtil; 37 | import org.slf4j.Logger; 38 | import org.slf4j.LoggerFactory; 39 | 40 | import com.couchbase.capi.CAPIBehavior; 41 | import com.ibm.icu.text.DateFormat.BooleanAttribute; 42 | 43 | public class SolrCAPIBehaviour implements CAPIBehavior { 44 | 45 | private static final Logger LOG = LoggerFactory.getLogger(SolrCAPIBehaviour.class); 46 | 47 | protected ObjectMapper mapper = new ObjectMapper(); 48 | private TypeSelector typeSelector; 49 | protected Map documentTypeParentFields; 50 | protected Map documentTypeRoutingFields; 51 | protected CouchbaseRequestHandler handler; 52 | private boolean commitAfterBatch; 53 | 54 | protected Counter activeRevsDiffRequests; 55 | protected Counter 
meanRevsDiffRequests; 56 | protected Counter activeBulkDocsRequests; 57 | protected Counter meanBulkDocsRequests; 58 | protected Counter totalTooManyConcurrentRequestsErrors; 59 | 60 | public SolrCAPIBehaviour(CouchbaseRequestHandler handler, TypeSelector typeSelector, Map documentTypeParentFields, Map documentTypeRoutingFields, boolean commitAfterBatch) { 61 | this.handler = handler; 62 | this.typeSelector = typeSelector; 63 | this.documentTypeParentFields = documentTypeParentFields; 64 | this.documentTypeRoutingFields = documentTypeRoutingFields; 65 | this.commitAfterBatch = commitAfterBatch; 66 | 67 | activeRevsDiffRequests = new Counter(); 68 | meanRevsDiffRequests = new Counter(); 69 | activeBulkDocsRequests = new Counter(); 70 | meanBulkDocsRequests = new Counter(); 71 | totalTooManyConcurrentRequestsErrors = new Counter(); 72 | } 73 | 74 | public Map welcome() { 75 | Map responseMap = new HashMap(); 76 | responseMap.put("welcome", "solr-couchbase-plugin"); 77 | return responseMap; 78 | } 79 | 80 | public String databaseExists(String database) { 81 | String bucketName = getBucketNameFromDatabase(database); 82 | if(handler.getBucket(bucketName) != null) { 83 | return null; 84 | } 85 | return "missing"; 86 | } 87 | 88 | protected String getBucketNameFromDatabase(String database) { 89 | String[] pieces = database.split("/", 2); 90 | if(pieces.length < 2) { 91 | return database; 92 | } else { 93 | return pieces[0]; 94 | } 95 | } 96 | 97 | public Map getDatabaseDetails(String database) { 98 | String doesNotExistReason = databaseExists(database); 99 | if(doesNotExistReason == null) { 100 | Map responseMap = new HashMap(); 101 | responseMap.put("db_name", getDatabaseNameWithoutUUID(database)); 102 | return responseMap; 103 | } 104 | return null; 105 | } 106 | 107 | protected String getDatabaseNameWithoutUUID(String database) { 108 | int semicolonIndex = database.indexOf(';'); 109 | if(semicolonIndex >= 0) { 110 | return database.substring(0, semicolonIndex); 111 | } 
112 | return database; 113 | } 114 | 115 | public boolean createDatabase(String database) { 116 | // FIXME add test 117 | return false; 118 | } 119 | 120 | public boolean deleteDatabase(String database) { 121 | // FIXME add test 122 | return false; 123 | } 124 | 125 | public boolean ensureFullCommit(String database) { 126 | if("default".equals(database)) { 127 | return true; 128 | } 129 | return false; 130 | } 131 | 132 | public Map revsDiff(String database, 133 | Map revsMap) { 134 | long start = System.currentTimeMillis(); 135 | activeBulkDocsRequests.inc(); 136 | 137 | Map responseMap = null; 138 | 139 | String bucketName = getBucketNameFromDatabase(database); 140 | if(handler.getBucket(bucketName) != null) { 141 | responseMap = new HashMap(); 142 | for (Entry entry : revsMap.entrySet()) { 143 | RefCounted searcher = handler.getCore().getSearcher(); 144 | String id = entry.getKey(); 145 | String revs = (String) entry.getValue(); 146 | TermQuery tQuery = new TermQuery(new Term(id)); 147 | PrefixQuery pQuery = new PrefixQuery(new Term(id + "-")); 148 | BooleanQuery query = new BooleanQuery(); 149 | query.add(tQuery, Occur.SHOULD); 150 | query.add(pQuery, Occur.SHOULD); 151 | try { 152 | DocSet docs = searcher.get().getDocSet(query, (DocSet)null); 153 | DocIterator iterator = docs.iterator(); 154 | if(docs.size()>0) { 155 | while(iterator.hasNext()) { 156 | int docId = iterator.nextDoc(); 157 | if(!revs.equals(searcher.get().doc(docId).get(CommonConstants.REVISION_FIELD))) { 158 | Map rev = new HashMap(); 159 | rev.put("missing", revs); 160 | responseMap.put(id, rev); 161 | } 162 | } 163 | } else { 164 | Map rev = new HashMap(); 165 | rev.put("missing", revs); 166 | responseMap.put(id, rev); 167 | } 168 | } catch (IOException e) { 169 | LOG.error("Could not do revsDiff!", e); 170 | } finally { 171 | searcher.decref(); 172 | } 173 | } 174 | } 175 | long end = System.currentTimeMillis(); 176 | meanRevsDiffRequests.inc(end - start); 177 | activeRevsDiffRequests.dec(); 
178 | return responseMap; 179 | } 180 | 181 | public List bulkDocs(String database, List> docs) { 182 | 183 | long start = System.currentTimeMillis(); 184 | activeBulkDocsRequests.inc(); 185 | 186 | String bucketName = getBucketNameFromDatabase(database); 187 | SolrQueryRequest req = new SolrQueryRequestBase(handler.getCore(), new SolrParams() { 188 | 189 | @Override 190 | public String[] getParams(String param) { 191 | // TODO Auto-generated method stub 192 | return null; 193 | } 194 | 195 | @Override 196 | public Iterator getParameterNamesIterator() { 197 | // TODO Auto-generated method stub 198 | return null; 199 | } 200 | 201 | @Override 202 | public String get(String param) { 203 | // TODO Auto-generated method stub 204 | return null; 205 | } 206 | }) {}; 207 | // keep a map of the id - rev for building the response 208 | List bulkDocsResult = new ArrayList(); 209 | Map revisions = new HashMap(); 210 | 211 | if(handler.getBucket(bucketName) != null) { 212 | 213 | for (Map doc : docs) { 214 | 215 | // these are the top-level elements that could be in the document sent by Couchbase 216 | Map meta = (Map)doc.get("meta"); 217 | String metaJson = JSONUtil.toJSON(meta); 218 | Map jsonMap = (Map)doc.get("json"); 219 | String base64 = (String)doc.get("base64"); 220 | String jsonString = null; 221 | 222 | if(meta == null) { 223 | // if there is no meta-data section, there is nothing we can do 224 | LOG.warn("Document without meta in bulk_docs, ignoring...."); 225 | continue; 226 | } else if("non-JSON mode".equals(meta.get("att_reason"))) { 227 | // optimization, this tells us the body isn't json 228 | jsonMap = new HashMap(); 229 | } else if(jsonMap == null && base64 != null) { 230 | byte[] decodedData = Base64.decodeBase64(base64); 231 | try { 232 | jsonString = new String(decodedData, "UTF-8"); 233 | // now try to parse the decoded data as json 234 | jsonMap = (Map) mapper.readValue(decodedData, Map.class); 235 | } 236 | catch(Exception e) { 237 | LOG.error("Unable 
to parse decoded base64 data as JSON, indexing stub for id: {}", meta.get("id")); 238 | LOG.error("Body was: {} Parse error was: {}", new String(decodedData), e); 239 | jsonMap = new HashMap(); 240 | 241 | } 242 | } 243 | 244 | // at this point we know we have the document meta-data 245 | // and the document contents to be indexed are in json 246 | 247 | String id = (String)meta.get("id"); 248 | String rev = (String)meta.get("rev"); 249 | 250 | SolrInputDocument solrDoc = new SolrInputDocument(); 251 | solrDoc.addField(CommonConstants.ID_FIELD, id); 252 | solrDoc.addField(CommonConstants.REVISION_FIELD, rev); 253 | solrDoc.addField(CommonConstants.JSON_FIELD, jsonString); 254 | solrDoc.addField(CommonConstants.METADATA_FIELD, metaJson); 255 | 256 | Map toBeIndexed = new HashMap(); 257 | toBeIndexed.put("meta", meta); 258 | toBeIndexed.put("doc", jsonMap); 259 | 260 | long ttl = 0; 261 | Integer expiration = (Integer)meta.get("expiration"); 262 | if(expiration != null) { 263 | ttl = (expiration.longValue() * 1000) - System.currentTimeMillis(); 264 | } 265 | if(ttl > 0) { 266 | solrDoc.addField(CommonConstants.TTL_FIELD, ttl); 267 | } 268 | 269 | boolean deleted = meta.containsKey("deleted") ? 
(Boolean)meta.get("deleted") : false; 270 | 271 | if(!deleted) { 272 | String parentField = null; 273 | String routingField = null; 274 | String type = typeSelector.getType(bucketName, id); 275 | if(documentTypeParentFields != null && documentTypeParentFields.containsKey(type)) { 276 | parentField = documentTypeParentFields.get(type); 277 | } 278 | if(documentTypeRoutingFields != null && documentTypeRoutingFields.containsKey(type)) { 279 | routingField = documentTypeRoutingFields.get(type); 280 | } 281 | 282 | if(parentField != null) { 283 | Object parent = jsonMapPath(toBeIndexed, parentField); 284 | if (parent != null && parent instanceof String ) { 285 | solrDoc.addField(CommonConstants.PARENT_FIELD, parent); 286 | } else { 287 | LOG.warn("Unabled to determine parent value from parent field {} for doc id {}", parentField, id); 288 | } 289 | } 290 | if(routingField != null) { 291 | Object routing = jsonMapPath(toBeIndexed, routingField); 292 | if (routing != null && routing instanceof String) { 293 | solrDoc.addField(CommonConstants.ROUTING_FIELD, routing); 294 | } else { 295 | LOG.warn("Unable to determine routing value from routing field {} for doc id {}", routingField, id); 296 | } 297 | } 298 | 299 | //extract and map json fields 300 | JsonRecordReader rr = JsonRecordReader.getInst(handler.getBucket(bucketName).getSplitpath(), 301 | new ArrayList(handler.getBucket(bucketName).getFieldmapping().values())); 302 | if(jsonString == null) { 303 | jsonString=""; 304 | } 305 | JSONParser parser = new JSONParser(jsonString); 306 | Handler handler = new CouchbaseRecordHandler(this, req, solrDoc, bulkDocsResult); 307 | try { 308 | rr.streamRecords(parser, handler); 309 | } catch (IOException e) { 310 | LOG.error("Cannot parse Couchbase record!", e); 311 | } 312 | } else { //document deleted 313 | boolean success = deleteDoc(id, req); 314 | if(success) { 315 | Map itemResponse = new HashMap(); 316 | itemResponse.put("id", id); 317 | itemResponse.put("rev", rev); 318 | 
if(!rev.equals(revisions.get(id))) { 319 | revisions.put(id, rev); 320 | bulkDocsResult.add(itemResponse); 321 | } 322 | } 323 | } 324 | } 325 | if(commitAfterBatch && bulkDocsResult.size() > 0) { 326 | commit(req); 327 | } 328 | } else { 329 | LOG.debug("Bucket \"" + bucketName + "\" is not configured with this plugin."); 330 | } 331 | 332 | long end = System.currentTimeMillis(); 333 | meanBulkDocsRequests.inc(end - start); 334 | activeBulkDocsRequests.dec(); 335 | return bulkDocsResult; 336 | } 337 | 338 | public Map getDocument(String database, String docId) { 339 | if("default".equals(database)) { 340 | if("docid".equals(docId)) { 341 | Map document = new HashMap(); 342 | document.put("_id", "docid"); 343 | document.put("_rev", "1-abc"); 344 | document.put("value", "test"); 345 | return document; 346 | } 347 | } 348 | return null; 349 | } 350 | 351 | public Map getLocalDocument(String database, String docId) { 352 | if("default".equals(database)) { 353 | if("_local/docid".equals(docId)) { 354 | Map document = new HashMap(); 355 | document.put("_id", "_local/docid"); 356 | document.put("_rev", "1-abc"); 357 | document.put("value", "test"); 358 | return document; 359 | } else if("_local/441-0921e80de6603d60b1d553bb7c253def/beer-sample/beer-sample".equals(docId)) { 360 | Map historyItem = new HashMap(); 361 | historyItem.put("session_id", "121f9c416336108dd0b891a054f9b878"); 362 | historyItem.put("start_time", "Thu, 30 Aug 2012 18:22:02 GMT"); 363 | historyItem.put("end_time", "Thu, 30 Aug 2012 18:22:02 GMT"); 364 | historyItem.put("start_last_seq", 0); 365 | historyItem.put("end_last_seq", 10); 366 | historyItem.put("recorded_seq", 10); 367 | historyItem.put("docs_checked", 10); 368 | historyItem.put("docs_written", 10); 369 | 370 | List history = new ArrayList(); 371 | history.add(historyItem); 372 | 373 | Map document = new HashMap(); 374 | document.put("session_id", "121f9c416336108dd0b891a054f9b878"); 375 | document.put("source_last_seq", 10); 376 | 
document.put("start_time", "Thu, 30 Aug 2012 18:22:02 GMT"); 377 | document.put("end_time", "Thu, 30 Aug 2012 18:22:02 GMT"); 378 | document.put("docs_checked", 10); 379 | document.put("docs_written", 10); 380 | document.put("history", history); 381 | return document; 382 | } 383 | } 384 | return null; 385 | } 386 | 387 | public String storeDocument(String database, String docId, 388 | Map document) { 389 | // FIXME add test 390 | return null; 391 | } 392 | 393 | public String storeLocalDocument(String database, String docId, 394 | Map document) { 395 | // FIXME add test 396 | return null; 397 | } 398 | 399 | public InputStream getAttachment(String database, String docId, 400 | String attachmentName) { 401 | // FIXME add test 402 | return null; 403 | } 404 | 405 | public String storeAttachment(String database, String docId, 406 | String attachmentName, String contentType, InputStream input) { 407 | // FIXME add test 408 | return null; 409 | } 410 | 411 | public InputStream getLocalAttachment(String databsae, String docId, 412 | String attachmentName) { 413 | // FIXME add test 414 | return null; 415 | } 416 | 417 | public String storeLocalAttachment(String database, String docId, 418 | String attachmentName, String contentType, InputStream input) { 419 | // FIXME add test 420 | return null; 421 | } 422 | 423 | @Override 424 | public Map getStats() { 425 | Map stats = new HashMap(); 426 | 427 | Map bulkDocsStats = new HashMap(); 428 | bulkDocsStats.put("activeCount", activeBulkDocsRequests.count()); 429 | bulkDocsStats.put("totalCount", meanBulkDocsRequests.count()); 430 | bulkDocsStats.put("totalTime", meanBulkDocsRequests.sum()); 431 | bulkDocsStats.put("avgTime", meanBulkDocsRequests.mean()); 432 | 433 | Map revsDiffStats = new HashMap(); 434 | revsDiffStats.put("activeCount", activeRevsDiffRequests.count()); 435 | revsDiffStats.put("totalCount", meanRevsDiffRequests.count()); 436 | revsDiffStats.put("totalTime", meanRevsDiffRequests.sum()); 437 | 
revsDiffStats.put("avgTime", meanRevsDiffRequests.mean()); 438 | 439 | stats.put("_bulk_docs", bulkDocsStats); 440 | stats.put("_revs_diff", revsDiffStats); 441 | // stats.put("tooManyConcurrentRequestsErrors", totalTooManyConcurrentRequestsErrors.count()); 442 | 443 | return stats; 444 | } 445 | 446 | public String getVBucketUUID(String pool, String bucket, int vbucket) { 447 | if("default".equals(bucket)) { 448 | return "00000000000000000000000000000000"; 449 | } 450 | return null; 451 | } 452 | 453 | public String getBucketUUID(String pool, String bucket) { 454 | if("default".equals(bucket)) { 455 | return "00000000000000000000000000000000"; 456 | } 457 | return null; 458 | } 459 | 460 | public Object jsonMapPath(Map json, String path) { 461 | int dotIndex = path.indexOf('.'); 462 | if (dotIndex >= 0) { 463 | String pathThisLevel = path.substring(0,dotIndex); 464 | Object current = json.get(pathThisLevel); 465 | String pathRest = path.substring(dotIndex+1); 466 | if (pathRest.length() == 0) { 467 | return current; 468 | } 469 | else if(current instanceof Map && pathRest.length() > 0) { 470 | return jsonMapPath((Map)current, pathRest); 471 | } 472 | } else { 473 | // no dot 474 | Object current = json.get(path); 475 | return current; 476 | } 477 | return null; 478 | } 479 | 480 | boolean addDoc(SolrInputDocument doc, SolrQueryRequest req) { 481 | boolean success = false; 482 | try { 483 | AddUpdateCommand command = new AddUpdateCommand(req); 484 | command.solrDoc = doc; 485 | SolrQueryResponse rsp = new SolrQueryResponse(); 486 | UpdateRequestProcessor processor = handler.getProcessorChain().createProcessor(req, rsp); 487 | processor.processAdd(command); 488 | success = true; 489 | } catch (Exception e) { 490 | LOG.warn("Error creating document : " + doc, e); 491 | } 492 | return success; 493 | } 494 | 495 | public boolean deleteDoc(Object id, SolrQueryRequest req) { 496 | boolean success = false; 497 | try { 498 | LOG.info("Deleting document:" + id); 499 | 
DeleteUpdateCommand delCmd = new DeleteUpdateCommand(req); 500 | delCmd.setId(id.toString()); 501 | SolrQueryResponse rsp = new SolrQueryResponse(); 502 | UpdateRequestProcessor processor = handler.getProcessorChain().createProcessor(req, rsp); 503 | processor.processDelete(delCmd); 504 | success = true; 505 | } catch (IOException e) { 506 | LOG.error("Exception while deleting doc:" + id, e); 507 | } 508 | return success; 509 | } 510 | 511 | public void commit(SolrQueryRequest req) { 512 | try { 513 | CommitUpdateCommand commit = new CommitUpdateCommand(req,false); 514 | SolrQueryResponse rsp = new SolrQueryResponse(); 515 | UpdateRequestProcessor processor = handler.getProcessorChain().createProcessor(req, rsp); 516 | processor.processCommit(commit); 517 | } catch (Exception e) { 518 | LOG.error("Exception while solr commit.", e); 519 | } 520 | } 521 | } 522 | -------------------------------------------------------------------------------- /src/main/java/org/apache/solr/couchbase/SolrCouchbaseBehaviour.java: -------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | 3 | import java.util.ArrayList; 4 | import java.util.HashMap; 5 | import java.util.List; 6 | import java.util.Map; 7 | 8 | import org.apache.solr.common.cloud.ZkNodeProps; 9 | 10 | import com.couchbase.capi.CouchbaseBehavior; 11 | 12 | public class SolrCouchbaseBehaviour implements CouchbaseBehavior{ 13 | 14 | CouchbaseRequestHandler handler; 15 | String poolUUID; 16 | 17 | public SolrCouchbaseBehaviour(CouchbaseRequestHandler handler) { 18 | this.handler = handler; 19 | poolUUID = Utils.randomID(); 20 | } 21 | 22 | public List getPools() { 23 | List result = new ArrayList(); 24 | result.add("default"); 25 | return result; 26 | } 27 | 28 | public String getPoolUUID(String pool) { 29 | return poolUUID; 30 | } 31 | 32 | public Map getPoolDetails(String pool) { 33 | Map bucket = new HashMap(); 34 | bucket.put("uri", "/pools/" + pool + 
"/buckets?uuid=" + getPoolUUID(pool)); 35 | 36 | Map responseMap = new HashMap(); 37 | responseMap.put("buckets", bucket); 38 | 39 | List nodes = getNodesServingPool(pool); 40 | responseMap.put("nodes", nodes); 41 | 42 | return responseMap; 43 | } 44 | 45 | public List getBucketsInPool(String pool) { 46 | return new ArrayList(handler.getBuckets().keySet()); 47 | } 48 | 49 | public String getBucketUUID(String pool, String bucket) { 50 | if(handler.getBucket(bucket) != null) { 51 | return "00000000000000000000000000000000"; 52 | } 53 | return null; 54 | } 55 | 56 | public List getNodesServingPool(String pool) { 57 | List nodes = null; 58 | if("default".equals(pool)) { 59 | nodes = new ArrayList(); 60 | if(handler.getZkClient() != null) { 61 | Map capiserversProps = handler.getCollectionsLeaders(); 62 | for(Map.Entry entry : capiserversProps.entrySet()) { 63 | String host = entry.getValue().getStr("host"); 64 | int port = entry.getValue().getInt("port", 9999); 65 | 66 | Map nodePorts = new HashMap(); 67 | nodePorts.put("direct", port); 68 | 69 | Map node = new HashMap(); 70 | node.put("couchApiBase", 71 | String.format("http://%s:%s/", host, port)); 72 | node.put("hostname", host + ":" + port); 73 | node.put("ports", nodePorts); 74 | 75 | nodes.add(node); 76 | } 77 | } else { 78 | Map nodePorts = new HashMap(); 79 | nodePorts.put("direct", handler.getPort()); 80 | 81 | Map node = new HashMap(); 82 | node.put("couchApiBase", 83 | String.format("http://%s:%s/", handler.getHost(), handler.getPort())); 84 | node.put("hostname", handler.getHost() + ":" + handler.getPort()); 85 | node.put("ports", nodePorts); 86 | 87 | nodes.add(node); 88 | } 89 | } 90 | 91 | return nodes; 92 | } 93 | 94 | @Override 95 | public Map getStats() { 96 | return new HashMap(); 97 | } 98 | 99 | } 100 | -------------------------------------------------------------------------------- /src/main/java/org/apache/solr/couchbase/SolrUtils.java: 
-------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | 3 | import java.util.Arrays; 4 | import java.util.Date; 5 | import java.util.HashMap; 6 | import java.util.HashSet; 7 | import java.util.List; 8 | import java.util.Map; 9 | import java.util.Set; 10 | 11 | public class SolrUtils { 12 | 13 | /** 14 | * Set of suffixes being recognixed by the mapping methods 15 | */ 16 | private static final Set suffixes = new HashSet(Arrays.asList("_b", "_d", "_dt", "_f", "_i", "_l", "_txt", "_s", "_ss")); 17 | 18 | public static Set getSuffixes() { 19 | return suffixes; 20 | } 21 | /** 22 | * Method mapping field names to Solr dynamic fields based on the value type 23 | * @param jsonMap A Map representing JSON document 24 | * @param fieldmapping A map of name -> field_mapping entries. Field mapping stucture is :. 25 | */ 26 | public static Map mapToSolrDynamicFields(Map jsonMap) { 27 | Map solrMapping = new HashMap(); 28 | for(Map.Entry entry : jsonMap.entrySet()) { 29 | String key = entry.getKey(); 30 | int keyLength = key.length(); 31 | 32 | if(keyLength == 0) { 33 | continue; 34 | } 35 | if(keyLength < 3) { //key too short to have dynamic mapping, don't check it. 
36 | } else if(keyLength == 3) { 37 | if(suffixes.contains(key.substring(key.length()-2, key.length()))) { //one-letter key 38 | continue; 39 | } 40 | } else if(keyLength > 3) { 41 | if(suffixes.contains(key.substring(key.length()-2, key.length())) 42 | || SolrUtils.getSuffixes().contains(key.substring(key.length()-3, key.length())) 43 | || SolrUtils.getSuffixes().contains(key.substring(key.length()-4, key.length()))) {//other length keys 44 | continue; 45 | } 46 | } 47 | Object value = entry.getValue(); 48 | String suffix = getSuffixFromObject(value); 49 | if(suffix.equals("_map")) { 50 | solrMapping.putAll(mapToSolrDynamicFields((Map) value)); 51 | } else { 52 | solrMapping.put(entry.getKey(), entry.getKey() + suffix); 53 | } 54 | } 55 | return solrMapping; 56 | } 57 | 58 | /** 59 | * Returns corresponding Solr dynamic field suffix for the passed Object 60 | * @param o 61 | * @return corresponding Solr dynamic field suffix 62 | */ 63 | public static String getSuffixFromObject(Object o) { 64 | String suffix = "_txt"; 65 | if(o instanceof String) { 66 | suffix = "_s"; 67 | } else if(o instanceof Integer) { 68 | suffix = "_i"; 69 | } else if(o instanceof Long) { 70 | suffix = "_l"; 71 | } else if(o instanceof Boolean) { 72 | suffix = "_b"; 73 | } else if(o instanceof Float) { 74 | suffix = "_f"; 75 | } else if(o instanceof Double) { 76 | suffix = "_d"; 77 | } else if(o instanceof Date) { 78 | suffix = "_dt"; 79 | } else if(o instanceof List) { 80 | if(o != null && ((List)o).size() > 0) 81 | suffix = getSuffixFromObject(((List) o).get(0)); 82 | } else if(o instanceof Map) { 83 | suffix = "_map"; 84 | } 85 | return suffix; 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/main/java/org/apache/solr/couchbase/TypeSelector.java: -------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | 3 | 4 | public interface TypeSelector { 5 | void configure(Settings 
settings); 6 | String getType(String index, String docId); 7 | } 8 | -------------------------------------------------------------------------------- /src/main/java/org/apache/solr/couchbase/Utils.java: -------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | 3 | import java.util.UUID; 4 | 5 | public class Utils { 6 | 7 | public static String randomID() { 8 | return UUID.randomUUID().toString().replaceAll("-", ""); 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/test/java/org/apache/solr/couchbase/SolrUtilsTest.java: -------------------------------------------------------------------------------- 1 | package org.apache.solr.couchbase; 2 | import java.util.Calendar; 3 | import java.util.Date; 4 | import java.util.HashMap; 5 | import java.util.Map; 6 | 7 | import junit.framework.TestCase; 8 | 9 | import org.junit.Test; 10 | 11 | public class SolrUtilsTest extends TestCase { 12 | 13 | @Test 14 | public void testMappingToDynamicFields() { 15 | 16 | Map map = new HashMap(); 17 | map.put("last_name", "Smith"); 18 | map.put("age", 30); 19 | map.put("value", 4356L); 20 | map.put("shoe", 8.5f); 21 | map.put("height", 5.6); 22 | map.put("weight", null); 23 | map.put("married", true); 24 | Date time = Calendar.getInstance().getTime(); 25 | map.put("birth_date", time); 26 | Map object = new HashMap(); 27 | object.put("foo", "bar"); 28 | object.put("bar", "baz"); 29 | map.put("object", object); 30 | map.put("correct_s", "correct"); 31 | map.put("correct_dt", time); 32 | map.put("correct_txt", "correct long text"); 33 | map.put("a", "text"); 34 | map.put("", "text"); 35 | 36 | Map mapped = SolrUtils.mapToSolrDynamicFields(map); 37 | assertEquals("last_name_s", mapped.get("last_name")); 38 | assertEquals("age_i", mapped.get("age")); 39 | assertEquals("value_l", mapped.get("value")); 40 | assertEquals("shoe_f", mapped.get("shoe")); 41 | 
assertEquals("height_d", mapped.get("height"));
// A null value defaults to the catch-all "_txt" suffix.
assertEquals("weight_txt", mapped.get("weight"));
assertEquals("married_b", mapped.get("married"));
assertEquals("birth_date_dt", mapped.get("birth_date"));
// The nested "object" map is flattened: its inner keys appear directly.
assertEquals("foo_s", mapped.get("foo"));
assertEquals("bar_s", mapped.get("bar"));
// Keys already carrying a recognized suffix are excluded from the result.
// assertNull is the idiomatic TestCase form of assertEquals(null, ...).
assertNull(mapped.get("correct_s"));
assertNull(mapped.get("correct_dt"));
assertNull(mapped.get("correct_txt"));
assertEquals("a_s", mapped.get("a"));
// The empty key is ignored entirely.
assertNull(mapped.get(""));
}
}
--------------------------------------------------------------------------------
/zk-conf/zoo.cfg:
--------------------------------------------------------------------------------
tickTime=2000
dataDir=/var/lib/zookeeper
clientPort=2181
--------------------------------------------------------------------------------