├── LICENSE.txt ├── README ├── pom.xml └── src ├── main ├── fmpp │ └── org │ │ └── apache │ │ └── hadoop │ │ └── hbase │ │ └── regionserver │ │ └── idx │ │ └── support │ │ └── arrays │ │ └── ArrayList.java ├── java │ └── org │ │ └── apache │ │ └── hadoop │ │ └── hbase │ │ ├── JmxHelper.java │ │ ├── WritableHelper.java │ │ ├── client │ │ └── idx │ │ │ ├── IdxColumnDescriptor.java │ │ │ ├── IdxIndexDescriptor.java │ │ │ ├── IdxQualifierType.java │ │ │ ├── IdxScan.java │ │ │ ├── exp │ │ │ ├── And.java │ │ │ ├── Comparison.java │ │ │ ├── Compound.java │ │ │ ├── Expression.java │ │ │ └── Or.java │ │ │ └── package.html │ │ └── regionserver │ │ ├── CompleteIndex.java │ │ ├── CompleteIndexBuilder.java │ │ ├── EmptyIndex.java │ │ ├── IdxExpressionEvaluator.java │ │ ├── IdxIndex.java │ │ ├── IdxRegion.java │ │ ├── IdxRegionIndexManager.java │ │ ├── IdxRegionMBean.java │ │ ├── IdxRegionMBeanImpl.java │ │ ├── IdxSearchContext.java │ │ └── idx │ │ └── support │ │ ├── Bits.java │ │ ├── Callback.java │ │ ├── IdxClassSize.java │ │ ├── arrays │ │ ├── BinarySearch.java │ │ ├── List.java │ │ └── ObjectArrayList.java │ │ └── sets │ │ ├── BitSet.java │ │ ├── IntSet.java │ │ ├── IntSetBase.java │ │ ├── IntSetBuilder.java │ │ └── SparseBitSet.java └── resources │ └── fmpp │ └── types.csv └── test ├── fmpp └── org │ └── apache │ └── hadoop │ └── hbase │ └── regionserver │ └── idx │ └── support │ └── arrays │ └── TestArrayList.java ├── java └── org │ └── apache │ └── hadoop │ └── hbase │ ├── TestIdxHBaseCluster.java │ ├── TestIdxMasterAdmin.java │ ├── TestWritableHelper.java │ ├── client │ └── idx │ │ ├── TestIdxColumnDescriptor.java │ │ ├── TestIdxIndexDescriptor.java │ │ ├── TestIdxScan.java │ │ └── exp │ │ ├── TestComparison.java │ │ └── TestExpression.java │ └── regionserver │ ├── HeapSizeEstimator.java │ ├── TestCompleteIndex.java │ ├── TestHRegionWithIdxRegion.java │ ├── TestHRegionWithIdxRegionNoIndexes.java │ ├── TestIdxExpressionEvaluator.java │ ├── TestIdxRegion.java │ ├── 
TestIdxRegionIndexManager.java │ ├── TestIdxRegionMBeanImpl.java │ ├── TestIdxRegionPerformance.java │ └── idx │ └── support │ ├── TestBits.java │ ├── TestIdxClassSize.java │ └── sets │ ├── IntSetBaseTestCase.java │ ├── TestBitSet.java │ └── TestSparseBitSet.java └── resources └── log4j.properties /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | IHbase: 2 | --------- 3 | http://github.com/ykulbak/ihbase 4 | 5 | April 2010 6 | 7 | Released under Apache License 2.0. 8 | 9 | Previously release under HBase as a contrib. For usage information please refer to the github project wiki. 
10 | 11 | To build: 12 | --------- 13 | - Download or checkout hbase 0.20.5 and build it (in case it's not already built) 14 | - Use maven to build, make sure you append -Dhbase.basedir= to any maven command you issue 15 | - In case of any issue please email yoram.kulbak@gmail.com 16 | 17 | Owners: 18 | ------------ 19 | Yoram Kulbak 20 | Dan Washusen 21 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/JmxHelper.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase; 21 | 22 | import org.apache.log4j.Logger; 23 | 24 | import javax.management.InstanceAlreadyExistsException; 25 | import javax.management.InstanceNotFoundException; 26 | import javax.management.MBeanRegistrationException; 27 | import javax.management.MBeanServer; 28 | import javax.management.NotCompliantMBeanException; 29 | import javax.management.ObjectName; 30 | import java.lang.management.ManagementFactory; 31 | 32 | /** 33 | * Utilities for JMX. 
34 | */ 35 | public final class JmxHelper { 36 | static final Logger LOG = Logger.getLogger(JmxHelper.class); 37 | 38 | 39 | private JmxHelper() { 40 | // private constuctor for utility classes. 41 | } 42 | 43 | /** 44 | * Registers an MBean with the platform MBean server. if an MBean with the 45 | * same name exists it will be unregistered and the provided MBean would 46 | * replace it 47 | * 48 | * @param objectName the object name 49 | * @param mbean the mbean class 50 | */ 51 | public static void registerMBean(ObjectName objectName, Object mbean) { 52 | final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 53 | if (mbs.isRegistered(objectName)) { 54 | try { 55 | LOG.info("unregister: " + objectName); 56 | mbs.unregisterMBean(objectName); 57 | } catch (InstanceNotFoundException e) { 58 | throw new IllegalStateException("mbean " + objectName + 59 | " failed unregistration", e); 60 | } catch (MBeanRegistrationException e) { 61 | throw new IllegalStateException("mbean " + objectName + 62 | " failed unregistration", e); 63 | } 64 | } 65 | try { 66 | LOG.info("register: " + objectName); 67 | mbs.registerMBean(mbean, objectName); 68 | } catch (InstanceAlreadyExistsException e) { 69 | throw new IllegalStateException("mbean " + objectName + 70 | " failed registration", e); 71 | } catch (MBeanRegistrationException e) { 72 | throw new IllegalStateException("mbean " + objectName + 73 | " failed registration", e); 74 | } catch (NotCompliantMBeanException e) { 75 | throw new IllegalStateException("mbean " + objectName + 76 | " failed registration", e); 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/WritableHelper.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license 
agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase; 21 | 22 | import org.apache.hadoop.hbase.util.Bytes; 23 | import org.apache.hadoop.io.Writable; 24 | 25 | import java.io.DataInput; 26 | import java.io.DataOutput; 27 | import java.io.IOException; 28 | 29 | /** 30 | * A collection of writable utils. 31 | */ 32 | public class WritableHelper { 33 | private WritableHelper() { 34 | } 35 | 36 | /** 37 | * Helper method to instantiate an expression instance using the provided 38 | * className. 39 | * 40 | * @param className the class name 41 | * @param baseClass the base class type (the class must be or inherit from 42 | * this type) 43 | * @return the instance 44 | */ 45 | @SuppressWarnings("unchecked") 46 | public static T instanceForName(String className, Class baseClass) { 47 | try { 48 | Class clazz = (Class) Class.forName(className); 49 | return clazz.newInstance(); 50 | } catch (Exception e) { 51 | throw new IllegalArgumentException("Can't find or instantiate class " + className, e); 52 | } 53 | } 54 | 55 | /** 56 | * Reads an instance of provided clazz (or one of it's subclasses) from the 57 | * provided data input. 
58 | * 59 | * @param in the data into 60 | * @param clazz the class that the instance will be or extend from 61 | * @param the type 62 | * @return the instance 63 | * @throws IOException if an io error occurs 64 | */ 65 | public static T readInstance(DataInput in, Class clazz) throws IOException { 66 | String className = Bytes.toString(Bytes.readByteArray(in)); 67 | T instance = instanceForName(className, clazz); 68 | instance.readFields(in); 69 | return instance; 70 | } 71 | 72 | /** 73 | * Reads an instance of provided clazz (or one of it's subclasses) from the 74 | * provided data input. 75 | *

76 | *

Note: It's assumed that the {@link #writeInstanceNullable(java.io.DataOutput, 77 | * org.apache.hadoop.io.Writable)} method was used to write out the instance. 78 | * 79 | * @param in the data into 80 | * @param clazz the class that the instance will be or extend from 81 | * @param the type 82 | * @return the instance (or null) 83 | * @throws IOException if an io error occurs 84 | */ 85 | public static T readInstanceNullable(DataInput in, Class clazz) throws IOException { 86 | if (in.readBoolean()) { 87 | return readInstance(in, clazz); 88 | } else { 89 | return null; 90 | } 91 | } 92 | 93 | /** 94 | * Writes out the provided writable instance to the data outout. 95 | * 96 | * @param out the data output 97 | * @param writable the writable isntance (must not be null) 98 | * @throws IOException if an io error occurs 99 | */ 100 | public static void writeInstance(DataOutput out, Writable writable) throws IOException { 101 | if (writable == null) { 102 | throw new IllegalArgumentException("The writable instance must not be null"); 103 | } 104 | Bytes.writeByteArray(out, Bytes.toBytes(writable.getClass().getName())); 105 | writable.write(out); 106 | } 107 | 108 | /** 109 | * Writes out the provided writable instance to the data outout. 
110 | * 111 | * @param out the data output 112 | * @param writable the writable isntance (can be null) 113 | * @throws IOException if an io error occurs 114 | */ 115 | public static void writeInstanceNullable(DataOutput out, Writable writable) throws IOException { 116 | if (writable == null) { 117 | out.writeBoolean(false); 118 | } else { 119 | out.writeBoolean(true); 120 | writeInstance(out, writable); 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/client/idx/IdxIndexDescriptor.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.client.idx; 21 | 22 | import org.apache.hadoop.hbase.util.Bytes; 23 | import org.apache.hadoop.io.VersionedWritable; 24 | import org.apache.hadoop.io.WritableUtils; 25 | 26 | import java.io.DataInput; 27 | import java.io.DataOutput; 28 | import java.io.IOException; 29 | import java.util.Arrays; 30 | 31 | /** 32 | * The description of an indexed column family qualifier. 33 | *

34 | * The description is composed of the following properties: 35 | *

    36 | *
  1. The qualifier name - specified which qualifier to index. The values 37 | * stored to this qualifier will serve as index keys. 38 | *
  2. The qualifier type - type information for the qualifier. The type 39 | * information allows for custom ordering of index keys (which are qualifier 40 | * values) which may come handy when range queries are executed. 41 | *
  3. offset - combine this property with the length property to allow partial 42 | * value extraction. Useful for keeping the index size small while for qualifiers 43 | * with large values. the offset specifies the starting point in the value from 44 | * which to extract the index key 45 | *
  4. length - see also offset's description, the length property allows 46 | * to limit the number of bytes extracted to serve as index keys. If the bytes 47 | * are random a length of 1 or 2 bytes would yield very good results. 48 | *
49 | *

50 | */ 51 | public class IdxIndexDescriptor extends VersionedWritable { 52 | 53 | private static final byte VERSION = 1; 54 | 55 | /** 56 | * Qualifier name; 57 | */ 58 | private byte[] qualifierName; 59 | 60 | /** 61 | * The qualifier type - affects the translation of bytes into indexed 62 | * properties. 63 | */ 64 | private IdxQualifierType qualifierType; 65 | 66 | /** 67 | * Where to grab the column qualifier's value from. The default is from 68 | * its first byte. 69 | */ 70 | private int offset = 0; 71 | 72 | /** 73 | * Up-to where to grab the column qualifier's value. The default is 74 | * all of it. A positive number would indicate a set limit. 75 | */ 76 | private int length = -1; 77 | 78 | /** 79 | * Empty constructor to support the writable interface - DO NOT USE. 80 | */ 81 | public IdxIndexDescriptor() { 82 | } 83 | 84 | /** 85 | * Construct a new index descriptor. 86 | * 87 | * @param qualifierName the qualifier name 88 | * @param qualifierType the qualifier type 89 | */ 90 | public IdxIndexDescriptor(byte[] qualifierName, 91 | IdxQualifierType qualifierType) { 92 | this.qualifierName = qualifierName; 93 | this.qualifierType = qualifierType; 94 | } 95 | 96 | /** 97 | * Construct a new index descriptor. 98 | * 99 | * @param qualifierName the qualifier name 100 | * @param qualifierType the qualifier type 101 | * @param offset the offset (from kv value start) from which to extract the 102 | * index key 103 | * @param length the length to extract (everything by default) 104 | */ 105 | public IdxIndexDescriptor(byte[] qualifierName, IdxQualifierType qualifierType, 106 | int offset, int length) { 107 | this(qualifierName, qualifierType); 108 | this.offset = offset; 109 | this.length = length; 110 | } 111 | 112 | /** 113 | * The column family qualifier name. 114 | * 115 | * @return column family qualifier name 116 | */ 117 | public byte[] getQualifierName() { 118 | return qualifierName; 119 | } 120 | 121 | /** 122 | * The column family qualifier name. 
123 | * 124 | * @param qualifierName column family qualifier name 125 | */ 126 | public void setQualifierName(byte[] qualifierName) { 127 | this.qualifierName = qualifierName; 128 | } 129 | 130 | /** 131 | * The data type that the column family qualifier contains. 132 | * 133 | * @return data type that the column family qualifier contains 134 | */ 135 | public IdxQualifierType getQualifierType() { 136 | return qualifierType; 137 | } 138 | 139 | /** 140 | * The data type that the column family qualifier contains. 141 | * 142 | * @param qualifierType data type that the column family qualifier contains 143 | */ 144 | public void setQualifierType(IdxQualifierType qualifierType) { 145 | this.qualifierType = qualifierType; 146 | } 147 | 148 | /** 149 | * The offset from which to extract the values. 150 | * 151 | * @return the current offset value. 152 | */ 153 | public int getOffset() { 154 | return offset; 155 | } 156 | 157 | /** 158 | * Sets the offset 159 | * 160 | * @param offset the offset from which to extract the values. 161 | */ 162 | public void setOffset(int offset) { 163 | this.offset = offset; 164 | } 165 | 166 | /** 167 | * The length of the block extracted from the qualifier's value. 168 | * 169 | * @return the length of the extracted value 170 | */ 171 | public int getLength() { 172 | return length; 173 | } 174 | 175 | /** 176 | * The length of the extracted value. 177 | * 178 | * @param length the length of the extracted value. 
179 | */ 180 | public void setLength(int length) { 181 | this.length = length; 182 | } 183 | 184 | /** 185 | * {@inheritDoc} 186 | */ 187 | @Override 188 | public void write(DataOutput dataOutput) throws IOException { 189 | super.write(dataOutput); 190 | Bytes.writeByteArray(dataOutput, qualifierName); 191 | WritableUtils.writeEnum(dataOutput, qualifierType); 192 | dataOutput.writeInt(offset); 193 | dataOutput.writeInt(length); 194 | } 195 | 196 | /** 197 | * {@inheritDoc} 198 | */ 199 | @Override 200 | public void readFields(DataInput dataInput) throws IOException { 201 | super.readFields(dataInput); 202 | qualifierName = Bytes.readByteArray(dataInput); 203 | qualifierType = WritableUtils.readEnum(dataInput, IdxQualifierType.class); 204 | this.offset = dataInput.readInt(); 205 | this.length = dataInput.readInt(); 206 | } 207 | 208 | /** 209 | * {@inheritDoc} 210 | */ 211 | @Override 212 | public byte getVersion() { 213 | return VERSION; 214 | } 215 | 216 | /** 217 | * {@inheritDoc} 218 | */ 219 | @Override 220 | public boolean equals(Object o) { 221 | if (this == o) return true; 222 | if (o == null || getClass() != o.getClass()) return false; 223 | 224 | IdxIndexDescriptor that = (IdxIndexDescriptor) o; 225 | 226 | if (!Arrays.equals(qualifierName, that.qualifierName)) return false; 227 | 228 | if (this.qualifierType != that.qualifierType) return false; 229 | 230 | if (this.offset != that.offset) return false; 231 | 232 | if (this.length != that.length) return false; 233 | 234 | return true; 235 | } 236 | 237 | /** 238 | * {@inheritDoc} 239 | */ 240 | @Override 241 | public int hashCode() { 242 | return Arrays.hashCode(qualifierName); 243 | } 244 | 245 | /** 246 | * {@inheritDoc} 247 | */ 248 | @Override 249 | public String toString() { 250 | StringBuffer s = new StringBuffer(); 251 | s.append('{'); 252 | s.append("QUALIFIER"); 253 | s.append(" => '"); 254 | s.append(Bytes.toString(qualifierName)); 255 | s.append("',"); 256 | s.append("TYPE"); 257 | s.append(" => 
/**
 * Indicates the data type contained in the column family qualifier.
 * This type affects index construction and value ordering in the index.
 */
public enum IdxQualifierType {
  /**
   * Values qualified by this qualifier are bytes.
   * Each entry is a byte array of size 1 which should be treated as a
   * numerical byte.
   */
  BYTE,
  /**
   * Values qualified by this qualifier are characters.
   * Each entry is a byte array of size 2 which should be treated as
   * a character.
   */
  CHAR,
  /**
   * Values qualified by this qualifier are short integers.
   * Each entry is a byte array of size 2 which should be treated as
   * a short integer.
   */
  SHORT,
  /**
   * Values qualified by this qualifier are integers.
   * Each entry is a byte array of size 4 which should be treated as
   * an integer.
   */
  INT,
  /**
   * Values qualified by this qualifier are long integers.
   * Each entry is a byte array of size 8 which should be treated as
   * a long integer.
   */
  LONG,
  /**
   * Values qualified by this qualifier are floats.
   * Each entry is a byte array of size 4 which should be treated as
   * a float.
   */
  FLOAT,
  /**
   * Values qualified by this qualifier are doubles.
   * Each entry is a byte array of size 8 which should be treated as
   * a double.
   */
  DOUBLE,
  /**
   * Values qualified by this qualifier are big decimals.
   * Each entry is a byte array of variable size which should be treated as
   * a big decimal. See also conversion methods in
   * {@link org.apache.hadoop.hbase.util.Bytes}
   */
  BIG_DECIMAL,
  /**
   * Values qualified by this qualifier are byte arrays.
   * Each entry is a byte array of variable size which should be compared
   * based on the byte array's bytes numerical order.
   */
  BYTE_ARRAY,
  /**
   * Values qualified by this qualifier are character arrays.
   * Each entry is a byte array of variable size which should be compared
   * based on the char array's characters lexicographical order.
   */
  CHAR_ARRAY,
}
See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.client.idx; 21 | 22 | import org.apache.hadoop.hbase.WritableHelper; 23 | import org.apache.hadoop.hbase.client.Scan; 24 | import org.apache.hadoop.hbase.client.idx.exp.Expression; 25 | import org.apache.hadoop.hbase.filter.Filter; 26 | import org.apache.hadoop.hbase.io.ImmutableBytesWritable; 27 | import org.apache.hadoop.hbase.util.Bytes; 28 | import org.apache.hadoop.io.DataInputBuffer; 29 | import org.apache.hadoop.io.DataOutputBuffer; 30 | 31 | import java.io.DataInput; 32 | import java.io.DataOutput; 33 | import java.io.IOException; 34 | import java.util.Map; 35 | 36 | /** 37 | * Extends the {@link Scan} class to provide an {@link Expression} 38 | * that is used to quickly reduce the scope of the scan. 39 | */ 40 | public class IdxScan extends Scan { 41 | /** 42 | * The key used to store and retrieve the scan index expression. 43 | */ 44 | public static final ImmutableBytesWritable EXPRESSION = 45 | new ImmutableBytesWritable(Bytes.toBytes("EXPRESSION")); 46 | 47 | private Expression expression; 48 | 49 | /** 50 | * No-args constructor. 51 | */ 52 | public IdxScan() { 53 | } 54 | 55 | /** 56 | * Constructs a scan. 
57 | * 58 | * @param expression the index expression 59 | */ 60 | public IdxScan(Expression expression) { 61 | this.expression = expression; 62 | } 63 | 64 | /** 65 | * Constructs a scan. 66 | * 67 | * @param startRow row to start scanner at or after (inclusive) 68 | * @param filter the filter that will applied to the scan 69 | * @param expression the index expression 70 | */ 71 | public IdxScan(byte[] startRow, Filter filter, Expression expression) { 72 | super(startRow, filter); 73 | this.expression = expression; 74 | } 75 | 76 | /** 77 | * Constructs a scan. 78 | * 79 | * @param startRow row to start scanner at or after (inclusive) 80 | * @param expression the index expression 81 | */ 82 | public IdxScan(byte[] startRow, Expression expression) { 83 | super(startRow); 84 | this.expression = expression; 85 | } 86 | 87 | /** 88 | * Constructs a scan. 89 | * 90 | * @param startRow row to start scanner at or after (inclusive) 91 | * @param stopRow row to stop scanner before (exclusive) 92 | * @param expression the index expression 93 | */ 94 | public IdxScan(byte[] startRow, byte[] stopRow, Expression expression) { 95 | super(startRow, stopRow); 96 | this.expression = expression; 97 | } 98 | 99 | /** 100 | * Constructs a scan from the provided scan with the expression. 101 | * 102 | * @param scan the scan to copy from 103 | * @param expression the index expression 104 | * @throws IOException if thrown by {@link Scan#Scan(org.apache.hadoop.hbase.client.Scan)} 105 | */ 106 | public IdxScan(Scan scan, Expression expression) throws IOException { 107 | super(scan); 108 | this.expression = expression; 109 | } 110 | 111 | /** 112 | * Returns the index expression used by the scan. 113 | * 114 | * @return the index expression 115 | */ 116 | public Expression getExpression() { 117 | return expression; 118 | } 119 | 120 | /** 121 | * Sets the index expression used by the scan. 
122 | * 123 | * @param expression the index expression 124 | */ 125 | public void setExpression(Expression expression) { 126 | this.expression = expression; 127 | } 128 | 129 | /** 130 | * Scanning all versions is not currently supported. 131 | * 132 | * @return never 133 | * @throws IllegalStateException if this method is called 134 | */ 135 | @Override 136 | public Scan setMaxVersions() { 137 | throw new IllegalStateException("Scanning all versions is not currently supported."); 138 | } 139 | 140 | /** 141 | * Scanning all versions is not currently supported. 142 | * 143 | * @param maxVersions maximum versions for each column 144 | * @return never 145 | * @throws IllegalStateException if this method is called 146 | */ 147 | @Override 148 | public Scan setMaxVersions(int maxVersions) { 149 | throw new IllegalStateException("Scanning all versions is not currently supported."); 150 | } 151 | 152 | /** 153 | * {@inheritDoc}. 154 | *

155 | * Also writes the optional {@link #getExpression()}. 156 | */ 157 | @Override 158 | public void write(DataOutput out) throws IOException { 159 | if (expression != null) { 160 | values.put(EXPRESSION, writeExpression(expression)); 161 | } else { 162 | values.remove(EXPRESSION); 163 | } 164 | super.write(out); 165 | } 166 | 167 | private static ImmutableBytesWritable writeExpression(Expression expression) throws IOException { 168 | DataOutputBuffer out = new DataOutputBuffer(); 169 | 170 | WritableHelper.writeInstanceNullable(out, expression); 171 | 172 | return new ImmutableBytesWritable(out.getData()); 173 | } 174 | 175 | /** 176 | * {@inheritDoc}. 177 | *

178 | * Also reads the optional {@link #getExpression()}. 179 | */ 180 | @Override 181 | public void readFields(DataInput in) throws IOException { 182 | super.readFields(in); 183 | this.expression = getExpression(this); 184 | } 185 | 186 | public static Expression getExpression(Scan scan) throws IOException { 187 | if (scan instanceof IdxScan && ((IdxScan) scan).getExpression() != null) { 188 | return ((IdxScan) scan).getExpression(); 189 | } 190 | 191 | Map values = scan.getValues(); 192 | if (values.containsKey(EXPRESSION)) { 193 | DataInputBuffer in = new DataInputBuffer(); 194 | byte[] bytes = values.get(EXPRESSION).get(); 195 | in.reset(bytes, bytes.length); 196 | 197 | return WritableHelper.readInstanceNullable(in, Expression.class); 198 | } else { 199 | return null; 200 | } 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/client/idx/exp/And.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.client.idx.exp; 21 | 22 | import java.util.Collection; 23 | 24 | /** 25 | * This class implements boolean AND - all sub-expressions must be true in order 26 | * for it to be true. 27 | */ 28 | public class And extends Compound { 29 | /** 30 | * Internal constructor. 31 | */ 32 | public And() { 33 | super(); 34 | } 35 | 36 | /** 37 | * Constructs an and expression with provided expression. 38 | * 39 | * @param expressions the expression 40 | */ 41 | public And(Expression... expressions) { 42 | super(expressions); 43 | } 44 | 45 | /** 46 | * Constructs an and expression with provided expression. 47 | * 48 | * @param expressions the expression 49 | */ 50 | public And(Collection expressions) { 51 | super(expressions); 52 | } 53 | 54 | /** 55 | * Adds the expression to the set of expression. 56 | * 57 | * @param expression the expression 58 | * @return this 59 | * @see Compound#add(Expression) 60 | */ 61 | public And and(Expression expression) { 62 | return (And) super.add(expression); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/client/idx/exp/Comparison.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.client.idx.exp; 21 | 22 | import org.apache.hadoop.hbase.util.Bytes; 23 | 24 | import java.io.DataInput; 25 | import java.io.DataOutput; 26 | import java.io.IOException; 27 | import java.util.Arrays; 28 | 29 | /** 30 | * The comparison expression. 31 | */ 32 | public class Comparison extends Expression { 33 | private byte[] columnName; 34 | private byte[] qualifier; 35 | private Operator operator; 36 | private byte[] value; 37 | private boolean includeMissing = true; 38 | 39 | /** 40 | * No args constructor. 41 | */ 42 | public Comparison() { 43 | } 44 | 45 | /** 46 | * Convenience constrcutor that takes strings and converts from to byte[]. 47 | * 48 | * @param columnName the column name 49 | * @param qualifier the column qualifier 50 | * @param operator the operator 51 | * @param value the value 52 | */ 53 | public Comparison(String columnName, String qualifier, Operator operator, byte[] value) { 54 | this(Bytes.toBytes(columnName), Bytes.toBytes(qualifier), operator, value); 55 | } 56 | 57 | /** 58 | * Convenience constrcutor that takes strings and converts from to byte[]. 
59 | * 60 | * @param columnName the column name 61 | * @param qualifier the column qualifier 62 | * @param operator the operator 63 | * @param value the value 64 | * @param includeMissing include missing ids 65 | */ 66 | public Comparison(String columnName, String qualifier, Operator operator, 67 | byte[] value, boolean includeMissing) { 68 | this(Bytes.toBytes(columnName), Bytes.toBytes(qualifier), operator, 69 | value, includeMissing); 70 | } 71 | 72 | /** 73 | * Partial constructor with all required fields. 74 | * 75 | * @param columnName the column name 76 | * @param qualifier the column qualifier 77 | * @param operator the operator 78 | * @param value the value 79 | */ 80 | public Comparison(byte[] columnName, byte[] qualifier, Operator operator, 81 | byte[] value) { 82 | this(columnName, qualifier, operator, value, true); 83 | } 84 | 85 | /** 86 | * Full constructor with all fields. 87 | * 88 | * @param columnName the column name 89 | * @param qualifier the column qualifier 90 | * @param operator the operator 91 | * @param value the value 92 | * @param includeMissing should the comparison result include ids which are 93 | * missing from the index. Same idea as {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter#filterIfMissing}. 94 | * Default value is true. 
95 | */ 96 | public Comparison(byte[] columnName, byte[] qualifier, Operator operator, 97 | byte[] value, boolean includeMissing) { 98 | assert columnName != null : "The columnName must not be null"; 99 | assert qualifier != null : "The qualifier must not be null"; 100 | assert operator != null : "The operator must not be null"; 101 | assert value != null : "The value must not be null"; 102 | 103 | this.columnName = columnName; 104 | this.qualifier = qualifier; 105 | this.operator = operator; 106 | this.value = value; 107 | this.includeMissing = includeMissing; 108 | } 109 | 110 | /** 111 | * The {@link org.apache.hadoop.hbase.HColumnDescriptor#getName() column 112 | * family name} that the {@link #getQualifier() qualifier} is a member of. 113 | * 114 | * @return the column family name 115 | */ 116 | public byte[] getColumnName() { 117 | return columnName; 118 | } 119 | 120 | /** 121 | * The column qualifier. 122 | * 123 | * @return the qualifier 124 | */ 125 | public byte[] getQualifier() { 126 | return qualifier; 127 | } 128 | 129 | /** 130 | * The operator. 131 | * 132 | * @return the operator 133 | */ 134 | public Operator getOperator() { 135 | return operator; 136 | } 137 | 138 | /** 139 | * The value. 140 | * 141 | * @return the value 142 | */ 143 | public byte[] getValue() { 144 | return value; 145 | } 146 | 147 | /** 148 | * Gets whether to include missing columns or not. 149 | * 150 | * @return true to include missing columns. 
151 | */ 152 | public boolean getIncludeMissing() { 153 | return includeMissing; 154 | } 155 | 156 | /** 157 | * {@inheritDoc} 158 | */ 159 | @Override 160 | public void write(DataOutput dataOutput) throws IOException { 161 | Bytes.writeByteArray(dataOutput, columnName); 162 | Bytes.writeByteArray(dataOutput, qualifier); 163 | Bytes.writeByteArray(dataOutput, Bytes.toBytes(operator.toString())); 164 | Bytes.writeByteArray(dataOutput, value); 165 | } 166 | 167 | /** 168 | * {@inheritDoc} 169 | */ 170 | @Override 171 | public void readFields(DataInput dataInput) throws IOException { 172 | columnName = Bytes.readByteArray(dataInput); 173 | qualifier = Bytes.readByteArray(dataInput); 174 | operator = Operator.valueOf(Bytes.toString(Bytes.readByteArray(dataInput))); 175 | value = Bytes.readByteArray(dataInput); 176 | } 177 | 178 | /** 179 | * {@inheritDoc} 180 | */ 181 | @Override 182 | public boolean equals(Object o) { 183 | if (this == o) return true; 184 | if (o == null || getClass() != o.getClass()) return false; 185 | 186 | Comparison that = (Comparison) o; 187 | 188 | if (!Arrays.equals(columnName, that.columnName)) return false; 189 | if (operator != that.operator) return false; 190 | if (!Arrays.equals(qualifier, that.qualifier)) return false; 191 | if (!Arrays.equals(value, that.value)) return false; 192 | 193 | return true; 194 | } 195 | 196 | /** 197 | * {@inheritDoc} 198 | */ 199 | @Override 200 | public int hashCode() { 201 | int result = Arrays.hashCode(columnName); 202 | result = 31 * result + Arrays.hashCode(qualifier); 203 | result = 31 * result + operator.hashCode(); 204 | result = 31 * result + Arrays.hashCode(value); 205 | return result; 206 | } 207 | 208 | /** 209 | * The enum for specifying the function we're performing in a {@link 210 | * Comparison}. 211 | */ 212 | public enum Operator { 213 | /** 214 | * The equals operator. 215 | */ 216 | EQ, 217 | /** 218 | * The greater than operator. 
219 | */ 220 | GT, 221 | /** 222 | * The greater than or equals operator. 223 | */ 224 | GTE, 225 | /** 226 | * The less than operator. 227 | */ 228 | LT, 229 | /** 230 | * The less than or equals operator. 231 | */ 232 | LTE, 233 | /** 234 | * The not equals operator. 235 | */ 236 | NEQ, 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/client/idx/exp/Compound.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | 21 | package org.apache.hadoop.hbase.client.idx.exp; 22 | 23 | import org.apache.hadoop.hbase.WritableHelper; 24 | 25 | import java.io.DataInput; 26 | import java.io.DataOutput; 27 | import java.io.IOException; 28 | import java.util.Arrays; 29 | import java.util.Collection; 30 | import java.util.HashSet; 31 | import java.util.Set; 32 | 33 | /** 34 | * A compound expression has no built-in tests but aggregates a set of child 35 | * expressions into a logical group such as boolean logic. 
36 | */ 37 | public abstract class Compound extends Expression { 38 | /** 39 | * The set of child expressions. 40 | */ 41 | protected Set children; 42 | 43 | /** 44 | * Class constructor. 45 | * 46 | * @param expressions the expressions to be evaluated 47 | */ 48 | public Compound(Expression... expressions) { 49 | assert expressions != null : "expressions cannot be null or empty"; 50 | this.children = new HashSet(Arrays.asList(expressions)); 51 | } 52 | 53 | /** 54 | * Class constructor. 55 | * 56 | * @param expressions the expressions to be evaluated 57 | */ 58 | public Compound(Collection expressions) { 59 | this.children = new HashSet(expressions); 60 | } 61 | 62 | /** 63 | * Add an expression to the child set. 64 | * 65 | * @param expression the expression to add 66 | * @return this 67 | */ 68 | public Compound add(Expression expression) { 69 | this.children.add(expression); 70 | return this; 71 | } 72 | 73 | /** 74 | * Returns the set of child expressions. 75 | * 76 | * @return the expression set 77 | */ 78 | public Set getChildren() { 79 | return children; 80 | } 81 | 82 | /** 83 | * {@inheritDoc} 84 | */ 85 | @Override 86 | public boolean equals(Object o) { 87 | if (this == o) { 88 | return true; 89 | } 90 | if (o == null || getClass() != o.getClass()) { 91 | return false; 92 | } 93 | 94 | Compound that = (Compound) o; 95 | 96 | if (!children.equals(that.children)) { 97 | return false; 98 | } 99 | 100 | return true; 101 | } 102 | 103 | /** 104 | * {@inheritDoc} 105 | */ 106 | @Override 107 | public int hashCode() { 108 | return children.hashCode(); 109 | } 110 | 111 | /** 112 | * {@inheritDoc} 113 | */ 114 | @Override 115 | public void write(DataOutput dataOutput) throws IOException { 116 | dataOutput.writeInt(children.size()); 117 | for (Expression child : children) { 118 | WritableHelper.writeInstance(dataOutput, child); 119 | } 120 | } 121 | 122 | /** 123 | * {@inheritDoc} 124 | */ 125 | @Override 126 | public void readFields(DataInput dataInput) throws 
IOException { 127 | int size = dataInput.readInt(); 128 | children = new HashSet(size); 129 | for (int i = 0; i < size; i++) { 130 | Expression expression = WritableHelper.readInstance(dataInput, Expression.class); 131 | children.add(expression); 132 | } 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/client/idx/exp/Expression.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.client.idx.exp; 21 | 22 | import org.apache.hadoop.io.Writable; 23 | 24 | /** 25 | * Class representing an expression. 26 | */ 27 | public abstract class Expression implements Writable { 28 | /** 29 | * {@inheritDoc} 30 | */ 31 | public abstract int hashCode(); 32 | 33 | /** 34 | * {@inheritDoc} 35 | */ 36 | public abstract boolean equals(Object o); 37 | 38 | /** 39 | * Creates and returns an {@link Or} instance. 
40 | * 41 | * @param expressions the expressions 42 | * @return an instance 43 | */ 44 | public static Or or(Expression... expressions) { 45 | return new Or(expressions); 46 | } 47 | 48 | /** 49 | * Creates and returns an {@link And} instance. 50 | * 51 | * @param expressions the expressions 52 | * @return an instance 53 | */ 54 | public static And and(Expression... expressions) { 55 | return new And(expressions); 56 | } 57 | 58 | /** 59 | * Creates and returns an {@link Comparison} 60 | * instance. 61 | * 62 | * @param family the column family name 63 | * @param qualifier the qualifier 64 | * @param operator the operator 65 | * @param value the value 66 | * @return the instance 67 | */ 68 | public static Comparison comparison(byte[] family, byte[] qualifier, Comparison.Operator operator, byte[] value) { 69 | return new Comparison(family, qualifier, operator, value); 70 | } 71 | 72 | /** 73 | * Creates and returns an {@link Comparison} 74 | * instance. 75 | * 76 | * @param family the column family name 77 | * @param qualifier the qualifier 78 | * @param operator the operator 79 | * @param value the value 80 | * @param includeMissing include ids missing from the index. 81 | * Same idea as {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter#filterIfMissing}. 82 | * true by default 83 | * @return the instance 84 | */ 85 | public static Comparison comparison(byte[] family, byte[] qualifier, Comparison.Operator operator, byte[] value, boolean includeMissing) { 86 | return new Comparison(family, qualifier, operator, value, includeMissing); 87 | } 88 | 89 | /** 90 | * Creates and returns an {@link Comparison} 91 | * instance. 
92 | * 93 | * @param family the column family name 94 | * @param qualifier the qualifier 95 | * @param operator the operator 96 | * @param value the value 97 | * @return the instance 98 | */ 99 | public static Comparison comparison(String family, String qualifier, Comparison.Operator operator, byte[] value) { 100 | return new Comparison(family, qualifier, operator, value); 101 | } 102 | 103 | /** 104 | * Creates and returns an {@link Comparison} 105 | * instance. 106 | * 107 | * @param family the column family name 108 | * @param qualifier the qualifier 109 | * @param operator the operator 110 | * @param value the value 111 | * @param includeMissing include ids missing from the index. 112 | * Same idea as {@link org.apache.hadoop.hbase.filter.SingleColumnValueFilter#filterIfMissing}. 113 | * true by default 114 | * @return the instance 115 | */ 116 | public static Comparison comparison(String family, String qualifier, Comparison.Operator operator, byte[] value, boolean includeMissing) { 117 | return new Comparison(family, qualifier, operator, value, includeMissing); 118 | } 119 | 120 | } 121 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/client/idx/exp/Or.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.client.idx.exp; 21 | 22 | import java.util.Collection; 23 | 24 | /** 25 | * This class implements boolean OR - any sub-expressions can be true in order 26 | * for it to be true. 27 | */ 28 | public class Or extends Compound { 29 | /** 30 | * Internal constructor. 31 | */ 32 | public Or() { 33 | super(); 34 | } 35 | 36 | /** 37 | * Constructs an or expression with provided expression. 38 | * 39 | * @param expressions the expression 40 | */ 41 | public Or(Expression... expressions) { 42 | super(expressions); 43 | } 44 | 45 | /** 46 | * Constructs an or expression with provided expression. 47 | * 48 | * @param expressions the expression 49 | */ 50 | public Or(Collection expressions) { 51 | super(expressions); 52 | } 53 | 54 | /** 55 | * Adds the expression to the set of expression. 56 | * 57 | * @param expression the expression 58 | * @return this 59 | * @see Compound#add(Expression) 60 | */ 61 | public Or or(Expression expression) { 62 | return (Or) super.add(expression); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/CompleteIndex.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. 
 * See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.BinarySearch;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.List;
import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSet;
import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSetBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;

import static org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSetBuilder.calcHeapSize;

/**
 * A complete index implementation - all keys are put in the keystore, no skips.
 * <p>
 * Range queries (head/tail) are accelerated by precalculated partial unions:
 * every {@code precalcInterval} positions a cumulative set is stored, so a
 * range union touches at most {@code precalcInterval} value-store sets before
 * grabbing a precomputed remainder.
 */
class CompleteIndex implements IdxIndex {
  /**
   * The fixed part in the heap size calcualtion.
   */
  static final long FIXED_SIZE = ClassSize.align(ClassSize.OBJECT +
    5 * ClassSize.REFERENCE + ClassSize.align(3 * ClassSize.ARRAY) +
    Bytes.SIZEOF_LONG + 2 * Bytes.SIZEOF_INT
  );

  /**
   * The capacity of the sets.
   */
  private final int numKeyValues;
  /**
   * The key store - holds the col:qual values.
   */
  private final List keyStore;
  /**
   * The value store - holds sets with {@link #numKeyValues} capacity.
   */
  private final IntSet[] valueStore;
  /**
   * Sets containing partial calculations of the tail operation.
   */
  private final IntSet[] heads;
  /**
   * Sets containing partial calculations of the head operation.
   */
  private final IntSet[] tails;
  /**
   * A set containing all ids matching any key in this index.
   */
  private final IntSet allIds;
  /**
   * The partial calculation interval (used to determine up to which point
   * to use the valueStore before grabbing a pre-calculated set.
   */
  private final int precalcInterval;
  /**
   * The heap size.
   */
  private final long heapSize;

  /**
   * Construct a new complete index.
   *
   * @param keyStore        the key store
   * @param valueStore      the value store
   * @param heads           a list of precalculated heads
   * @param tails           a list of precalculated tails
   * @param allIds          a set containing all ids mathcing any key in this index
   * @param numKeyValues    the total number of KeyValues for this region
   * @param precalcInterval the interval by which tails/heads are precalculated
   */
  CompleteIndex(List keyStore, IntSet[] valueStore,
    IntSet[] heads, IntSet[] tails, IntSet allIds,
    int numKeyValues, int precalcInterval) {
    this.keyStore = keyStore;
    this.valueStore = valueStore;
    this.heads = heads;
    this.tails = tails;
    this.allIds = allIds;
    this.numKeyValues = numKeyValues;
    this.precalcInterval = precalcInterval;
    // Heap size is fixed at construction: all components are immutable.
    heapSize = FIXED_SIZE + keyStore.heapSize() + calcHeapSize(valueStore) +
      calcHeapSize(heads) + calcHeapSize(tails) + allIds.heapSize();
  }

  /**
   * Looks up a key in the index.
   *
   * @param probe the probe to lookup
   * @return the set of results, may be empty
   */
  @Override
  public IntSet lookup(byte[] probe) {
    int index = BinarySearch.search(keyStore, keyStore.size(), probe);
    // Clone so callers may mutate the result without corrupting the index.
    return index >= 0 ? valueStore[index].clone() :
      IntSetBuilder.newEmptyIntSet(numKeyValues);
  }

  /**
   * Find all the results which are greater and perhaps equal to the probe.
   *
   * @param probe     the probe to lookup
   * @param inclusive if greater equal
   * @return a possibly empty set of results
   */
  @Override
  public IntSet tail(byte[] probe, boolean inclusive) {
    // BinarySearch returns the match index, or -(insertionPoint + 1) when
    // the probe is absent.
    int index = BinarySearch.search(keyStore, keyStore.size(), probe);
    // On a miss, or an exclusive hit, start one past the probe position.
    if (index < 0 || !inclusive) {
      index++;
    }
    if (index < 0) {
      index = -index;
    }
    // Start from the nearest precomputed tail at or after `index`, then
    // union in the individual value-store sets between `index` and that
    // boundary. NOTE(review): assumes tails[k] covers key positions
    // >= k * precalcInterval — confirm against CompleteIndexBuilder.
    int tailIndex = index / precalcInterval + 1;
    IntSet result = tailIndex < tails.length ?
      tails[tailIndex].clone() :
      IntSetBuilder.newEmptyIntSet(numKeyValues);
    int stopIndex = Math.min(tailIndex * precalcInterval, valueStore.length);
    for (int i = index; i < stopIndex; i++) {
      result = result.unite(valueStore[i]);
    }
    return result;
  }

  /**
   * Find all the results which are less than and perhaps equal to the probe.
   *
   * @param probe     the probe to lookup
   * @param inclusive if greater equal
   * @return a possibly empty set of results
   */
  @Override
  public IntSet head(byte[] probe, boolean inclusive) {
    int index = BinarySearch.search(keyStore, keyStore.size(), probe);
    // On an inclusive hit, advance so the probe's own position is covered by
    // the half-open range [0, index).
    if (index >= 0 && inclusive) {
      index++;
    }
    // Miss: convert -(insertionPoint + 1) back to the insertion point.
    if (index < 0) {
      index = -(index + 1);
    }

    // Start from the nearest precomputed head at or before `index`, then
    // union in the remaining value-store sets up to `index`.
    // NOTE(review): assumes heads[k] covers key positions
    // < k * precalcInterval — confirm against CompleteIndexBuilder.
    int headIndex = (index - 1) / precalcInterval;
    IntSet result = headIndex > 0 ?
      heads[headIndex].clone() :
      IntSetBuilder.newEmptyIntSet(numKeyValues);
    int startIndex = Math.max(headIndex * precalcInterval, 0);
    for (int i = startIndex; i < index; i++) {
      result = result.unite(valueStore[i]);
    }
    return result;
  }

  /**
   * Finds all the results which match any key in this index.
   *
   * @return all the ids in this index.
   */
  @Override
  public IntSet all() {
    return allIds.clone();
  }

  /**
   * The number of distinct keys in this index.
   */
  @Override
  public int size() {
    return this.keyStore.size();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public long heapSize() {
    return heapSize;
  }

  /**
   * Renders a raw probe as a human-readable string using the key store's
   * type-aware decoding.
   */
  public String probeToString(byte[] bytes) {
    return ArrayUtils.toString(keyStore.fromBytes(bytes));
  }
}
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/regionserver/CompleteIndexBuilder.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.BigDecimalArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.BinarySearch;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.ByteArrayArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.ByteArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.CharArrayArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.CharArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.DoubleArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.FloatArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.IntegerArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.List;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.LongArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.ObjectArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.ShortArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSet;
import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSetBuilder;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * A builder class used to create complete indexes.
 * <p>
 * Usage: feed KeyValues in id (sorted) order via {@link #addKeyValue}, then
 * call {@link #finalizeIndex} once to obtain an immutable {@link IdxIndex}.
 */
public class CompleteIndexBuilder {

  /**
   * Number of pre calculated head and tail sets.
   */
  private static final int NUMBER_OF_PRE_CALCULATED_SETS = 4;

  private final HColumnDescriptor columnDescriptor;
  private final IdxIndexDescriptor indexDescriptor;
  /**
   * Offset extracted from the index descriptor.
   */
  private final int offset;

  /**
   * Length extracted from the index descriptor (-1 means "to end of value").
   */
  private final int length;

  /**
   * The target keystore; concrete type is chosen from the qualifier type.
   */
  private List keyStore;
  /**
   * The value store set builders, parallel to keyStore.
   * NOTE(review): generic type parameters appear stripped by this dump;
   * presumably ObjectArrayList&lt;IntSetBuilder&gt; - confirm upstream.
   */
  private ObjectArrayList valueStoreBuilders;


  /**
   * Construct a new complete index builder.
   *
   * @param columnDescriptor the column descriptor
   * @param indexDescriptor the index descriptor
   */
  public CompleteIndexBuilder(HColumnDescriptor columnDescriptor,
    IdxIndexDescriptor indexDescriptor) {
    this(columnDescriptor, indexDescriptor, 1);
  }

  /**
   * Construct a new complete index builder.
   *
   * @param columnDescriptor the column descriptor
   * @param indexDescriptor the index descriptor
   * @param initialSize the initial arrays size, use -1 for defaults
   */
  public CompleteIndexBuilder(HColumnDescriptor columnDescriptor,
    IdxIndexDescriptor indexDescriptor, int initialSize) {
    this.columnDescriptor = columnDescriptor;
    this.indexDescriptor = indexDescriptor;
    this.offset = indexDescriptor.getOffset();
    this.length = indexDescriptor.getLength();

    // Pick the keystore implementation matching the declared qualifier type.
    switch (this.indexDescriptor.getQualifierType()) {
      case BYTE_ARRAY:
        keyStore = new ByteArrayArrayList(initialSize);
        break;
      case LONG:
        keyStore = new LongArrayList(initialSize);
        break;
      case DOUBLE:
        keyStore = new DoubleArrayList(initialSize);
        break;
      case BYTE:
        keyStore = new ByteArrayList(initialSize);
        break;
      case CHAR:
        keyStore = new CharArrayList(initialSize);
        break;
      case SHORT:
        keyStore = new ShortArrayList(initialSize);
        break;
      case INT:
        keyStore = new IntegerArrayList(initialSize);
        break;
      case FLOAT:
        keyStore = new FloatArrayList(initialSize);
        break;
      case BIG_DECIMAL:
        keyStore = new BigDecimalArrayList(initialSize);
        break;
      case CHAR_ARRAY:
        keyStore = new CharArrayArrayList(initialSize);
        break;
      default:
        throw new IllegalStateException("Unsupported type " +
          this.indexDescriptor.getQualifierType());
    }
    valueStoreBuilders = new ObjectArrayList(initialSize);
  }

  /**
   * Add a new key value to the index. The keyvalues are added in 'key' order.
   *
   * @param kv the keyvalue.
   * @param id the id of the keyvalue (e.g. its place in the sorted order)
   */
  public void addKeyValue(KeyValue kv, int id) {
    assert Bytes.equals(indexDescriptor.getQualifierName(), kv.getQualifier())
      && Bytes.equals(columnDescriptor.getName(), kv.getFamily());
    byte[] key = extractKey(kv);
    int index = BinarySearch.search(keyStore, keyStore.size(), key);
    IntSetBuilder intsetBuilder;
    if (index < 0) {
      // First time this key is seen: insert it at the returned insertion
      // point, keeping keyStore and valueStoreBuilders aligned.
      intsetBuilder = new IntSetBuilder().start();
      index = -(index + 1);
      keyStore.insert(index, key);
      valueStoreBuilders.insert(index, intsetBuilder);
    } else {
      intsetBuilder = valueStoreBuilders.get(index);
    }
    intsetBuilder.addNext(id);
  }

  /**
   * Extract the key from the KeyValue value.
   *
   * @param kv the key value from which to extract the key
   * @return the extracted keyvalue.
   */
  private byte[] extractKey(KeyValue kv) {
    int valueLength = kv.getValueLength();
    // length == -1 means "everything from offset to the end of the value".
    int l = length == -1 ? valueLength - offset : length;
    if (offset + l > valueLength) {
      throw new ArrayIndexOutOfBoundsException(String.format("Can't extract key: " +
        "Offset (%d) + Length (%d) > valueLength (%d)", offset, l, valueLength));
    }
    int o = kv.getValueOffset() + this.offset;
    byte[] result = new byte[l];
    System.arraycopy(kv.getBuffer(), o, result, 0, l);
    return result;
  }

  /**
   * Finalizes the index creation and creates the new index.
   *
   * @param numKeyValues the total number of keyvalues in the region
   * @return a new complete index (or an {@link EmptyIndex} if nothing was added)
   */
  @SuppressWarnings({"unchecked"})
  public IdxIndex finalizeIndex(int numKeyValues) {
    if (valueStoreBuilders.size() > 0) {
      assert numKeyValues > 0;
      int indexSize = keyStore.size();

      IntSet[] valueStore = new IntSet[indexSize];
      for (int i = 0; i < indexSize; i++) {
        valueStore[i] = valueStoreBuilders.get(i).finish(numKeyValues);
      }
      // Interval between precalculated sets; the conditional term keeps the
      // interval >= 1 when the index is smaller than the target set count.
      int interval = (indexSize / NUMBER_OF_PRE_CALCULATED_SETS) + (indexSize < NUMBER_OF_PRE_CALCULATED_SETS ? 1 : 0);
      int precalcSize = indexSize / interval + Integer.signum(indexSize % interval);

      // Suffix unions: tails[j] = union of valueStore[j * interval .. end],
      // built by sweeping from the last key backwards.
      IntSet[] tails = new IntSet[precalcSize];
      IntSet currentTail = IntSetBuilder.newEmptyIntSet(numKeyValues);
      for (int i = indexSize - 1; i >= 0; i--) {
        currentTail = currentTail.unite(valueStore[i]);
        if (i % interval == 0) {
          tails[i / interval] = currentTail;
          // Clone so the stored snapshot is not mutated by later unions.
          currentTail = currentTail.clone();
        }
      }

      // Prefix unions: heads[j] = union of valueStore[0 .. j * interval],
      // built by sweeping forward.
      IntSet[] heads = new IntSet[precalcSize];
      IntSet currentHead = IntSetBuilder.newEmptyIntSet(numKeyValues);
      int maxHeadIndex = -1;
      for (int i = 0; i < indexSize; i++) {
        currentHead = currentHead.unite(valueStore[i]);
        if (i % interval == 0) {
          maxHeadIndex = i;
          heads[i / interval] = currentHead;
          currentHead = currentHead.clone();
        }
      }

      IntSet allIds;
      if (maxHeadIndex < 0) {
        allIds = IntSetBuilder.newEmptyIntSet(numKeyValues);
      } else {
        allIds = currentHead.clone();
        // Add all remaining key values to the allKeys set.
        // NOTE(review): currentHead already covers every value set at this
        // point, so this loop looks redundant (but harmless) - confirm.
        for (int i = maxHeadIndex; i < indexSize; i++) {
          allIds = allIds.unite(valueStore[i]);
        }
      }

      return new CompleteIndex(keyStore, valueStore, heads, tails, allIds,
        numKeyValues, interval);
    } else {
      return new EmptyIndex(keyStore, numKeyValues);
    }
  }

}
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/regionserver/EmptyIndex.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver; 21 | 22 | import org.apache.commons.lang.ArrayUtils; 23 | import org.apache.hadoop.hbase.regionserver.idx.support.arrays.List; 24 | import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSet; 25 | import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSetBuilder; 26 | import org.apache.hadoop.hbase.util.Bytes; 27 | import org.apache.hadoop.hbase.util.ClassSize; 28 | 29 | /** 30 | * An empty index. 31 | */ 32 | public class EmptyIndex implements IdxIndex { 33 | 34 | private static final int HEAP_SIZE = ClassSize.align(ClassSize.OBJECT + 35 | ClassSize.REFERENCE + Bytes.SIZEOF_INT); 36 | 37 | private List keyStore; 38 | private int numKeyValues; 39 | 40 | /** 41 | * Construct a new empty index with a given capacity. All sets genreated by 42 | * this empty index will be initiazlized using the provided capacity. 43 | * 44 | * @param keyStore the key store 45 | * @param capacity the capacity 46 | */ 47 | EmptyIndex(List keyStore, int capacity) { 48 | this.keyStore = keyStore; 49 | this.numKeyValues = capacity; 50 | } 51 | 52 | /** 53 | * {@inheritDoc} 54 | *

55 | * Returns an empty set. 56 | */ 57 | @Override 58 | public IntSet lookup(byte[] probe) { 59 | return IntSetBuilder.newEmptyIntSet(numKeyValues); 60 | } 61 | 62 | /** 63 | * {@inheritDoc} 64 | *

65 | * Returns an empty set. 66 | */ 67 | @Override 68 | public IntSet tail(byte[] probe, boolean inclusive) { 69 | return IntSetBuilder.newEmptyIntSet(numKeyValues); 70 | } 71 | 72 | /** 73 | * {@inheritDoc} 74 | *

75 | * Returns an empty set. 76 | */ 77 | @Override 78 | public IntSet head(byte[] probe, boolean inclusive) { 79 | return IntSetBuilder.newEmptyIntSet(numKeyValues); 80 | } 81 | 82 | /** 83 | * {@inheritDoc} 84 | *

85 | * Returns an empty set. 86 | */ 87 | @Override 88 | public IntSet all() { 89 | return IntSetBuilder.newEmptyIntSet(numKeyValues); 90 | } 91 | 92 | /** 93 | * {@inheritDoc} 94 | */ 95 | @Override 96 | public String probeToString(byte[] bytes) { 97 | return ArrayUtils.toString(keyStore.fromBytes(bytes)); 98 | } 99 | 100 | /** 101 | * {@inheritDoc} 102 | */ 103 | @Override 104 | public int size() { 105 | return 0; 106 | } 107 | 108 | @Override 109 | public long heapSize() { 110 | return HEAP_SIZE; 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/IdxExpressionEvaluator.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.regionserver; 21 | 22 | import org.apache.hadoop.hbase.client.idx.exp.And; 23 | import org.apache.hadoop.hbase.client.idx.exp.Comparison; 24 | import org.apache.hadoop.hbase.client.idx.exp.Expression; 25 | import org.apache.hadoop.hbase.client.idx.exp.Or; 26 | import org.apache.hadoop.hbase.io.HeapSize; 27 | import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSet; 28 | import org.apache.hadoop.hbase.util.Bytes; 29 | import org.apache.hadoop.hbase.util.ClassSize; 30 | import org.apache.log4j.Logger; 31 | 32 | 33 | /** 34 | * Evaluates an {@link Expression}. 35 | */ 36 | public class IdxExpressionEvaluator implements HeapSize { 37 | private static final Logger LOG = Logger.getLogger(IdxExpressionEvaluator.class); 38 | 39 | /** 40 | * Evaluates the expression using the provided search context. 41 | * 42 | * @param searchContext the search context to use whe evaluating the 43 | * exrpession 44 | * @param expression the expression to evaluate. 
45 | * @return a set which contains ids of rows matching the expression provided 46 | */ 47 | public IntSet evaluate(IdxSearchContext searchContext, Expression expression) { 48 | if (expression == null) return null; 49 | 50 | if (expression instanceof And) { 51 | return evaluate(searchContext, (And) expression); 52 | } else if (expression instanceof Or) { 53 | return evaluate(searchContext, (Or) expression); 54 | } else if (expression instanceof Comparison) { 55 | return evaluate(searchContext, (Comparison) expression); 56 | } else { 57 | throw new IllegalArgumentException("Could not evaluate expression type " + 58 | expression.getClass().getName()); 59 | } 60 | } 61 | 62 | protected IntSet evaluate(IdxSearchContext searchContext, And and) { 63 | IntSet result = null; 64 | for (Expression expression : and.getChildren()) { 65 | if (LOG.isDebugEnabled()) { 66 | LOG.debug("Intersecting expression:"); 67 | } 68 | IntSet childResult = evaluate(searchContext, expression); 69 | if (result == null) { 70 | result = childResult; 71 | } else if (childResult != null) { 72 | result = result.intersect(childResult); 73 | } 74 | } 75 | return result; 76 | } 77 | 78 | protected IntSet evaluate(IdxSearchContext searchContext, Or or) { 79 | IntSet result = null; 80 | for (Expression expression : or.getChildren()) { 81 | if (LOG.isDebugEnabled()) { 82 | LOG.debug("Uniting expression:"); 83 | } 84 | IntSet childResult = evaluate(searchContext, expression); 85 | if (result == null) { 86 | result = childResult; 87 | } else if (childResult != null) { 88 | result = result.unite(childResult); 89 | } 90 | } 91 | return result; 92 | } 93 | 94 | protected IntSet evaluate(IdxSearchContext searchContext, Comparison comparison) { 95 | IdxIndex index = searchContext.getIndex(comparison.getColumnName(), comparison.getQualifier()); 96 | if (index == null) throw new IllegalStateException( 97 | String.format("Could not find an index for column: '%s', qualifier: '%s'", 98 | 
Bytes.toString(comparison.getColumnName()), 99 | Bytes.toString(comparison.getQualifier()))); 100 | 101 | IntSet matched = null; 102 | boolean resultIncludesMissing = false; 103 | switch (comparison.getOperator()) { 104 | case EQ: 105 | matched = index.lookup(comparison.getValue()); 106 | break; 107 | case NEQ: 108 | matched = index.lookup(comparison.getValue()); 109 | matched = matched.complement(); 110 | // When we complement the matched set we may include ids which are 111 | // missing from the index 112 | resultIncludesMissing = true; 113 | break; 114 | case GT: 115 | matched = index.tail(comparison.getValue(), false); 116 | break; 117 | case GTE: 118 | matched = index.tail(comparison.getValue(), true); 119 | break; 120 | case LT: 121 | matched = index.head(comparison.getValue(), false); 122 | break; 123 | case LTE: 124 | matched = index.head(comparison.getValue(), true); 125 | break; 126 | } 127 | 128 | if (comparison.getIncludeMissing() != resultIncludesMissing) { 129 | matched = resultIncludesMissing ? matched.intersect(index.all()) : matched.unite(index.all().complement()); 130 | } 131 | 132 | if (LOG.isDebugEnabled() && matched != null) { 133 | LOG.debug(String.format("Evaluation of comparison on column: '%s', " + 134 | "qualifier: '%s', operator: %s, value: '%s' include missing: '%b' " + 135 | "yielded %s matches", 136 | Bytes.toString(comparison.getColumnName()), 137 | Bytes.toString(comparison.getQualifier()), 138 | comparison.getOperator(), 139 | index.probeToString(comparison.getValue()), 140 | comparison.getIncludeMissing(), matched.size())); 141 | } 142 | 143 | return matched != null ? 
matched : null; 144 | } 145 | 146 | @Override 147 | public long heapSize() { 148 | return ClassSize.OBJECT; 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/IdxIndex.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver; 21 | 22 | import org.apache.hadoop.hbase.io.HeapSize; 23 | import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSet; 24 | 25 | /** 26 | * An index interface. 27 | */ 28 | public interface IdxIndex extends HeapSize { 29 | 30 | /** 31 | * Looks up an object. returns only exact matches. 32 | * 33 | * @param probe the probe to lookup 34 | * @return the result set 35 | */ 36 | IntSet lookup(byte[] probe); 37 | 38 | /** 39 | * Gets all hte objects which are greater (or greater equal) than the probe. 
   *
   * @param probe     the probe to lookup
   * @param inclusive if greater equal
   * @return the result set
   */
  IntSet tail(byte[] probe, boolean inclusive);

  /**
   * Gets all the objects which are lesser (or lesser equal) than the probe.
   *
   * @param probe     the probe to lookup
   * @param inclusive if lesser equal
   * @return the result set
   */
  IntSet head(byte[] probe, boolean inclusive);

  /**
   * Finds all the results which match any key in this index.
   *
   * @return all the ids in this index.
   */
  IntSet all();

  /**
   * Returns a string representation of the provided bytes probe.
   *
   * @param bytes the bytes
   * @return the string representation
   */
  String probeToString(byte[] bytes);

  /**
   * The number of entries in the index.
   *
   * @return the number of entries in the index
   */
  int size();
}
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/regionserver/IdxRegionMBean.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

/**
 * An MBean exposing various region ops and info.
 */
public interface IdxRegionMBean {

  /**
   * Checks whether the region being exposed by this MBean is still alive.
   *
   * @return whether the region being exposed by this MBean is still alive.
   */
  boolean isValid();

  /**
   * The number of keys in the index which is equivalent to the number of top
   * level rows in this region.
   *
   * @return the number of keys in the index.
   */
  int getNumberOfIndexedKeys();

  /**
   * The total heap size, in bytes, used by the indexes and their overhead.
   *
   * @return the total index heap size in bytes.
   */
  long getIndexesTotalHeapSize();

  /**
   * Gets the total number of indexed scans since the last reset.
   *
   * @return the total number of indexed scans.
   */
  public long getTotalIndexedScans();

  /**
   * Resets the total number of indexed scans.
   *
   * @return the number of indexed scans just before the reset
   */
  public long resetTotalIndexedScans();

  /**
   * Gets the total number of non indexed scans since the last reset.
   *
   * @return the total number of non indexed scans.
   */
  public long getTotalNonIndexedScans();

  /**
   * Resets the total number of non indexed scans.
   *
   * @return the number of non indexed scans just before the reset
   */
  public long resetTotalNonIndexedScans();

  /**
   * Exposes the number of indexed scans currently ongoing in the system.
   *
   * @return the number of ongoing indexed scans
   */
  public long getNumberOfOngoingIndexedScans();

  /**
   * Gets the index build times buffer as a comma delimited string
   * where each item is the milliseconds required to rebuild the indexes.
   *
   * @return a rolling buffer of index build times
   */
  public String getIndexBuildTimes();

  /**
   * Resets the index build times buffer.
   *
   * @return the previous build times buffer
   */
  public String resetIndexBuildTimes();

}
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/regionserver/IdxSearchContext.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.idx.support.Callback;
import org.apache.hadoop.hbase.regionserver.idx.support.arrays.ObjectArrayList;
import org.apache.hadoop.hbase.regionserver.idx.support.sets.IntSet;
import org.apache.hadoop.hbase.util.Pair;

import java.util.Map;

/**
 * The search context holds the context for a specific search request.
 * It takes a snapshot of the indexes at the time the search was requested.
 * <p>
 * The search context is a transient object whose life spans only while the
 * search (typically a region scan) is in progress.
 */
public class IdxSearchContext {

  // NOTE(review): generic type parameters appear to have been stripped from
  // this dump; presumably ObjectArrayList<KeyValue> and
  // Map<Pair<byte[], byte[]>, IdxIndex> -- confirm against upstream source.
  private ObjectArrayList keys;
  private Map, IdxIndex> indexMap;

  /**
   * Construct a new search context.
   *
   * @param keys the keys to use when searching.
   * @param indexMap the matching index map
   */
  public IdxSearchContext(ObjectArrayList keys,
    Map, IdxIndex> indexMap) {
    this.keys = keys;
    this.indexMap = indexMap;
    //DebugPrint.println("IdxSearchContext w/ keys/indexMap: " +
    //  keys.hashCode() + " / " + indexMap.hashCode());
  }

  /**
   * Looks up an index based on the column and the qualifier. May return null if
   * no such index is found.
   *
   * @param column the column
   * @param qualifier the column qualifier
   * @return the index for the column/qualifier
   */
  public IdxIndex getIndex(byte[] column, byte[] qualifier) {
    return indexMap.get(Pair.of(column, qualifier));
  }

  /**
   * Process a set of rows, typically to convert a query to a scan. Rows are
   * processed in sorted order.
   *
   * @param rowSet the row set to process
   * @param callback the callback to use to process those rows
   */
  public void processRows(IntSet rowSet, Callback callback) {
    IntSet.IntSetIterator iterator = rowSet.iterator();
    while (iterator.hasNext()) {
      int i = iterator.next();
      callback.call(keys.get(i));
    }
  }

  /**
   * Maps an index offset back to the actual row key it represents.
   *
   * @param index the index offset
   * @return the key value at that offset
   */
  public KeyValue lookupRow(int index) {
    return keys.get(index);
  }

  /**
   * The number of rows indexed in this search context.
   *
   * @return the number of indexed rows.
97 | */ 98 | public int rowCount() { 99 | return keys.size(); 100 | } 101 | 102 | /** 103 | * close this search context 104 | */ 105 | public void close() { 106 | keys = null; 107 | indexMap = null; 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/Bits.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support; 21 | 22 | /** 23 | * Bits utility class. 24 | * TODO consider delete this class since the Java SE implemetations are as good. 25 | */ 26 | public final class Bits { 27 | 28 | /** 29 | * De bruijn 64 number to implement set bit index extraction. 
30 | */ 31 | private static final long DEBRUIJN64 = 0x022fdd63cc95386dl; 32 | 33 | private static final int[] DEBRUIJN64_TABLE = new int[]{ 34 | 0, 1, 2, 53, 3, 7, 54, 27, 35 | 4, 38, 41, 8, 34, 55, 48, 28, 36 | 62, 5, 39, 46, 44, 42, 22, 9, 37 | 24, 35, 59, 56, 49, 18, 29, 11, 38 | 63, 52, 6, 26, 37, 40, 33, 47, 39 | 61, 45, 43, 21, 23, 58, 17, 10, 40 | 51, 25, 36, 32, 60, 20, 57, 16, 41 | 50, 31, 19, 15, 30, 14, 13, 12, 42 | }; 43 | 44 | private static final int DEBRUIJN64_SHIFT = 58; 45 | 46 | private Bits() { 47 | //utility class private constructor 48 | } 49 | 50 | 51 | /** 52 | * Finds the index of the lowest set bit. 53 | * 54 | * @param word the word to check. Should not be zero. 55 | * @return the index of the lowest set bit. 56 | */ 57 | public static int lowestSetBitIndex(long word) { 58 | assert word != 0; 59 | word &= -word; 60 | return DEBRUIJN64_TABLE[(int) ((word * DEBRUIJN64) >>> DEBRUIJN64_SHIFT)]; 61 | } 62 | 63 | /** 64 | * Finds the index of the highest set bit. 65 | * 66 | * @param word the word to check. Should not be zero. 67 | * @return the index of the highest set bit. 68 | */ 69 | public static int highestSetBitIndex(long word) { 70 | word |= word >> 1; 71 | word |= word >> 2; 72 | word |= word >> 4; 73 | word |= word >> 8; 74 | word |= word >> 16; 75 | word |= word >> 32; 76 | word = word + 1 >>> 1; 77 | return DEBRUIJN64_TABLE[(int) ((word * DEBRUIJN64) >>> DEBRUIJN64_SHIFT)]; 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/Callback.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support; 21 | 22 | /** 23 | * An interface for callback with a given argument type. 24 | * 25 | * @param the argument type 26 | */ 27 | public interface Callback { 28 | 29 | /** 30 | * Call this callback with the given argument 31 | * 32 | * @param arg the argument to use 33 | */ 34 | void call(T arg); 35 | } 36 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/IdxClassSize.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support; 21 | 22 | import org.apache.hadoop.hbase.util.Bytes; 23 | import org.apache.hadoop.hbase.util.ClassSize; 24 | 25 | /** 26 | * Holds class sizes used by Idx heap size calcs. 27 | * TODO merge with ClassSize. 28 | */ 29 | public class IdxClassSize extends ClassSize { 30 | 31 | /** 32 | * Hash map fixed overhead. 33 | */ 34 | public static final long HASHMAP = align(OBJECT + 3 * Bytes.SIZEOF_INT + 35 | Bytes.SIZEOF_FLOAT + ARRAY + 4 * REFERENCE); 36 | 37 | /** 38 | * Object array list fixed overhead. 39 | */ 40 | public static final long OBJECT_ARRAY_LIST = align(OBJECT + Bytes.SIZEOF_INT + 41 | ARRAY + REFERENCE); 42 | 43 | 44 | } 45 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/arrays/BinarySearch.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.arrays; 21 | 22 | /** 23 | * A generic implementation of the binary search algorithm that can run on 24 | * any type of object that conforms to the {@link Searchable} or any of the 25 | * directly supported primitive types or buffers. 26 | */ 27 | public final class BinarySearch { 28 | 29 | /** 30 | * Suppress construction for utility classes. 31 | */ 32 | private BinarySearch() { 33 | } 34 | 35 | 36 | /** 37 | * Conducts a binary search for the requested value in the supplied target 38 | * object. 39 | *

40 | * If not found, returns -(insertionPoint + 1) 41 | * This is always a negative number. To convert this negative number into 42 | * the real the insertion point, call convertToInsertionIndex(int) 43 | * 44 | * @param haystack the object you wish to search 45 | * @param haystackLength the number of objects in the haystack 46 | * @param needle the value you are looking for 47 | * @param the class that we are searching 48 | * @param the class of needle we are looking for 49 | * @return the position the key was found in 50 | */ 51 | public static , N> int search(H haystack, 52 | int haystackLength, N needle) { 53 | // Check argument validity 54 | if (haystack == null) { 55 | throw new IllegalArgumentException("Argument 'Check' cannot be null"); 56 | } 57 | if (needle == null) { 58 | throw new IllegalArgumentException("Argument 'needle' cannot be null"); 59 | } 60 | 61 | // Initialise boundaries 62 | int high = haystackLength; 63 | int low = -1; 64 | 65 | // Search until the high and low markers are next to each other 66 | while (high - low > 1) { 67 | // Calculate the mid-point to check 68 | int probe = (low + high) >>> 1; 69 | 70 | // Move the markers. Note that the comparison returns < 0 if the needle is 71 | // less than the comparison index so this test is opposite to the standard 72 | int comparison = haystack.compare(needle, probe); 73 | if (comparison > 0) { 74 | low = probe; 75 | } else { 76 | high = probe; 77 | } 78 | } 79 | 80 | // If the high marker hasn't moved (still off the end of the target), or 81 | // the value we landed on isnt what we were looking for, we didn't find it 82 | if (high == haystackLength || haystack.compare(needle, high) != 0) { 83 | // Return the encoded insertion position. 84 | return -(high + 1); 85 | } else { 86 | // Return the match position 87 | return high; 88 | } 89 | } 90 | 91 | /** 92 | * Conducts a binary search for the requested value in the supplied target 93 | * object. 94 | *

95 | * If not found, returns -(insertionPoint + 1) 96 | * This is always a negative number. To convert this negative number into the 97 | * real the insertion point, call convertToInsertionIndex(int) 98 | * 99 | * @param haystack the object you wish to search 100 | * @param haystackLength the number of objects in the haystack 101 | * @param needle the value you are looking for 102 | * @param the class that we are searching 103 | * @param the class of needle we are looking for 104 | * @return the position the key was found in 105 | */ 106 | public static , N> int search(H haystack, 107 | int haystackLength, byte[] needle) { 108 | return search(haystack, haystackLength, haystack.fromBytes(needle)); 109 | } 110 | 111 | /** 112 | * This interface enforces the required methods to search an arbitrary 113 | * object with a binary search algorithm. 114 | */ 115 | public interface Searchable { 116 | /** 117 | * Create the needle for a byte array. 118 | * 119 | * @param bytes the byte array to use 120 | * @return the needle instance 121 | */ 122 | N fromBytes(byte[] bytes); 123 | 124 | /** 125 | * Compares the two requested elements. 126 | * 127 | * @param needle the value we are looking for 128 | * @param compareToIndex the index of the element to compare the needle to 129 | * @return -ve, 0, +ve if the needle is <, = or > than the element to check 130 | */ 131 | int compare(N needle, int compareToIndex); 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/arrays/List.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.arrays; 21 | 22 | import org.apache.hadoop.hbase.io.HeapSize; 23 | import org.apache.hadoop.hbase.util.Bytes; 24 | import org.apache.hadoop.hbase.util.ClassSize; 25 | 26 | /** 27 | * An interface for an array list, typically used as a key store. 28 | * 29 | * @param the element type 30 | */ 31 | public interface List extends Iterable, BinarySearch.Searchable, 32 | HeapSize { 33 | 34 | long FIXED_OVERHEAD = 35 | ClassSize.align(ClassSize.ARRAY + Bytes.SIZEOF_INT + ClassSize.OBJECT); 36 | 37 | /** 38 | * Adds the element to the end of the list. 39 | * 40 | * @param e the new element 41 | */ 42 | void add(byte[] e); 43 | 44 | /** 45 | * Sets the specified index to the nominated value. 46 | * 47 | * @param index the list index 48 | * @param newValue the value 49 | */ 50 | void set(int index, byte[] newValue); 51 | 52 | /** 53 | * Inserts at the specified index to the list. 54 | * 55 | * @param index the index to insert 56 | * @param newValue the value to insert 57 | */ 58 | void insert(int index, byte[] newValue); 59 | 60 | /** 61 | * Checks if the list is empty. 62 | * 63 | * @return true if the list is empty 64 | */ 65 | boolean isEmpty(); 66 | 67 | /** 68 | * Returns the current number of elements in this list. 69 | * 70 | * @return the number of elements. 
71 | */ 72 | int size(); 73 | 74 | 75 | } 76 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/arrays/ObjectArrayList.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.arrays; 21 | 22 | import java.util.Arrays; 23 | import java.util.Iterator; 24 | import java.util.NoSuchElementException; 25 | 26 | /** 27 | * An object array list implemented using the same methodology as the primitve 28 | * arrays but without the extra heap size and {@link List} methods. 29 | *

30 | * NOTE: This class is completely unsynchronised. 31 | * 32 | * @param element type 33 | */ 34 | public class ObjectArrayList implements Iterable { 35 | 36 | /** 37 | * Default initial size of the array backing this list. 38 | */ 39 | private static final int DEFAULT_SIZE = 1; 40 | 41 | /** 42 | * The scaling factor we use to resize the backing buffer when the list needs to grow. 43 | */ 44 | private static final float SCALE_FACTOR = 1.5f; 45 | 46 | /** 47 | * The array backing this list. 48 | */ 49 | private T[] values; 50 | 51 | /** 52 | * The number of values present in the list. 53 | */ 54 | private int size; 55 | 56 | /** 57 | * Constructor that initialises with the default size. 58 | */ 59 | public ObjectArrayList() { 60 | this(DEFAULT_SIZE); 61 | } 62 | 63 | /** 64 | * Constructor which initialises with the specified initial capacity. 65 | * 66 | * @param initialCapacity the initial capacity of the backing array 67 | */ 68 | @SuppressWarnings("unchecked") 69 | public ObjectArrayList(int initialCapacity) { 70 | values = (T[]) new Object[initialCapacity]; 71 | } 72 | 73 | /** 74 | * Constructor which initialises the content from the supplied array list. 75 | * 76 | * @param initial the initial contents 77 | */ 78 | public ObjectArrayList(ObjectArrayList initial) { 79 | // Initialise the internal storage to the appropriate size 80 | this(initial.size); 81 | 82 | // Copy over the references/values 83 | System.arraycopy(initial.values, 0, this.values, 0, initial.size); 84 | this.size = initial.size; 85 | } 86 | 87 | /** 88 | * Checks the contents of the collection for equality. 89 | *

90 | * {@inheritDoc} 91 | */ 92 | @Override 93 | public boolean equals(Object compareTo) { 94 | if (this == compareTo) { 95 | return true; 96 | } 97 | if (!(compareTo instanceof ObjectArrayList)) { 98 | return false; 99 | } 100 | 101 | ObjectArrayList that = (ObjectArrayList) compareTo; 102 | 103 | return Arrays.equals(this.values, that.values); 104 | } 105 | 106 | /** 107 | * Adds the element to the end of the list. 108 | * 109 | * @param e the new element 110 | */ 111 | public void add(T e) { 112 | ensureCapacity(size + 1); 113 | values[size] = e; 114 | size++; 115 | } 116 | 117 | /** 118 | * Grows the backing array to the requested size. 119 | * 120 | * @param requested the new capacity. 121 | */ 122 | @SuppressWarnings("unchecked") 123 | private void ensureCapacity(int requested) { 124 | // If we need to resize 125 | if (requested > values.length) { 126 | // Calculate the new size, growing slowly at the start to avoid overallocation too early. 127 | int newSize = Math.max(requested, (int) (values.length * SCALE_FACTOR + 1)); 128 | 129 | // Create the new array 130 | T[] newValues = (T[]) new Object[newSize]; 131 | 132 | // Populate the new backing array 133 | System.arraycopy(values, 0, newValues, 0, size); 134 | values = newValues; 135 | } 136 | } 137 | 138 | /** 139 | * Retrieves the element at the requested index. 140 | * 141 | * @param index the element index you wish to retrieve 142 | * @return the value at that index 143 | */ 144 | public T get(int index) { 145 | if (index >= size) { 146 | throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements"); 147 | } 148 | 149 | return values[index]; 150 | } 151 | 152 | /** 153 | * Searches the list for the nominated value. 154 | * 155 | * @param searchFor the value you are looking for 156 | * @return the first index the value was found at or -1 if not found 157 | */ 158 | public int indexOf(T searchFor) { 159 | // Check each of the values. 
Don't bother with get() since we don't need its protection. 160 | for (int i = 0; i < size; i++) { 161 | if (values[i].equals(searchFor)) { 162 | return i; 163 | } 164 | } 165 | 166 | // Didn't find it. 167 | return -1; 168 | } 169 | 170 | /** 171 | * Simple iterator that runs over the values in the list. 172 | */ 173 | private static final class InternalIterator 174 | implements Iterator { 175 | 176 | private T[] values; 177 | private int size; 178 | private int current = 0; 179 | 180 | private InternalIterator(T[] values, int size) { 181 | this.values = values; 182 | this.size = size; 183 | } 184 | 185 | /** 186 | * {@inheritDoc} 187 | */ 188 | public boolean hasNext() { 189 | return current < size; 190 | } 191 | 192 | /** 193 | * {@inheritDoc} 194 | */ 195 | public T next() { 196 | if (!hasNext()) { 197 | throw new NoSuchElementException(); 198 | } 199 | return values[current++]; 200 | } 201 | 202 | /** 203 | * Not supported. 204 | */ 205 | public void remove() { 206 | throw new UnsupportedOperationException("remove() is not supported"); 207 | } 208 | } 209 | 210 | /** 211 | * Returns an iterator over the underlying content. Note that this is completely unsynchronised and the contents can change under you. 212 | */ 213 | public Iterator iterator() { 214 | return new InternalIterator(values, size); 215 | } 216 | 217 | /** 218 | * Checks if the list is empty. 219 | * 220 | * @return true if the list is empty 221 | */ 222 | public boolean isEmpty() { 223 | return size == 0; 224 | } 225 | 226 | /** 227 | * Sets the specified index to the nominated value. 228 | * 229 | * @param index the list index 230 | * @param newValue the value 231 | */ 232 | public void set(int index, T newValue) { 233 | if (index >= size) { 234 | throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements"); 235 | } 236 | 237 | values[index] = newValue; 238 | } 239 | 240 | /** 241 | * Removes the specified index from the list. 
242 | * 243 | * @param index the index to remove 244 | * @return the original value 245 | */ 246 | public T remove(int index) { 247 | if (index >= size) { 248 | throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements"); 249 | } 250 | 251 | T original = values[index]; 252 | System.arraycopy(values, index + 1, values, index, size - index - 1); 253 | values[size - 1] = null; 254 | size--; 255 | return original; 256 | } 257 | 258 | /** 259 | * Inserts at the specified index to the list. 260 | * 261 | * @param index the index to insert 262 | * @param newValue the value to insert 263 | */ 264 | public void insert(int index, T newValue) { 265 | if (index > size) { 266 | throw new ArrayIndexOutOfBoundsException("Attempted to access index " + index + " but array is " + size + " elements"); 267 | } 268 | 269 | ensureCapacity(size + 1); 270 | if (index != size) { 271 | System.arraycopy(values, index, values, index + 1, size - index); 272 | } 273 | values[index] = newValue; 274 | size++; 275 | } 276 | 277 | 278 | /** 279 | * Removes the last item in the list. 280 | * 281 | * @return the original value 282 | */ 283 | public T removeLast() { 284 | if (size < 1) { 285 | throw new ArrayIndexOutOfBoundsException("Attempted to remove last element from array with size 0"); 286 | } 287 | 288 | T result = values[size - 1]; 289 | values[size - 1] = null; 290 | size--; 291 | 292 | return result; 293 | } 294 | 295 | /** 296 | * Returns the current number of elements in this list. 297 | * 298 | * @return the number of elements. 299 | */ 300 | public int size() { 301 | return size; 302 | } 303 | 304 | /** 305 | * Return a nice view of the list. 
306 | * {@inheritDoc} 307 | */ 308 | public String toString() { 309 | return Arrays.toString(Arrays.copyOf(values, size)); 310 | } 311 | 312 | 313 | } 314 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSet.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.sets; 21 | 22 | import org.apache.hadoop.hbase.io.HeapSize; 23 | 24 | /** 25 | * A set of integers ranged between 0 and MAX. Elements to this set have to be 26 | * added in order. 27 | */ 28 | public interface IntSet extends HeapSize, Cloneable { 29 | 30 | /** 31 | * The value of the smallest element which may be added to this set. 32 | */ 33 | int SMALLEST = 0; 34 | /** 35 | * The value of the maximal element which may be added to an IntSet. 36 | */ 37 | int LARGEST = Integer.MAX_VALUE; 38 | 39 | /** 40 | * Counts the elements in the set. 
41 | * 42 | * @return number of elements in the set 43 | */ 44 | int size(); 45 | 46 | /** 47 | * Empty check. 48 | * 49 | * @return true if the set is empty. 50 | */ 51 | boolean isEmpty(); 52 | 53 | /** 54 | * The number of elements which this set may contain. 55 | * The elements can be any in the range of [0, capacity()-1] 56 | * 57 | * @return the maximum element in the set. 58 | */ 59 | int capacity(); 60 | 61 | /** 62 | * Checks whether an element is contained in the set. 63 | * 64 | * @param element an intefer in the range of 0 and {@link #capacity()} 65 | * @return true if the set contains this element. 66 | */ 67 | boolean contains(int element); 68 | 69 | /** 70 | * Clear the set. 71 | */ 72 | void clear(); 73 | 74 | /** 75 | * Inteverts this set to the 'complementary set' e.g. to a set which contains 76 | * exactly the set of elements not contained in this set. This operation is 77 | * unsafe, it may change this set. 78 | * 79 | * @return the complementary set. 80 | */ 81 | IntSet complement(); 82 | 83 | /** 84 | * Intersect this int set with another int set. This operation is unsafe, it 85 | * may change this set. 86 | * 87 | * @param other the set to intersect with (not affected by this operation). 88 | * @return the intersection (may be a reference to this set). 89 | */ 90 | IntSet intersect(IntSet other); 91 | 92 | /** 93 | * Unite this intset with another int set. This operation is unsafe, it may 94 | * change this set. 95 | * 96 | * @param other the set to unite with. 97 | * @return the united set, my be a reference to this set. 98 | */ 99 | IntSet unite(IntSet other); 100 | 101 | /** 102 | * Subtract all the elements of another set from this one leaving only 103 | * elements which do not exist in the other set. This operation is unsafe, it 104 | * may change this set. 
105 | * 106 | * @param other the set to subtract from this one 107 | * @return the subtracted set, may be a referene to this one 108 | */ 109 | IntSet subtract(IntSet other); 110 | 111 | /** 112 | * The difference between the two sets, all the elements which are set in 113 | * either but not in both. This operation is unsafe, it may change this set. 114 | * 115 | * @param other the other set 116 | * @return the difference set, may be a referene to this one 117 | */ 118 | IntSet difference(IntSet other); 119 | 120 | /** 121 | * Clone this set. Implementing classes must be able to clone themsevles. 122 | * 123 | * @return the cloned set. 124 | */ 125 | IntSet clone(); 126 | 127 | /** 128 | * Returns an iterator over the int set. 129 | * 130 | * @return an iterator 131 | */ 132 | IntSetIterator iterator(); 133 | 134 | /** 135 | * An iterator over an {@link IntSet} that avoids auto-boxing. 136 | */ 137 | public interface IntSetIterator { 138 | /** 139 | * Returns true if the iteration has more elements. 140 | * 141 | * @return true if the iterator has more elements, otherwise false 142 | */ 143 | boolean hasNext(); 144 | 145 | /** 146 | * Returns the next element in the iteration. 147 | * 148 | * @return the next element in the iteration 149 | * @throws IndexOutOfBoundsException iteration has no more elements 150 | */ 151 | int next(); 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBase.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.sets; 21 | 22 | /** 23 | * A package protected interface for modifiable {@link IntSet}s. 24 | */ 25 | abstract class IntSetBase implements IntSet { 26 | 27 | /** 28 | * Adds the next element to the set. This element must be larger than all 29 | * the other elements in this set. 30 | * 31 | * @param element the element to add 32 | */ 33 | abstract void addNext(int element); 34 | 35 | /** 36 | * {@inheritDoc}. 37 | *

38 | * This is a convenience method to avoid wrappig the 39 | * {@link CloneNotSupportedException} in the sub classes or handling it 40 | * in classes using this set. 41 | */ 42 | @Override 43 | public IntSet clone() { 44 | try { 45 | return (IntSetBase) super.clone(); 46 | } catch (CloneNotSupportedException e) { 47 | throw new IllegalStateException("Super clone should be supported."); 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBuilder.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.sets; 21 | 22 | /** 23 | * Encapsultes {@link IntSet} building strategy. 24 | * May switch set implementations 'behind the scenes'. 25 | * To prevent extra memory allocation in the index this class implements the 26 | * intset interface, please note however that 27 | * all the implemented methods throw {@link UnsupportedOperationException}. 
28 | */ 29 | public class IntSetBuilder { 30 | 31 | private SparseBitSet sparseBitSet; 32 | 33 | /** 34 | * Create a new empty int set. 35 | * 36 | * @param capacity the capacity of the set. 37 | * @return the new set 38 | */ 39 | public static IntSet newEmptyIntSet(int capacity) { 40 | SparseBitSet intSet = new SparseBitSet(); 41 | intSet.setCapacity(capacity); 42 | return intSet; 43 | } 44 | 45 | /** 46 | * Calculates the total size of the elements of an IntSet array. 47 | * 48 | * @param intSets the intset array to calculate 49 | * @return the total size 50 | */ 51 | public static long calcHeapSize(IntSet[] intSets) { 52 | int size = 0; 53 | for (IntSet set : intSets) { 54 | size += set.heapSize(); 55 | } 56 | return size; 57 | } 58 | 59 | /** 60 | * Start building the intset. 61 | * 62 | * @return this 63 | */ 64 | public IntSetBuilder start() { 65 | sparseBitSet = new SparseBitSet(); 66 | return this; 67 | } 68 | 69 | /** 70 | * Adds the next item to this set. Items must be added in order. 71 | * 72 | * @param element the item to add 73 | * @return this 74 | */ 75 | public IntSetBuilder addNext(int element) { 76 | sparseBitSet.addNext(element); 77 | return this; 78 | } 79 | 80 | /** 81 | * Convenience method that adds one or more elements. 82 | * 83 | * @param element a mandatory element 84 | * @param elements an array of optional elements 85 | * @return this 86 | * @see #addNext(int) 87 | */ 88 | public IntSetBuilder addAll(int element, int... elements) { 89 | addNext(element); 90 | if (elements != null) { 91 | for (int i : elements) { 92 | addNext(i); 93 | } 94 | } 95 | return this; 96 | } 97 | 98 | 99 | /** 100 | * Finalize the bitset. 101 | * 102 | * @param numKeys the number of keys in the final bitset 103 | * @return the underlying bitset. 
104 | */ 105 | public IntSet finish(int numKeys) { 106 | sparseBitSet.setCapacity(numKeys); 107 | return sparseBitSet; 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/main/resources/fmpp/types.csv: -------------------------------------------------------------------------------- 1 | displayName;clazz;primitive;kind 2 | Byte;Byte;byte;integer 3 | Char;Character;char;integer 4 | Short;Short;short;integer 5 | Integer;Integer;int;integer 6 | Long;Long;long;integer 7 | Float;Float;float;floatingPoint 8 | Double;Double;double;floatingPoint 9 | ByteArray;byte[];byte[];integerArray 10 | CharArray;char[];char[];integerArray 11 | BigDecimal;BigDecimal;BigDecimal;comparable -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/TestIdxHBaseCluster.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase; 21 | 22 | import junit.framework.Assert; 23 | import org.apache.hadoop.hbase.client.HBaseAdmin; 24 | import org.apache.hadoop.hbase.client.HTable; 25 | import org.apache.hadoop.hbase.client.Put; 26 | import org.apache.hadoop.hbase.client.Result; 27 | import org.apache.hadoop.hbase.client.ResultScanner; 28 | import org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor; 29 | import org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor; 30 | import org.apache.hadoop.hbase.client.idx.IdxQualifierType; 31 | import org.apache.hadoop.hbase.client.idx.IdxScan; 32 | import org.apache.hadoop.hbase.client.idx.exp.Comparison; 33 | import org.apache.hadoop.hbase.client.idx.exp.Expression; 34 | import org.apache.hadoop.hbase.filter.CompareFilter; 35 | import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; 36 | import org.apache.hadoop.hbase.regionserver.IdxRegion; 37 | import org.apache.hadoop.hbase.util.Bytes; 38 | 39 | import java.io.IOException; 40 | import java.util.Arrays; 41 | import java.util.Random; 42 | import java.util.concurrent.Callable; 43 | import java.util.concurrent.ExecutorService; 44 | import java.util.concurrent.Executors; 45 | import java.util.concurrent.atomic.AtomicInteger; 46 | 47 | /** 48 | * tests administrative functions 49 | */ 50 | public class TestIdxHBaseCluster extends TestHBaseCluster { 51 | private static final String FIXED_PART = "Some string with a rolling value "; 52 | 53 | /** 54 | * constructor 55 | */ 56 | public TestIdxHBaseCluster() { 57 | super(); 58 | conf.setClass(HConstants.REGION_IMPL, IdxRegion.class, IdxRegion.class); 59 | // Force flushes and compactions 60 | conf.set("hbase.hregion.memstore.flush.size", "262144"); 61 | } 62 | 63 | /** 64 | * Tests reading and writing concurrently to a table. 
65 | * 66 | * @throws IOException exception 67 | */ 68 | @SuppressWarnings({"unchecked"}) 69 | public void testConcurrentReadWrite() throws IOException { 70 | int maxRows = 20000; 71 | Random random = new Random(4111994L); 72 | byte[][] rows = new byte[maxRows][]; 73 | for (int i = 0; i < rows.length; i++) { 74 | rows[i] = Bytes.toBytes(random.nextInt()); 75 | } 76 | final AtomicInteger sequence = new AtomicInteger(0); 77 | 78 | HTableDescriptor desc = new HTableDescriptor("testConcurrentReadWrite"); 79 | byte[] family = Bytes.toBytes("concurrentRW"); 80 | byte[] qualifier = Bytes.toBytes("strings"); 81 | IdxColumnDescriptor descriptor = new IdxColumnDescriptor(family); 82 | descriptor.addIndexDescriptor(new IdxIndexDescriptor(qualifier, IdxQualifierType.CHAR_ARRAY)); 83 | desc.addFamily(descriptor); 84 | HBaseAdmin admin = new HBaseAdmin(conf); 85 | admin.createTable(desc); 86 | HTable table = new HTable(conf, desc.getName()); 87 | 88 | ExecutorService service = Executors.newCachedThreadPool(); 89 | for (int i = 0; i < 5; i++) { 90 | service.submit(new Writer(table, family, qualifier, sequence, rows)); 91 | } 92 | 93 | byte[] value = Bytes.toBytes((FIXED_PART + 0).toCharArray()); 94 | IdxScan idxScan = new IdxScan(); 95 | idxScan.setExpression(Expression.comparison(family, qualifier, Comparison.Operator.EQ, value)); 96 | idxScan.setFilter(new SingleColumnValueFilter(family, qualifier, CompareFilter.CompareOp.EQUAL, value)); 97 | idxScan.setCaching(1000); 98 | 99 | int count = 0; 100 | int finalCount = maxRows / 10; 101 | int printCount = 0; 102 | //List prevList = null; 103 | int totalScans = 0; 104 | while (count < finalCount) { 105 | ResultScanner scanner = table.getScanner(idxScan); 106 | int nextCount = 0; 107 | //List resultList = new ArrayList(); 108 | for (Result res : scanner) { 109 | nextCount++; 110 | Assert.assertTrue(Arrays.equals(res.getValue(family, qualifier), value)); 111 | //resultList.add(res); 112 | } 113 | totalScans++; 114 | if (nextCount > 
printCount + 1000) { 115 | System.out.printf("++ found %d matching rows\n", nextCount); 116 | printCount = nextCount; 117 | } 118 | String infoString = "nextCount=" + nextCount + ", count=" + count + ", finalCount=" + finalCount; 119 | boolean condition = nextCount >= count && nextCount <= finalCount; 120 | if (!condition) { 121 | System.out.println("-------- " + infoString); 122 | // Useful debugging harness. 123 | //System.out.println("DEBUGPRINT:"); 124 | //DebugPrint.dumpToFile("/tmp/debug.txt"); 125 | //System.out.println(DebugPrint.out.toString()); 126 | //System.out.println("----- MY RESULT"); 127 | //for ( Result r : resultList ) System.out.println(r); 128 | //System.out.println("---- PREV RESULTZ: " ); 129 | //for ( Result r : prevList ) System.out.println(r); 130 | } 131 | Assert.assertTrue(infoString, condition); 132 | count = nextCount; 133 | //prevList = resultList; 134 | //DebugPrint.reset(); 135 | } 136 | System.out.println("total scans = " + totalScans); 137 | service.shutdown(); 138 | } 139 | 140 | 141 | private static class Writer implements Callable { 142 | 143 | private HTable table; 144 | private byte[] family; 145 | private byte[] qualifier; 146 | private AtomicInteger sequence; 147 | private byte[][] rows; 148 | 149 | private Writer(HTable table, byte[] family, byte[] qualifier, AtomicInteger sequence, byte[][] rows) { 150 | this.table = table; 151 | this.family = family; 152 | this.qualifier = qualifier; 153 | this.sequence = sequence; 154 | this.rows = rows; 155 | } 156 | 157 | @Override 158 | public Object call() throws Exception { 159 | while (true) { 160 | int num = sequence.getAndIncrement(); 161 | if (num % 100 == 0) { 162 | System.out.printf("-- writing row %d\n", num); 163 | } 164 | if (num <= rows.length) { 165 | Put put = new Put(rows[num]); 166 | char[] chars = (FIXED_PART + num % 10).toCharArray(); 167 | put.add(family, qualifier, Bytes.toBytes(chars)); 168 | table.put(put); 169 | } else { 170 | return null; 171 | } 172 | 
//Thread.sleep(0L, 100000); // sleep .1 millis 173 | } 174 | } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/TestIdxMasterAdmin.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2007 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase; 21 | 22 | import org.apache.hadoop.hbase.regionserver.IdxRegion; 23 | 24 | /** 25 | * tests administrative functions 26 | */ 27 | public class TestIdxMasterAdmin extends TestMasterAdmin { 28 | /** 29 | * constructor 30 | */ 31 | public TestIdxMasterAdmin() { 32 | super(); 33 | conf.setClass(HConstants.REGION_IMPL, IdxRegion.class, IdxRegion.class); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/TestWritableHelper.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase; 21 | 22 | import junit.framework.Assert; 23 | import junit.framework.TestCase; 24 | import org.apache.hadoop.hbase.client.idx.exp.Comparison; 25 | import org.apache.hadoop.hbase.client.idx.exp.Expression; 26 | import org.apache.hadoop.hbase.util.Bytes; 27 | import org.apache.hadoop.io.DataInputBuffer; 28 | import org.apache.hadoop.io.DataOutputBuffer; 29 | 30 | import java.io.IOException; 31 | 32 | /** 33 | * Tests the {@link org.apache.hadoop.hbase.WritableHelper}. 34 | */ 35 | public class TestWritableHelper extends TestCase { 36 | /** 37 | * Tests the {@link org.apache.hadoop.hbase.WritableHelper#instanceForName(String, 38 | * Class)} works as expected. 39 | */ 40 | public void testInstanceForName() { 41 | Expression expression = WritableHelper.instanceForName(Comparison.class.getName(), Expression.class); 42 | 43 | Assert.assertNotNull("Instance should not be null", expression); 44 | Assert.assertEquals("Wrong class returned", Comparison.class, expression.getClass()); 45 | } 46 | 47 | /** 48 | * Tests the {@link org.apache.hadoop.hbase.WritableHelper#instanceForName(String, 49 | * Class)} works as expected when an invalid class name is provided. 50 | */ 51 | public void testInstanceForNameInvalidClassName() { 52 | try { 53 | WritableHelper.instanceForName(Comparison.class.getName() + "1", Expression.class); 54 | Assert.fail("An exception should have been thrown"); 55 | } catch (Exception e) { 56 | Assert.assertEquals("Wrong exception was thrown", IllegalArgumentException.class, e.getClass()); 57 | } 58 | } 59 | 60 | /** 61 | * Tests the that {@link org.apache.hadoop.hbase.WritableHelper#writeInstance(java.io.DataOutput, 62 | * org.apache.hadoop.io.Writable)} fails as expected when null is provided. 
63 | * 64 | * @throws IOException if an error occurs 65 | */ 66 | public void testWriteInstanceFailsWithNull() throws IOException { 67 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 68 | try { 69 | WritableHelper.writeInstance(dataOutputBuffer, null); 70 | Assert.fail("Expected an exception"); 71 | } catch (Exception e) { 72 | Assert.assertEquals("Wrong exception thrown when null was provided", IllegalArgumentException.class, e.getClass()); 73 | } 74 | } 75 | 76 | /** 77 | * Tests the that {@link org.apache.hadoop.hbase.WritableHelper#writeInstance(java.io.DataOutput, 78 | * org.apache.hadoop.io.Writable)} and {@link org.apache.hadoop.hbase.WritableHelper#readInstance(java.io.DataInput, 79 | * Class)} works as expected. 80 | * 81 | * @throws IOException if an error occurs 82 | */ 83 | public void testWriteReadInstance() throws IOException { 84 | Expression expression = Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value")); 85 | 86 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 87 | WritableHelper.writeInstance(dataOutputBuffer, expression); 88 | 89 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 90 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 91 | 92 | Expression clonedExpression = WritableHelper.readInstance(dataInputBuffer, Expression.class); 93 | 94 | Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression); 95 | } 96 | 97 | /** 98 | * Tests the that {@link org.apache.hadoop.hbase.WritableHelper#writeInstanceNullable(java.io.DataOutput, 99 | * org.apache.hadoop.io.Writable)} and {@link org.apache.hadoop.hbase.WritableHelper#readInstanceNullable(java.io.DataInput, 100 | * Class)} works as expected. 
101 | * 102 | * @throws IOException if an error occurs 103 | */ 104 | public void testWriteReadInstanceNullable() throws IOException { 105 | Expression expression = Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value")); 106 | 107 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 108 | WritableHelper.writeInstanceNullable(dataOutputBuffer, expression); 109 | 110 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 111 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 112 | 113 | Expression clonedExpression = WritableHelper.readInstanceNullable(dataInputBuffer, Expression.class); 114 | 115 | Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression); 116 | } 117 | 118 | /** 119 | * Tests the that {@link org.apache.hadoop.hbase.WritableHelper#writeInstanceNullable(java.io.DataOutput, 120 | * org.apache.hadoop.io.Writable)} and {@link org.apache.hadoop.hbase.WritableHelper#readInstanceNullable(java.io.DataInput, 121 | * Class)} works as expected when null is provided. 
122 | * 123 | * @throws IOException if an error occurs 124 | */ 125 | public void testWriteReadInstanceNullableWithNull() throws IOException { 126 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 127 | WritableHelper.writeInstanceNullable(dataOutputBuffer, null); 128 | 129 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 130 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 131 | 132 | Expression clonedExpression = WritableHelper.readInstanceNullable(dataInputBuffer, Expression.class); 133 | 134 | Assert.assertNull("A null value was expected", clonedExpression); 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/client/idx/TestIdxColumnDescriptor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.client.idx; 21 | 22 | import junit.framework.Assert; 23 | import junit.framework.TestCase; 24 | import org.apache.hadoop.hbase.HColumnDescriptor; 25 | import org.apache.hadoop.hbase.io.ImmutableBytesWritable; 26 | import org.apache.hadoop.hbase.util.Bytes; 27 | import org.apache.hadoop.io.DataInputBuffer; 28 | import org.apache.hadoop.io.DataOutputBuffer; 29 | 30 | import java.io.IOException; 31 | import java.util.Set; 32 | 33 | /** 34 | * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor}. 35 | */ 36 | public class TestIdxColumnDescriptor extends TestCase { 37 | /** 38 | * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#addIndexDescriptor(IdxIndexDescriptor)} 39 | * method. 40 | */ 41 | public void testAddIndexDescriptor() throws IOException { 42 | IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName"); 43 | IdxIndexDescriptor indexDescriptor = new IdxIndexDescriptor(Bytes.toBytes("qualifer"), IdxQualifierType.INT); 44 | 45 | Assert.assertEquals("The column desciptor should not contain any index descriptors", 46 | 0, descriptor.getIndexDescriptors().size()); 47 | descriptor.addIndexDescriptor(indexDescriptor); 48 | Assert.assertEquals("The column desciptor should contain a index descriptor", 49 | 1, descriptor.getIndexDescriptors().size()); 50 | } 51 | 52 | /** 53 | * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#addIndexDescriptor(IdxIndexDescriptor)} 54 | * method when an index descriptor already exists for the qualifier. 
55 | */ 56 | public void testAddIndexDescriptorWithExisting() throws IOException { 57 | IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName"); 58 | IdxIndexDescriptor indexDescriptor = new IdxIndexDescriptor(Bytes.toBytes("qualifer"), IdxQualifierType.INT); 59 | IdxIndexDescriptor indexDescriptor2 = new IdxIndexDescriptor(Bytes.toBytes("qualifer"), IdxQualifierType.LONG); 60 | 61 | descriptor.addIndexDescriptor(indexDescriptor); 62 | try { 63 | descriptor.addIndexDescriptor(indexDescriptor2); 64 | Assert.fail("An exception should have been thrown"); 65 | } catch (Exception e) { 66 | Assert.assertEquals("Wrong exception thrown", IllegalArgumentException.class, e.getClass()); 67 | } 68 | } 69 | 70 | /** 71 | * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#removeIndexDescriptor(byte[])} 72 | * method. 73 | */ 74 | public void testRemoveIndexDescriptor() throws IOException { 75 | IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName"); 76 | IdxIndexDescriptor indexDescriptor = new IdxIndexDescriptor(Bytes.toBytes("qualifer"), IdxQualifierType.INT); 77 | 78 | Assert.assertEquals("The column desciptor should not contain any index descriptors", 79 | 0, descriptor.getIndexDescriptors().size()); 80 | descriptor.addIndexDescriptor(indexDescriptor); 81 | Assert.assertEquals("The column desciptor should contain a index descriptor", 82 | 1, descriptor.getIndexDescriptors().size()); 83 | Assert.assertTrue("The remove method should have returned true", 84 | descriptor.removeIndexDescriptor(Bytes.toBytes("qualifer"))); 85 | Assert.assertEquals("The column desciptor should not contain any index descriptors", 86 | 0, descriptor.getIndexDescriptors().size()); 87 | } 88 | 89 | /** 90 | * Tests the {@link IdxColumnDescriptor#getIndexedQualifiers()} method when an 91 | * index descriptor already exists for the qualifier. 
92 | */ 93 | public void testGetIndexedQualifiers() throws IOException { 94 | IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName"); 95 | byte[] qualifierName1 = Bytes.toBytes("qualifer1"); 96 | IdxIndexDescriptor indexDescriptor1 97 | = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT); 98 | byte[] qualifierName2 = Bytes.toBytes("qualifer2"); 99 | IdxIndexDescriptor indexDescriptor2 100 | = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.LONG); 101 | 102 | descriptor.addIndexDescriptor(indexDescriptor1); 103 | descriptor.addIndexDescriptor(indexDescriptor2); 104 | 105 | Set indexedQualifiers = descriptor.getIndexedQualifiers(); 106 | Assert.assertNotNull("The set of indexed qualifiers should not be null", 107 | indexedQualifiers); 108 | Assert.assertEquals("The column desciptor should contain index qualifiers", 109 | 2, indexedQualifiers.size()); 110 | 111 | Assert.assertTrue("The set of indexed qualifiers should contain the key", 112 | indexedQualifiers.contains(new ImmutableBytesWritable(qualifierName1))); 113 | Assert.assertTrue("The set of indexed qualifiers should contain the key", 114 | indexedQualifiers.contains(new ImmutableBytesWritable(qualifierName2))); 115 | } 116 | 117 | /** 118 | * Tests the {@link IdxColumnDescriptor#write(java.io.DataOutput)} and {@link 119 | * org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#readFields(java.io.DataInput)} 120 | * methods. 
121 | * 122 | * @throws java.io.IOException if an error occurs 123 | */ 124 | public void testWritable() throws IOException { 125 | IdxColumnDescriptor descriptor = new IdxColumnDescriptor("familyName"); 126 | byte[] qualifierName1 = Bytes.toBytes("qualifer1"); 127 | IdxIndexDescriptor indexDescriptor1 128 | = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT); 129 | byte[] qualifierName2 = Bytes.toBytes("qualifer2"); 130 | IdxIndexDescriptor indexDescriptor2 131 | = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.LONG); 132 | 133 | descriptor.addIndexDescriptor(indexDescriptor1); 134 | descriptor.addIndexDescriptor(indexDescriptor2); 135 | 136 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 137 | descriptor.write(dataOutputBuffer); 138 | 139 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 140 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 141 | 142 | IdxColumnDescriptor clonedDescriptor = new IdxColumnDescriptor(); 143 | clonedDescriptor.readFields(dataInputBuffer); 144 | 145 | Assert.assertEquals("The expression was not the same after being written and read", descriptor, clonedDescriptor); 146 | } 147 | 148 | /** 149 | * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#compareTo(org.apache.hadoop.hbase.HColumnDescriptor)} 150 | * method when the two instances are the same. 
151 | */ 152 | public void testCompareToWhenSame() throws IOException { 153 | IdxColumnDescriptor descriptor1 = new IdxColumnDescriptor("familyName1"); 154 | byte[] qualifierName1 = Bytes.toBytes("qualifer1"); 155 | IdxIndexDescriptor indexDescriptor1 156 | = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT); 157 | descriptor1.addIndexDescriptor(indexDescriptor1); 158 | 159 | IdxColumnDescriptor descriptor2 = new IdxColumnDescriptor("familyName1"); 160 | byte[] qualifierName2 = Bytes.toBytes("qualifer1"); 161 | IdxIndexDescriptor indexDescriptor2 162 | = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.INT); 163 | descriptor2.addIndexDescriptor(indexDescriptor2); 164 | 165 | Assert.assertTrue("The compare to should have returned 0", descriptor1.compareTo(descriptor2) == 0); 166 | } 167 | 168 | /** 169 | * Tests the {@link org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor#compareTo(org.apache.hadoop.hbase.HColumnDescriptor)} 170 | * method when the two instances are different. 171 | */ 172 | public void testCompareToWhenDifferent() throws IOException { 173 | IdxColumnDescriptor descriptor1 = new IdxColumnDescriptor("familyName1"); 174 | byte[] qualifierName1 = Bytes.toBytes("qualifer1"); 175 | IdxIndexDescriptor indexDescriptor1 176 | = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT); 177 | descriptor1.addIndexDescriptor(indexDescriptor1); 178 | 179 | IdxColumnDescriptor descriptor2 = new IdxColumnDescriptor("familyName2"); 180 | byte[] qualifierName2 = Bytes.toBytes("qualifer2"); 181 | IdxIndexDescriptor indexDescriptor2 182 | = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.INT); 183 | descriptor2.addIndexDescriptor(indexDescriptor2); 184 | 185 | Assert.assertTrue("The compare to should not have returned 0", descriptor1.compareTo(descriptor2) != 0); 186 | } 187 | 188 | /** 189 | * Tests that two column descriptors are equal when the Idx decorator isn't being used. 
190 | */ 191 | public void testCompareToWithoutDecorator() throws IOException { 192 | IdxColumnDescriptor descriptor1 = new IdxColumnDescriptor("familyName1"); 193 | byte[] qualifierName1 = Bytes.toBytes("qualifer1"); 194 | IdxIndexDescriptor indexDescriptor1 195 | = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT); 196 | descriptor1.addIndexDescriptor(indexDescriptor1); 197 | 198 | IdxColumnDescriptor descriptor2 = new IdxColumnDescriptor("familyName2"); 199 | byte[] qualifierName2 = Bytes.toBytes("qualifer2"); 200 | IdxIndexDescriptor indexDescriptor2 201 | = new IdxIndexDescriptor(qualifierName2, IdxQualifierType.INT); 202 | descriptor2.addIndexDescriptor(indexDescriptor2); 203 | 204 | HColumnDescriptor descriptor2WithoutDec = new HColumnDescriptor(descriptor2); 205 | 206 | Assert.assertTrue("The compare to should not have returned 0", descriptor1.compareTo(descriptor2WithoutDec) != 0); 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/client/idx/TestIdxIndexDescriptor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.client.idx; 21 | 22 | import junit.framework.Assert; 23 | import junit.framework.TestCase; 24 | import org.apache.hadoop.hbase.util.Bytes; 25 | import org.apache.hadoop.io.DataInputBuffer; 26 | import org.apache.hadoop.io.DataOutputBuffer; 27 | 28 | import java.io.IOException; 29 | 30 | /** 31 | * Tests the {@link IdxIndexDescriptor} class. 32 | */ 33 | public class TestIdxIndexDescriptor extends TestCase { 34 | /** 35 | * Tests the {@link IdxIndexDescriptor#write(java.io.DataOutput)} and {@link 36 | * org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor#readFields(java.io.DataInput)} 37 | * methods. 38 | * 39 | * @throws java.io.IOException if an error occurs 40 | */ 41 | public void testWritable() throws IOException { 42 | final int repeatCount = 3; 43 | IdxIndexDescriptor descriptor = createIdxIndexDescriptor(); 44 | 45 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 46 | for (int i = 0; i < repeatCount; i++) { 47 | descriptor.write(dataOutputBuffer); 48 | } 49 | 50 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 51 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 52 | 53 | for (int i = 0; i < repeatCount; i++) { 54 | IdxIndexDescriptor clonedDescriptor = new IdxIndexDescriptor(); 55 | clonedDescriptor.readFields(dataInputBuffer); 56 | Assert.assertEquals("The descriptor was not the same after being written and " + 57 | "read attempt=" + i, descriptor, clonedDescriptor); 58 | } 59 | } 60 | 61 | /** 62 | * Tests the equals method. 
63 | */ 64 | public void testEquals() { 65 | IdxIndexDescriptor ix1 = createIdxIndexDescriptor(); 66 | 67 | IdxIndexDescriptor ix2 = createIdxIndexDescriptor(); 68 | Assert.assertEquals(ix1, ix2); 69 | 70 | ix2.getQualifierName()[0] = 9; 71 | Assert.assertFalse(ix1.equals(ix2)); 72 | 73 | ix2 = createIdxIndexDescriptor(); 74 | ix2.setQualifierType(IdxQualifierType.LONG); 75 | Assert.assertFalse(ix1.equals(ix2)); 76 | 77 | ix2 = createIdxIndexDescriptor(); 78 | ix2.setOffset(1); 79 | Assert.assertFalse(ix1.equals(ix2)); 80 | 81 | ix2 = createIdxIndexDescriptor(); 82 | ix2.setLength(-1); 83 | Assert.assertFalse(ix1.equals(ix2)); 84 | 85 | } 86 | 87 | private static IdxIndexDescriptor createIdxIndexDescriptor() { 88 | byte[] qualifierName1 = Bytes.toBytes("qualifer1"); 89 | IdxIndexDescriptor descriptor 90 | = new IdxIndexDescriptor(qualifierName1, IdxQualifierType.INT); 91 | descriptor.setLength(4); 92 | descriptor.setOffset(2); 93 | return descriptor; 94 | } 95 | 96 | 97 | } 98 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/client/idx/TestIdxScan.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.client.idx; 21 | 22 | import junit.framework.Assert; 23 | import junit.framework.TestCase; 24 | import org.apache.hadoop.hbase.client.idx.exp.Comparison; 25 | import org.apache.hadoop.hbase.client.idx.exp.Expression; 26 | import org.apache.hadoop.hbase.util.Bytes; 27 | import org.apache.hadoop.io.DataInputBuffer; 28 | import org.apache.hadoop.io.DataOutputBuffer; 29 | 30 | import java.io.IOException; 31 | 32 | /** 33 | * Tests the {@link IdxScan} class. 34 | */ 35 | public class TestIdxScan extends TestCase { 36 | 37 | /** 38 | * Tests that the writable and readFields methods work as expected. 39 | * 40 | * @throws java.io.IOException if an IO error occurs 41 | */ 42 | public void testWritable() throws IOException { 43 | Expression expression = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value")); 44 | 45 | IdxScan idxScan = new IdxScan(expression); 46 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 47 | idxScan.write(dataOutputBuffer); 48 | 49 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 50 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 51 | 52 | IdxScan clonedScan = new IdxScan(); 53 | clonedScan.readFields(dataInputBuffer); 54 | 55 | Assert.assertEquals("The expression was not the same after being written and read", idxScan.getExpression(), clonedScan.getExpression()); 56 | } 57 | 58 | /** 59 | * Tests that the writable and readFields methods work as expected. 
60 | * 61 | * @throws java.io.IOException if an IO error occurs 62 | */ 63 | public void testWritableNullExpression() throws IOException { 64 | IdxScan idxScan = new IdxScan(); 65 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 66 | idxScan.write(dataOutputBuffer); 67 | 68 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 69 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 70 | 71 | IdxScan clonedScan = new IdxScan(); 72 | clonedScan.readFields(dataInputBuffer); 73 | 74 | Assert.assertEquals("The expression was not the same after being written and read", idxScan.getExpression(), clonedScan.getExpression()); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/client/idx/exp/TestComparison.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | 21 | package org.apache.hadoop.hbase.client.idx.exp; 22 | 23 | import junit.framework.Assert; 24 | import junit.framework.TestCase; 25 | import org.apache.hadoop.hbase.util.Bytes; 26 | import org.apache.hadoop.io.DataInputBuffer; 27 | import org.apache.hadoop.io.DataOutputBuffer; 28 | 29 | import java.io.IOException; 30 | 31 | /** 32 | * Tests the expression class. 33 | */ 34 | public class TestComparison extends TestCase { 35 | /** 36 | * Tests the constuctor. 37 | */ 38 | public void testConstructor() { 39 | byte[] columnName1 = Bytes.toBytes("columnName1"); 40 | byte[] qualifer1 = Bytes.toBytes("qualifier1"); 41 | byte[] value1 = Bytes.toBytes("value1"); 42 | Comparison.Operator operator1 = Comparison.Operator.EQ; 43 | 44 | Comparison comparison = new Comparison(columnName1, qualifer1, operator1, value1); 45 | 46 | Assert.assertEquals("columnName was incorrect", columnName1, comparison.getColumnName()); 47 | Assert.assertEquals("qualifier was incorrect", qualifer1, comparison.getQualifier()); 48 | Assert.assertEquals("value was incorrect", value1, comparison.getValue()); 49 | Assert.assertEquals("operator was incorrect", operator1, comparison.getOperator()); 50 | } 51 | 52 | /** 53 | * Tests that the equals method works. 54 | */ 55 | public void testEquals() { 56 | Expression expression1 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value")); 57 | Expression expression2 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value")); 58 | 59 | Assert.assertTrue("equals didn't work as expected", expression1.equals(expression2)); 60 | } 61 | 62 | /** 63 | * Tests that the equals method works. 
64 | */ 65 | public void testEqualsFalse() { 66 | Expression expression1 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("value")); 67 | Expression expression2 = Expression.comparison("columnName", "qualifier", Comparison.Operator.EQ, Bytes.toBytes("othervalue")); 68 | 69 | Assert.assertFalse("equals didn't work as expected", expression1.equals(expression2)); 70 | } 71 | 72 | /** 73 | * Tests the an comparison can be written and read and still be equal. 74 | * 75 | * @throws java.io.IOException if an io error occurs 76 | */ 77 | public void testWritable() throws IOException { 78 | Expression expression = Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value")); 79 | 80 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 81 | expression.write(dataOutputBuffer); 82 | 83 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 84 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 85 | 86 | Expression clonedExpression = new Comparison(); 87 | clonedExpression.readFields(dataInputBuffer); 88 | 89 | Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression); 90 | } 91 | } -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/client/idx/exp/TestExpression.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.client.idx.exp; 21 | 22 | import junit.framework.Assert; 23 | import junit.framework.TestCase; 24 | import org.apache.hadoop.hbase.util.Bytes; 25 | import org.apache.hadoop.io.DataInputBuffer; 26 | import org.apache.hadoop.io.DataOutputBuffer; 27 | 28 | import java.io.IOException; 29 | 30 | /** 31 | * Tests the expression class. 32 | */ 33 | public class TestExpression extends TestCase { 34 | /** 35 | * Tests that the methods to build an expression all result in equal instances 36 | * when provided the same input. 
37 | */ 38 | public void testExpressionBuilder() { 39 | String columnName1 = "columnName1"; 40 | String qualifer1 = "qualifier1"; 41 | byte[] value1 = Bytes.toBytes("value1"); 42 | Comparison.Operator operator1 = Comparison.Operator.EQ; 43 | 44 | String columnName2 = "columnName2"; 45 | String qualifer2 = "qualifier2"; 46 | byte[] value2 = Bytes.toBytes("value2"); 47 | Comparison.Operator operator2 = Comparison.Operator.GT; 48 | 49 | String columnName3 = "columnName3"; 50 | String qualifer3 = "qualifier3"; 51 | byte[] value3 = Bytes.toBytes("value3"); 52 | Comparison.Operator operator3 = Comparison.Operator.LT; 53 | 54 | Expression expression1 = new Or( 55 | new Comparison(columnName1, qualifer1, operator1, value1), 56 | new And( 57 | new Comparison(columnName2, qualifer2, operator2, value2), 58 | new Comparison(columnName3, qualifer3, operator3, value3) 59 | ) 60 | ); 61 | 62 | Expression expression2 = Expression 63 | .or( 64 | Expression.comparison(columnName1, qualifer1, operator1, value1) 65 | ) 66 | .or( 67 | Expression.and() 68 | .and(Expression.comparison(columnName2, qualifer2, operator2, value2)) 69 | .and(Expression.comparison(columnName3, qualifer3, operator3, value3)) 70 | ); 71 | 72 | Expression expression3 = Expression.or( 73 | Expression.comparison(columnName1, qualifer1, operator1, value1), 74 | Expression.and( 75 | Expression.comparison(columnName2, qualifer2, operator2, value2), 76 | Expression.comparison(columnName3, qualifer3, operator3, value3) 77 | ) 78 | ); 79 | 80 | Assert.assertTrue("The expressions didn't match", expression1.equals(expression2) && expression1.equals(expression3)); 81 | } 82 | 83 | /** 84 | * Tests the an expression tree can be written and read and still be equal. 
85 | * 86 | * @throws java.io.IOException if an io error occurs 87 | */ 88 | public void testWritable() throws IOException { 89 | Expression expression = Expression.or( 90 | Expression.comparison("columnName1", "qualifier1", Comparison.Operator.EQ, Bytes.toBytes("value")), 91 | Expression.and( 92 | Expression.comparison("columnName2", "qualifier2", Comparison.Operator.GT, Bytes.toBytes("value2")), 93 | Expression.comparison("columnName3", "qualifier3", Comparison.Operator.LT, Bytes.toBytes("value3")) 94 | ) 95 | ); 96 | 97 | DataOutputBuffer dataOutputBuffer = new DataOutputBuffer(); 98 | expression.write(dataOutputBuffer); 99 | 100 | DataInputBuffer dataInputBuffer = new DataInputBuffer(); 101 | dataInputBuffer.reset(dataOutputBuffer.getData(), dataOutputBuffer.getLength()); 102 | 103 | Expression clonedExpression = new Or(); 104 | clonedExpression.readFields(dataInputBuffer); 105 | 106 | Assert.assertEquals("The expression was not the same after being written and read", expression, clonedExpression); 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/HeapSizeEstimator.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. 
You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor;
import org.apache.hadoop.hbase.client.idx.IdxQualifierType;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * A simple utility to help estimate the heap size of indexes.
 */
public class HeapSizeEstimator {

  /**
   * Builds a complete LONG index with the requested number of entries and
   * prints its heap size alongside the JVM's total heap.
   *
   * @param args a single argument: the number of entries to index
   */
  public static void main(String[] args) {
    if (args.length != 1) {
      System.err.println("Usage: java o.a.h.h.HeapSizeEstimator [num entries]");
      // Bug fix: previously execution fell through after printing usage and
      // args[0] below threw an ArrayIndexOutOfBoundsException.
      return;
    }

    int entries = Integer.parseInt(args[0]);

    final byte[] family = Bytes.toBytes("family");
    final byte[] qualifier = Bytes.toBytes("qualifier");
    HColumnDescriptor columnDescriptor = new HColumnDescriptor(family);
    CompleteIndexBuilder completeIndexBuilder = new CompleteIndexBuilder(columnDescriptor, new IdxIndexDescriptor(qualifier, IdxQualifierType.LONG));
    // Row key, value and the index id all derive from the loop counter so
    // every entry is distinct.
    for (long i = 0; i < entries; i++) {
      completeIndexBuilder.addKeyValue(new KeyValue(Bytes.toBytes(i), family, qualifier, Bytes.toBytes(i)), (int) i);
    }
    CompleteIndex ix = (CompleteIndex) completeIndexBuilder.finalizeIndex(entries);
    System.out.printf("index heap size=%d bytes, total heap=%d\n", ix.heapSize(), Runtime.getRuntime().totalMemory());
  }
}

--------------------------------------------------------------------------------
/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithIdxRegion.java:
--------------------------------------------------------------------------------
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor;
import org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor;
import org.apache.hadoop.hbase.client.idx.IdxQualifierType;

import java.io.IOException;

/**
 * Tests that an IdxRegion is compatible with an HRegion when byte-array
 * indexes are defined on every column family.
 * (The previous javadoc said "no indexes defined" — a copy-paste from
 * TestHRegionWithIdxRegionNoIndexes, which covers that case.)
 */
public class TestHRegionWithIdxRegion extends TestHRegion {

  /**
   * Override the HRegion initialization method.
   *
   * @param tableName     the table name
   * @param callingMethod the calling method
   * @param conf          the conf (augmented with the IdxRegion as the
   *                      REGION_IMPL)
   * @param families      the families
   * @throws IOException exception
   */
  @Override
  protected void initHRegion(byte[] tableName, String callingMethod,
                             HBaseConfiguration conf, byte[]... families) throws IOException {
    conf.set(HConstants.REGION_IMPL, IdxRegion.class.getName());
    super.initHRegion(tableName, callingMethod, conf, families);
  }

  @Override
  protected HTableDescriptor constructTableDescriptor(byte[] tableName,
                                                      byte[]... families) {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      try {
        IdxColumnDescriptor icd = new IdxColumnDescriptor(family);
        icd.addIndexDescriptor(new IdxIndexDescriptor(qual1,
          IdxQualifierType.BYTE_ARRAY));
        icd.addIndexDescriptor(new IdxIndexDescriptor(qual2,
          IdxQualifierType.BYTE_ARRAY));
        icd.addIndexDescriptor(new IdxIndexDescriptor(qual3,
          IdxQualifierType.BYTE_ARRAY));
        htd.addFamily(icd);
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
    }
    return htd;
  }

  @Override
  public void testWritesWhileScanning() throws IOException, InterruptedException {
    // Disabled due to intermittent failures todo fix
  }

  @Override
  public void testWritesWhileGetting() throws IOException, InterruptedException {
    // Disabled due to intermittent failures todo fix
  }

  @Override
  public void testGetScanner_WithNoFamilies() throws IOException {
    //Disable, parent test is broken todo submit JIRA and discuss
  }
}
--------------------------------------------------------------------------------
/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithIdxRegionNoIndexes.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver; 21 | 22 | import org.apache.hadoop.hbase.HBaseConfiguration; 23 | import org.apache.hadoop.hbase.HConstants; 24 | 25 | import java.io.IOException; 26 | 27 | /** 28 | * Tests that an IdxRegion is compatible with an HRegion when there are no 29 | * indexes defined. 30 | */ 31 | public class TestHRegionWithIdxRegionNoIndexes extends TestHRegion { 32 | 33 | /** 34 | * Override the HRegion inistialization method 35 | * 36 | * @param tableName the table name 37 | * @param callingMethod the calling method 38 | * @param conf the conf (augmented with the IdxRegion as the 39 | * REGION_IMPL 40 | * @param families the families 41 | * @throws IOException exception 42 | */ 43 | @Override 44 | protected void initHRegion(byte[] tableName, String callingMethod, 45 | HBaseConfiguration conf, byte[]... 
families) throws IOException { 46 | conf.set(HConstants.REGION_IMPL, IdxRegion.class.getName()); 47 | super.initHRegion(tableName, callingMethod, conf, families); 48 | } 49 | 50 | @Override 51 | public void testWritesWhileScanning() throws IOException, InterruptedException { 52 | // Disabled due to intermittent failures todo fix 53 | } 54 | 55 | @Override 56 | public void testGetScanner_WithNoFamilies() throws IOException { 57 | //Disable, parent test is broken todo submit JIRA and discuss 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/TestIdxRegionIndexManager.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver; 21 | 22 | import org.apache.hadoop.hbase.HBaseTestCase; 23 | import org.apache.hadoop.hbase.regionserver.idx.support.IdxClassSize; 24 | import org.apache.hadoop.hbase.util.ClassSize; 25 | 26 | /** 27 | * Tests for the index manager. 
28 | */ 29 | public class TestIdxRegionIndexManager extends HBaseTestCase { 30 | 31 | /** 32 | * Verifies the fixed heap size. 33 | */ 34 | public void testHeapSize() { 35 | assertEquals(IdxRegionIndexManager.FIXED_SIZE, 36 | ClassSize.estimateBase(IdxRegionIndexManager.class, true) + 37 | IdxClassSize.HASHMAP + IdxClassSize.OBJECT_ARRAY_LIST + 38 | ClassSize.REENTRANT_LOCK); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/TestIdxRegionMBeanImpl.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | 21 | package org.apache.hadoop.hbase.regionserver; 22 | 23 | import junit.framework.TestCase; 24 | import org.apache.hadoop.hbase.HRegionInfo; 25 | import org.apache.hadoop.hbase.HTableDescriptor; 26 | 27 | /** 28 | * Tests the {@link org.apache.hadoop.hbase.regionserver.IdxRegionMBeanImpl} class. 
29 | */ 30 | public class TestIdxRegionMBeanImpl extends TestCase { 31 | /** 32 | * Ensures that the special bytes potentially contained in the start and end 33 | * rows are encoded. 34 | */ 35 | public void testGenerateObjectNameWithInvalidValueInKey() { 36 | HRegionInfo info = new HRegionInfo( 37 | new HTableDescriptor("foo"), 38 | new byte[]{'"'}, 39 | new byte[]{0, ','} 40 | ); 41 | IdxRegionMBeanImpl.generateObjectName(info); 42 | } 43 | 44 | /** 45 | * Ensures that the HTableDescriptor doesn't allow special chars in the table 46 | * name. This is redundant but it's here just incase the HTableDescriptor 47 | * changes. 48 | */ 49 | public void testGenerateObjectNameWithInvalidValueName() { 50 | try { 51 | HRegionInfo info = new HRegionInfo( 52 | new HTableDescriptor("foo,%="), 53 | new byte[]{'"'}, 54 | new byte[]{0, ','} 55 | ); 56 | } catch (IllegalArgumentException e) { 57 | // expected 58 | } 59 | } 60 | } -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/TestIdxRegionPerformance.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver; 21 | 22 | import org.apache.hadoop.fs.Path; 23 | import org.apache.hadoop.hbase.HBaseTestCase; 24 | import org.apache.hadoop.hbase.HRegionInfo; 25 | import org.apache.hadoop.hbase.HTableDescriptor; 26 | import org.apache.hadoop.hbase.KeyValue; 27 | import org.apache.hadoop.hbase.client.Put; 28 | import org.apache.hadoop.hbase.client.idx.IdxColumnDescriptor; 29 | import org.apache.hadoop.hbase.client.idx.IdxIndexDescriptor; 30 | import org.apache.hadoop.hbase.client.idx.IdxQualifierType; 31 | import org.apache.hadoop.hbase.client.idx.IdxScan; 32 | import org.apache.hadoop.hbase.client.idx.exp.Comparison; 33 | import org.apache.hadoop.hbase.client.idx.exp.Expression; 34 | import org.apache.hadoop.hbase.filter.CompareFilter; 35 | import org.apache.hadoop.hbase.filter.Filter; 36 | import org.apache.hadoop.hbase.filter.FilterList; 37 | import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; 38 | import org.apache.hadoop.hbase.util.Bytes; 39 | 40 | import java.io.IOException; 41 | import java.util.ArrayList; 42 | import java.util.Arrays; 43 | import java.util.List; 44 | import java.util.Random; 45 | 46 | /** 47 | * Tests/Demonstrates performance compared to good-old HRegion. 48 | */ 49 | public class TestIdxRegionPerformance extends HBaseTestCase { 50 | 51 | private final String DIR = "test/build/data/TestIdxRegionPerformance/"; 52 | private static final byte[] FAMILY_1_NAME = Bytes.toBytes("family1"); 53 | private static final byte[] INT_QUAL_NAME = Bytes.toBytes("intQual"); 54 | private static final byte[] BYTES_QUAL_NAME = Bytes.toBytes("bytesQual"); 55 | private static final byte[] FAMILY_2_NAME = Bytes.toBytes("family2"); 56 | private static final byte[] CHARS_QUAL_NAME = Bytes.toBytes("charsQual"); 57 | 58 | /** 59 | * Compares the Idx region performance with the HRegion performance. 
60 | * 61 | * @throws java.io.IOException in case of an IO error 62 | */ 63 | public void testIdxRegionPerformance() throws IOException { 64 | IdxColumnDescriptor family1 = new IdxColumnDescriptor(FAMILY_1_NAME); 65 | family1.addIndexDescriptor(new IdxIndexDescriptor(INT_QUAL_NAME, 66 | IdxQualifierType.INT)); 67 | family1.addIndexDescriptor(new IdxIndexDescriptor(BYTES_QUAL_NAME, 68 | IdxQualifierType.BYTE_ARRAY)); 69 | 70 | IdxColumnDescriptor family2 = new IdxColumnDescriptor(FAMILY_2_NAME); 71 | family2.addIndexDescriptor(new IdxIndexDescriptor(CHARS_QUAL_NAME, 72 | IdxQualifierType.CHAR_ARRAY)); 73 | 74 | HTableDescriptor htableDescriptor 75 | = new HTableDescriptor("testIdxRegionPerformance"); 76 | htableDescriptor.addFamily(family1); 77 | htableDescriptor.addFamily(family2); 78 | HRegionInfo info = new HRegionInfo(htableDescriptor, null, null, false); 79 | Path path = new Path(DIR + htableDescriptor.getNameAsString()); 80 | IdxRegion region = TestIdxRegion.createIdxRegion(info, path, conf); 81 | 82 | int numberOfRows = 10000; 83 | 84 | Random random = new Random(2112L); 85 | for (int row = 0; row < numberOfRows; row++) { 86 | Put put = new Put(Bytes.toBytes(random.nextLong())); 87 | put.add(FAMILY_1_NAME, INT_QUAL_NAME, Bytes.toBytes(row)); 88 | final String str = String.format("%010d", row % 1000); 89 | put.add(FAMILY_1_NAME, BYTES_QUAL_NAME, str.getBytes()); 90 | put.add(FAMILY_2_NAME, CHARS_QUAL_NAME, Bytes.toBytes(str.toCharArray())); 91 | region.put(put); 92 | } 93 | 94 | region.flushcache(); 95 | 96 | final byte[] intValue = Bytes.toBytes(numberOfRows - numberOfRows / 5); 97 | final byte[] charsValue = 98 | Bytes.toBytes(String.format("%010d", 50).toCharArray()); 99 | final byte[] bytesValue = String.format("%010d", 990).getBytes(); 100 | 101 | IdxScan scan = new IdxScan(); 102 | scan.setExpression(Expression.or( 103 | Expression.and( 104 | Expression.comparison(FAMILY_1_NAME, INT_QUAL_NAME, 105 | Comparison.Operator.GTE, intValue), 106 | 
Expression.comparison(FAMILY_2_NAME, CHARS_QUAL_NAME, 107 | Comparison.Operator.LT, charsValue)), 108 | Expression.comparison(FAMILY_1_NAME, BYTES_QUAL_NAME, 109 | Comparison.Operator.GTE, bytesValue))); 110 | 111 | scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ONE, 112 | Arrays.asList( 113 | new FilterList(FilterList.Operator.MUST_PASS_ALL, 114 | Arrays.asList( 115 | new SingleColumnValueFilter(FAMILY_1_NAME, INT_QUAL_NAME, 116 | CompareFilter.CompareOp.GREATER_OR_EQUAL, intValue), 117 | new SingleColumnValueFilter(FAMILY_2_NAME, CHARS_QUAL_NAME, 118 | CompareFilter.CompareOp.LESS, charsValue))), 119 | new SingleColumnValueFilter(FAMILY_1_NAME, BYTES_QUAL_NAME, 120 | CompareFilter.CompareOp.GREATER_OR_EQUAL, bytesValue) 121 | ))); 122 | 123 | // scan for two percent of the region 124 | int expectedNumberOfResults = numberOfRows / 50; 125 | 126 | long start = System.currentTimeMillis(); 127 | InternalScanner scanner = region.getScanner(scan); 128 | List results = new ArrayList(expectedNumberOfResults); 129 | int actualResults = 0; 130 | while (scanner.next(results)) { 131 | assertEquals(3, results.size()); 132 | results.clear(); 133 | actualResults++; 134 | } 135 | System.out.println("Total (millis) for scanning " + 136 | "for 2% of the region using indexed scan: " + 137 | (System.currentTimeMillis() - start)); 138 | assertEquals(expectedNumberOfResults, actualResults); 139 | } 140 | 141 | 142 | } 143 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/idx/support/TestBits.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. 
The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support; 21 | 22 | import junit.framework.Assert; 23 | import org.apache.hadoop.hbase.HBaseTestCase; 24 | 25 | import java.math.BigDecimal; 26 | import java.util.Random; 27 | 28 | /** 29 | * Tests for the bits algorithms. 30 | */ 31 | public class TestBits extends HBaseTestCase { 32 | private static final int PERFORMANCE_COUNT = 10000; 33 | private static final BigDecimal ONE_MILLION = new BigDecimal(1000000); 34 | private static final int WARMUP_COUNT = 1000; 35 | 36 | public void testLowestSetBitIndex() { 37 | for (long word = 1L; word < 1000000l; word++) { 38 | Assert.assertEquals("word=" + word, Bits.lowestSetBitIndex(word), 39 | Long.numberOfTrailingZeros(word)); 40 | long highWord = Long.MAX_VALUE - (word - 1) * 17; 41 | Assert.assertEquals("word=" + highWord, Bits.lowestSetBitIndex(highWord), 42 | Long.numberOfTrailingZeros(highWord)); 43 | } 44 | } 45 | 46 | public void testHighestSetBitIndex() { 47 | for (long word = 1L; word < 1000000l; word++) { 48 | Assert.assertEquals("word=" + word, Bits.highestSetBitIndex(word), 49 | 63 - Long.numberOfLeadingZeros(word)); 50 | long highWord = Long.MAX_VALUE - (word - 1) * 11; 51 | Assert.assertEquals("word=" + highWord, Bits.highestSetBitIndex(highWord), 52 | 63 - Long.numberOfLeadingZeros(highWord)); 53 | } 54 | } 55 | 56 | 57 | public void 
testLowestSetBitPerformance() { 58 | long[] randomSequence = new long[PERFORMANCE_COUNT]; 59 | Random random = new Random(); 60 | for (int i = 0; i < randomSequence.length; i++) { 61 | randomSequence[i] = random.nextLong(); 62 | } 63 | 64 | for (int i = 0; i < WARMUP_COUNT; i++) { 65 | Long.numberOfTrailingZeros(random.nextLong()); 66 | } 67 | 68 | long start = System.nanoTime(); 69 | for (int i = 0; i < randomSequence.length; i++) { 70 | Long.numberOfTrailingZeros(randomSequence[i]); 71 | } 72 | long finish = System.nanoTime(); 73 | long nanos = (finish - start) / PERFORMANCE_COUNT; 74 | System.out.printf("Long.numberOfTrailingZeros: %s\n", 75 | new BigDecimal(nanos).divide(ONE_MILLION) + ";"); 76 | 77 | for (int i = 0; i < WARMUP_COUNT; i++) { 78 | Bits.lowestSetBitIndex(random.nextLong()); 79 | } 80 | 81 | 82 | start = System.nanoTime(); 83 | for (int i = 0; i < randomSequence.length; i++) { 84 | Bits.lowestSetBitIndex(randomSequence[i]); 85 | } 86 | finish = System.nanoTime(); 87 | nanos = (finish - start) / PERFORMANCE_COUNT; 88 | System.out.printf("Bits.lowestSetBitIndex: %s\n", 89 | new BigDecimal(nanos).divide(ONE_MILLION) + ";"); 90 | } 91 | 92 | public void testHigestSetBitPerformance() { 93 | long[] randomSequence = new long[PERFORMANCE_COUNT]; 94 | Random random = new Random(); 95 | for (int i = 0; i < randomSequence.length; i++) { 96 | randomSequence[i] = random.nextLong(); 97 | } 98 | 99 | for (int i = 0; i < WARMUP_COUNT; i++) { 100 | Long.numberOfLeadingZeros(random.nextLong()); 101 | } 102 | 103 | long start = System.nanoTime(); 104 | for (int i = 0; i < randomSequence.length; i++) { 105 | Long.numberOfLeadingZeros(randomSequence[i]); 106 | } 107 | long finish = System.nanoTime(); 108 | long nanos = (finish - start) / PERFORMANCE_COUNT; 109 | System.out.printf("Long.numberOfLeadingZeros: %s\n", 110 | new BigDecimal(nanos).divide(ONE_MILLION) + ";"); 111 | 112 | for (int i = 0; i < WARMUP_COUNT; i++) { 113 | 
Bits.highestSetBitIndex(random.nextLong()); 114 | } 115 | 116 | 117 | start = System.nanoTime(); 118 | for (int i = 0; i < randomSequence.length; i++) { 119 | Bits.highestSetBitIndex(randomSequence[i]); 120 | } 121 | finish = System.nanoTime(); 122 | nanos = (finish - start) / PERFORMANCE_COUNT; 123 | System.out.printf("Bits.highestSetBitIndex: %s\n", 124 | new BigDecimal(nanos).divide(ONE_MILLION) + ";"); 125 | } 126 | 127 | } 128 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/idx/support/TestIdxClassSize.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support; 21 | 22 | import org.apache.hadoop.hbase.HBaseTestCase; 23 | import org.apache.hadoop.hbase.regionserver.idx.support.arrays.ObjectArrayList; 24 | import org.apache.hadoop.hbase.util.ClassSize; 25 | 26 | import java.util.HashMap; 27 | 28 | public class TestIdxClassSize extends HBaseTestCase { 29 | 30 | /** 31 | * Tests that the class sizes matches the estimate. 32 | */ 33 | public void testClassSizes() { 34 | assertEquals(IdxClassSize.HASHMAP, 35 | ClassSize.estimateBase(HashMap.class, false)); 36 | 37 | assertEquals(IdxClassSize.OBJECT_ARRAY_LIST, 38 | ClassSize.estimateBase(ObjectArrayList.class, false)); 39 | 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/IntSetBaseTestCase.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.sets; 21 | 22 | import junit.framework.Assert; 23 | import junit.framework.AssertionFailedError; 24 | import org.apache.hadoop.hbase.HBaseTestCase; 25 | 26 | /** 27 | * A base test-case of {@link IntSet}. 28 | */ 29 | public abstract class IntSetBaseTestCase extends HBaseTestCase { 30 | 31 | private static final int[] SOME = new int[]{0, 10, 63, 64, 99, 103, 104, 200, 32 | 800, 805}; 33 | 34 | /** 35 | * Creates a sparse bitset with the given elements and maximum. 36 | * 37 | * @param capacity the maximum 38 | * @param sortedElements the elements assumed to be sorted 39 | * @return the new sparse bitset. 40 | */ 41 | public static IntSetBase createSparseBitSet(int capacity, 42 | int... sortedElements) { 43 | SparseBitSet bitSet = new SparseBitSet(); 44 | for (int element : sortedElements) { 45 | bitSet.addNext(element); 46 | } 47 | bitSet.setCapacity(capacity); 48 | return bitSet; 49 | } 50 | 51 | /** 52 | * Creates a bitset with the given elements and maximum. 53 | * 54 | * @param capacity the maximum 55 | * @param sortedElements the elements assumed to be sorted 56 | * @return the new bitset. 57 | */ 58 | public static IntSetBase createBitSet(int capacity, int... sortedElements) { 59 | BitSet bitSet = new BitSet(capacity); 60 | for (int element : sortedElements) { 61 | bitSet.addNext(element); 62 | } 63 | return bitSet; 64 | } 65 | 66 | protected abstract IntSetBase newSet(int capacity, int... 
sortedElements); 67 | 68 | protected void addSome(IntSetBase bitSet) { 69 | for (int next : SOME) { 70 | bitSet.addNext(next); 71 | } 72 | } 73 | 74 | protected static void fill(IntSetBase bitSet) { 75 | for (int i = 0; i < bitSet.capacity(); i++) { 76 | bitSet.addNext(i); 77 | } 78 | } 79 | 80 | 81 | protected void assertSetsEqual(IntSet set1, IntSet set2) { 82 | Assert.assertEquals(set1.capacity(), set2.capacity()); 83 | Assert.assertEquals(set1.size(), set2.size()); 84 | IntSet.IntSetIterator iter1 = set1.iterator(), iter2 = set2.iterator(); 85 | while (iter1.hasNext() || iter2.hasNext()) { 86 | Assert.assertEquals(iter1.next(), iter2.next()); 87 | } 88 | } 89 | 90 | protected void assertSetsNotEqual(IntSetBase set1, IntSetBase set2) { 91 | try { 92 | assertSetsEqual(set1, set2); 93 | fail("Sets are equal"); 94 | } catch (AssertionFailedError ignored) { 95 | } 96 | } 97 | 98 | public void testAdd() { 99 | IntSetBase bitSet = newSet(1000); 100 | boolean ea = true; 101 | try { 102 | bitSet.addNext(-1); 103 | Assert.fail("expected an error"); 104 | } catch (AssertionError ignored) { 105 | } catch (ArrayIndexOutOfBoundsException ignored) { 106 | System.err.println("Assertions are not set. Use java -ea ... "); 107 | ea = false; 108 | } 109 | 110 | addSome(bitSet); 111 | Assert.assertEquals(bitSet.size(), SOME.length); 112 | 113 | try { 114 | bitSet.addNext(805); 115 | if (ea) { 116 | Assert.fail("expected an error"); 117 | } 118 | } catch (AssertionError ignored) { 119 | } 120 | 121 | try { 122 | bitSet.addNext(1000); 123 | if (ea) { 124 | Assert.fail("expected an error"); 125 | } 126 | } catch (AssertionError ignored) { 127 | } 128 | } 129 | 130 | public void testContains() { 131 | IntSetBase intSet = newSet(1000); 132 | addSome(intSet); 133 | for (int next : SOME) { 134 | Assert.assertTrue(intSet.contains(next)); 135 | } 136 | 137 | int sum = 0; 138 | for (int i = 0; i < intSet.capacity(); i++) { 139 | sum += intSet.contains(i) ? 
1 : 0; 140 | } 141 | Assert.assertEquals(sum, SOME.length); 142 | } 143 | 144 | public void testClear() { 145 | IntSetBase intSet = newSet(1000); 146 | Assert.assertEquals(intSet.size(), 0); 147 | Assert.assertTrue(intSet.isEmpty()); 148 | intSet.clear(); 149 | 150 | addClearAndCheck(intSet); 151 | addClearAndCheck(intSet); 152 | } 153 | 154 | 155 | private void addClearAndCheck(IntSetBase intSetBase) { 156 | addSome(intSetBase); 157 | Assert.assertEquals(intSetBase.size(), SOME.length); 158 | Assert.assertFalse(intSetBase.isEmpty()); 159 | intSetBase.clear(); 160 | Assert.assertEquals(intSetBase.size(), 0); 161 | Assert.assertTrue(intSetBase.isEmpty()); 162 | } 163 | 164 | public void testClone() { 165 | IntSetBase intSet = newSet(10000); 166 | IntSetBase otherIntSet = (IntSetBase) intSet.clone(); 167 | assertSetsEqual(intSet, otherIntSet); 168 | 169 | addSome(intSet); 170 | assertSetsNotEqual(intSet, otherIntSet); 171 | 172 | otherIntSet = (IntSetBase) intSet.clone(); 173 | assertSetsEqual(intSet, otherIntSet); 174 | 175 | intSet.addNext(1001); 176 | Assert.assertEquals(intSet.size(), otherIntSet.size() + 1); 177 | Assert.assertFalse(otherIntSet.contains(1001)); 178 | } 179 | 180 | public void testIterator() { 181 | IntSetBase intSet = newSet(1000); 182 | IntSet.IntSetIterator iter = intSet.iterator(); 183 | Assert.assertFalse(iter.hasNext()); 184 | 185 | addSome(intSet); 186 | iter = intSet.iterator(); 187 | for (int num : SOME) { 188 | Assert.assertTrue(iter.hasNext()); 189 | Assert.assertEquals(num, iter.next()); 190 | } 191 | Assert.assertFalse(iter.hasNext()); 192 | 193 | intSet = new BitSet(1000); 194 | fill(intSet); 195 | iter = intSet.iterator(); 196 | for (int num = 0; num < 1000; num++) { 197 | Assert.assertTrue(iter.hasNext()); 198 | Assert.assertEquals(num, iter.next()); 199 | } 200 | Assert.assertFalse(iter.hasNext()); 201 | } 202 | 203 | 204 | public void testComplement() { 205 | IntSetBase emptySet = newSet(0); 206 | 
Assert.assertEquals(emptySet.complement().size(), emptySet.size()); 207 | 208 | for (int capacity = 950; capacity < 1050; capacity++) { 209 | IntSetBase intSet = newSet(capacity); 210 | Assert.assertEquals(intSet.size(), 0); 211 | Assert.assertEquals(intSet.complement().size(), capacity); 212 | } 213 | 214 | IntSetBase intSet = newSet(1001); 215 | addSome(intSet); 216 | BitSet cBitSet = (BitSet) intSet.clone().complement(); 217 | Assert.assertEquals(cBitSet.size() + intSet.size(), 1001); 218 | for (int i = 0; i < 1001; i++) { 219 | Assert.assertTrue(intSet.contains(i) != cBitSet.contains(i)); 220 | } 221 | } 222 | 223 | public void testIntersect() { 224 | IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 765, 225 | 999); 226 | IntSetBase intset2 = newSet(1013); 227 | 228 | Assert.assertTrue(intset1.clone().intersect(intset2).isEmpty()); 229 | Assert.assertTrue(intset2.clone().intersect(intset1).isEmpty()); 230 | 231 | assertSetsEqual(intset1.clone().intersect(intset1.clone()), intset1); 232 | intset2 = newSet(1013); 233 | fill(intset2); 234 | assertSetsEqual(intset1.clone().intersect(intset2), intset1); 235 | 236 | assertSetsEqual(intset1.clone().intersect(newSet(1013, 34, 63, 64, 65, 107, 237 | 244, 340, 765, 894, 1012)), 238 | newSet(1013, 34, 244, 765)); 239 | } 240 | 241 | 242 | public void testUnite() { 243 | IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 765, 244 | 999); 245 | IntSetBase intset2 = newSet(1013); 246 | 247 | assertSetsEqual(intset1.clone().unite(intset2), intset1); 248 | assertSetsEqual(intset2.clone().unite(intset1), intset1); 249 | 250 | assertSetsEqual(intset1.clone().unite(intset1.clone()), intset1); 251 | intset2 = newSet(1013); 252 | fill(intset2); 253 | assertSetsEqual(intset1.clone().unite(intset2), intset2); 254 | 255 | assertSetsEqual(intset1.clone().unite(newSet(1013, 34, 63, 64, 65, 107, 244, 256 | 340, 765, 894, 1012)), 257 | newSet(1013, 3, 7, 34, 63, 64, 65, 87, 107, 178, 244, 340, 507, 643, 
765 258 | , 894, 999, 1012)); 259 | } 260 | 261 | public void testSubtract() { 262 | IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 263 | 765, 999); 264 | IntSetBase intset2 = newSet(1013); 265 | 266 | assertSetsEqual(intset1.clone().subtract(intset2), intset1); 267 | assertSetsEqual(intset2.clone().subtract(intset1), intset2); 268 | 269 | assertSetsEqual(intset1.clone().subtract(intset1.clone()), intset2); 270 | intset2 = newSet(1013); 271 | fill(intset2); 272 | assertSetsEqual(intset1.clone().subtract(intset2), newSet(1013)); 273 | assertSetsEqual(intset2.clone().subtract(intset1), 274 | intset1.clone().complement()); 275 | 276 | assertSetsEqual(intset1.clone().subtract(newSet(1013, 34, 63, 64, 65, 277 | 107, 244, 340, 765, 894, 1012)), 278 | newSet(1013, 3, 7, 87, 178, 507, 643, 999)); 279 | } 280 | 281 | public void testDifference() { 282 | IntSetBase intset1 = newSet(1013, 3, 7, 34, 87, 178, 244, 507, 643, 283 | 765, 999); 284 | IntSetBase intset2 = newSet(1013); 285 | 286 | assertSetsEqual(intset1.clone().difference(intset2), intset1); 287 | assertSetsEqual(intset2.clone().difference(intset1), intset1); 288 | 289 | assertSetsEqual(intset1.clone().difference(intset1.clone()), intset2); 290 | intset2 = newSet(1013); 291 | fill(intset2); 292 | assertSetsEqual(intset1.clone().difference(intset2), 293 | intset1.clone().complement()); 294 | assertSetsEqual(intset2.clone().difference(intset1), 295 | intset1.clone().complement()); 296 | 297 | assertSetsEqual(intset1.clone().difference(newSet(1013, 34, 63, 64, 65, 298 | 107, 244, 340, 765, 894, 1012)), 299 | newSet(1013, 3, 7, 63, 64, 65, 87, 107, 178, 340, 507, 643, 894, 300 | 999, 1012)); 301 | } 302 | } 303 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestBitSet.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache 
Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.sets; 21 | 22 | import org.apache.hadoop.hbase.util.ClassSize; 23 | 24 | /** 25 | * Tests the {@link BitSet}. 26 | */ 27 | public class TestBitSet extends IntSetBaseTestCase { 28 | 29 | 30 | @Override 31 | protected IntSetBase newSet(int capacity, int... sortedElements) { 32 | return createBitSet(capacity, sortedElements); 33 | } 34 | 35 | /** 36 | * Tests that the heap size estimate of the fixed parts matches the 37 | * FIXED SIZE constant. 38 | */ 39 | public void testHeapSize() { 40 | assertEquals(ClassSize.estimateBase(BitSet.class, false), BitSet.FIXED_SIZE); 41 | } 42 | 43 | } 44 | -------------------------------------------------------------------------------- /src/test/java/org/apache/hadoop/hbase/regionserver/idx/support/sets/TestSparseBitSet.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. 
See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.idx.support.sets; 21 | 22 | import org.apache.hadoop.hbase.util.ClassSize; 23 | import org.apache.log4j.Logger; 24 | 25 | 26 | /** 27 | * Tests the {@link SparseBitSet} implementation. 28 | */ 29 | public class TestSparseBitSet extends IntSetBaseTestCase { 30 | private static final Logger LOG = Logger.getLogger(TestSparseBitSet.class); 31 | 32 | @Override 33 | protected IntSetBase newSet(int max, int... sortedElements) { 34 | return createSparseBitSet(max, sortedElements); 35 | } 36 | 37 | /** 38 | * Tests that the heap size estimate of the fixed parts matches the 39 | * FIXED SIZE constant. 
40 | */ 41 | public void testHeapSize() { 42 | assertEquals(ClassSize.estimateBase(SparseBitSet.class, false), SparseBitSet.FIXED_SIZE); 43 | LOG.info("Empty Sparse BitSet heap size: " + SparseBitSet.FIXED_SIZE); 44 | SparseBitSet bitSet = new SparseBitSet(); 45 | assertEquals(SparseBitSet.FIXED_SIZE, bitSet.heapSize()); 46 | 47 | bitSet.addNext(9); 48 | assertTrue(SparseBitSet.FIXED_SIZE < bitSet.heapSize()); 49 | long oneBitHeapSize = bitSet.heapSize(); 50 | LOG.info("Sparse BitSet with one bit heap size: " + oneBitHeapSize); 51 | 52 | bitSet.addNext(10); 53 | assertEquals(oneBitHeapSize, bitSet.heapSize()); 54 | 55 | bitSet.addNext(1000); 56 | assertEquals(SparseBitSet.FIXED_SIZE + 2 * (oneBitHeapSize - SparseBitSet.FIXED_SIZE), bitSet.heapSize()); 57 | LOG.info("Sparse BitSet with two entries heap size: " + bitSet.heapSize()); 58 | } 59 | 60 | } -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Define some default values that can be overridden by system properties 2 | hbase.root.logger=INFO,console 3 | 4 | # Define the root logger to the system property "hbase.root.logger". 
log4j.rootLogger=${hbase.root.logger}

# Logging Threshold
# NOTE: the key was previously misspelled "log4j.threshhold", which log4j
# silently ignores; corrected to the real property name (ALL is also the
# default, so effective behavior is unchanged).
log4j.threshold=ALL
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n

# Custom Logging levels

#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG

log4j.logger.org.apache.hadoop=WARN
log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.org.apache.hadoop.hbase=INFO
--------------------------------------------------------------------------------