├── .gitignore
├── LICENSE
├── README.md
├── build.xml
├── conf
│   ├── hbase-site.xml
│   └── log4j.properties
├── lib
│   ├── hbase-binding-0.1.4.jar
│   └── libDeli-coproc.jar
├── src
│   └── tthbase
│       ├── client
│       │   ├── Demo.java
│       │   └── HTableGetByIndex.java
│       ├── commons
│       │   ├── HTableWithIndexesDriver.java
│       │   ├── MaterializeIndex.java
│       │   └── MaterializeIndexByCompositeRowkey.java
│       ├── coprocessor
│       │   ├── BasicIndexObserver.java
│       │   ├── HTableUpdateIndexByPut.java
│       │   ├── IndexObserverBaseline.java
│       │   ├── IndexObserverwReadRepair.java
│       │   └── LoggedObserver.java
│       └── util
│           ├── HIndexConstantsAndUtils.java
│           ├── IssueCompaction.java
│           ├── UpdateCoprocessor.java
│           └── UpdateTableAttribute.java
└── tt_sh
    └── run.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | build/*
2 | *.class
3 | *.swp
4 | *.swo
5 | *.bak
6 | *.~
7 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 2, June 1991
3 |
4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
6 | Everyone is permitted to copy and distribute verbatim copies
7 | of this license document, but changing it is not allowed.
8 |
9 | Preamble
10 |
11 | The licenses for most software are designed to take away your
12 | freedom to share and change it. By contrast, the GNU General Public
13 | License is intended to guarantee your freedom to share and change free
14 | software--to make sure the software is free for all its users. This
15 | General Public License applies to most of the Free Software
16 | Foundation's software and to any other program whose authors commit to
17 | using it. (Some other Free Software Foundation software is covered by
18 | the GNU Lesser General Public License instead.) You can apply it to
19 | your programs, too.
20 |
21 | When we speak of free software, we are referring to freedom, not
22 | price. Our General Public Licenses are designed to make sure that you
23 | have the freedom to distribute copies of free software (and charge for
24 | this service if you wish), that you receive source code or can get it
25 | if you want it, that you can change the software or use pieces of it
26 | in new free programs; and that you know you can do these things.
27 |
28 | To protect your rights, we need to make restrictions that forbid
29 | anyone to deny you these rights or to ask you to surrender the rights.
30 | These restrictions translate to certain responsibilities for you if you
31 | distribute copies of the software, or if you modify it.
32 |
33 | For example, if you distribute copies of such a program, whether
34 | gratis or for a fee, you must give the recipients all the rights that
35 | you have. You must make sure that they, too, receive or can get the
36 | source code. And you must show them these terms so they know their
37 | rights.
38 |
39 | We protect your rights with two steps: (1) copyright the software, and
40 | (2) offer you this license which gives you legal permission to copy,
41 | distribute and/or modify the software.
42 |
43 | Also, for each author's protection and ours, we want to make certain
44 | that everyone understands that there is no warranty for this free
45 | software. If the software is modified by someone else and passed on, we
46 | want its recipients to know that what they have is not the original, so
47 | that any problems introduced by others will not reflect on the original
48 | authors' reputations.
49 |
50 | Finally, any free program is threatened constantly by software
51 | patents. We wish to avoid the danger that redistributors of a free
52 | program will individually obtain patent licenses, in effect making the
53 | program proprietary. To prevent this, we have made it clear that any
54 | patent must be licensed for everyone's free use or not licensed at all.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | GNU GENERAL PUBLIC LICENSE
60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
61 |
62 | 0. This License applies to any program or other work which contains
63 | a notice placed by the copyright holder saying it may be distributed
64 | under the terms of this General Public License. The "Program", below,
65 | refers to any such program or work, and a "work based on the Program"
66 | means either the Program or any derivative work under copyright law:
67 | that is to say, a work containing the Program or a portion of it,
68 | either verbatim or with modifications and/or translated into another
69 | language. (Hereinafter, translation is included without limitation in
70 | the term "modification".) Each licensee is addressed as "you".
71 |
72 | Activities other than copying, distribution and modification are not
73 | covered by this License; they are outside its scope. The act of
74 | running the Program is not restricted, and the output from the Program
75 | is covered only if its contents constitute a work based on the
76 | Program (independent of having been made by running the Program).
77 | Whether that is true depends on what the Program does.
78 |
79 | 1. You may copy and distribute verbatim copies of the Program's
80 | source code as you receive it, in any medium, provided that you
81 | conspicuously and appropriately publish on each copy an appropriate
82 | copyright notice and disclaimer of warranty; keep intact all the
83 | notices that refer to this License and to the absence of any warranty;
84 | and give any other recipients of the Program a copy of this License
85 | along with the Program.
86 |
87 | You may charge a fee for the physical act of transferring a copy, and
88 | you may at your option offer warranty protection in exchange for a fee.
89 |
90 | 2. You may modify your copy or copies of the Program or any portion
91 | of it, thus forming a work based on the Program, and copy and
92 | distribute such modifications or work under the terms of Section 1
93 | above, provided that you also meet all of these conditions:
94 |
95 | a) You must cause the modified files to carry prominent notices
96 | stating that you changed the files and the date of any change.
97 |
98 | b) You must cause any work that you distribute or publish, that in
99 | whole or in part contains or is derived from the Program or any
100 | part thereof, to be licensed as a whole at no charge to all third
101 | parties under the terms of this License.
102 |
103 | c) If the modified program normally reads commands interactively
104 | when run, you must cause it, when started running for such
105 | interactive use in the most ordinary way, to print or display an
106 | announcement including an appropriate copyright notice and a
107 | notice that there is no warranty (or else, saying that you provide
108 | a warranty) and that users may redistribute the program under
109 | these conditions, and telling the user how to view a copy of this
110 | License. (Exception: if the Program itself is interactive but
111 | does not normally print such an announcement, your work based on
112 | the Program is not required to print an announcement.)
113 |
114 | These requirements apply to the modified work as a whole. If
115 | identifiable sections of that work are not derived from the Program,
116 | and can be reasonably considered independent and separate works in
117 | themselves, then this License, and its terms, do not apply to those
118 | sections when you distribute them as separate works. But when you
119 | distribute the same sections as part of a whole which is a work based
120 | on the Program, the distribution of the whole must be on the terms of
121 | this License, whose permissions for other licensees extend to the
122 | entire whole, and thus to each and every part regardless of who wrote it.
123 |
124 | Thus, it is not the intent of this section to claim rights or contest
125 | your rights to work written entirely by you; rather, the intent is to
126 | exercise the right to control the distribution of derivative or
127 | collective works based on the Program.
128 |
129 | In addition, mere aggregation of another work not based on the Program
130 | with the Program (or with a work based on the Program) on a volume of
131 | a storage or distribution medium does not bring the other work under
132 | the scope of this License.
133 |
134 | 3. You may copy and distribute the Program (or a work based on it,
135 | under Section 2) in object code or executable form under the terms of
136 | Sections 1 and 2 above provided that you also do one of the following:
137 |
138 | a) Accompany it with the complete corresponding machine-readable
139 | source code, which must be distributed under the terms of Sections
140 | 1 and 2 above on a medium customarily used for software interchange; or,
141 |
142 | b) Accompany it with a written offer, valid for at least three
143 | years, to give any third party, for a charge no more than your
144 | cost of physically performing source distribution, a complete
145 | machine-readable copy of the corresponding source code, to be
146 | distributed under the terms of Sections 1 and 2 above on a medium
147 | customarily used for software interchange; or,
148 |
149 | c) Accompany it with the information you received as to the offer
150 | to distribute corresponding source code. (This alternative is
151 | allowed only for noncommercial distribution and only if you
152 | received the program in object code or executable form with such
153 | an offer, in accord with Subsection b above.)
154 |
155 | The source code for a work means the preferred form of the work for
156 | making modifications to it. For an executable work, complete source
157 | code means all the source code for all modules it contains, plus any
158 | associated interface definition files, plus the scripts used to
159 | control compilation and installation of the executable. However, as a
160 | special exception, the source code distributed need not include
161 | anything that is normally distributed (in either source or binary
162 | form) with the major components (compiler, kernel, and so on) of the
163 | operating system on which the executable runs, unless that component
164 | itself accompanies the executable.
165 |
166 | If distribution of executable or object code is made by offering
167 | access to copy from a designated place, then offering equivalent
168 | access to copy the source code from the same place counts as
169 | distribution of the source code, even though third parties are not
170 | compelled to copy the source along with the object code.
171 |
172 | 4. You may not copy, modify, sublicense, or distribute the Program
173 | except as expressly provided under this License. Any attempt
174 | otherwise to copy, modify, sublicense or distribute the Program is
175 | void, and will automatically terminate your rights under this License.
176 | However, parties who have received copies, or rights, from you under
177 | this License will not have their licenses terminated so long as such
178 | parties remain in full compliance.
179 |
180 | 5. You are not required to accept this License, since you have not
181 | signed it. However, nothing else grants you permission to modify or
182 | distribute the Program or its derivative works. These actions are
183 | prohibited by law if you do not accept this License. Therefore, by
184 | modifying or distributing the Program (or any work based on the
185 | Program), you indicate your acceptance of this License to do so, and
186 | all its terms and conditions for copying, distributing or modifying
187 | the Program or works based on it.
188 |
189 | 6. Each time you redistribute the Program (or any work based on the
190 | Program), the recipient automatically receives a license from the
191 | original licensor to copy, distribute or modify the Program subject to
192 | these terms and conditions. You may not impose any further
193 | restrictions on the recipients' exercise of the rights granted herein.
194 | You are not responsible for enforcing compliance by third parties to
195 | this License.
196 |
197 | 7. If, as a consequence of a court judgment or allegation of patent
198 | infringement or for any other reason (not limited to patent issues),
199 | conditions are imposed on you (whether by court order, agreement or
200 | otherwise) that contradict the conditions of this License, they do not
201 | excuse you from the conditions of this License. If you cannot
202 | distribute so as to satisfy simultaneously your obligations under this
203 | License and any other pertinent obligations, then as a consequence you
204 | may not distribute the Program at all. For example, if a patent
205 | license would not permit royalty-free redistribution of the Program by
206 | all those who receive copies directly or indirectly through you, then
207 | the only way you could satisfy both it and this License would be to
208 | refrain entirely from distribution of the Program.
209 |
210 | If any portion of this section is held invalid or unenforceable under
211 | any particular circumstance, the balance of the section is intended to
212 | apply and the section as a whole is intended to apply in other
213 | circumstances.
214 |
215 | It is not the purpose of this section to induce you to infringe any
216 | patents or other property right claims or to contest validity of any
217 | such claims; this section has the sole purpose of protecting the
218 | integrity of the free software distribution system, which is
219 | implemented by public license practices. Many people have made
220 | generous contributions to the wide range of software distributed
221 | through that system in reliance on consistent application of that
222 | system; it is up to the author/donor to decide if he or she is willing
223 | to distribute software through any other system and a licensee cannot
224 | impose that choice.
225 |
226 | This section is intended to make thoroughly clear what is believed to
227 | be a consequence of the rest of this License.
228 |
229 | 8. If the distribution and/or use of the Program is restricted in
230 | certain countries either by patents or by copyrighted interfaces, the
231 | original copyright holder who places the Program under this License
232 | may add an explicit geographical distribution limitation excluding
233 | those countries, so that distribution is permitted only in or among
234 | countries not thus excluded. In such case, this License incorporates
235 | the limitation as if written in the body of this License.
236 |
237 | 9. The Free Software Foundation may publish revised and/or new versions
238 | of the General Public License from time to time. Such new versions will
239 | be similar in spirit to the present version, but may differ in detail to
240 | address new problems or concerns.
241 |
242 | Each version is given a distinguishing version number. If the Program
243 | specifies a version number of this License which applies to it and "any
244 | later version", you have the option of following the terms and conditions
245 | either of that version or of any later version published by the Free
246 | Software Foundation. If the Program does not specify a version number of
247 | this License, you may choose any version ever published by the Free Software
248 | Foundation.
249 |
250 | 10. If you wish to incorporate parts of the Program into other free
251 | programs whose distribution conditions are different, write to the author
252 | to ask for permission. For software which is copyrighted by the Free
253 | Software Foundation, write to the Free Software Foundation; we sometimes
254 | make exceptions for this. Our decision will be guided by the two goals
255 | of preserving the free status of all derivatives of our free software and
256 | of promoting the sharing and reuse of software generally.
257 |
258 | NO WARRANTY
259 |
260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
268 | REPAIR OR CORRECTION.
269 |
270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
278 | POSSIBILITY OF SUCH DAMAGES.
279 |
280 | END OF TERMS AND CONDITIONS
281 |
282 | How to Apply These Terms to Your New Programs
283 |
284 | If you develop a new program, and you want it to be of the greatest
285 | possible use to the public, the best way to achieve this is to make it
286 | free software which everyone can redistribute and change under these terms.
287 |
288 | To do so, attach the following notices to the program. It is safest
289 | to attach them to the start of each source file to most effectively
290 | convey the exclusion of warranty; and each file should have at least
291 | the "copyright" line and a pointer to where the full notice is found.
292 |
293 | {description}
294 | Copyright (C) {year} {fullname}
295 |
296 | This program is free software; you can redistribute it and/or modify
297 | it under the terms of the GNU General Public License as published by
298 | the Free Software Foundation; either version 2 of the License, or
299 | (at your option) any later version.
300 |
301 | This program is distributed in the hope that it will be useful,
302 | but WITHOUT ANY WARRANTY; without even the implied warranty of
303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
304 | GNU General Public License for more details.
305 |
306 | You should have received a copy of the GNU General Public License along
307 | with this program; if not, write to the Free Software Foundation, Inc.,
308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
309 |
310 | Also add information on how to contact you by electronic and paper mail.
311 |
312 | If the program is interactive, make it output a short notice like this
313 | when it starts in an interactive mode:
314 |
315 | Gnomovision version 69, Copyright (C) year name of author
316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
317 | This is free software, and you are welcome to redistribute it
318 | under certain conditions; type `show c' for details.
319 |
320 | The hypothetical commands `show w' and `show c' should show the appropriate
321 | parts of the General Public License. Of course, the commands you use may
322 | be called something other than `show w' and `show c'; they could even be
323 | mouse-clicks or menu items--whatever suits your program.
324 |
325 | You should also get your employer (if you work as a programmer) or your
326 | school, if any, to sign a "copyright disclaimer" for the program, if
327 | necessary. Here is a sample; alter the names:
328 |
329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program
330 | `Gnomovision' (which makes passes at compilers) written by James Hacker.
331 |
332 | {signature of Ty Coon}, 1 April 1989
333 | Ty Coon, President of Vice
334 |
335 | This General Public License does not permit incorporating your program into
336 | proprietary programs. If your program is a subroutine library, you may
337 | consider it more useful to permit linking proprietary applications with the
338 | library. If this is what you want to do, use the GNU Lesser General
339 | Public License instead of this License.
340 |
341 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | DELI: a Log-Structured Secondary Index for HBase/NoSQL
2 | ======
3 |
4 | Introduction
5 | ------
6 | ``DELI`` is a secondary index for NoSQL systems. It currently supports global indexing and applies to HBase-like NoSQL systems whose write performance is optimized through an LSM-tree structure [[link](http://en.wikipedia.org/wiki/Log-structured_merge-tree)].
7 |
8 | DELI stands for "*DEferred Lightweight Indexing*". What distinguishes its design from other secondary indexes for HBase and NoSQL systems is that:
9 |
10 | * It strictly follows the log-structured design principle that **all writes must be append-only**.
11 | * It **couples the index-base-table sync-up with the compaction**.
12 |
13 | By this design, ``DELI`` preserves the write-optimized performance characteristics of stock HBase while adding secondary-index support. Details can be found in the research paper referenced below, published at [[CCGrid 2015](http://cloud.siat.ac.cn/ccgrid2015/)].
14 |
15 |
16 | Demo
17 | ---
18 |
19 | 1. Install ``docker`` on your machine; [[link](https://www.docker.com)]
20 | 2. Launch the ``docker`` container running ``DELI`` image.
21 |
22 | ```bash
23 | sudo docker run -i -t tristartom/deli-hadoop-hbase-ubuntu /bin/bash
24 | ```
25 | 3. Inside the container's bash, run the following to demonstrate a DELI client program.
26 | ```bash
27 | #step 0
28 | source ~/.profile
29 |
30 | #step 1: first start hdfs
31 | cd ~/app/hadoop-2.6.0
32 | bin/hdfs namenode -format
33 | sbin/start-dfs.sh
34 |
35 | #step 2: then run hbase
36 | cd ~/app/hbase-0.99.2
37 | bin/start-hbase.sh
38 |
39 | #step 3: run deli demo program
40 | cd ~/app/deli
41 | ant #compile the deli client
42 | ./tt_sh/run.sh #demonstrates that data can be accessed through a value-based Get (getByIndex)
43 | ```
44 |
45 | If you observe ``Result is key1`` at the end of the printout, the demo ran successfully. The demo source code is in ``~/app/deli/src/tthbase/client/Demo.java``.
46 |
47 | Reference
48 | ---
49 |
50 | "Deferred Lightweight Indexing for Log-Structured Key-Value Stores", Yuzhe Tang, Arun Iyengar, Wei Tan, Liana Fong, Ling Liu, in Proceedings of the 15th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid 2015), Shenzhen, Guangdong, China, May 2015, [[pdf](http://tristartom.github.io/docs/ccgrid15.pdf)]
51 |
52 | Update: ``DELI`` won the Best Paper award at CCGrid 2015 [[link](http://cloud.siat.ac.cn/ccgrid2015/awards.html)]!
53 |
--------------------------------------------------------------------------------
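To make the deferred design concrete, here is a minimal client-side sketch condensed from `src/tthbase/client/Demo.java` and `src/tthbase/client/HTableGetByIndex.java` below (table and column names are the demo's; table creation, coprocessor wiring, and error handling are omitted):

```java
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

import tthbase.client.HTableGetByIndex;

public class DeliFlowSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTableGetByIndex htable = new HTableGetByIndex(conf, Bytes.toBytes("testtable"));

        // Write path: an ordinary append-only Put; the region-side observer
        // (tthbase.coprocessor.IndexObserverwReadRepair) appends the index entry.
        Put p = new Put(Bytes.toBytes("key1"));
        p.add(Bytes.toBytes("cf"), Bytes.toBytes("country"), 101, Bytes.toBytes("v1"));
        htable.put(p);

        // Read path: PLY_READCHECK verifies each candidate against the base table
        // and lazily deletes stale index entries (read repair); PLY_FASTREAD
        // trusts the index as-is.
        htable.configPolicy(HTableGetByIndex.PLY_READCHECK);
        List<byte[]> rowKeys = htable.getByIndex(
                Bytes.toBytes("cf"), Bytes.toBytes("country"), Bytes.toBytes("v1"));
        System.out.println("matches: " + (rowKeys == null ? 0 : rowKeys.size()));
        htable.close();
    }
}
```
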
/build.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <!-- NOTE: the original markup of this file was stripped when this dump was made.
3 |      The build file below is a minimal reconstruction, inferred from the README
4 |      ("ant" compiles the client) and from Demo.java's classpath, which expects
5 |      build/jar/libDeli-client.jar and build/jar/libDeli-coproc.jar compiled
6 |      against lib/hbase-binding-0.1.4.jar. -->
7 | <project name="deli" default="jar" basedir=".">
8 |     <property name="src.dir" value="src"/>
9 |     <property name="build.dir" value="build"/>
10 |     <property name="classes.dir" value="${build.dir}/classes"/>
11 |     <property name="jar.dir" value="${build.dir}/jar"/>
12 |
13 |     <path id="classpath">
14 |         <fileset dir="lib" includes="**/*.jar"/>
15 |     </path>
16 |
17 |     <target name="compile">
18 |         <mkdir dir="${classes.dir}"/>
19 |         <javac srcdir="${src.dir}" destdir="${classes.dir}" classpathref="classpath" includeantruntime="false"/>
20 |     </target>
21 |
22 |     <target name="jar" depends="compile">
23 |         <mkdir dir="${jar.dir}"/>
24 |         <jar destfile="${jar.dir}/libDeli-client.jar" basedir="${classes.dir}"/>
25 |         <jar destfile="${jar.dir}/libDeli-coproc.jar" basedir="${classes.dir}"/>
26 |     </target>
27 |
28 |     <target name="clean">
29 |         <delete dir="${build.dir}"/>
30 |     </target>
31 | </project>
--------------------------------------------------------------------------------
/conf/hbase-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 |
24 | <configuration>
25 |   <property>
26 |     <name>hbase.zookeeper.quorum</name>
27 |     <value>localhost</value>
28 |   </property>
32 |
33 |   <property>
34 |     <name>hbase.zookeeper.property.clientPort</name>
35 |     <value>2181</value>
36 |   </property>
37 | </configuration>
38 |
--------------------------------------------------------------------------------
/conf/log4j.properties:
--------------------------------------------------------------------------------
1 | # Define some default values that can be overridden by system properties
2 | hbase.root.logger=INFO,console
3 | hbase.security.logger=INFO,console
4 | hbase.log.dir=.
5 | hbase.log.file=hbase.log
6 |
7 | # Define the root logger to the system property "hbase.root.logger".
8 | log4j.rootLogger=${hbase.root.logger}
9 |
10 | # Logging Threshold
11 | log4j.threshold=ALL
12 |
13 | #
14 | # Daily Rolling File Appender
15 | #
16 | log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
17 | log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
18 |
19 | # Rollover at midnight
20 | log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
21 |
22 | # 30-day backup
23 | #log4j.appender.DRFA.MaxBackupIndex=30
24 | log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
25 |
26 | # Pattern format: Date LogLevel LoggerName LogMessage
27 | log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
28 |
29 | # Rolling File Appender properties
30 | hbase.log.maxfilesize=256MB
31 | hbase.log.maxbackupindex=20
32 |
33 | # Rolling File Appender
34 | log4j.appender.RFA=org.apache.log4j.RollingFileAppender
35 | log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
36 |
37 | log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
38 | log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
39 |
40 | log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
41 | log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
42 |
43 | #
44 | # Security audit appender
45 | #
46 | hbase.security.log.file=SecurityAuth.audit
47 | hbase.security.log.maxfilesize=256MB
48 | hbase.security.log.maxbackupindex=20
49 | log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
50 | log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
51 | log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
52 | log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
53 | log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
54 | log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
55 | log4j.category.SecurityLogger=${hbase.security.logger}
56 | log4j.additivity.SecurityLogger=false
57 | #log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
58 | #log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.visibility.VisibilityController=TRACE
59 |
60 | #
61 | # Null Appender
62 | #
63 | log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
64 |
65 | #
66 | # console
67 | # Add "console" to rootlogger above if you want to use this
68 | #
69 | log4j.appender.console=org.apache.log4j.ConsoleAppender
70 | log4j.appender.console.target=System.err
71 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
72 | log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
73 |
74 | # Custom Logging levels
75 |
76 | log4j.logger.org.apache.zookeeper=INFO
77 | #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
78 | log4j.logger.org.apache.hadoop.hbase=INFO
79 | log4j.logger.org.apache.hadoop.hbase.META=INFO
80 | # Make these two classes INFO-level. Make them DEBUG to see more zk debug.
81 | log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
82 | log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
83 | #log4j.logger.org.apache.hadoop.dfs=DEBUG
84 | # Set this class to log INFO only, otherwise it's OTT
85 | # Enable this to get detailed connection error/retry logging.
86 | # log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE
87 |
88 |
89 | # Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
90 | #log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
91 |
92 | # Uncomment the below if you want to remove logging of client region caching
93 | # and scan of hbase:meta messages
94 | # log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=INFO
95 |
--------------------------------------------------------------------------------
/lib/hbase-binding-0.1.4.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tristartom/key-value-store-indexing/82d8ac735ee6d164498f269ddd22d4cc191d5b41/lib/hbase-binding-0.1.4.jar
--------------------------------------------------------------------------------
/lib/libDeli-coproc.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tristartom/key-value-store-indexing/82d8ac735ee6d164498f269ddd22d4cc191d5b41/lib/libDeli-coproc.jar
--------------------------------------------------------------------------------
/src/tthbase/client/Demo.java:
--------------------------------------------------------------------------------
1 | package tthbase.client;
2 |
3 | import org.apache.hadoop.hbase.*;
4 | import org.apache.hadoop.hbase.client.*;
5 | import org.apache.hadoop.hbase.util.Bytes;
6 | import org.apache.hadoop.conf.Configuration;
7 |
8 | import java.util.*;
9 |
10 | import tthbase.util.*;
11 | import tthbase.client.*;
12 |
13 | public class Demo {
14 |
15 | public static void initTables(Configuration conf, String testTableName, String columnFamily, String indexedColumnName) throws Exception{
16 | HBaseAdmin admin = new HBaseAdmin(conf);
17 | if (!admin.isTableAvailable(testTableName)){
18 | HIndexConstantsAndUtils.createAndConfigBaseTable(conf, Bytes.toBytes(testTableName), Bytes.toBytes(columnFamily), new String[]{indexedColumnName});
19 | }
20 |
21 | byte[] indexTableName = HIndexConstantsAndUtils.generateIndexTableName(Bytes.toBytes(testTableName), Bytes.toBytes(columnFamily), Bytes.toBytes(indexedColumnName)/*TODO column family in index table*/);
22 | if (!admin.isTableAvailable(indexTableName)){
23 | HIndexConstantsAndUtils.createAndConfigIndexTable(conf, indexTableName, Bytes.toBytes(columnFamily));
24 | }
25 | }
26 |
27 | public static void initCoProcessors(Configuration conf, String coprocessorJarLoc, HTableGetByIndex htable) throws Exception {
28 | int coprocessorIndex = 1;
29 | HIndexConstantsAndUtils.updateCoprocessor(conf, htable.getTableName(), coprocessorIndex++, true, coprocessorJarLoc, "tthbase.coprocessor.IndexObserverwReadRepair");
30 | HIndexConstantsAndUtils.updateCoprocessor(conf, htable.getTableName(), coprocessorIndex++, true, coprocessorJarLoc, "tthbase.coprocessor.PhysicalDeletionInCompaction");
31 | htable.configPolicy(HTableGetByIndex.PLY_READCHECK);
32 | }
33 |
34 | public static void main(String[] args) throws Exception{
35 | Configuration conf = HBaseConfiguration.create();
36 | String testTableName = "testtable";
37 | String columnFamily = "cf";
38 | String indexedColumnName = "country";
39 |
40 | if(args.length <= 0){
41 | System.err.println("format: java -cp tthbase.client.Demo ");
42 | System.err.println("example: java -cp build/jar/libDeli-client.jar:conf:lib/hbase-binding-0.1.4.jar tthbase.client.Demo /root/app/deli/build/jar/libDeli-coproc.jar ");
43 | return;
44 | }
45 | String locCoproc = args[0];
46 | String coprocessorJarLoc = "file:" + locCoproc;
47 |
48 | initTables(conf, testTableName, columnFamily, indexedColumnName);
49 | HTableGetByIndex htable = new HTableGetByIndex(conf, Bytes.toBytes(testTableName));
50 | initCoProcessors(conf, coprocessorJarLoc, htable);
51 |
52 | //put value1
53 | Put p = new Put(Bytes.toBytes("key1"));
54 | p.add(Bytes.toBytes(columnFamily), Bytes.toBytes(indexedColumnName), 101, Bytes.toBytes("v1"));
55 | htable.put(p);
56 |
57 | //getByIndex
58 | htable.configPolicy(HTableGetByIndex.PLY_FASTREAD);
59 | List<byte[]> res = htable.getByIndex(Bytes.toBytes(columnFamily), Bytes.toBytes(indexedColumnName), Bytes.toBytes("v1"));
60 | assert(res != null && res.size() != 0);
61 | System.out.println("Result is " + Bytes.toString(res.get(0)));
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/src/tthbase/client/HTableGetByIndex.java:
--------------------------------------------------------------------------------
1 | package tthbase.client;
2 | import org.apache.hadoop.hbase.client.HTable;
3 | import org.apache.hadoop.hbase.client.Get;
4 | import org.apache.hadoop.hbase.client.Delete;
5 | import org.apache.hadoop.hbase.client.Result;
6 | import org.apache.hadoop.hbase.util.Bytes;
7 | import org.apache.hadoop.conf.Configuration;
8 | import org.apache.hadoop.hbase.KeyValue;
9 | import java.io.IOException;
10 |
11 | import java.util.List;
12 | import java.util.ArrayList;
13 | import java.util.Map;
14 | import java.util.HashMap;
15 |
16 | import tthbase.commons.HTableWithIndexesDriver;
17 |
18 | public class HTableGetByIndex extends HTableWithIndexesDriver {
19 |
20 | int policyReadIndex;
21 | public static final int PLY_FASTREAD = 0;
22 | public static final int PLY_READCHECK = 1;
23 |
24 | public HTableGetByIndex (Configuration conf, byte[] tableName) throws IOException {
25 | super(conf, tableName);
26 | //default is baseline
27 | configPolicy(PLY_FASTREAD);
28 | }
29 |
30 | public void configPolicy(int p){
31 | policyReadIndex = p;
32 | }
33 |
34 | public int getIndexingPolicy(){
35 | return policyReadIndex;
36 | }
37 |
38 | public List<byte[]> getByIndex(byte[] columnFamily, byte[] columnName, byte[] value) throws IOException {
39 | List<byte[]> rawResults = readIndexOnly(columnFamily, columnName, value);
40 | List<byte[]> datakeyToDels = new ArrayList<byte[]>();
41 | if(policyReadIndex == PLY_READCHECK) {
42 | //perform read repair
43 | if (rawResults != null) {
44 | for(byte[] dataRowkey : rawResults) {
45 | //read in base table to verify
46 | byte[] valueFromBase = readBase(dataRowkey, columnFamily, columnName);
47 | if(!Bytes.equals(valueFromBase, value)) {
48 | datakeyToDels.add(dataRowkey);
49 | }
50 | }
51 | rawResults.removeAll(datakeyToDels);
52 | for(byte[] datakeyToDel : datakeyToDels){
53 | deleteFromIndex(columnFamily, columnName, value, datakeyToDel);
54 | }
55 | }
56 | }
57 | return rawResults;
58 | }
59 |
60 | private byte[] readBase(byte[] dataRowkey, byte[] columnFamily, byte[] columnName) throws IOException {
61 | Get g = new Get(dataRowkey);
62 | g.addColumn(columnFamily, columnName);
63 | Result r = this.get(g);
64 | assert r.raw().length == 1;
65 | KeyValue keyValue = r.raw()[0];
66 | return keyValue.getValue();
67 | }
68 |
69 | private List<byte[]> readIndexOnly(byte[] columnFamily, byte[] columnName, byte[] value) throws IOException {
70 | assert value != null;
71 | Map<byte[], List<byte[]>> res = internalGetByIndexByRange(columnFamily, columnName, value, null);
72 | if(res == null || res.size() ==0) {
73 | return null;
74 | } else {
75 | //System.out.print("index read res-" + res.size() + ": ");
76 | List<byte[]> toRet = new ArrayList<byte[]>();
77 | for(Map.Entry<byte[], List<byte[]>> e : res.entrySet())
78 | {
79 | // System.out.print(Bytes.toString(e.getKey()));
80 | List<byte[]> keys = e.getValue();
81 | // System.out.print("=>{");
82 | for(byte[] key : keys){
83 | toRet.add(key);
84 | // System.out.print(Bytes.toString(key) + ",");
85 | }
86 | // System.out.print("}");
87 | }
88 | //System.out.println();
89 | return toRet;
90 | }
91 | }
92 |
93 | //note valueEnd is inclusive.
94 | //TODO
95 | public Map<byte[], List<byte[]>> getByIndexByRange(byte[] columnFamily, byte[] columnName, byte[] valueStart, byte[] valueEnd) throws IOException {
96 | assert valueStart != null;
97 | assert valueEnd != null;
98 | assert Bytes.toString(valueStart).compareTo(Bytes.toString(valueEnd)) < 0; //assert valueStart < valueEnd, lexicographically;
99 | return internalGetByIndexByRange(columnFamily, columnName, valueStart, Bytes.toBytes(Bytes.toString(valueEnd) + "0"));
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
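`getByIndexByRange` treats `valueEnd` as inclusive by appending `"0"` (the ASCII byte right after the `"/"` delimiter) to form the exclusive stop row, so every `valueEnd/dataKey` index entry still falls inside the scan. A short usage sketch, assuming an `htable` created as in `Demo.java`:

```java
import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.util.Bytes;

import tthbase.client.HTableGetByIndex;

public class RangeQuerySketch {
    // Prints how many rows carry each indexed "country" value in ["a", "m"].
    static void dumpRange(HTableGetByIndex htable) throws IOException {
        Map<byte[], List<byte[]>> byValue = htable.getByIndexByRange(
                Bytes.toBytes("cf"), Bytes.toBytes("country"),
                Bytes.toBytes("a"), Bytes.toBytes("m"));
        for (Map.Entry<byte[], List<byte[]>> e : byValue.entrySet()) {
            System.out.println(Bytes.toString(e.getKey()) + " -> "
                    + e.getValue().size() + " row(s)");
        }
    }
}
```
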
/src/tthbase/commons/HTableWithIndexesDriver.java:
--------------------------------------------------------------------------------
1 | package tthbase.commons;
2 | import org.apache.hadoop.hbase.HTableDescriptor;
3 | import org.apache.hadoop.hbase.client.HTable;
4 | import org.apache.hadoop.hbase.util.Bytes;
5 | import org.apache.hadoop.conf.Configuration;
6 |
7 | import java.io.IOException;
8 | import java.util.Map;
9 | import java.util.HashMap;
10 | import java.util.List;
11 | import java.util.ArrayList;
12 |
13 | import tthbase.util.HIndexConstantsAndUtils;
14 |
15 | public class HTableWithIndexesDriver extends HTable {
16 | protected static int errorIndex = 0;
17 | protected Map<String, HTable> indexTables = null;
18 | protected MaterializeIndex policyToMaterializeIndex = null;
19 |
20 | public HTableWithIndexesDriver(Configuration conf, byte[] tableName) throws IOException {
21 | super(conf, tableName);
22 | HTableDescriptor dataTableDesc = null;
23 | try {
24 | dataTableDesc = getTableDescriptor();
25 | //enable autoflush
26 | setAutoFlush(true);
27 | } catch (IOException e1) {
28 | throw new RuntimeException("TTERROR_" + (errorIndex++) + ": " + e1.getMessage());
29 | }
30 |
31 | policyToMaterializeIndex = new MaterializeIndexByCompositeRowkey(); //TOREMOVE
32 | initIndexTables(dataTableDesc, conf);
33 | }
34 |
35 | public void initIndexTables(HTableDescriptor dataTableDesc, Configuration conf) {
36 | //initialize index table
37 | indexTables = new HashMap<String, HTable>();
38 | //scan through all indexed columns
39 | for (int indexNumber = 1; ; indexNumber++){
40 | String indexedColumn = dataTableDesc.getValue(HIndexConstantsAndUtils.INDEX_INDICATOR + indexNumber);
41 | if(indexedColumn == null){
42 | //no (further) index column, at current index
43 | break;
44 | } else {
45 | String[] names = indexedColumn.split("\\|");
46 | String indexedColumnFamilyName = names[0];
47 | String indexedColumnName = names[1];
48 | String indexTableName = dataTableDesc.getNameAsString() + "_" + indexedColumnFamilyName + "_" + indexedColumnName;
49 | try {
50 | HTable indexTable = new HTable(conf, indexTableName);
51 | indexTable.setAutoFlush(true);
52 | indexTables.put(indexTableName, indexTable);
53 | } catch (IOException e1) {
54 | throw new RuntimeException("TTERROR_" + (errorIndex++) + ": " + e1.getMessage());
55 | }
56 | }
57 | }
58 | }
59 |
60 | public HTable getIndexTable(byte[] columnFamily, byte[] columnName) {
61 | String dataTableName = Bytes.toString(this.getTableName());
62 | String indexTableName = dataTableName + "_" + Bytes.toString(columnFamily) + "_" + Bytes.toString(columnName);
63 | HTable indexTable = indexTables.get(indexTableName);
64 | if(indexTable == null){
65 | throw new RuntimeException("TTERROR_" + (errorIndex ++) + ": Unable to find index table with name:" + indexTableName + "!");
66 | }
67 | return indexTable;
68 | }
69 |
70 | @Override
71 | public void close() throws IOException {
72 | super.close();
73 | if(indexTables != null) {
74 | for(Map.Entry<String, HTable> entry : indexTables.entrySet()){
75 | entry.getValue().close();
76 | }
77 | }
78 | }
79 |
80 | /**
81 | @param valueStop exclusive upper bound; null means a point query on valueStart
82 | @return a map from indexed value to the list of matching data row keys
83 | */
84 | protected Map<byte[], List<byte[]>> internalGetByIndexByRange(byte[] columnFamily, byte[] columnName, byte[] valueStart, byte[] valueStop) throws IOException {
85 | HTable indexTable = getIndexTable(columnFamily, columnName);
86 | return policyToMaterializeIndex.getByIndexByRange(indexTable, valueStart, valueStop);
87 | }
88 |
89 | public void putToIndex(byte[] columnFamily, byte[] columnName, byte[] dataValue, byte[] dataKey) throws IOException {
90 | HTable indexTable = getIndexTable(columnFamily, columnName);
91 | policyToMaterializeIndex.putToIndex(indexTable, dataValue, dataKey);
92 | }
93 |
94 | public void deleteFromIndex(byte[] columnFamily, byte[] columnName, byte[] dataValue, byte[] dataKey) throws IOException {
95 | HTable indexTable = getIndexTable(columnFamily, columnName);
96 | policyToMaterializeIndex.deleteFromIndex(indexTable, dataValue, dataKey);
97 | }
98 | }
99 |
100 |
--------------------------------------------------------------------------------
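The driver above discovers its index tables from attributes on the base table's descriptor: attribute `INDEX_INDICATOR + n` holds `"family|qualifier"`, and the corresponding index table is named `<dataTable>_<family>_<qualifier>`. A sketch of declaring an indexed column this way (in the demo this is done by `HIndexConstantsAndUtils.createAndConfigBaseTable`, whose source under `src/tthbase/util` is not included in this dump, so treat the details as inferred from `initIndexTables()` above):

```java
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;

import tthbase.util.HIndexConstantsAndUtils;

public class IndexDeclarationSketch {
    static HTableDescriptor declareIndexedColumn() {
        HTableDescriptor desc = new HTableDescriptor("testtable");
        desc.addFamily(new HColumnDescriptor("cf"));
        // Attribute value is "family|qualifier"; initIndexTables() splits on '|'
        // and opens the index table named "testtable_cf_country".
        desc.setValue(HIndexConstantsAndUtils.INDEX_INDICATOR + 1, "cf|country");
        return desc;
    }
}
```
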
/src/tthbase/commons/MaterializeIndex.java:
--------------------------------------------------------------------------------
1 | package tthbase.commons;
2 | import org.apache.hadoop.hbase.client.HTable;
3 | import java.io.IOException;
4 | import java.util.Map;
5 | import java.util.HashMap;
6 | import java.util.List;
7 | import java.util.ArrayList;
8 |
9 |
10 | interface MaterializeIndex {
11 | public Map<byte[], List<byte[]>> getByIndexByRange(HTable indexTable, byte[] valueStart, byte[] valueStop) throws IOException;
12 |
13 | public void putToIndex(HTable indexTable, byte[] dataValue, byte[] dataKey) throws IOException;
14 |
15 | public void deleteFromIndex(HTable indexTable, byte[] dataValue, byte[] dataKey) throws IOException;
16 |
17 | }
18 |
--------------------------------------------------------------------------------
/src/tthbase/commons/MaterializeIndexByCompositeRowkey.java:
--------------------------------------------------------------------------------
1 | package tthbase.commons;
2 | import org.apache.hadoop.hbase.client.HTable;
3 | import org.apache.hadoop.hbase.KeyValue;
4 | import org.apache.hadoop.hbase.client.Result;
5 | import org.apache.hadoop.hbase.client.ResultScanner;
6 | import org.apache.hadoop.hbase.client.Scan;
7 | import org.apache.hadoop.hbase.client.Put;
8 | import org.apache.hadoop.hbase.client.Delete;
9 | import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
10 | import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
11 | import org.apache.hadoop.hbase.filter.Filter;
12 | import org.apache.hadoop.hbase.filter.FilterList;
13 | import org.apache.hadoop.hbase.filter.PrefixFilter;
14 | import org.apache.hadoop.hbase.util.Bytes;
15 |
16 | import java.io.IOException;
17 | import java.util.Map;
18 | import java.util.TreeMap;
19 | import java.util.List;
20 | import java.util.ArrayList;
21 |
22 | // specific to index materialization by composite index rowkey (i.e., indexRowKey=value/key)
23 | public class MaterializeIndexByCompositeRowkey implements MaterializeIndex {
24 | //using scanner
25 | public Map<byte[], List<byte[]>> getByIndexByRange(HTable indexTable, byte[] valueStart, byte[] valueStop) throws IOException {
26 | //read against index table
27 | Scan scan = new Scan();
28 | FilterList fl = new FilterList(FilterList.Operator.MUST_PASS_ALL); //return rows that meet all filter conditions. (AND)
29 | fl.addFilter(new FirstKeyOnlyFilter());//return first instance of a row, then skip to next row. (avoiding rows of the same keys).
30 | fl.addFilter(new KeyOnlyFilter());// return only the key, not the value.
31 | assert valueStart != null;
32 | scan.setStartRow(valueStart);
33 | if (valueStop == null){ //point query
34 | Filter prefixFilter = new PrefixFilter(valueStart);
35 | fl.addFilter(prefixFilter);
36 | } else {
37 | scan.setStopRow(valueStop);
38 | }
39 | scan.setFilter(fl);
40 |
41 | ResultScanner scanner = indexTable.getScanner(scan);
42 | //ResultScanner is for client-side scanning.
43 | Map<byte[], List<byte[]>> toRet = new TreeMap<byte[], List<byte[]>>(Bytes.BYTES_COMPARATOR); //byte[] has no value-based equals/hashCode, so a HashMap would never group entries of the same value
44 | for (Result r : scanner) {
45 | if(r.raw().length == 0) continue;
46 | for (KeyValue kv : r.raw()) {
47 | byte[] indexRowkey = kv.getRow();
48 | String [] parsedIndexRowkey = IndexStorageFormat.parseIndexRowkey(indexRowkey);
49 | byte[] dataValue = Bytes.toBytes(parsedIndexRowkey[0]);
50 | byte[] dataKey = Bytes.toBytes(parsedIndexRowkey[1]);
51 |
52 | if(toRet.get(dataValue) == null){
53 | List results = new ArrayList();
54 | results.add(dataKey);
55 | toRet.put(dataValue, results);
56 | } else {
57 | toRet.get(dataValue).add(dataKey);
58 | }
59 | }
60 | }
61 | scanner.close();
62 | return toRet;
63 | }
64 |
65 | public void putToIndex(HTable indexTable, byte[] dataValue, byte[] dataKey) throws IOException {
66 | byte[] indexRowkey = IndexStorageFormat.generateIndexRowkey(dataKey, dataValue);
67 | Put put2Index = new Put(indexRowkey);
68 | put2Index.add(IndexStorageFormat.INDEXTABLE_COLUMNFAMILY, IndexStorageFormat.INDEXTABLE_SPACEHOLDER, IndexStorageFormat.INDEXTABLE_SPACEHOLDER);
69 | indexTable.put(put2Index);
70 | }
71 |
72 | public void deleteFromIndex(HTable indexTable, byte[] dataValue, byte[] dataKey) throws IOException {
73 | byte[] indexRowkey = IndexStorageFormat.generateIndexRowkey(dataKey, dataValue);
74 | Delete del = new Delete(indexRowkey);
75 | //del.setTimestamp(timestamp);
76 | indexTable.delete(del);
77 | }
78 | }
79 |
80 | class IndexStorageFormat {
81 | //TOREMOVE below is specific to the implementation by composite index rowkey (rowkey=value/key)
82 | static final String INDEX_ROWKEY_DELIMITER = "/";
83 |
84 | static final public byte[] INDEXTABLE_COLUMNFAMILY = Bytes.toBytes("cf"); //be consistent with column_family_name in weblog_cf_country (in current preloaded dataset)
85 | static final public byte[] INDEXTABLE_SPACEHOLDER = Bytes.toBytes("EMPTY");
86 |
87 | static String[] parseIndexRowkey(byte[] indexRowkey){
88 | return Bytes.toString(indexRowkey).split(INDEX_ROWKEY_DELIMITER);
89 | }
90 |
91 | static byte[] generateIndexRowkey(byte[] dataKey, byte[] dataValue){
92 | return Bytes.toBytes(Bytes.toString(dataValue) + INDEX_ROWKEY_DELIMITER + Bytes.toString(dataKey));
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
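A concrete round-trip makes the composite rowkey format easy to see; since `IndexStorageFormat` is package-private, this small sketch would live in `tthbase.commons`:

```java
package tthbase.commons;

import org.apache.hadoop.hbase.util.Bytes;

public class RowkeyFormatSketch {
    public static void main(String[] args) {
        // Compose: indexRowkey = value "/" key.
        byte[] rowkey = IndexStorageFormat.generateIndexRowkey(
                Bytes.toBytes("key1"), Bytes.toBytes("v1"));
        System.out.println(Bytes.toString(rowkey)); // v1/key1

        // Parse it back: [0] is the data value, [1] the data row key.
        String[] parsed = IndexStorageFormat.parseIndexRowkey(rowkey);
        System.out.println(parsed[0] + " / " + parsed[1]); // v1 / key1
    }
}
```

One caveat this exposes: a data value containing the `/` delimiter would split incorrectly in `parseIndexRowkey`, which is presumably part of why the format is flagged TOREMOVE above.
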
/src/tthbase/coprocessor/BasicIndexObserver.java:
--------------------------------------------------------------------------------
1 | package tthbase.coprocessor;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.hadoop.conf.Configuration;
6 | import org.apache.hadoop.hbase.CoprocessorEnvironment;
7 | import org.apache.hadoop.hbase.HBaseConfiguration;
8 | import org.apache.hadoop.hbase.HTableDescriptor;
9 | import org.apache.hadoop.hbase.Cell;
10 | import org.apache.hadoop.hbase.client.Delete;
11 | import org.apache.hadoop.hbase.client.Get;
12 | import org.apache.hadoop.hbase.client.HTable;
13 | import org.apache.hadoop.hbase.client.Put;
14 | import org.apache.hadoop.hbase.client.Result;
15 | import org.apache.hadoop.hbase.client.Durability;
16 | import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
17 | import org.apache.hadoop.hbase.coprocessor.ObserverContext;
18 | import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
19 | import org.apache.hadoop.hbase.regionserver.HRegion;
20 | import org.apache.hadoop.hbase.regionserver.ScanType;
21 | import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
22 | import org.apache.hadoop.hbase.util.Bytes;
23 |
24 | import org.apache.hadoop.hbase.regionserver.InternalScanner;
25 | import org.apache.hadoop.hbase.regionserver.Store;
26 |
27 | import java.io.IOException;
28 | import java.util.List;
29 | import java.util.Map;
30 | import java.util.HashMap;
31 | import java.util.Iterator;
32 | import java.util.ArrayList;
33 |
34 | public class BasicIndexObserver extends LoggedObserver {
35 | private volatile boolean initialized; //volatile: required for the double-checked locking in tryInitialize()
36 |
37 | protected HTableUpdateIndexByPut dataTableWithIndexes = null;
38 |
39 | private void tryInitialize(HTableDescriptor desc) throws IOException {
40 | if (!initialized) {
41 | synchronized(this) {
42 | if (!initialized) {
43 | Configuration conf = HBaseConfiguration.create();
44 | dataTableWithIndexes = new HTableUpdateIndexByPut(conf, desc.getName()); //this will make copy of data table instance.
45 | initialized = true;
46 | }
47 | }
48 | }
49 | }
50 |
51 | @Override
52 | public void start(CoprocessorEnvironment e) throws IOException {
53 | setFunctionLevelLogging(false);
54 | initialized = false;
55 | super.start(e);
56 | }
57 |
58 | @Override
59 | public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put, final WALEdit edit, final Durability durability) throws IOException {
60 | super.prePut(e, put, edit, durability);
61 | tryInitialize(e.getEnvironment().getRegion().getTableDesc());
62 | }
63 |
64 | @Override
65 | public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put, final WALEdit edit, final Durability durability) throws IOException {
66 | super.postPut(e, put, edit, durability);
67 | }
68 |
69 | @Override
70 | public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, final Durability durability) throws IOException {
71 | super.preDelete(e, delete, edit, durability);
72 | tryInitialize(e.getEnvironment().getRegion().getTableDesc());
73 | }
74 |
75 | @Override
76 | public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
77 | final Store store, final InternalScanner scanner, final ScanType scanType) throws IOException{
78 | InternalScanner toRet = super.preCompact(e, store, scanner, scanType);
79 | tryInitialize(e.getEnvironment().getRegion().getTableDesc());
80 | return toRet;
81 | }
82 |
83 | @Override
84 | public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> result) throws IOException {
85 | super.preGetOp(e, get, result);
86 | tryInitialize(e.getEnvironment().getRegion().getTableDesc());
87 | }
88 |
89 | @Override
90 | public void stop(CoprocessorEnvironment e) throws IOException {
91 | super.stop(e);
92 | if(dataTableWithIndexes != null){
93 | dataTableWithIndexes.close();
94 | }
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/src/tthbase/coprocessor/HTableUpdateIndexByPut.java:
--------------------------------------------------------------------------------
1 | package tthbase.coprocessor;
2 | import org.apache.hadoop.conf.Configuration;
3 | import org.apache.hadoop.hbase.client.Put;
4 | import org.apache.hadoop.hbase.client.Delete;
5 | import org.apache.hadoop.hbase.client.Get;
6 | import org.apache.hadoop.hbase.Cell;
7 | import org.apache.hadoop.hbase.client.Result;
8 | import org.apache.hadoop.hbase.util.Bytes;
9 | import org.apache.hadoop.hbase.HTableDescriptor;
10 |
11 | import java.io.IOException;
12 | import java.util.List;
13 |
14 | import tthbase.util.HIndexConstantsAndUtils;
15 | import tthbase.commons.HTableWithIndexesDriver;
16 |
17 | public class HTableUpdateIndexByPut extends HTableWithIndexesDriver {
18 | public HTableUpdateIndexByPut(Configuration conf, byte[] tableName) throws IOException {
19 | super(conf, tableName);
20 | }
21 |
22 | final static private int INSERT_INDEX = 0;
23 | final static private int READ_BASE = 1;
24 | final static private int DELETE_INDEX = 2;
25 |
26 | /**
27 | @param readResult4Delete non-null only in DELETE_INDEX mode: the old values previously read from the base table.
28 | @return the read result in READ_BASE mode; null otherwise.
29 | */
30 | private Result internalPrimitivePerPut(Put put, int mode, Result readResult4Delete) throws IOException {
31 | HTableDescriptor dataTableDesc = null;
32 | try {
33 | dataTableDesc = getTableDescriptor();
34 | } catch (IOException e1) {
35 | throw new RuntimeException("TTERROR" + (errorIndex++) + "_DETAIL: " + e1.getMessage());
36 | }
37 |
38 | byte[] dataKey = put.getRow();
39 | Get get = null;
40 | if (mode == READ_BASE) {
41 | get = new Get(dataKey);
42 | }
43 |
44 | for (int index = 1; ; index++) {
45 | String fullpathOfIndexedcolumnInDatatable = dataTableDesc.getValue(HIndexConstantsAndUtils.INDEX_INDICATOR + index);
46 | if(fullpathOfIndexedcolumnInDatatable == null){
47 | //no (further) index column, stop at current index
48 | break;
49 | } else {
50 | String[] datatableColumnPath = fullpathOfIndexedcolumnInDatatable.split("\\|");
51 | byte[] indexedColumnFamily = Bytes.toBytes(datatableColumnPath[0]);
52 | byte[] indexedColumnName = Bytes.toBytes(datatableColumnPath[1]);
53 | byte[] dataValuePerColumn = getColumnValue(put, indexedColumnFamily, indexedColumnName);
54 | if(dataValuePerColumn != null){
55 | if(mode == INSERT_INDEX){
56 | //put new to index
57 | putToIndex(indexedColumnFamily, indexedColumnName, dataValuePerColumn, dataKey);
58 | } else if (mode == READ_BASE) {
59 | //read base
60 | //TOREMOVE need specify timestamp to guarantee get old values.
61 | get.addColumn(indexedColumnFamily, indexedColumnName);
62 | } else { // DELETE_INDEX
63 | //delete old from index
64 | Result readResultOld = readResult4Delete;
65 | byte[] oldDataValuePerColumn = readResultOld.getValue(indexedColumnFamily, indexedColumnName);
66 | deleteFromIndex(indexedColumnFamily, indexedColumnName, oldDataValuePerColumn, dataKey);
67 | }
68 | } else {
69 | //the indexed column (family) is not associated with the put, to continue.
70 | continue;
71 | }
72 | }
73 | }
74 | if (mode == READ_BASE) {
75 | Result readResultOld = this.get(get);
76 | return readResultOld;
77 | } else {
78 | return null;
79 | }
80 | }
81 |
82 | public void insertNewToIndexes(Put put) throws IOException {
83 | internalPrimitivePerPut(put, INSERT_INDEX, null);
84 | }
85 |
86 | public void readBaseAndDeleteOld(Put put) throws IOException {
87 | Result readBaseResult = internalPrimitivePerPut(put, READ_BASE, null);
88 | internalPrimitivePerPut(put, DELETE_INDEX, readBaseResult);
89 | }
90 |
91 | //TOREMOVE does it belong to HTableWithIndexesDriver?
92 | //it gets one and only one version.
93 | protected byte[] getColumnValue(final Put put, byte[] columnFamily, byte[] columnName){
94 | if(!put.has(columnFamily, columnName)){
95 | return null;
96 | }
97 |
98 | List<Cell> values = put.get(columnFamily, columnName);
99 | if (values == null || values.isEmpty()) {
100 | throw new RuntimeException("TTERROR_" + (errorIndex++) + ": " + "empty value lists while put.has() returns true!");
101 | }
102 |
103 | //should be one element in values, since column qualifier is an exact name, matching one column; also one version of value is expected.
104 | if (values.size() != 1) {
105 | throw new RuntimeException("TTERROR_" + (errorIndex++) + ": " + "multiple versions of values or multiple columns by qualier in put()!");
106 | }
107 |
108 | //TOREMOVE to get timestamp, refer to old project code.
109 | Cell cur = values.get(0);
110 | byte[] value = cur.getValue();
111 | return value;
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/src/tthbase/coprocessor/IndexObserverBaseline.java:
--------------------------------------------------------------------------------
1 | package tthbase.coprocessor;
2 | import org.apache.hadoop.hbase.coprocessor.ObserverContext;
3 | import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
4 | import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
5 | import org.apache.hadoop.hbase.client.Put;
6 | import org.apache.hadoop.hbase.client.Durability;
7 | import java.io.IOException;
8 |
9 | public class IndexObserverBaseline extends BasicIndexObserver {
10 | @Override
11 | public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put, final WALEdit edit, final Durability durability) throws IOException {
12 | super.prePut(e, put, edit, durability);
13 | dataTableWithIndexes.insertNewToIndexes(put); //baseline: eagerly append the new index entry...
14 | dataTableWithIndexes.readBaseAndDeleteOld(put); //...and synchronously read the base table to delete the stale entry (a read-modify-write on every Put)
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/src/tthbase/coprocessor/IndexObserverwReadRepair.java:
--------------------------------------------------------------------------------
1 | package tthbase.coprocessor;
2 | import org.apache.hadoop.hbase.coprocessor.ObserverContext;
3 | import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
4 | import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
5 | import org.apache.hadoop.hbase.client.Put;
6 | import org.apache.hadoop.hbase.client.Durability;
7 | import java.io.IOException;
8 |
9 | public class IndexObserverwReadRepair extends BasicIndexObserver {
10 | @Override
11 | public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put, final WALEdit edit, final Durability durability) throws IOException {
12 | super.prePut(e, put, edit, durability);
13 | dataTableWithIndexes.insertNewToIndexes(put); //deferred scheme: append only; stale entries are repaired at read time or removed at compaction
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
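On the write path this observer only appends the new index entry; unlike `IndexObserverBaseline` it never reads the base table or deletes the stale entry inline, deferring cleanup to read repair and to compaction. Wiring the deferred scheme up follows `Demo.initCoProcessors()` above; a condensed sketch (assuming `conf`, `htable`, and `coprocessorJarLoc` are set up as in the demo):

```java
import org.apache.hadoop.conf.Configuration;

import tthbase.client.HTableGetByIndex;
import tthbase.util.HIndexConstantsAndUtils;

public class DeferredWiringSketch {
    static void wire(Configuration conf, HTableGetByIndex htable,
                     String coprocessorJarLoc) throws Exception {
        int slot = 1;
        // Append-only index maintenance on Put (this file) ...
        HIndexConstantsAndUtils.updateCoprocessor(conf, htable.getTableName(), slot++,
                true, coprocessorJarLoc, "tthbase.coprocessor.IndexObserverwReadRepair");
        // ... plus physical removal of stale index entries at compaction time
        // (class registered by the demo; its source is not included in this tree).
        HIndexConstantsAndUtils.updateCoprocessor(conf, htable.getTableName(), slot++,
                true, coprocessorJarLoc, "tthbase.coprocessor.PhysicalDeletionInCompaction");
        // Pair with read-time verification on the client.
        htable.configPolicy(HTableGetByIndex.PLY_READCHECK);
    }
}
```
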
/src/tthbase/coprocessor/LoggedObserver.java:
--------------------------------------------------------------------------------
1 | package tthbase.coprocessor;
2 |
3 | import org.apache.commons.logging.Log;
4 | import org.apache.commons.logging.LogFactory;
5 | import org.apache.hadoop.conf.Configuration;
6 | import org.apache.hadoop.hbase.CoprocessorEnvironment;
7 | import org.apache.hadoop.hbase.HBaseConfiguration;
8 | import org.apache.hadoop.hbase.HTableDescriptor;
9 | import org.apache.hadoop.hbase.Cell;
10 | import org.apache.hadoop.hbase.client.Delete;
11 | import org.apache.hadoop.hbase.client.Get;
12 | import org.apache.hadoop.hbase.client.HTable;
13 | import org.apache.hadoop.hbase.client.Put;
14 | import org.apache.hadoop.hbase.client.Result;
15 | import org.apache.hadoop.hbase.client.Durability;
16 | import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
17 | import org.apache.hadoop.hbase.coprocessor.ObserverContext;
18 | import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
19 | import org.apache.hadoop.hbase.regionserver.HRegion;
20 | import org.apache.hadoop.hbase.regionserver.ScanType;
21 | import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
22 | import org.apache.hadoop.hbase.util.Bytes;
23 |
24 | import java.io.IOException;
25 | import java.util.List;
26 | import java.util.Map;
27 | import java.util.HashMap;
28 | import java.util.Iterator;
29 | import java.util.ArrayList;
30 | import java.util.NavigableSet;
31 |
32 | import org.apache.hadoop.hbase.client.Scan;
33 | import org.apache.hadoop.hbase.regionserver.Store;
34 | import org.apache.hadoop.hbase.regionserver.StoreFile;
35 | import org.apache.hadoop.hbase.regionserver.InternalScanner;
36 | import org.apache.hadoop.hbase.regionserver.*;
37 | /**
38 | To view the printout, check the region server logs (logs/*.log).
39 | */
40 |
41 | public class LoggedObserver extends BaseRegionObserver {
42 | public static final Log LOG = LogFactory.getLog(HRegion.class); //reuse HRegion's logger so output lands in the region server log
43 |
44 | private boolean functionLevelLoggingEnabled = true;
45 |
46 | public void setFunctionLevelLogging(boolean b) {
47 | functionLevelLoggingEnabled = b;
48 | }
49 |
50 | @Override
51 | public void start(CoprocessorEnvironment e) throws IOException {
52 | if(functionLevelLoggingEnabled){
53 | LOG.debug("TTDEBUG_FUNC: start()");
54 | }
55 | }
56 |
57 | @Override
58 | public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
59 | if(functionLevelLoggingEnabled){
60 | LOG.debug("TTDEBUG_FUNC: postOpen()");
61 | }
62 | }
63 |
64 | @Override
65 | public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put, final WALEdit edit, final Durability durability) throws IOException {
66 | if(functionLevelLoggingEnabled){
67 | LOG.debug("TTDEBUG_FUNC: prePut()");
68 | }
69 | }
70 |
71 | @Override
72 | public void postPut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put, final WALEdit edit, final Durability durability) throws IOException {
73 | if(functionLevelLoggingEnabled){
74 | LOG.debug("TTDEBUG_FUNC: postPut()");
75 | }
76 | }
77 |
78 | @Override
79 | public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete, WALEdit edit, final Durability durability) throws IOException {
80 | if(functionLevelLoggingEnabled){
81 | LOG.debug("TTDEBUG_FUNC: preDelete()");
82 | }
83 | }
84 |
85 | @Override
86 | public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> result) throws IOException {
87 | if(functionLevelLoggingEnabled){
88 | LOG.debug("TTDEBUG_FUNC: preGet()");
89 | }
90 | }
91 |
92 | @Override
93 | public void postGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> result) throws IOException {
94 | if(functionLevelLoggingEnabled){
95 | LOG.debug("TTDEBUG_FUNC: postGet()");
96 | }
97 | }
98 |
99 | @Override
100 | public void stop(CoprocessorEnvironment e) throws IOException {
101 | if(functionLevelLoggingEnabled){
102 | LOG.debug("TTDEBUG_FUNC: stop()");
103 | }
104 | }
105 |
106 | //new features in hbase-0.94.2
107 |
108 | @Override
109 | public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s) throws IOException {
110 | if(functionLevelLoggingEnabled){
111 | LOG.debug("TTDEBUG_FUNC: preCompactScannerOpen()");
112 | }
113 | return null; //return null to perform default processing.
114 | }
115 |
116 | @Override
117 | public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store, final Scan scan, final NavigableSet<byte[]> targetCols, final KeyValueScanner s) throws IOException {
118 | if(functionLevelLoggingEnabled){
119 | LOG.debug("TTDEBUG_FUNC: preStoreScannerOpen()");
120 | }
121 | return null;
122 | }
123 |
124 | @Override
125 | public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
126 | if(functionLevelLoggingEnabled){
127 | LOG.debug("TTDEBUG_FUNC: preFlushScannerOpen()");
128 | }
129 | return null;
130 | }
131 |
132 | @Override
133 | public void postCompact(ObserverContext<RegionCoprocessorEnvironment> c, Store store, StoreFile resultFile) {
134 | if(functionLevelLoggingEnabled){
135 | LOG.debug("TTDEBUG_FUNC: postCompact()");
136 | }
137 | }
138 |
139 | @Override
140 | public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
141 | final Store store, final InternalScanner scanner, final ScanType scanType) throws IOException{
142 | if(functionLevelLoggingEnabled){
143 | LOG.debug("TTDEBUG_FUNC: preCompact()");
144 | }
145 | return scanner;
146 | //Returns: the scanner to use during compaction. Should not be null unless the implementation is writing new store files on its own.
147 | }
148 |
149 | @Override
150 | public void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested) {
151 | if(functionLevelLoggingEnabled){
152 | LOG.debug("TTDEBUG_FUNC: preClose()");
153 | }
154 | }
155 |
156 | }
157 |
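To actually see these trace lines, the observer has to be attached to a table, e.g. via the updateCoprocessor helper in tthbase.util.HIndexConstantsAndUtils. A sketch (the table name and jar path are placeholders, not values from this repo):

    //load LoggedObserver as the table-level attribute coprocessor$1
    Configuration conf = HBaseConfiguration.create();
    HIndexConstantsAndUtils.updateCoprocessor(conf, Bytes.toBytes("testtable"), 1, true,
            "hdfs://node1:8020/hbase_cp/libDeli-coproc.jar",
            "tthbase.coprocessor.LoggedObserver");
    //every region operation on 'testtable' then emits a TTDEBUG_FUNC line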
--------------------------------------------------------------------------------
/src/tthbase/util/HIndexConstantsAndUtils.java:
--------------------------------------------------------------------------------
1 | package tthbase.util;
2 |
3 | import org.apache.hadoop.hbase.client.Result;
4 | import org.apache.hadoop.hbase.client.Put;
5 | import org.apache.hadoop.hbase.client.Delete;
6 | import org.apache.hadoop.conf.Configuration;
7 | import org.apache.hadoop.hbase.HBaseConfiguration;
8 | import org.apache.hadoop.hbase.util.Bytes;
9 | import org.apache.hadoop.hbase.client.HBaseAdmin;
10 | import org.apache.hadoop.hbase.HColumnDescriptor;
11 | import org.apache.hadoop.hbase.HTableDescriptor;
12 | import org.apache.hadoop.hbase.HConstants;
13 |
14 | import java.io.IOException;
15 |
16 | public class HIndexConstantsAndUtils {
17 | //TODO refactor: to match with HTableWithIndexesDriver.java
18 | //TODO refactor: to match with tthbase.util.UpdateCoprocessor.java
19 | public static final String INDEX_INDICATOR = "secondaryIndex$";
20 | public static final String INDEX_DELIMITOR = "|";
21 | //TODO make it a static function in commons.jar.
22 | public static byte[] generateIndexTableName(byte[] dataTableName, byte[] columnFamily, byte[] columnName){
23 | return Bytes.toBytes(Bytes.toString(dataTableName) + '_' + Bytes.toString(columnFamily) + '_' + Bytes.toString(columnName));
24 | }
25 |
26 | //TODO copied from and refined based on UpdateCoprocessor.java
27 | public static void updateCoprocessor(Configuration conf, byte[] tableName, int indexOfCoprocessor, boolean ifUpdateorRemove, String coprocessorLoc, String coprocessorClassname) throws IOException{
28 | String rawAttributeName = "coprocessor$";
29 | String value = coprocessorLoc + "|" /*attribute field separator, not the index delimiter*/ + coprocessorClassname + "|1001|arg1=1,arg2=2";
30 | updateTableAttribute(conf, tableName, rawAttributeName, indexOfCoprocessor, ifUpdateorRemove, value);
31 | }
32 |
33 | public static void updateIndexIndicator(Configuration conf, byte[] tableName, int indexOfIndexIndicator, boolean ifUpdateorRemove, String indexedCF, String indexedColumn) throws IOException{
34 | String rawAttributeName = INDEX_INDICATOR;
35 | String value = indexedCF + INDEX_DELIMITOR + indexedColumn;
36 | updateTableAttribute(conf, tableName, rawAttributeName, indexOfIndexIndicator, ifUpdateorRemove, value);
37 | }
38 |
39 | /**
40 | @param rawAttributeName the attribute name as seen by applications; it allows multiple values, e.g. secondaryIndex in secondaryIndex$1 and coprocessor in coprocessor$2
41 | @param indexOfAttribute the index under the same raw attribute name, e.g. 2 in secondaryIndex$2
42 | */
43 | static void updateTableAttribute(Configuration conf, byte[] tableName, String rawAttributeName, int indexOfAttribute, boolean ifUpdateorRemove, String value) throws IOException{
44 | HBaseAdmin admin = new HBaseAdmin(conf);
45 | HTableDescriptor desc = admin.getTableDescriptor(tableName);
46 | admin.disableTable(tableName);
47 | // System.out.println("TTDEBUG: disable table " + Bytes.toString(tableName));
48 | String coprocessorKey = rawAttributeName + indexOfAttribute;
49 | if(!ifUpdateorRemove) {
50 | desc.remove(Bytes.toBytes(coprocessorKey));
51 | } else {
52 | desc.setValue(coprocessorKey, value);
53 | }
54 | admin.modifyTable(tableName, desc);
55 | // System.out.println("TTDEBUG: modify table " + Bytes.toString(tableName));
56 | admin.enableTable(tableName);
57 | // System.out.println("TTDEBUG: enable table " + Bytes.toString(tableName));
58 | HTableDescriptor descNew = admin.getTableDescriptor(tableName);
59 | //modifyTable is asynchronous; loop until the new descriptor is visible
60 | while (!desc.equals(descNew)){
61 | System.err.println("TTDEBUG: waiting for descriptor to change: from " + descNew + " to " + desc);
62 | try {Thread.sleep(500);} catch(InterruptedException ex) {}
63 | descNew = admin.getTableDescriptor(tableName);
64 | }
65 | }
66 |
67 | public static void createAndConfigBaseTable(Configuration conf, byte[] tableName, byte[] columnFamily, String[] indexedColumnNames) throws IOException{
68 | //create a table with column family columnFamily
69 | HBaseAdmin admin = new HBaseAdmin(conf);
70 | HTableDescriptor desc = new HTableDescriptor(tableName);
71 | //specify indexable columns.
72 | for (int i = 0; i < indexedColumnNames.length; i ++){
73 | desc.setValue(INDEX_INDICATOR + (i + 1), Bytes.toString(columnFamily) + INDEX_DELIMITOR + indexedColumnNames[i]);
74 | }
75 | HColumnDescriptor descColFamily = new HColumnDescriptor(columnFamily);
76 | //configure to set KEEP_DELETED_CELLS => 'true'
77 | descColFamily.setKeepDeletedCells(true);
78 | descColFamily.setTimeToLive(HConstants.FOREVER);
79 | descColFamily.setMaxVersions(Integer.MAX_VALUE);
80 |
81 | desc.addFamily(descColFamily);
82 | admin.createTable(desc);
83 | }
84 |
85 | public static void createAndConfigIndexTable(Configuration conf, byte[] tableName, byte[] columnFamily) throws IOException{
86 | //create a table with column family columnFamily
87 | HBaseAdmin admin = new HBaseAdmin(conf);
88 | HTableDescriptor desc = new HTableDescriptor(tableName);
89 | //the index table keeps default column family settings (no KEEP_DELETED_CELLS needed here)
90 | HColumnDescriptor descColFamily = new HColumnDescriptor(columnFamily);
91 | desc.addFamily(descColFamily);
92 | admin.createTable(desc);
93 | }
94 |
95 |
96 | public static void deleteTable(Configuration conf, byte[] tableName) throws IOException{
97 | HBaseAdmin admin = new HBaseAdmin(conf);
98 | admin.disableTable(tableName);
99 | admin.deleteTable(tableName);
100 | }
101 | }
102 |
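A sketch of how these helpers compose into a full setup; "weblog", the indexed column, and the jar path are illustrative placeholders, and the observer class is one of the coprocessors in this repo:

    Configuration conf = HBaseConfiguration.create();
    byte[] dataTable = Bytes.toBytes("weblog");      //placeholder table name
    byte[] cf = Bytes.toBytes("cf");
    String indexedColumn = "country";

    //base table: records secondaryIndex$1 and disables physical deletion on cf
    HIndexConstantsAndUtils.createAndConfigBaseTable(conf, dataTable, cf, new String[]{indexedColumn});
    //companion index table, named <dataTable>_<cf>_<column>
    byte[] indexTable = HIndexConstantsAndUtils.generateIndexTableName(dataTable, cf, Bytes.toBytes(indexedColumn));
    HIndexConstantsAndUtils.createAndConfigIndexTable(conf, indexTable, cf);
    //attach an index-maintaining observer (jar path is a placeholder)
    HIndexConstantsAndUtils.updateCoprocessor(conf, dataTable, 1, true,
            "hdfs://node1:8020/hbase_cp/libDeli-coproc.jar",
            "tthbase.coprocessor.IndexObserverBaseline");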
--------------------------------------------------------------------------------
/src/tthbase/util/IssueCompaction.java:
--------------------------------------------------------------------------------
1 | package tthbase.util;
2 | import tthbase.client.HTableGetByIndex;
3 | import org.apache.hadoop.hbase.client.HBaseAdmin;
4 | import org.apache.hadoop.hbase.client.Get;
5 | import org.apache.hadoop.hbase.client.Put;
6 | import org.apache.hadoop.hbase.client.Delete;
7 | import org.apache.hadoop.hbase.client.Result;
8 | import org.apache.hadoop.hbase.KeyValue;
9 | import org.apache.hadoop.conf.Configuration;
10 | import org.apache.hadoop.hbase.HBaseConfiguration;
11 | import org.apache.hadoop.hbase.util.Bytes;
12 | import tthbase.util.HIndexConstantsAndUtils;
13 | import java.util.List;
14 | import java.io.IOException;
15 |
16 | /*
17 | disable 'testtable2'
18 | drop 'testtable2'
19 | create 'testtable2', {NAME=>'cf', KEEP_DELETED_CELLS=>true}
20 | disable 'testtable2_cf_country'
21 | drop 'testtable2_cf_country'
22 | create 'testtable2_cf_country', "cf"
23 |
24 | disable 'testtable2'
25 | alter 'testtable2', METHOD => 'table_att', 'coprocessor' => 'hdfs://node1:8020/hbase_cp/libHbaseCoprocessor.jar|tthbase.coprocessor.PhysicalDeletionInCompaction|1001|arg1=1,arg2=2'
26 | alter 'testtable2', METHOD => 'table_att', 'secondaryIndex$1' => 'cf|country'
27 | enable 'testtable2'
28 |
29 | put 'testtable2', "key1", 'cf:country', "v1", 101
30 | flush 'testtable2' #hbase compaction ignores all data from memstore.
31 | put 'testtable2', "key1", 'cf:country', "v2", 102
32 | flush 'testtable2' #hbase compaction ignores all data from memstore.
33 | major_compact 'testtable2'
34 | get 'testtable2', 'key1', {COLUMN => 'cf:country', VERSIONS => 4} #1 version
35 | describe 'testtable2'
36 | */
37 |
38 | public class IssueCompaction {
39 | static byte[] columnFamily = Bytes.toBytes("cf");
40 | static String indexedColumnName = "country";
41 | static byte[] indexTableName = null;
42 | static String coprocessorJarLoc = "hdfs://node1:8020/hbase_cp/libHbaseCoprocessor.jar";
43 |
44 | static HTableGetByIndex htable = null;
45 | static Configuration conf = null;
46 | static byte[] rowKey = Bytes.toBytes("key1");
47 |
48 | static void fakedCreateTable(byte[] dataTableName) throws IOException{
49 | //flush and compact
50 | //setup
51 | HIndexConstantsAndUtils.createAndConfigBaseTable(conf, dataTableName, columnFamily, new String[]{indexedColumnName});
52 | //create index table
53 | HIndexConstantsAndUtils.createAndConfigIndexTable(conf, indexTableName, columnFamily);
54 | htable = new HTableGetByIndex(conf, dataTableName);
55 | }
56 |
57 | static void fakedLoadData(byte[] dataTableName) throws IOException{
58 | //load data
59 | //put value1
60 | Put p = new Put(rowKey);
61 | p.add(columnFamily, Bytes.toBytes(indexedColumnName), 101, Bytes.toBytes("v1"));
62 | htable.put(p);
63 |
64 | //put value2
65 | p = new Put(rowKey);
66 | p.add(columnFamily, Bytes.toBytes(indexedColumnName), 102, Bytes.toBytes("v2"));
67 | htable.put(p);
68 | }
69 |
70 | public static void issueMajorCompactionAsynchronously(byte[] dataTableName){
71 | try{
72 | HBaseAdmin admin = new HBaseAdmin(conf);
73 | //admin.flush(dataTableName);
74 | admin.majorCompact(dataTableName);
75 | } catch(Exception e) {
76 | e.printStackTrace();
77 | }
78 | }
79 |
80 | public static void fakedTest(byte[] dataTableName){
81 | try{
82 | //get all versions
83 | Get g = null;
84 | Result res = null;
85 | g = new Get(rowKey);
86 | g.addColumn(columnFamily, Bytes.toBytes(indexedColumnName));
87 | g.setMaxVersions();
88 | res = htable.get(g);
89 | List<KeyValue> rl = res.getColumn(columnFamily, Bytes.toBytes(indexedColumnName));
90 | System.out.println("TTDEBUG: " + rl.size());
91 | } catch(Exception e) {
92 | e.printStackTrace();
93 | }
94 | }
95 |
96 | public static void fakedTeardown(byte[] dataTableName){
97 | try{
98 | //teardown
99 | if(htable != null){
100 | htable.close();
101 | }
102 | HIndexConstantsAndUtils.deleteTable(conf, indexTableName);
103 | HIndexConstantsAndUtils.deleteTable(conf, dataTableName);
104 | } catch(Exception e) {
105 | e.printStackTrace();
106 | }
107 | }
108 |
109 | public static void main(String[] args) {
110 | if(args == null || args.length != 2){
111 | System.out.println("format: 0.tablename 1.[yes|no]if_load_coprocessor_physical_deletion");
112 | System.out.println("example: java tthbase.util.IssueMajorCompactionAsynchronously testtable yes");
113 | return;
114 | }
115 | byte[] dataTableName = Bytes.toBytes(args[0]);
116 | boolean loadCoprocessor = "yes".equals(args[1]);
117 | // System.out.println("TTDEBUG tablename=" + Bytes.toString(dataTableName) + ", if2loadcoprocessor=" + loadCoprocessor);
118 | indexTableName = HIndexConstantsAndUtils.generateIndexTableName(dataTableName, columnFamily, Bytes.toBytes(indexedColumnName)/*TODO column family in index table*/);
119 | conf = HBaseConfiguration.create();
120 |
121 | try{
122 | if (loadCoprocessor){
123 | HIndexConstantsAndUtils.updateCoprocessor(conf, dataTableName, 1, true, coprocessorJarLoc, "tthbase.coprocessor.PhysicalDeletionInCompaction");
124 | } else {
125 | HIndexConstantsAndUtils.updateCoprocessor(conf, dataTableName, 1, false, null, null);
126 | }
127 | issueMajorCompactionAsynchronously(dataTableName);
128 | } catch(Exception e){
129 | e.printStackTrace();
130 | }
131 | }
132 | }
133 |
--------------------------------------------------------------------------------
/src/tthbase/util/UpdateCoprocessor.java:
--------------------------------------------------------------------------------
1 | package tthbase.util;
2 |
3 | import org.apache.hadoop.conf.Configuration;
4 | import org.apache.hadoop.hbase.HBaseConfiguration;
5 | import org.apache.hadoop.hbase.HColumnDescriptor;
6 | import org.apache.hadoop.hbase.HTableDescriptor;
7 | import org.apache.hadoop.hbase.client.HBaseAdmin;
8 | import org.apache.hadoop.hbase.client.HTable;
9 | import org.apache.hadoop.hbase.client.Put;
10 | import org.apache.hadoop.hbase.client.Row;
11 | import org.apache.hadoop.hbase.util.Bytes;
12 | import org.apache.hadoop.hbase.util.Pair;
13 |
14 | import java.io.File;
15 | import java.io.IOException;
16 | import java.nio.charset.Charset;
17 | import java.util.ArrayList;
18 | import java.util.List;
19 |
20 | import org.apache.hadoop.hbase.HConstants;
21 |
22 | /**
23 | 1. update hbase coprocessor
24 | 2. disable physical deletion in all column families that contain indexed columns:
25 | -KEEP_DELETED_CELLS = true
26 | -TTL = HConstants.FOREVER
27 | -maxVersion = Integer.MAX_VALUE
28 | */
29 |
30 | public class UpdateCoprocessor {
31 | public static final byte[] FIXED_INDEX_CF = Bytes.toBytes("cf");
32 | public static final String USAGE = "Create a table and associated index;\n " +
33 | "Arguments:\n 1)zkserver 2)zkserver_port \n 3)table_name 4)list of cfs in table, in a single {},separated by ," +
34 | "5) INDEX_CP_NAME 6) INDEX_CP_PATH 7) INDEX_CP_CLASS "+
35 | "8)-[list of index columns in the format of cfName|colName]\n"+
36 | "format: if INDEX_CP_CLASS contains null, any coprocessor will be unloaded\n" +
37 | "***An example\n" +
38 | "saba20.watson.ibm.com 2181 weblog {cf} coprocessor\\$1 hdfs://saba20.watson.ibm.com:8020/index-coprocessor-0.1.0.jar org.apache.hadoop.hbase.coprocessor.index.SyncSecondaryIndexObserver cf\\|country cf\\|ip";
39 |
40 | public static String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
41 | public static String INDEX_CP_NAME;// = "coprocessor$1";
42 | public static String INDEX_CP_PATH;// = "hdfs://saba20.watson.ibm.com:8020/index-coprocessor-0.1.0.jar";
43 | public static String INDEX_CP_CLASS;// = "org.apache.hadoop.hbase.coprocessor.index.SyncSecondaryIndexObserver";
44 |
45 | public static String zkserver;//saba20.watson.ibm.com
46 | public static String zkport;//2181
47 | public static String dataTableName;//weblog
48 | public static String cfList;
49 | public static String [] indexItems;//{cf|country, cf|ip}
50 | public static String cflist;
51 | public static String [] cfs;
52 |
53 | public static void main(String[] args) throws IOException {
54 | //arguments follow USAGE above; at least eight are expected
55 | int bar = 8;
56 | if(args.length < bar){
57 | System.out.println(USAGE);
58 | return;
59 | }
60 | zkserver = args[0];
61 | zkport = args[1];
62 | dataTableName = args[2];
63 | cfList = args[3];
64 | INDEX_CP_NAME = args[4];
65 | INDEX_CP_PATH = args[5];
66 | INDEX_CP_CLASS = args[6];
67 | cfs = getColFamilys(cfList);
68 |
69 | if(args.length > bar){
70 | indexItems = new String[args.length-bar];
71 | for(int i=bar;i<args.length;i++){
72 | indexItems[i-bar] = args[i];
73 | }
74 | }
75 |
76 | Configuration conf = HBaseConfiguration.create();
77 | conf.set("hbase.zookeeper.quorum", zkserver);
78 | conf.set("hbase.zookeeper.property.clientPort", zkport);
79 |
80 | //report which coprocessor will be loaded or unloaded
81 | System.out.println("TTDEBUG: load coprocessor " + INDEX_CP_NAME + "=>" + INDEX_CP_CLASS);
82 |
83 |
84 | HTable dataTable = new HTable(conf, dataTableName);
85 | updateCoprocessor(conf, Bytes.toBytes(dataTableName));
86 | }
87 |
88 | private static void updateCoprocessor(Configuration conf, byte[] dataTableName) throws IOException{
89 | HBaseAdmin admin = new HBaseAdmin(conf);
90 | HTableDescriptor desc = admin.getTableDescriptor(dataTableName);
91 | admin.disableTable(dataTableName);
92 | System.out.println("TTDEBUG: disable data table");
93 | if(INDEX_CP_CLASS.contains("null")) {
94 | desc.remove(Bytes.toBytes(INDEX_CP_NAME));
95 | } else {
96 | desc.setValue(INDEX_CP_NAME, INDEX_CP_PATH + "|" + INDEX_CP_CLASS + "|1001|arg1=1,arg2=2");
97 | }
98 |
99 | HColumnDescriptor descIndexCF = desc.getFamily(Bytes.toBytes("cf"));//TOREMOVE: don't hardcode the "cf" family
100 | //KEEP_DELETED_CELLS => 'true'
101 | descIndexCF.setKeepDeletedCells(true);
102 | descIndexCF.setTimeToLive(HConstants.FOREVER);
103 | descIndexCF.setMaxVersions(Integer.MAX_VALUE);
104 |
105 | admin.modifyTable(dataTableName, desc);
106 | System.out.println("TTDEBUG: modify data table");
107 | admin.enableTable(dataTableName);
108 | System.out.println("TTDEBUG: enable data table");
109 | HTableDescriptor descNew = admin.getTableDescriptor(dataTableName);
110 | //modifyTable is asynchronous; loop until the new descriptor is visible
111 | while (!desc.equals(descNew)){
112 | System.out.println("TTDEBUG: waiting for descriptor to change: from " + descNew + " to " + desc);
113 | try {Thread.sleep(500);} catch(InterruptedException ex) {}
114 | descNew = admin.getTableDescriptor(dataTableName);
115 | }
116 | }
117 |
118 | private static String[] getColFamilys(String cflist) {
119 | //{cf1, cf2}
120 | String t = cflist.substring(cflist.indexOf('{')+1, cflist.lastIndexOf('}'));
121 | String temp[] = t.split(",");
122 | for(int i=0;i<temp.length;i++){
123 | temp[i] = temp[i].trim();
124 | }
125 | return temp;
126 | }
127 | public static List<Pair<String, String>> getIndexCFAndColumn(HTableDescriptor htd) {
128 | List<Pair<String, String>> result = new ArrayList<Pair<String, String>>();
129 | Pair<String, String> cfp = null;
130 | int i = 1;
131 | String index = null;
132 | do {
133 | index = htd.getValue(HIndexConstantsAndUtils.INDEX_INDICATOR + i);
134 | if (index != null) {
135 | String temp[] = index.split("\\"+HIndexConstantsAndUtils.INDEX_DELIMITOR);
136 | cfp = new Pair<String, String>(temp[0], temp[1]);
137 | result.add(cfp);
138 | }
139 | i ++;
140 | } while (index != null);
141 | return result;
142 | }
143 | }
144 |
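As a usage sketch, getIndexCFAndColumn reads back the indexed (family, column) pairs that were recorded as secondaryIndex$i table attributes; the table name below is a placeholder:

    //list the indexed columns declared on a table's descriptor
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTableDescriptor htd = admin.getTableDescriptor(Bytes.toBytes("weblog"));
    for (Pair<String, String> p : UpdateCoprocessor.getIndexCFAndColumn(htd)) {
        System.out.println("indexed: " + p.getFirst() + ":" + p.getSecond()); //e.g. cf:country
    }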
--------------------------------------------------------------------------------
/src/tthbase/util/UpdateTableAttribute.java:
--------------------------------------------------------------------------------
1 | package tthbase.util;
2 |
3 | import org.apache.hadoop.conf.Configuration;
4 | import org.apache.hadoop.hbase.HBaseConfiguration;
5 | import org.apache.hadoop.hbase.HColumnDescriptor;
6 | import org.apache.hadoop.hbase.HTableDescriptor;
7 | import org.apache.hadoop.hbase.client.HBaseAdmin;
8 | import org.apache.hadoop.hbase.client.HTable;
9 | import org.apache.hadoop.hbase.client.Put;
10 | import org.apache.hadoop.hbase.client.Row;
11 | import org.apache.hadoop.hbase.util.Bytes;
12 | import org.apache.hadoop.hbase.util.Pair;
13 |
14 | import java.io.File;
15 | import java.io.IOException;
16 | import java.nio.charset.Charset;
17 | import java.util.ArrayList;
18 | import java.util.List;
19 |
20 | import tthbase.util.HIndexConstantsAndUtils;
21 |
22 | import org.apache.hadoop.hbase.HConstants;
23 |
24 | /**
25 | 1. update hbase coprocessor
26 | 2. disable physical deletion in all column families that contain indexed columns:
27 | -KEEP_DELETED_CELLS = true
28 | -TTL = HConstants.FOREVER
29 | -maxVersion = Integer.MAX_VALUE
30 | */
31 |
32 | public class UpdateTableAttribute {
33 | public static final String INDEX_INDICATOR = "secondaryIndex$";
34 | public static final String INDEX_DELIMITOR = "|";
35 | public static final String TABLE_NAME_DELIMITOR = "_";
36 | public static final byte[] FIXED_INDEX_CF = Bytes.toBytes("cf");
37 | public static final String USAGE = "Create a table and associated index;\n " +
38 | "Arguments:\n 1)zkserver 2)zkserver_port \n 3)table_name 4)list of cfs in table, in a single {},separated by ," +
39 | "5) INDEX_CP_NAME 6) INDEX_CP_PATH 7) INDEX_CP_CLASS "+
40 | "8)-[list of index columns in the format of cfName|colName]\n"+
41 | "format: if INDEX_CP_CLASS contains null, any coprocessor will be unloaded\n" +
42 | "***An example\n" +
43 | "saba20.watson.ibm.com 2181 weblog {cf} coprocessor\\$1 hdfs://saba20.watson.ibm.com:8020/index-coprocessor-0.1.0.jar org.apache.hadoop.hbase.coprocessor.index.SyncSecondaryIndexObserver cf\\|country cf\\|ip";
44 |
45 | public static String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
46 | public static String INDEX_CP_NAME;// = "coprocessor$1";
47 | public static String INDEX_CP_PATH;// = "hdfs://saba20.watson.ibm.com:8020/index-coprocessor-0.1.0.jar";
48 | public static String INDEX_CP_CLASS;// = "org.apache.hadoop.hbase.coprocessor.index.SyncSecondaryIndexObserver";
49 |
50 | public static String zkserver;//saba20.watson.ibm.com
51 | public static String zkport;//2181
52 | public static String dataTableName;//weblog
53 | public static String cfList;
54 | public static String [] indexItems;//{cf|country, cf|ip}
55 | public static String cf;
56 |
57 | public static void main(String[] args) throws IOException {
58 | //arguments follow USAGE above; at least three are expected
59 | int bar = 3;
60 | if(args.length < bar){
61 | System.out.println(USAGE);
62 | return;
63 | }
64 | zkserver = args[0];
65 | zkport = args[1];
66 | dataTableName = args[2];
67 | cf = Bytes.toString(FIXED_INDEX_CF); //assumed: the fixed "cf" family used by updateIndexIndicator below
68 |
69 |
70 | if(args.length > bar){
71 | indexItems = new String[args.length-bar];
72 | for(int i=bar;i<args.length;i++){
73 | indexItems[i-bar] = args[i];
74 | }
75 | }
76 |
77 | Configuration conf = HBaseConfiguration.create();
78 | conf.set("hbase.zookeeper.quorum", zkserver);
79 | conf.set("hbase.zookeeper.property.clientPort", zkport);
80 |
81 | System.out.println("TTDEBUG: load coprocessor " + INDEX_CP_NAME + "=>" + INDEX_CP_CLASS);
82 |
83 | HTable dataTable = new HTable(conf, dataTableName);
84 | System.out.println("run WWW in main(): ");
85 | HIndexConstantsAndUtils.updateIndexIndicator(conf, Bytes.toBytes(dataTableName), 1, true, cf, "country");
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/tt_sh/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | java -cp build/jar/libDeli-client.jar:conf:lib/hbase-binding-0.1.4.jar tthbase.client.Demo $(pwd)/lib/libDeli-coproc.jar
4 |
--------------------------------------------------------------------------------