├── conf
├── regionservers
└── hbase-site.xml
├── src
├── main
│ ├── java
│ │ └── org
│ │ │ └── apache
│ │ │ └── hadoop
│ │ │ └── hbase
│ │ │ ├── avro
│ │ │ ├── hbase.genavro
│ │ │ └── generated
│ │ │ │ ├── ACompressionAlgorithm.java
│ │ │ │ ├── IOError.java
│ │ │ │ ├── AAlreadyExists.java
│ │ │ │ ├── TCell.java
│ │ │ │ ├── AIOError.java
│ │ │ │ ├── ATableExists.java
│ │ │ │ ├── AIllegalArgument.java
│ │ │ │ ├── AMasterNotRunning.java
│ │ │ │ ├── ATimeRange.java
│ │ │ │ ├── AColumn.java
│ │ │ │ ├── AServerAddress.java
│ │ │ │ ├── ADelete.java
│ │ │ │ └── AResult.java
│ │ │ ├── zookeeper
│ │ │ └── ZKTableDisable.java
│ │ │ ├── rest
│ │ │ ├── transform
│ │ │ │ ├── NullTransform.java
│ │ │ │ ├── Base64.java
│ │ │ │ └── Transform.java
│ │ │ ├── ResourceConfig.java
│ │ │ ├── ResourceBase.java
│ │ │ ├── metrics
│ │ │ │ └── RESTStatistics.java
│ │ │ ├── Constants.java
│ │ │ └── ProtobufMessageHandler.java
│ │ │ ├── ipc
│ │ │ ├── ServerNotRunningException.java
│ │ │ ├── HBaseRPCErrorHandler.java
│ │ │ └── CoprocessorProtocol.java
│ │ │ ├── TableExistsException.java
│ │ │ ├── client
│ │ │ ├── Row.java
│ │ │ ├── RegionOfflineException.java
│ │ │ ├── ScannerTimeoutException.java
│ │ │ ├── NoServerForRegionException.java
│ │ │ ├── HTableInterfaceFactory.java
│ │ │ └── HTableFactory.java
│ │ │ ├── coprocessor
│ │ │ ├── MasterCoprocessorEnvironment.java
│ │ │ └── RegionCoprocessorEnvironment.java
│ │ │ ├── regionserver
│ │ │ ├── FlushRequester.java
│ │ │ ├── ChangedReadersObserver.java
│ │ │ ├── RegionServerStoppedException.java
│ │ │ ├── LeaseException.java
│ │ │ ├── LeaseListener.java
│ │ │ ├── wal
│ │ │ │ ├── OrphanHLogAfterSplitException.java
│ │ │ │ └── FailedLogCloseException.java
│ │ │ ├── WrongRegionException.java
│ │ │ ├── CompactionRequestor.java
│ │ │ ├── NoSuchColumnFamilyException.java
│ │ │ ├── handler
│ │ │ │ ├── OpenMetaHandler.java
│ │ │ │ └── OpenRootHandler.java
│ │ │ └── RegionServerRunningException.java
│ │ │ ├── Stoppable.java
│ │ │ ├── ClockOutOfSyncException.java
│ │ │ ├── DroppedSnapshotException.java
│ │ │ ├── UnknownRegionException.java
│ │ │ ├── util
│ │ │ ├── EnvironmentEdge.java
│ │ │ ├── DefaultEnvironmentEdge.java
│ │ │ ├── FileSystemVersionException.java
│ │ │ ├── IncrementingEnvironmentEdge.java
│ │ │ ├── ManualEnvironmentEdge.java
│ │ │ ├── HBaseConfTool.java
│ │ │ └── JvmVersion.java
│ │ │ ├── io
│ │ │ ├── WritableWithSize.java
│ │ │ └── HeapSize.java
│ │ │ ├── TableNotFoundException.java
│ │ │ ├── PleaseHoldException.java
│ │ │ ├── YouAreDeadException.java
│ │ │ ├── master
│ │ │ ├── handler
│ │ │ │ └── TotesHRegionInfo.java
│ │ │ └── metrics
│ │ │ │ └── MasterStatistics.java
│ │ │ ├── filter
│ │ │ ├── InvalidRowFilterException.java
│ │ │ ├── IncompatibleFilterException.java
│ │ │ ├── BinaryComparator.java
│ │ │ └── package-info.java
│ │ │ ├── UnknownRowLockException.java
│ │ │ ├── mapred
│ │ │ ├── Driver.java
│ │ │ ├── TableReduce.java
│ │ │ └── TableMap.java
│ │ │ ├── RegionException.java
│ │ │ ├── Abortable.java
│ │ │ ├── mapreduce
│ │ │ └── TableMapper.java
│ │ │ ├── NotAllMetaRegionsOnlineException.java
│ │ │ ├── UnknownScannerException.java
│ │ │ ├── MasterNotRunningException.java
│ │ │ ├── ZooKeeperConnectionException.java
│ │ │ ├── DoNotRetryIOException.java
│ │ │ ├── TableNotDisabledException.java
│ │ │ ├── NotServingRegionException.java
│ │ │ └── InvalidFamilyOperationException.java
│ ├── resources
│ │ ├── hbase-webapps
│ │ │ ├── master
│ │ │ │ ├── index.html
│ │ │ │ └── zk.jsp
│ │ │ ├── regionserver
│ │ │ │ └── index.html
│ │ │ └── static
│ │ │ │ ├── hbase_logo_med.gif
│ │ │ │ └── hbase.css
│ │ └── org
│ │ │ └── apache
│ │ │ └── hadoop
│ │ │ └── hbase
│ │ │ ├── mapred
│ │ │ └── RowCounter_Counters.properties
│ │ │ ├── mapreduce
│ │ │ └── RowCounter_Counters.properties
│ │ │ └── rest
│ │ │ └── protobuf
│ │ │ ├── TableListMessage.proto
│ │ │ ├── CellMessage.proto
│ │ │ ├── CellSetMessage.proto
│ │ │ ├── VersionMessage.proto
│ │ │ ├── ScannerMessage.proto
│ │ │ ├── TableInfoMessage.proto
│ │ │ ├── ColumnSchemaMessage.proto
│ │ │ └── TableSchemaMessage.proto
│ ├── javadoc
│ │ └── org
│ │ │ └── apache
│ │ │ └── hadoop
│ │ │ └── hbase
│ │ │ ├── thrift
│ │ │ └── doc-files
│ │ │ │ └── style.css
│ │ │ ├── io
│ │ │ └── hfile
│ │ │ │ └── package.html
│ │ │ └── ipc
│ │ │ └── package.html
│ └── ruby
│ │ └── shell
│ │ └── commands
│ │ ├── zk_dump.rb
│ │ ├── enable.rb
│ │ ├── disable.rb
│ │ ├── compact.rb
│ │ ├── is_enabled.rb
│ │ ├── is_disabled.rb
│ │ ├── major_compact.rb
│ │ ├── remove_peer.rb
│ │ ├── drop.rb
│ │ ├── exists.rb
│ │ ├── truncate.rb
│ │ ├── status.rb
│ │ ├── balancer.rb
│ │ ├── version.rb
│ │ ├── flush.rb
│ │ ├── enable_peer.rb
│ │ ├── disable_peer.rb
│ │ ├── assign.rb
│ │ ├── describe.rb
│ │ ├── balance_switch.rb
│ │ ├── stop_replication.rb
│ │ ├── start_replication.rb
│ │ ├── put.rb
│ │ ├── incr.rb
│ │ ├── deleteall.rb
│ │ ├── get_counter.rb
│ │ ├── split.rb
│ │ ├── unassign.rb
│ │ ├── list.rb
│ │ ├── add_peer.rb
│ │ ├── delete.rb
│ │ ├── create.rb
│ │ └── close_region.rb
├── site
│ └── resources
│ │ └── images
│ │ ├── favicon.ico
│ │ ├── hadoop-logo.jpg
│ │ ├── hbase_small.gif
│ │ ├── architecture.gif
│ │ ├── asf_logo_wide.png
│ │ ├── hbase_logo_med.gif
│ │ └── replication_overview.png
├── examples
│ ├── README.txt
│ ├── mapreduce
│ │ └── index-builder-setup.rb
│ └── thrift
│ │ ├── README.txt
│ │ └── Makefile
└── test
│ ├── java
│ └── org
│ │ └── apache
│ │ └── hadoop
│ │ └── hbase
│ │ ├── EmptyWatcher.java
│ │ ├── util
│ │ ├── EnvironmentEdgeManagerTestHelper.java
│ │ ├── TestIncrementingEnvironmentEdge.java
│ │ └── SoftValueSortedMapTest.java
│ │ └── coprocessor
│ │ └── ColumnAggregationProtocol.java
│ ├── ruby
│ └── shell
│ │ └── commands_test.rb
│ └── resources
│ ├── org
│ └── apache
│ │ └── hadoop
│ │ └── hbase
│ │ └── PerformanceEvaluation_Counter.properties
│ └── log4j.properties
├── .gitignore
├── NOTICE.txt
├── bin
├── local-master-backup.sh
└── local-regionservers.sh
└── README.txt
/conf/regionservers:
--------------------------------------------------------------------------------
1 | localhost
2 |
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/avro/hbase.genavro:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableDisable.java:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/main/resources/hbase-webapps/master/index.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/src/main/resources/hbase-webapps/regionserver/index.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/src/site/resources/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/site/resources/images/favicon.ico
--------------------------------------------------------------------------------
/src/site/resources/images/hadoop-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/site/resources/images/hadoop-logo.jpg
--------------------------------------------------------------------------------
/src/site/resources/images/hbase_small.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/site/resources/images/hbase_small.gif
--------------------------------------------------------------------------------
/src/site/resources/images/architecture.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/site/resources/images/architecture.gif
--------------------------------------------------------------------------------
/src/site/resources/images/asf_logo_wide.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/site/resources/images/asf_logo_wide.png
--------------------------------------------------------------------------------
/src/site/resources/images/hbase_logo_med.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/site/resources/images/hbase_logo_med.gif
--------------------------------------------------------------------------------
/src/site/resources/images/replication_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/site/resources/images/replication_overview.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.classpath
2 | /.externalToolBuilders
3 | /.project
4 | /.settings
5 | /build
6 | /.idea/
7 | /logs
8 | /target
9 | *.iml
10 | *.orig
11 | *~
12 |
--------------------------------------------------------------------------------
/src/main/resources/hbase-webapps/static/hbase_logo_med.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dropbox/hbase/trunk/src/main/resources/hbase-webapps/static/hbase_logo_med.gif
--------------------------------------------------------------------------------
/src/main/resources/org/apache/hadoop/hbase/mapred/RowCounter_Counters.properties:
--------------------------------------------------------------------------------
1 |
2 | # ResourceBundle properties file for RowCounter MR job
3 |
4 | CounterGroupName= RowCounter
5 |
6 | ROWS.name= Rows
--------------------------------------------------------------------------------
/src/main/resources/org/apache/hadoop/hbase/mapreduce/RowCounter_Counters.properties:
--------------------------------------------------------------------------------
1 |
2 | # ResourceBundle properties file for RowCounter MR job
3 |
4 | CounterGroupName= RowCounter
5 |
6 | ROWS.name= Rows
7 |
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Autogenerated by Avro
3 | *
4 | * DO NOT EDIT DIRECTLY
5 | */
6 | package org.apache.hadoop.hbase.avro.generated;
7 |
8 | @SuppressWarnings("all")
9 | public enum ACompressionAlgorithm {
10 | LZO, GZ, NONE
11 | }
12 |
--------------------------------------------------------------------------------
/NOTICE.txt:
--------------------------------------------------------------------------------
1 | This product includes software developed by The Apache Software
2 | Foundation (http://www.apache.org/).
3 |
4 | In addition, this product includes software developed by:
5 |
6 | Facebook, Inc. (http://developers.facebook.com/thrift/ -- Page includes the Thrift Software License)
7 |
8 | JUnit (http://www.junit.org/)
9 |
--------------------------------------------------------------------------------
/src/main/resources/hbase-webapps/static/hbase.css:
--------------------------------------------------------------------------------
1 | h1, h2, h3 { color: DarkSlateBlue }
2 | table { border: thin solid DodgerBlue }
3 | tr { border: thin solid DodgerBlue }
4 | td { border: thin solid DodgerBlue }
5 | th { border: thin solid DodgerBlue }
6 | #logo {float: left;}
7 | #logo img {border: none;}
8 | #page_title {padding-top: 27px;}
9 |
10 | div.warning {
11 | border: 1px solid #666;
12 | background-color: #fcc;
13 | font-size: 110%;
14 | font-weight: bold;
15 | }
16 |
17 | td.undeployed-region {
18 | background-color: #faa;
19 | }
20 |
--------------------------------------------------------------------------------
/src/examples/README.txt:
--------------------------------------------------------------------------------
1 | Example code.
2 |
3 | * src/examples/thrift
4 | Examples for interacting with HBase via Thrift from C++, PHP, Python and Ruby.
5 | * org.apache.hadoop.hbase.mapreduce.SampleUploader
6 | Demonstrates uploading data from text files (presumably stored in HDFS) to HBase.
7 | * org.apache.hadoop.hbase.mapreduce.IndexBuilder
8 | Demonstrates map/reduce with a table as the source and other tables as the sink.
9 |
10 | As of 0.20 there is no ant target for building the examples. You can easily build
11 | the Java examples by copying them to the right location in the main source hierarchy.
--------------------------------------------------------------------------------
/src/examples/mapreduce/index-builder-setup.rb:
--------------------------------------------------------------------------------
1 | # Set up sample data for IndexBuilder example
2 | create "people", "attributes"
3 | create "people-email", "INDEX"
4 | create "people-phone", "INDEX"
5 | create "people-name", "INDEX"
6 |
7 | [["1", "jenny", "jenny@example.com", "867-5309"],
8 | ["2", "alice", "alice@example.com", "555-1234"],
9 | ["3", "kevin", "kevinpet@example.com", "555-1212"]].each do |fields|
10 | (id, name, email, phone) = *fields
11 | put "people", id, "attributes:name", name
12 | put "people", id, "attributes:email", email
13 | put "people", id, "attributes:phone", phone
14 | end
15 |
16 |
--------------------------------------------------------------------------------
/src/main/javadoc/org/apache/hadoop/hbase/thrift/doc-files/style.css:
--------------------------------------------------------------------------------
1 | /* Auto-generated CSS for generated Thrift docs */
2 | body { font-family: Tahoma, sans-serif; }
3 | pre { background-color: #dddddd; padding: 6px; }
4 | h3,h4 { padding-top: 0px; margin-top: 0px; }
5 | div.definition { border: 1px solid gray; margin: 10px; padding: 10px; }
6 | div.extends { margin: -0.5em 0 1em 5em }
7 | table { border: 1px solid grey; border-collapse: collapse; }
8 | td { border: 1px solid grey; padding: 1px 6px; vertical-align: top; }
9 | th { border: 1px solid black; background-color: #bbbbbb;
10 | text-align: left; padding: 1px 6px; }
11 |
--------------------------------------------------------------------------------
/src/examples/thrift/README.txt:
--------------------------------------------------------------------------------
1 | Hbase Thrift Client Examples
2 | ============================
3 |
4 | Included in this directory are sample clients of the HBase ThriftServer. They
5 | all perform the same actions but are implemented in C++, Java, Ruby, PHP, and
6 | Python respectively.
7 |
8 | To run/compile this clients, you will first need to install the thrift package
9 | (from http://developers.facebook.com/thrift/) and then run thrift to generate
10 | the language files:
11 |
12 | thrift --gen cpp --gen java --gen rb --gen py -php \
13 | ../../../src/java/org/apache/hadoop/hbase/thrift/Hbase.thrift
14 |
15 | See the individual DemoClient test files for more specific instructions on
16 | running each test.
17 |
--------------------------------------------------------------------------------
/bin/local-master-backup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This is used for starting multiple masters on the same machine.
3 | # run it from hbase-dir/ just like 'bin/hbase'
4 | # Supports up to 10 masters (limitation = overlapping ports)
5 |
6 | bin=`dirname "${BASH_SOURCE-$0}"`
7 | bin=`cd "$bin" >/dev/null && pwd`
8 |
9 | if [ $# -lt 2 ]; then
10 | S=`basename "${BASH_SOURCE-$0}"`
11 | echo "Usage: $S [start|stop] offset(s)"
12 | echo ""
13 | echo " e.g. $S start 1"
14 | exit
15 | fi
16 |
17 | # sanity check: make sure your master opts don't use ports [i.e. JMX/DBG]
18 | export HBASE_MASTER_OPTS=" "
19 |
20 | run_master () {
21 | DN=$2
22 | export HBASE_IDENT_STRING="$USER-$DN"
23 | HBASE_MASTER_ARGS="\
24 | --backup \
25 | -D hbase.master.port=`expr 60000 + $DN` \
26 | -D hbase.master.info.port=`expr 60010 + $DN`"
27 | "$bin"/hbase-daemon.sh $1 master $HBASE_MASTER_ARGS
28 | }
29 |
30 | cmd=$1
31 | shift;
32 |
33 | for i in $*
34 | do
35 | run_master $cmd $i
36 | done
37 |
--------------------------------------------------------------------------------
/bin/local-regionservers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This is used for starting multiple regionservers on the same machine.
3 | # run it from hbase-dir/ just like 'bin/hbase'
4 | # Supports up to 100 regionservers (limitation = overlapping ports)
5 |
6 | bin=`dirname "${BASH_SOURCE-$0}"`
7 | bin=`cd "$bin" >/dev/null && pwd`
8 |
9 | if [ $# -lt 2 ]; then
10 | S=`basename "${BASH_SOURCE-$0}"`
11 | echo "Usage: $S [start|stop] offset(s)"
12 | echo ""
13 | echo " e.g. $S start 1 2"
14 | exit
15 | fi
16 |
17 | # sanity check: make sure your regionserver opts don't use ports [i.e. JMX/DBG]
18 | export HBASE_REGIONSERVER_OPTS=" "
19 |
20 | run_regionserver () {
21 | DN=$2
22 | export HBASE_IDENT_STRING="$USER-$DN"
23 | HBASE_REGIONSERVER_ARGS="\
24 | -D hbase.regionserver.port=`expr 60200 + $DN` \
25 | -D hbase.regionserver.info.port=`expr 60300 + $DN`"
26 | "$bin"/hbase-daemon.sh $1 regionserver $HBASE_REGIONSERVER_ARGS
27 | }
28 |
29 | cmd=$1
30 | shift;
31 |
32 | for i in $*
33 | do
34 | run_regionserver $cmd $i
35 | done
36 |
--------------------------------------------------------------------------------
/conf/hbase-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
24 |
25 | * This is used primarily for ZooKeeper usage when we could get an unexpected 26 | * and fatal exception, requiring an abort. 27 | *
28 | * Implemented by the Master, RegionServer, and TableServers (client). 29 | */ 30 | public interface Abortable { 31 | /** 32 | * Abort the server or client. 33 | * @param why Why we're aborting. 34 | * @param e Throwable that caused abort. Can be null. 35 | */ 36 | public void abort(String why, Throwable e); 37 | } -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | 21 | package org.apache.hadoop.hbase.filter; 22 | 23 | /** 24 | * A binary comparator which lexicographically compares against the specified 25 | * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}. 
26 | */ 27 | public class BinaryComparator extends WritableByteArrayComparable { 28 | 29 | /** Nullary constructor for Writable, do not use */ 30 | public BinaryComparator() { } 31 | 32 | /** 33 | * Constructor 34 | * @param value value 35 | */ 36 | public BinaryComparator(byte[] value) { 37 | super(value); 38 | } 39 | 40 | } 41 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.util; 21 | 22 | /** 23 | * An environment edge that uses a manually set value. This is useful for testing events that are supposed to 24 | * happen in the same millisecond. 
25 | */ 26 | public class ManualEnvironmentEdge implements EnvironmentEdge { 27 | 28 | // Sometimes 0 ts might have a special value, so lets start with 1 29 | protected long value = 1L; 30 | 31 | public void setValue(long newValue) { 32 | value = newValue; 33 | } 34 | 35 | @Override 36 | public long currentTimeMillis() { 37 | return this.value; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.client; 21 | 22 | import org.apache.hadoop.hbase.RegionException; 23 | 24 | /** 25 | * Thrown when no region server can be found for a region 26 | */ 27 | public class NoServerForRegionException extends RegionException { 28 | private static final long serialVersionUID = 1L << 11 - 1L; 29 | 30 | /** default constructor */ 31 | public NoServerForRegionException() { 32 | super(); 33 | } 34 | 35 | /** 36 | * Constructor 37 | * @param s message 38 | */ 39 | public NoServerForRegionException(String s) { 40 | super(s); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 
19 | */ 20 | package org.apache.hadoop.hbase.regionserver; 21 | 22 | public interface CompactionRequestor { 23 | /** 24 | * @param r Region to compact 25 | * @param why Why compaction was requested -- used in debug messages 26 | */ 27 | public void requestCompaction(final HRegion r, final String why); 28 | 29 | /** 30 | * @param r Region to compact 31 | * @param why Why compaction was requested -- used in debug messages 32 | * @param pri Priority of this compaction. minHeap. <=0 is critical 33 | */ 34 | public void requestCompaction(final HRegion r, final String why, int pri); 35 | } -------------------------------------------------------------------------------- /src/main/ruby/shell/commands/incr.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2010 The Apache Software Foundation 3 | # 4 | # Licensed to the Apache Software Foundation (ASF) under one 5 | # or more contributor license agreements. See the NOTICE file 6 | # distributed with this work for additional information 7 | # regarding copyright ownership. The ASF licenses this file 8 | # to you under the Apache License, Version 2.0 (the 9 | # "License"); you may not use this file except in compliance 10 | # with the License. You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | # 20 | 21 | module Shell 22 | module Commands 23 | class Incr < Command 24 | def help 25 | return <<-EOF 26 | Increments a cell 'value' at specified table/row/column coordinates. 
27 | To increment a cell value in table 't1' at row 'r1' under column 28 | 'c1' by 1 (can be omitted) or 10 do: 29 | 30 | hbase> incr 't1', 'r1', 'c1' 31 | hbase> incr 't1', 'r1', 'c1', 1 32 | hbase> incr 't1', 'r1', 'c1', 10 33 | EOF 34 | end 35 | 36 | def command(table, row, column, value = nil) 37 | cnt = table(table).incr(row, column, value) 38 | puts "COUNTER VALUE = #{cnt}" 39 | end 40 | end 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /src/test/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Define some default values that can be overridden by system properties 2 | hbase.root.logger=INFO,console 3 | hbase.log.dir=. 4 | hbase.log.file=hbase.log 5 | 6 | # Define the root logger to the system property "hbase.root.logger". 7 | log4j.rootLogger=${hbase.root.logger} 8 | 9 | # Logging Threshold 10 | log4j.threshhold=ALL 11 | 12 | # 13 | # Daily Rolling File Appender 14 | # 15 | log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender 16 | log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} 17 | 18 | # Rollver at midnight 19 | log4j.appender.DRFA.DatePattern=.yyyy-MM-dd 20 | 21 | # 30-day backup 22 | #log4j.appender.DRFA.MaxBackupIndex=30 23 | log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout 24 | 25 | # Pattern format: Date LogLevel LoggerName LogMessage 26 | #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n 27 | 28 | # Debugging Pattern format 29 | log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n 30 | 31 | 32 | # 33 | # console 34 | # Add "console" to rootlogger above if you want to use this 35 | # 36 | log4j.appender.console=org.apache.log4j.ConsoleAppender 37 | log4j.appender.console.target=System.err 38 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 39 | log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n 40 | 41 | # Custom Logging 
levels 42 | 43 | #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG 44 | 45 | log4j.logger.org.apache.hadoop=WARN 46 | log4j.logger.org.apache.zookeeper=ERROR 47 | log4j.logger.org.apache.hadoop.hbase=DEBUG 48 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/filter/package-info.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | 21 | /** 22 | * Provides row-level filters applied to HRegion scan results during calls to 23 | * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}. 24 | 25 |
26 | Filters run the extent of a table unless you wrap your filter in a 27 | {@link org.apache.hadoop.hbase.filter.WhileMatchFilter}. 28 | The latter returns as soon as the filter stops matching. 29 |
30 |Do not rely on filters carrying state across rows; its not reliable in current 31 | hbase as we have no handlers in place for when regions split, close or server 32 | crashes. 33 |
34 | */ 35 | package org.apache.hadoop.hbase.filter; 36 | -------------------------------------------------------------------------------- /src/main/ruby/shell/commands/deleteall.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2010 The Apache Software Foundation 3 | # 4 | # Licensed to the Apache Software Foundation (ASF) under one 5 | # or more contributor license agreements. See the NOTICE file 6 | # distributed with this work for additional information 7 | # regarding copyright ownership. The ASF licenses this file 8 | # to you under the Apache License, Version 2.0 (the 9 | # "License"); you may not use this file except in compliance 10 | # with the License. You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | # 20 | 21 | module Shell 22 | module Commands 23 | class Deleteall < Command 24 | def help 25 | return <<-EOF 26 | Delete all cells in a given row; pass a table name, row, and optionally 27 | a column and timestamp. 
Examples: 28 | 29 | hbase> deleteall 't1', 'r1' 30 | hbase> deleteall 't1', 'r1', 'c1' 31 | hbase> deleteall 't1', 'r1', 'c1', ts1 32 | EOF 33 | end 34 | 35 | def command(table, row, column = nil, timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP) 36 | format_simple_command do 37 | table(table).deleteall(row, column, timestamp) 38 | end 39 | end 40 | end 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.mapred; 21 | 22 | import org.apache.hadoop.hbase.client.Put; 23 | import org.apache.hadoop.hbase.io.ImmutableBytesWritable; 24 | import org.apache.hadoop.io.Writable; 25 | import org.apache.hadoop.io.WritableComparable; 26 | import org.apache.hadoop.mapred.Reducer; 27 | 28 | /** 29 | * Write a table, sorting by the input key 30 | * 31 | * @paramMapper class to add the required input key
28 | * and value classes.
29 | *
30 | * @param 29 | * This is executed after receiving an OPEN RPC from the master for meta. 30 | */ 31 | public class OpenMetaHandler extends OpenRegionHandler { 32 | public OpenMetaHandler(final Server server, 33 | final RegionServerServices rsServices, HRegionInfo regionInfo) { 34 | super(server,rsServices, regionInfo, EventType.M_RS_OPEN_META); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 The Apache Software Foundation 3 | * 4 | * Licensed to the Apache Software Foundation (ASF) under one 5 | * or more contributor license agreements. See the NOTICE file 6 | * distributed with this work for additional information 7 | * regarding copyright ownership. The ASF licenses this file 8 | * to you under the Apache License, Version 2.0 (the 9 | * "License"); you may not use this file except in compliance 10 | * with the License. You may obtain a copy of the License at 11 | * 12 | * http://www.apache.org/licenses/LICENSE-2.0 13 | * 14 | * Unless required by applicable law or agreed to in writing, software 15 | * distributed under the License is distributed on an "AS IS" BASIS, 16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | * See the License for the specific language governing permissions and 18 | * limitations under the License. 19 | */ 20 | package org.apache.hadoop.hbase.regionserver.handler; 21 | 22 | import org.apache.hadoop.hbase.HRegionInfo; 23 | import org.apache.hadoop.hbase.Server; 24 | import org.apache.hadoop.hbase.regionserver.RegionServerServices; 25 | 26 | /** 27 | * Handles opening of the root region on a region server. 28 | *
29 | * This is executed after receiving an OPEN RPC from the master for root.
30 | */
31 | public class OpenRootHandler extends OpenRegionHandler {
32 | public OpenRootHandler(final Server server,
33 | final RegionServerServices rsServices, HRegionInfo regionInfo) {
34 | super(server, rsServices, regionInfo, EventType.M_RS_OPEN_ROOT);
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/src/main/ruby/shell/commands/add_peer.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright 2010 The Apache Software Foundation
3 | #
4 | # Licensed to the Apache Software Foundation (ASF) under one
5 | # or more contributor license agreements. See the NOTICE file
6 | # distributed with this work for additional information
7 | # regarding copyright ownership. The ASF licenses this file
8 | # to you under the Apache License, Version 2.0 (the
9 | # "License"); you may not use this file except in compliance
10 | # with the License. You may obtain a copy of the License at
11 | #
12 | # http://www.apache.org/licenses/LICENSE-2.0
13 | #
14 | # Unless required by applicable law or agreed to in writing, software
15 | # distributed under the License is distributed on an "AS IS" BASIS,
16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | # See the License for the specific language governing permissions and
18 | # limitations under the License.
19 | #
20 |
module Shell
  module Commands
    # Shell verb `add_peer`: registers a replication peer cluster with the
    # given short id and cluster key via the replication admin.
    class AddPeer < Command
      # Help text printed by the shell for `help 'add_peer'`.
      def help
        return <<-EOF
Add a peer cluster to replicate to, the id must be a short and
the cluster key is composed like this:
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
This gives a full path for HBase to connect to another cluster.
Examples:

  hbase> add_peer '1', "server1.cie.com:2181:/hbase"
  hbase> add_peer '2', "zk1,zk2,zk3:2182:/hbase-prod"
EOF
      end

      # Forwards the (id, cluster_key) pair to ReplicationAdmin#add_peer,
      # timing/formatting the call like every other shell command.
      def command(id, cluster_key)
        format_simple_command do
          replication_admin.add_peer(id, cluster_key)
        end
      end
    end
  end
end
45 |
--------------------------------------------------------------------------------
/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2010 The Apache Software Foundation
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one
5 | * or more contributor license agreements. See the NOTICE file
6 | * distributed with this work for additional information
7 | * regarding copyright ownership. The ASF licenses this file
8 | * to you under the Apache License, Version 2.0 (the
9 | * "License"); you may not use this file except in compliance
10 | * with the License. You may obtain a copy of the License at
11 | *
12 | * http://www.apache.org/licenses/LICENSE-2.0
13 | *
14 | * Unless required by applicable law or agreed to in writing, software
15 | * distributed under the License is distributed on an "AS IS" BASIS,
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | * See the License for the specific language governing permissions and
18 | * limitations under the License.
19 | */
20 | package org.apache.hadoop.hbase.util;
21 |
22 | import org.junit.Test;
23 |
24 | import static junit.framework.Assert.assertEquals;
25 |
26 | /**
27 | * Tests that the incrementing environment edge increments time instead of using
28 | * the default.
29 | */
30 | public class TestIncrementingEnvironmentEdge {
31 |
32 | @Test
33 | public void testGetCurrentTimeUsesSystemClock() {
34 | IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
35 | assertEquals(1, edge.currentTimeMillis());
36 | assertEquals(2, edge.currentTimeMillis());
37 | assertEquals(3, edge.currentTimeMillis());
38 | assertEquals(4, edge.currentTimeMillis());
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2010 The Apache Software Foundation
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one
5 | * or more contributor license agreements. See the NOTICE file
6 | * distributed with this work for additional information
7 | * regarding copyright ownership. The ASF licenses this file
8 | * to you under the Apache License, Version 2.0 (the
9 | * "License"); you may not use this file except in compliance
10 | * with the License. You may obtain a copy of the License at
11 | *
12 | * http://www.apache.org/licenses/LICENSE-2.0
13 | *
14 | * Unless required by applicable law or agreed to in writing, software
15 | * distributed under the License is distributed on an "AS IS" BASIS,
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | * See the License for the specific language governing permissions and
18 | * limitations under the License.
19 | */
20 | package org.apache.hadoop.hbase.mapred;
21 |
22 | import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
23 | import org.apache.hadoop.hbase.client.Result;
24 | import org.apache.hadoop.io.Writable;
25 | import org.apache.hadoop.io.WritableComparable;
26 | import org.apache.hadoop.mapred.Mapper;
27 |
28 | /**
29 | * Scan an HBase table to sort by a specified sort column.
30 | * If the column does not exist, the record is not passed to Reduce.
31 | *
32 | * @param
28 | * Note that all callable methods must have a return type handled by
29 | * {@link org.apache.hadoop.hbase.io.HbaseObjectWritable#writeObject(java.io.DataOutput, Object, Class, org.apache.hadoop.conf.Configuration)}.
30 | * That is:
31 | *
29 | ZooKeeper Dump
30 |
31 |
32 |
33 | <%= ZKUtil.dump(watcher) %>
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/src/main/ruby/shell/commands/create.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright 2010 The Apache Software Foundation
3 | #
4 | # Licensed to the Apache Software Foundation (ASF) under one
5 | # or more contributor license agreements. See the NOTICE file
6 | # distributed with this work for additional information
7 | # regarding copyright ownership. The ASF licenses this file
8 | # to you under the Apache License, Version 2.0 (the
9 | # "License"); you may not use this file except in compliance
10 | # with the License. You may obtain a copy of the License at
11 | #
12 | # http://www.apache.org/licenses/LICENSE-2.0
13 | #
14 | # Unless required by applicable law or agreed to in writing, software
15 | # distributed under the License is distributed on an "AS IS" BASIS,
16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | # See the License for the specific language governing permissions and
18 | # limitations under the License.
19 | #
20 |
module Shell
  module Commands
    # Shell verb `create`: creates a table from a name plus per-column-family
    # specification dictionaries (and optional table configuration).
    class Create < Command
      # Help text printed by the shell for `help 'create'`.
      def help
        return <<-EOF
Create table; pass table name, a dictionary of specifications per
column family, and optionally a dictionary of table configuration.
Dictionaries are described below in the GENERAL NOTES section.
Examples:

  hbase> create 't1', {NAME => 'f1', VERSIONS => 5}
  hbase> create 't1', {NAME => 'f1'}, {NAME => 'f2'}, {NAME => 'f3'}
  hbase> # The above in shorthand would be the following:
  hbase> create 't1', 'f1', 'f2', 'f3'
  hbase> create 't1', {NAME => 'f1', VERSIONS => 1, TTL => 2592000, BLOCKCACHE => true}
EOF
      end

      # Passes the table name and remaining args straight through to
      # Admin#create, timing/formatting the call like other shell commands.
      def command(table, *args)
        format_simple_command do
          admin.create(table, *args)
        end
      end
    end
  end
end
47 |
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2010 The Apache Software Foundation
3 | *
4 | * Licensed to the Apache Software Foundation (ASF) under one
5 | * or more contributor license agreements. See the NOTICE file
6 | * distributed with this work for additional information
7 | * regarding copyright ownership. The ASF licenses this file
8 | * to you under the Apache License, Version 2.0 (the
9 | * "License"); you may not use this file except in compliance
10 | * with the License. You may obtain a copy of the License at
11 | *
12 | * http://www.apache.org/licenses/LICENSE-2.0
13 | *
14 | * Unless required by applicable law or agreed to in writing, software
15 | * distributed under the License is distributed on an "AS IS" BASIS,
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | * See the License for the specific language governing permissions and
18 | * limitations under the License.
19 | */
20 | package org.apache.hadoop.hbase;
21 |
22 | import java.io.IOException;
23 |
24 | /**
25 | * Thrown when a table schema modification request is made
26 | * for an invalid family name.
27 | */
public class InvalidFamilyOperationException extends IOException {
  // NOTE(review): the original source read `1L << 22 - 1L`, which the
  // compiler parses as 1L << (22 - 1) because '-' binds tighter than '<<'.
  // The intended value was presumably (1L << 22) - 1, but changing the
  // constant now would break deserialization of previously serialized
  // instances, so we keep the actual value and just make it explicit.
  private static final long serialVersionUID = 1L << 21;

  /** Default constructor: no detail message, no cause. */
  public InvalidFamilyOperationException() {
    super();
  }

  /**
   * Constructor with a detail message.
   * @param s message describing the invalid family operation
   */
  public InvalidFamilyOperationException(String s) {
    super(s);
  }

  /**
   * Constructor taking another exception as the cause.
   * @param e exception to grab data from; becomes this exception's cause
   */
  public InvalidFamilyOperationException(Exception e) {
    super(e);
  }
}
51 |
--------------------------------------------------------------------------------
/src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Autogenerated by Avro
3 | *
4 | * DO NOT EDIT DIRECTLY
5 | */
6 | package org.apache.hadoop.hbase.avro.generated;
7 |
8 | @SuppressWarnings("all")
9 | public class AResult extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
10 | public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResult\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}}}]}");
11 | public java.nio.ByteBuffer row;
12 | public java.util.List
32 | *
37 | *
25 | * Useful for sizing caches. It's a given that implementation approximations 26 | * do not account for 32 vs 64 bit nor for different VM implementations. 27 | *
28 | * An Object's size is determined by the non-static data members in it, 29 | * as well as the fixed {@link Object} overhead. 30 | *
31 | * For example: 32 | *
33 | * public class SampleObject implements HeapSize {
34 | *
35 | * int [] numbers;
36 | * int x;
37 | * }
38 | *
39 | */
public interface HeapSize {
  /**
   * Reports this object's approximate 'exclusive deep size': the payload it
   * holds plus the hosting object's own overhead.
   *
   * @return approximate heap byte count attributable to this object
   */
  long heapSize();
}
48 |
--------------------------------------------------------------------------------