├── .gitignore
├── .travis.yml
├── CHANGELOG
├── Gemfile
├── LICENSE
├── Manifest
├── README.md
├── Rakefile
├── bin
│   └── cassandra_helper
├── cassandra.gemspec
├── conf
│   ├── 0.6
│   │   ├── cassandra.in.sh
│   │   ├── log4j.properties
│   │   ├── schema.json
│   │   └── storage-conf.xml
│   ├── 0.7
│   │   ├── cassandra.in.sh
│   │   ├── cassandra.yaml
│   │   ├── log4j-server.properties
│   │   ├── schema.json
│   │   └── schema.txt
│   ├── 0.8
│   │   ├── cassandra.in.sh
│   │   ├── cassandra.yaml
│   │   ├── log4j-server.properties
│   │   ├── schema.json
│   │   └── schema.txt
│   ├── 1.0
│   │   ├── cassandra.in.sh
│   │   ├── cassandra.yaml
│   │   ├── log4j-server.properties
│   │   ├── schema.json
│   │   └── schema.txt
│   ├── 1.1
│   │   ├── cassandra.in.sh
│   │   ├── cassandra.yaml
│   │   ├── log4j-server.properties
│   │   ├── schema.json
│   │   └── schema.txt
│   └── 1.2
│       ├── cassandra.in.sh
│       ├── cassandra.yaml
│       ├── log4j-server.properties
│       ├── schema.json
│       └── schema.txt
├── ext
│   ├── cassandra_native.c
│   └── extconf.rb
├── lib
│   ├── .gitignore
│   ├── cassandra.rb
│   └── cassandra
│       ├── 0.6.rb
│       ├── 0.6
│       │   ├── cassandra.rb
│       │   ├── columns.rb
│       │   └── protocol.rb
│       ├── 0.7.rb
│       ├── 0.7
│       │   ├── cassandra.rb
│       │   ├── columns.rb
│       │   └── protocol.rb
│       ├── 0.8.rb
│       ├── 0.8
│       │   ├── cassandra.rb
│       │   ├── columns.rb
│       │   └── protocol.rb
│       ├── 1.0.rb
│       ├── 1.0
│       │   ├── cassandra.rb
│       │   ├── columns.rb
│       │   └── protocol.rb
│       ├── 1.1.rb
│       ├── 1.1
│       │   ├── cassandra.rb
│       │   ├── columns.rb
│       │   └── protocol.rb
│       ├── 1.2.rb
│       ├── 1.2
│       │   ├── cassandra.rb
│       │   ├── columns.rb
│       │   └── protocol.rb
│       ├── array.rb
│       ├── batch.rb
│       ├── cassandra.rb
│       ├── column_family.rb
│       ├── columns.rb
│       ├── comparable.rb
│       ├── composite.rb
│       ├── constants.rb
│       ├── debug.rb
│       ├── dynamic_composite.rb
│       ├── helpers.rb
│       ├── keyspace.rb
│       ├── long.rb
│       ├── mock.rb
│       ├── ordered_hash.rb
│       ├── protocol.rb
│       └── time.rb
├── test
│   ├── cassandra_client_test.rb
│   ├── cassandra_mock_test.rb
│   ├── cassandra_test.rb
│   ├── comparable_types_test.rb
│   ├── composite_type_test.rb
│   ├── eventmachine_test.rb
│   ├── ordered_hash_test.rb
│   └── test_helper.rb
└── vendor
    ├── 0.6
    │   └── gen-rb
    │       ├── cassandra.rb
    │       ├── cassandra_constants.rb
    │       └── cassandra_types.rb
    ├── 0.7
    │   └── gen-rb
    │       ├── cassandra.rb
    │       ├── cassandra_constants.rb
    │       └── cassandra_types.rb
    ├── 0.8
    │   └── gen-rb
    │       ├── cassandra.rb
    │       ├── cassandra_constants.rb
    │       └── cassandra_types.rb
    ├── 1.0
    │   └── gen-rb
    │       ├── cassandra.rb
    │       ├── cassandra_constants.rb
    │       └── cassandra_types.rb
    ├── 1.1
    │   └── gen-rb
    │       ├── cassandra.rb
    │       ├── cassandra_constants.rb
    │       └── cassandra_types.rb
    └── 1.2
        └── gen-rb
            ├── cassandra.rb
            ├── cassandra_constants.rb
            └── cassandra_types.rb
/.gitignore:
--------------------------------------------------------------------------------
1 | pkg
2 | *.rbc
3 | .redcar
4 | doc/
5 | .yardoc/
6 | Gemfile.lock
7 | cassandra-*.gem
8 | tmp
9 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: ruby
2 | rvm:
3 | - 2.0.0
4 | - 1.9.3
5 | - 1.9.2
6 | - 1.8.7
7 | env:
8 | - CASSANDRA_VERSION=1.2
9 | - CASSANDRA_VERSION=1.1
10 | - CASSANDRA_VERSION=1.0
11 | - CASSANDRA_VERSION=0.8
12 | # these two require Java 6, see https://github.com/travis-ci/travis-ci/issues/686
13 | # - CASSANDRA_VERSION=0.7
14 | # - CASSANDRA_VERSION=0.6
15 | before_script:
16 | - java -version
17 | - bundle exec rake 'cassandra:start[daemonize]'
18 |
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | v0.23.0
2 | - Update simple_uuid dependency (issue #197, courtesy @tsenart)
3 | - Changed to new github url: github.com/cassandra-rb/cassandra
4 |
5 | v0.22.0
6 | - Add new composite API to resolve ambiguity between unpacking a composite and packing a composite with one element (issue #196, courtesy @kevinkehl)
7 | - Breaking change: schema change methods no longer catch exceptions internally. They work the same as everything else.
8 |
9 | v0.21.0
10 | - Add 1.2 libs to gem
11 |
12 | v0.19.0
13 | - Fixed windows build by disabling native extensions (issue #188, courtesy @jacek99)
14 | - Speed improvement to native composite column extension (issue #186, courtesy @muirmanders)
15 | - Fix OrderedHash iterator return values (issue #185, courtesy @fester)
16 | - Native support for dynamic composite decoding (issue #184, courtesy @muirmanders)
17 |
18 | v0.18.0
19 | - Cassandra 1.2 support (issue #175, courtesy @brainopia)
20 | - drop_keyspace defaults to current keyspace (issue #176, courtesy @brainopia)
21 | - Easier flush_batch interface (issue #182, courtesy @brainopia)
22 | - Support for removing multiple columns (issue #183, courtesy @brainopia)
23 |
24 | v0.17.0
25 | - multi_get_columns with only one query (courtesy @christian-blades-cb)
26 | - documentation fixes for get and multi_get (issue #136, courtesy @christian-blades-cb)
27 |
28 | v0.16.0
29 | - New :queue_size option for batch mode (courtesy @vicentllongo)
30 | - Cassandra 1.1 support (courtesy @hawknewton)
31 |
32 | v0.15.0
33 | - Fixes for ruby 1.8 (issue #158, courtesy @jarib)
34 | - get_indexed_slices returns an OrderedHash (issue #110)
35 |
36 | v0.14.0
37 | - Numerous performance improvements (courtesy @nearbuy)
38 | - Fixed many 0.6 bugs
39 | - Added batch counter update support to Cassandra::Mock
40 | - Add support for DynamicComposite columns (issue #154, courtesy @nearbuy)
41 | - API cleanup in get_indexed_slices (issue #155, courtesy @mcmire)
42 |
43 | v0.13.0
44 | - Support for new thrift gem versions
45 | - Updated all links to point to new github repo
46 | - Batch support for counter updates (courtesy @nearbuy)
47 | - Counter super column support in get and get_range (courtesy @behrangj)
48 | - Composite columns (courtesy @nearbuy)
49 | - Gemspec cleanup
50 |
51 | v0.12.2
52 | - Respect the start_key in get_range. Resolves Issue #127.
53 | - Fix issue with differences in gemspec and what is required. Resolves Issue #125.
54 | - Update to Cassandra 0.8.7 and 1.0.2.
55 |
56 | v0.12.1
57 | - Fix issue with simple_uuid dependency.
58 | - Fix issue with get_range & get_range_batch keeping all results when a block is given. Resolves Issue #112.
59 |
60 | v0.12.0 Changed thrift_client dependency to 0.7.0
61 |
62 | v0.11.4
63 | - Fix get_range to invoke blocks
64 | - Fix current distribution URLs in Rakefile. Resolves Issue #97.
65 |
66 | v0.11.3
67 | - Fix login after reconnect
68 |
69 | v0.11.2
70 | - Update thrift_client gem to deal with failover bug.
71 |
72 | v0.11.1
73 | - Add counter support for Cassandra 0.8. (Thanks Swrve team!)
74 | - Add additional rake tasks to start and stop cassandra in the background.
75 | - Fix issue with non-utf8 column names not being encoded/decoded properly.
76 | - Add way to specify a default read/write consistency.
77 | - Consolidate 0.7 and 0.8 functions, and move 0.6 specific functions into lib/cassandra/0.6/
78 | - Use 0.8-rc1 as 0.8 install target.
79 | - Updated get_index_slices API to allow passing an array of hashes instead of IndexClause instance. (Maintains backward compatibility.)
80 | - Fix permissions issue.
81 |
82 | v0.11.0
83 | - Remove direct thrift dependency. Allow thrift_client to require it.
84 | - Add functions for each and each_key to iterate through key ranges.
85 | - Add function for get_range_keys which returns an array of keys in a given range.
86 | - Changed the return value of get_range to an OrderedHash.
87 | - Change get_range to accept both a range of keys and a range of columns.
88 | - Add batched range support to get_range and add get_range_batch.
89 |
90 | v0.10.0 Major Update (rjackson)
91 | - Update Rakefile to install 0.6.13, 0.7.4, 0.8.0-beta1 to ~/cassandra/cassandra-VERSION
92 | - Add data:load task to Rakefile for creating the schema required for the tests
93 | - Default the Rakefile to use 0.8-beta1
94 | - Setup test suite to work on 0.6.13, 0.7.4, and 0.8.0-beta1
95 | - All tests pass for all supported (0.6.13, 0.7.4, 0.8.0-beta1) versions.
96 | - Added Support for 0.8-beta1
97 | - Changed get_index_slices to return a hash of rows
98 | - Updated Cassandra::Mock to pass all tests for each Cassandra version
99 |
100 | v0.9.2 fix bug with deletions in batch mutations
101 |
102 | v0.9.1 Support for secondary indexing. (jhermes)
103 | Fix bug in mock where we didn't support range queries. (therealadam)
104 | Support deletes in batch mutations. [blanquer]
105 |
106 | v0.9.0 cassandra 0.7 compat
107 |
108 | v0.8.2 Renamed :thrift_client_class option, UUID fix, 0.6 update, fixing cassandra install, many Mock fixes
109 |
110 | v0.8.1 Adding the ability to compose ThriftClient classes.
111 |
112 | v0.8.0 Compatibility with Cassandra 0.6 betas (no longer compatible with 0.5); assorted bugfixes.
113 |
114 | v0.7.6 Bugfixes.
115 |
116 | v0.7.5 Another packaging error.
117 |
118 | v0.7.4 Fix thrift_client dependency
119 |
120 | v0.7.3 New Cassandra::Mock - a pure ruby mock of cassandra; bugfixes.
121 |
122 | v0.7.2 Switch to newly-extracted simple_uuid gem, minor bugfixes.
123 |
124 | v0.7.1 update thrift-generated code to 0.5
125 |
126 | v0.7 Various improvements.
127 | - Compatibility with Cassandra 0.5.
128 | - performance improvements
129 |
130 | v0.6. Use thrift_client gem.
131 |
132 | v0.5.6.2. Release process was busted.
133 |
134 | v0.5.6. Real multiget.
135 |
136 | v0.5.5. to_guid for Long. Don't continually re-checkout the Git mirror. Use curl instead of wget, sigh. Use new unified insert API.
137 |
138 | v0.5.4. Use wget instead of curl.
139 |
140 | v0.5.3. Update Multiblog sample schema.
141 |
142 | v0.5.2. Update server version so CommitLog replay is fixed.
143 |
144 | v0.5.1. Add bin/cassandra_helper script, to build and start the server for you. Improve build error reporting.
145 |
146 | v0.5. More API changes. Working temporal comparators.
147 |
148 | v0.4. Use new comparator API. Namespace Thrift bindings; rename gem and class to Cassandra. Make tokens and limits actually work. Retry UnavailableExceptions.
149 |
150 | v0.3. Use new Thrift API.
151 |
152 | v0.2.2. Use nil for empty byte array, not empty string. Improves API.
153 |
154 | v0.2.1. Don't write serialization overhead on nulls.
155 |
156 | v0.2. Re-factor table vs. column family interface per discussion with jbellis.
157 |
158 | v0.1. First release.
159 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source "http://rubygems.org"
2 |
3 | gemspec
4 |
5 | group :development do
6 | gem 'eventmachine'
7 | end
8 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/Manifest:
--------------------------------------------------------------------------------
1 | CHANGELOG
2 | Gemfile
3 | LICENSE
4 | Manifest
5 | README.md
6 | Rakefile
7 | bin/cassandra_helper
8 | conf/0.6/cassandra.in.sh
9 | conf/0.6/log4j.properties
10 | conf/0.6/schema.json
11 | conf/0.6/storage-conf.xml
12 | conf/0.7/cassandra.in.sh
13 | conf/0.7/cassandra.yaml
14 | conf/0.7/log4j-server.properties
15 | conf/0.7/schema.json
16 | conf/0.7/schema.txt
17 | conf/0.8/cassandra.in.sh
18 | conf/0.8/cassandra.yaml
19 | conf/0.8/log4j-server.properties
20 | conf/0.8/schema.json
21 | conf/0.8/schema.txt
22 | conf/1.0/cassandra.in.sh
23 | conf/1.0/cassandra.yaml
24 | conf/1.0/log4j-server.properties
25 | conf/1.0/schema.json
26 | conf/1.0/schema.txt
27 | conf/1.1/cassandra.in.sh
28 | conf/1.1/cassandra.yaml
29 | conf/1.1/log4j-server.properties
30 | conf/1.1/schema.json
31 | conf/1.1/schema.txt
32 | ext/cassandra_native.c
33 | ext/extconf.rb
34 | lib/cassandra.rb
35 | lib/cassandra/0.6.rb
36 | lib/cassandra/0.6/cassandra.rb
37 | lib/cassandra/0.6/columns.rb
38 | lib/cassandra/0.6/protocol.rb
39 | lib/cassandra/0.7.rb
40 | lib/cassandra/0.7/cassandra.rb
41 | lib/cassandra/0.7/columns.rb
42 | lib/cassandra/0.7/protocol.rb
43 | lib/cassandra/0.8.rb
44 | lib/cassandra/0.8/cassandra.rb
45 | lib/cassandra/0.8/columns.rb
46 | lib/cassandra/0.8/protocol.rb
47 | lib/cassandra/1.0.rb
48 | lib/cassandra/1.0/cassandra.rb
49 | lib/cassandra/1.0/columns.rb
50 | lib/cassandra/1.0/protocol.rb
51 | lib/cassandra/1.1.rb
52 | lib/cassandra/1.1/cassandra.rb
53 | lib/cassandra/1.1/columns.rb
54 | lib/cassandra/1.1/protocol.rb
55 | lib/cassandra/array.rb
56 | lib/cassandra/batch.rb
57 | lib/cassandra/cassandra.rb
58 | lib/cassandra/column_family.rb
59 | lib/cassandra/columns.rb
60 | lib/cassandra/comparable.rb
61 | lib/cassandra/composite.rb
62 | lib/cassandra/constants.rb
63 | lib/cassandra/debug.rb
64 | lib/cassandra/dynamic_composite.rb
65 | lib/cassandra/helpers.rb
66 | lib/cassandra/keyspace.rb
67 | lib/cassandra/long.rb
68 | lib/cassandra/mock.rb
69 | lib/cassandra/ordered_hash.rb
70 | lib/cassandra/protocol.rb
71 | lib/cassandra/time.rb
72 | test/cassandra_client_test.rb
73 | test/cassandra_mock_test.rb
74 | test/cassandra_test.rb
75 | test/comparable_types_test.rb
76 | test/composite_type_test.rb
77 | test/eventmachine_test.rb
78 | test/ordered_hash_test.rb
79 | test/test_helper.rb
80 | vendor/0.6/gen-rb/cassandra.rb
81 | vendor/0.6/gen-rb/cassandra_constants.rb
82 | vendor/0.6/gen-rb/cassandra_types.rb
83 | vendor/0.7/gen-rb/cassandra.rb
84 | vendor/0.7/gen-rb/cassandra_constants.rb
85 | vendor/0.7/gen-rb/cassandra_types.rb
86 | vendor/0.8/gen-rb/cassandra.rb
87 | vendor/0.8/gen-rb/cassandra_constants.rb
88 | vendor/0.8/gen-rb/cassandra_types.rb
89 | vendor/1.0/gen-rb/cassandra.rb
90 | vendor/1.0/gen-rb/cassandra_constants.rb
91 | vendor/1.0/gen-rb/cassandra_types.rb
92 | vendor/1.1/gen-rb/cassandra.rb
93 | vendor/1.1/gen-rb/cassandra_constants.rb
94 | vendor/1.1/gen-rb/cassandra_types.rb
95 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # cassandra
2 | A Ruby client for the Cassandra distributed database.
3 |
4 | Supports Ruby 1.8.7, 1.9.2, 1.9.3, 2.0.0, 2.1, and Rubinius on Cassandra 0.7.x through 2.0.x.
5 |
6 | ## Status of this gem
7 |
8 | There is no longer much development effort being put into this gem. If you
9 | are just getting started with Cassandra then you probably want to use the Datastax
10 | [ruby-driver](https://github.com/datastax/ruby-driver).
11 |
12 | We are still happy to take patches if you want to improve this gem.
13 |
14 | ## Getting Started
15 |
16 | Here is a quick sample of the general use (more details in Read/Write
17 | API below):
18 |
19 | require 'cassandra'
20 | client = Cassandra.new('Twitter', '127.0.0.1:9160')
21 | client.insert(:Users, "5", {'screen_name' => "buttonscat"})
22 |
23 | ## License
24 |
25 | Copyright 2009-2011 Twitter, Inc. See included LICENSE file. Portions copyright 2004-2009 David Heinemeier Hansson, and used with permission.
26 |
27 | ## Cassandra Version
28 |
29 | The Cassandra project is under very active development, and as such
30 | there are a few different versions that you may need to use this gem
31 | with. We provide a simple, reliable mechanism for selecting the
32 | specific version that you are connecting to when requiring the gem.
33 |
34 | #### Require Method
35 | The default version is the currently stable release of cassandra. (0.8
36 | at this time.)
37 |
38 | To use the default version simply use a normal require:
39 |
40 | require 'cassandra'
41 |
42 | To use a specific version (1.0 in this example) you would use a
43 | slightly differently formatted require:
44 |
45 | require 'cassandra/1.0'
46 |
47 | #### Environment Variable Method
48 | These mechanisms work well when you are using the cassandra gem in your
49 | own projects or irb, but if you would rather not hard code your app to a
50 | specific version you can always specify an environment variable with the
51 | version you are using:
52 |
53 | export CASSANDRA_VERSION=0.8
54 |
55 | Then you would use the default require as listed above:
56 |
57 | require 'cassandra'
58 |
59 | ## Read/Write API Method Reference
60 |
61 | ### insert
62 |
63 | * column\_family - The column\_family that you are inserting into.
64 | * key - The row key to insert.
65 | * hash - The columns or super columns to insert.
66 | * options - Valid options are:
67 | * :timestamp - Uses the current time if none specified.
68 | * :consistency - Uses the default write consistency if none specified.
69 | * :ttl - If specified this is the number of seconds after the insert that this value will be available.
70 |
71 | This is the main method used to insert rows into cassandra. If the
72 | column\_family that you are inserting into is a SuperColumnFamily then
73 | the hash passed in should be a nested hash, otherwise it should be a
74 | flat hash.
75 |
76 | This method can also be called while in batch mode. If in batch mode
77 | then we queue up the mutations (an insert in this case) and pass them to
78 | cassandra in a single batch at the end of the block.
79 |
80 | Example:
81 |
82 | @client.insert(:Statuses, key, {'body' => 'v', 'user' => 'v'})
83 |
84 | columns = {@uuids[1] => 'v1', @uuids[2] => 'v2'}
85 | @client.insert(:StatusRelationships, key, {'user_timelines' => columns})
86 |
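   | The :ttl option can be sketched as follows (60 is an arbitrary value; the
   | column silently disappears once the TTL has elapsed):
   |
   | @client.insert(:Statuses, key, {'body' => 'v'}, :ttl => 60)
   | @client.get(:Statuses, key) # returns {'body' => 'v'} for the next 60 seconds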
87 |
88 | ### remove
89 |
90 | * column\_family - The column\_family that you are working with.
91 | * key - The row key to remove (or remove columns from).
92 | * columns - Either a single super_column or a list of columns to remove.
93 | * sub_columns - The list of sub\_columns to remove.
94 | * options - Valid options are:
95 | * :timestamp - Uses the current time if none specified.
96 | * :consistency - Uses the default write consistency if none specified.
97 |
98 | This method is used to delete rows, columns, or super columns (actually
99 | marking them as deleted with a tombstone), depending on the parameters
100 | passed. If only a key is passed, the entire row will be marked as deleted.
101 | If a column name is passed in, that column will be deleted.
102 |
103 | Example:
104 |
105 | @client.insert(:Statuses, key, {'body' => 'v', 'subject' => 'v'})
106 |
107 | @client.remove(:Statuses, key, 'body') # removes the 'body' column
108 | @client.remove(:Statuses, key) # removes the row
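   |
   | For a super column family, a sub\_column can be removed by passing the super
   | column name followed by the sub\_column; a sketch against the
   | :StatusRelationships super column family from the insert example:
   |
   | @client.remove(:StatusRelationships, key, 'user_timelines', @uuids[1])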
109 |
110 | ### count\_columns
111 |
112 | Count the columns for the provided parameters.
113 |
114 | * column\_family - The column\_family that you are working with.
115 | * key - The row key.
116 | * columns - Either a single super_column or a list of columns.
117 | * sub_columns - The list of sub\_columns to select.
118 | * options - Valid options are:
119 | * :start - The column name to start from.
120 | * :stop - The column name to stop at.
121 | * :count - The maximum count of columns to return. (By default cassandra will count up to 100 columns)
122 | * :consistency - Uses the default read consistency if none specified.
123 |
124 | Example:
125 |
126 | @client.insert(:Statuses, key, {'body' => 'v1', 'user' => 'v2'})
127 | @client.count_columns(:Statuses, key) # returns 2
128 |
129 | ### get
130 |
131 | Return a hash (actually, a Cassandra::OrderedHash) or a single value
132 | representing the element at the column_family:key:[column]:[sub_column]
133 | path you request.
134 |
135 | * column\_family - The column\_family that you are working with.
136 | * key - The row key to select.
137 | * columns - Either a single super\_column or a list of columns.
138 | * sub\_columns - The list of sub\_columns to select.
139 | * options - Valid options are:
140 | * :count - The number of columns requested to be returned.
141 | * :start - The starting value for selecting a range of columns.
142 | * :finish - The final value for selecting a range of columns.
143 | * :reversed - If set to true the results will be returned in
144 | reverse order.
145 | * :consistency - Uses the default read consistency if none specified.
146 |
147 | Example:
148 |
149 | @client.insert(:Users, key, {'body' => 'v', 'user' => 'v'})
150 | @client.get(:Users, key) # returns {'body' => 'v', 'user' => 'v'}
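   |
   | The column-range options can be combined in a single call; a sketch (the
   | option values here are arbitrary):
   |
   | @client.get(:Users, key, :count => 10, :reversed => true) # up to 10 columns, in reverse comparator order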
151 |
152 | ### multi\_get
153 |
154 | Multi-key version of Cassandra#get.
155 |
156 | This method allows you to select multiple rows with a single query.
157 | If a key that is passed in doesn't exist an empty hash will be
158 | returned.
159 |
160 | Supports the same parameters as Cassandra#get.
161 |
162 | * column_family - The column_family that you are working with.
163 | * key - An array of keys to select.
164 | * columns - Either a single super_column or a list of columns.
165 | * sub_columns - The list of sub\_columns to select.
166 | * options - Valid options are:
167 | * :count - The number of columns requested to be returned.
168 | * :start - The starting value for selecting a range of columns.
169 | * :finish - The final value for selecting a range of columns.
170 | * :reversed - If set to true the results will be returned in reverse order.
171 | * :consistency - Uses the default read consistency if none specified.
172 |
173 | Example:
174 |
175 | @client.insert(:Users, '1', {'body' => 'v1', 'user' => 'v1'})
176 | @client.insert(:Users, '2', {'body' => 'v2', 'user' => 'v2'})
177 |
178 | expected = OrderedHash[
179 | '1', {'body' => 'v1', 'user' => 'v1'},
180 | '2', {'body' => 'v2', 'user' => 'v2'},
181 | 'bogus', {}
182 | ]
183 | result = @client.multi_get(:Users, ['1', '2', 'bogus'])
184 |
185 | ### exists?
186 |
187 | Return true if the column\_family:key:[column]:[sub\_column] path you
188 | request exists.
189 |
190 | If passed in only a row key it will query for any columns (limiting
191 | to 1) for that row key. If a column is passed in it will query for
192 | that specific column/super column.
193 |
194 | This method will return true or false.
195 |
196 | * column\_family - The column\_family that you are working with.
197 | * key - The row key to check.
198 | * columns - Either a single super\_column or a list of columns.
199 | * sub\_columns - The list of sub\_columns to check.
200 | * options - Valid options are:
201 | * :consistency - Uses the default read consistency if none specified.
202 |
203 | Example:
204 |
205 | @client.insert(:Statuses, 'key', {'body' => 'v'})
206 | @client.exists?(:Statuses, 'key') # returns true
207 | @client.exists?(:Statuses, 'bogus') # returns false
208 | @client.exists?(:Statuses, 'key', 'body') # returns true
209 | @client.exists?(:Statuses, 'key', 'bogus') # returns false
210 |
211 | ### get\_range
212 | Return a Cassandra::OrderedHash containing the columns specified for the given
213 | range of keys in the column\_family you request.
214 |
215 | This method is just a convenience wrapper around Cassandra#get_range_single
216 | and Cassandra#get\_range\_batch. If :key\_size, :batch\_size, or a block
217 | is passed in Cassandra#get\_range\_batch will be called. Otherwise
218 | Cassandra#get\_range\_single will be used.
219 |
220 | The start\_key and finish\_key parameters are only useful for iterating over all records
221 | as is done in the Cassandra#each and Cassandra#each\_key methods if you are using the
222 | RandomPartitioner.
223 |
224 | If the table is partitioned with OrderPreservingPartitioner you may
225 | use the start\_key and finish\_key params to select all records with
226 | the same prefix value.
227 |
228 | If a block is passed in we will yield the row key and columns for
229 | each record returned.
230 |
231 | Please note that Cassandra returns a row for each row that has existed in the
232 | system since gc\_grace\_seconds. This is because deleted row keys are marked as
233 | deleted, but left in the system until the cluster has had reasonable time to replicate the deletion.
234 | This function attempts to suppress deleted rows (actually any row returned without
235 | columns is suppressed).
236 |
237 | * column\_family - The column\_family that you are working with.
238 | * options - Valid options are:
239 | * :start\_key - The starting value for selecting a range of keys (only useful with OPP).
240 | * :finish\_key - The final value for selecting a range of keys (only useful with OPP).
241 | * :key\_count - The total number of keys to return from the query. (see note regarding deleted records)
242 | * :batch\_size - The maximum number of keys to return per query. If specified will loop until :key\_count is obtained or all records have been returned.
243 | * :columns - A list of columns to return.
244 | * :count - The number of columns requested to be returned.
245 | * :start - The starting value for selecting a range of columns.
246 | * :finish - The final value for selecting a range of columns.
247 | * :reversed - If set to true the results will be returned in reverse order.
248 | * :consistency - Uses the default read consistency if none specified.
249 |
250 | Example:
251 |
252 | 10.times do |i|
253 | @client.insert(:Statuses, i.to_s, {'body' => '1'})
254 | end
255 |
256 | @client.get_range(:Statuses, :key_count => 4)
257 |
258 | # returns:
259 | #{
260 | # '0' => {'body' => '1'},
261 | # '1' => {'body' => '1'},
262 | # '2' => {'body' => '1'},
263 | # '3' => {'body' => '1'}
264 | #}
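   |
   | When a block (or :batch\_size) is given, the batched code path is used and
   | each row is yielded as it is fetched; a minimal sketch over the same rows:
   |
   | @client.get_range(:Statuses, :batch_size => 4) do |key, columns|
   |   puts "#{key} => #{columns.inspect}"
   | end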
265 |
266 | ### count\_range
267 |
268 | Return the number of rows found within a given range.
269 |
270 | This method just calls Cassandra#get\_range and counts the
271 | rows returned.
272 |
273 | See Cassandra#get\_range for options.
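   |
   | Example (a sketch, assuming the column family contains only the ten
   | :Statuses rows inserted in the Cassandra#get\_range example above):
   |
   | @client.count_range(:Statuses) # returns 10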
274 |
275 | ### get\_range\_keys
276 |
277 | Return an Array containing all of the keys within a given range.
278 |
279 | This method just calls Cassandra#get\_range and returns the
280 | row keys for the records returned.
281 |
282 | See Cassandra#get\_range for options.
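   |
   | Example (a sketch over the same ten :Statuses rows; ordering of the keys
   | depends on the partitioner):
   |
   | @client.get_range_keys(:Statuses, :key_count => 4) # returns an Array of 4 keys, e.g. ['0', '1', '2', '3']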
283 |
284 | ### each\_key
285 | Iterate through each key within the given range parameters. This function can be
286 | used to iterate over each key in the given column family.
287 |
288 | This method just calls Cassandra#get\_range and yields each row key.
289 |
290 | See Cassandra#get\_range for options.
291 |
292 | Example:
293 | 10.times do |i|
294 | @client.insert(:Statuses, i.to_s, {"body-#{i.to_s}" => 'v'})
295 | end
296 |
297 | @client.each_key(:Statuses) do |key|
298 | print key
299 | end
300 |
301 | # returns 0123456789
302 |
303 | ### each
304 | Iterate through each row within the given column\_family.
305 |
306 | This method just calls Cassandra#get\_range and yields the key and
307 | columns.
308 |
309 | See Cassandra#get\_range for options.
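   |
   | Example (a sketch; each row key and its columns are yielded in turn):
   |
   | @client.each(:Statuses) do |key, columns|
   |   puts "#{key}: #{columns.inspect}"
   | end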
310 |
311 | ### get\_indexed\_slices
312 | This method is used to query a secondary index with a set of
313 | provided search parameters.
314 |
315 | Please note that you can either specify a
316 | CassandraThrift::IndexClause or an array of hashes with the
317 | format as below.
318 |
319 | * column\_family - The Column Family this operation will be run on.
320 | * index\_clause - This can either be a CassandraThrift::IndexClause or an array of hashes with the following keys:
321 | * :column\_name - Column to be compared
322 | * :value - Value to compare against
323 | * :comparison - Type of comparison to do.
324 | * options
325 | * :key\_count - Set maximum number of rows to return. (Only works if CassandraThrift::IndexClause is not passed in.)
326 | * :key\_start - Set starting row key for search. (Only works if CassandraThrift::IndexClause is not passed in.)
327 | * :consistency
328 |
329 | Example:
330 |
331 | @client.create_index('Twitter', 'Statuses', 'x', 'LongType')
332 |
333 | @client.insert(:Statuses, 'row1', { 'x' => [0,10].pack("NN") })
334 |
335 | (2..10).to_a.each do |i|
336 | @client.insert(:Statuses, 'row' + i.to_s, { 'x' => [0,20].pack("NN"), 'non_indexed' => [i].pack('N*') })
337 | end
338 |
339 | @client.insert(:Statuses, 'row11', { 'x' => [0,30].pack("NN") })
340 |
341 | expressions = [{:column_name => 'x', :value => [0,20].pack("NN"), :comparison => "=="}]
342 |
343 | # verify multiples will be returned
344 | @client.get_indexed_slices(:Statuses, expressions).length # returns 9
345 |
346 | # verify that GT and LT queries perform properly
347 | expressions = [
348 | { :column_name => 'x',
349 | :value => [0,20].pack("NN"),
350 | :comparison => "=="},
351 | { :column_name => 'non_indexed',
352 | :value => [5].pack("N*"),
353 | :comparison => ">"}
354 | ]
355 |
356 | @client.get_indexed_slices(:Statuses, expressions).length # returns 5
357 |
358 | ### batch
359 | Takes a block where all the mutations (inserts and deletions) inside it are
360 | queued, and at the end of the block are passed to cassandra in a single batch.
361 |
362 | If you don't want to send all the mutations inside the block in a big single
363 | batch, you can use the :queue\_size option to send smaller batches. If the
364 | queue is not empty at the end of the block, the remaining mutations are sent.
365 |
366 | * options
367 | * :consistency - Override the consistency level from individual mutations.
368 | * :queue\_size - Maximum number of mutations to send at once.
369 |
370 | Example:
371 |
372 | @client.batch do
373 | @client.insert(:Statuses, 'k1', {'body' => 'v1'})
374 | @client.insert(:Statuses, 'k2', {'body' => 'v2'})
375 | @client.remove(:Statuses, 'k3')
376 | end
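   |
   | A sketch of the :queue\_size option (500 is an arbitrary value); mutations
   | are flushed every 500 operations rather than in one large batch, and any
   | remainder is sent when the block ends:
   |
   | @client.batch(:queue_size => 500) do
   |   10_000.times do |i|
   |     @client.insert(:Statuses, i.to_s, {'body' => 'v'})
   |   end
   | end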
377 |
378 |
379 | ## Reporting Problems
380 |
381 | The Github issue tracker is [here](http://github.com/cassandra-rb/cassandra/issues). If you have problems with this library or Cassandra itself, please use the [cassandra-user mailing list](http://mail-archives.apache.org/mod_mbox/incubator-cassandra-user/).
382 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | require 'fileutils'
2 | require 'rake/testtask'
3 | require 'rake/extensiontask'
4 |
5 | CassandraBinaries = {
6 | '0.6' => 'http://archive.apache.org/dist/cassandra/0.6.13/apache-cassandra-0.6.13-bin.tar.gz',
7 | '0.7' => 'http://archive.apache.org/dist/cassandra/0.7.9/apache-cassandra-0.7.9-bin.tar.gz',
8 | '0.8' => 'http://archive.apache.org/dist/cassandra/0.8.7/apache-cassandra-0.8.7-bin.tar.gz',
9 | '1.0' => 'http://archive.apache.org/dist/cassandra/1.0.6/apache-cassandra-1.0.6-bin.tar.gz',
10 | '1.1' => 'http://archive.apache.org/dist/cassandra/1.1.5/apache-cassandra-1.1.5-bin.tar.gz',
11 | '1.2' => 'http://archive.apache.org/dist/cassandra/1.2.1/apache-cassandra-1.2.1-bin.tar.gz'
12 | }
13 |
14 | CASSANDRA_HOME = ENV['CASSANDRA_HOME'] || "#{ENV['HOME']}/cassandra"
15 | CASSANDRA_VERSION = ENV['CASSANDRA_VERSION'] || '0.8'
16 | CASSANDRA_PIDFILE = ENV['CASSANDRA_PIDFILE'] || "#{CASSANDRA_HOME}/cassandra.pid"
17 |
18 | def setup_cassandra_version(version = CASSANDRA_VERSION)
19 | FileUtils.mkdir_p CASSANDRA_HOME
20 |
21 | destination_directory = File.join(CASSANDRA_HOME, 'cassandra-' + CASSANDRA_VERSION)
22 |
23 | unless File.exists?(File.join(destination_directory, 'bin','cassandra'))
24 | download_source = CassandraBinaries[CASSANDRA_VERSION]
25 | download_destination = File.join("/tmp", File.basename(download_source))
26 | untar_directory = File.join(CASSANDRA_HOME, File.basename(download_source,'-bin.tar.gz'))
27 |
28 | puts "downloading cassandra"
29 | sh "curl -L -o #{download_destination} #{download_source}"
30 |
31 | sh "tar xzf #{download_destination} -C #{CASSANDRA_HOME}"
32 | sh "mv #{untar_directory} #{destination_directory}"
33 | end
34 | end
35 |
36 | def setup_environment
37 | env = ""
38 | if !ENV["CASSANDRA_INCLUDE"]
39 | env << "CASSANDRA_INCLUDE=#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}/cassandra.in.sh "
40 | env << "CASSANDRA_HOME=#{CASSANDRA_HOME}/cassandra-#{CASSANDRA_VERSION} "
41 | env << "CASSANDRA_CONF=#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}"
42 | else
43 | env << "CASSANDRA_INCLUDE=#{ENV['CASSANDRA_INCLUDE']} "
44 | env << "CASSANDRA_HOME=#{ENV['CASSANDRA_HOME']} "
45 | env << "CASSANDRA_CONF=#{ENV['CASSANDRA_CONF']}"
46 | end
47 |
48 | env
49 | end
50 |
51 | def running?(pid_file = nil)
52 | pid_file ||= CASSANDRA_PIDFILE
53 |
54 | if File.exists?(pid_file)
55 | pid = File.new(pid_file).read.to_i
56 | begin
57 | Process.kill(0, pid)
58 | return true
59 | rescue
60 | File.delete(pid_file)
61 | end
62 | end
63 |
64 | false
65 | end
66 |
67 | def listening?(host, port)
68 | TCPSocket.new(host, port).close
69 | true
70 | rescue Errno::ECONNREFUSED => e
71 | false
72 | end
73 |
74 | namespace :cassandra do
75 | desc "Start Cassandra"
76 | task :start, [:daemonize] => :java do |t, args|
77 | args.with_defaults(:daemonize => true)
78 |
79 | setup_cassandra_version
80 | env = setup_environment
81 |
82 | Dir.chdir(File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")) do
83 | sh("env #{env} bin/cassandra #{'-f' unless args.daemonize} -p #{CASSANDRA_PIDFILE}")
84 | end
85 |
86 | if args.daemonize
87 | end_time = Time.now + 30
88 | host = '127.0.0.1'
89 | port = 9160
90 |
91 | until Time.now >= end_time || listening?(host, port)
92 | puts "waiting for 127.0.0.1:9160"
93 | sleep 0.1
94 | end
95 |
96 | unless listening?(host, port)
97 | raise "timed out waiting for cassandra to start"
98 | end
99 | end
100 | end
101 |
102 | desc "Stop Cassandra"
103 | task :stop => :java do
104 | setup_cassandra_version
105 | env = setup_environment
106 | sh("kill $(cat #{CASSANDRA_PIDFILE})")
107 | end
108 | end
109 |
110 | desc "Start Cassandra"
111 | task :cassandra => :java do
112 | begin
113 | Rake::Task["cassandra:start"].invoke(false)
114 | rescue RuntimeError => e
115 | raise e unless e.message =~ /Command failed with status \(130\)/ # handle keyboard interrupt errors
116 | end
117 | end
118 |
119 | desc "Run the Cassandra CLI"
120 | task :cli do
121 | Dir.chdir(File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")) do
122 | sh("bin/cassandra-cli -host localhost -port 9160")
123 | end
124 | end
125 |
126 | desc "Check Java version"
127 | task :java do
128 | is_java16 = `java -version 2>&1`.split("\n").first =~ /java version "1.6/
129 |
130 | if ['0.6', '0.7'].include?(CASSANDRA_VERSION) && !is_java16
131 | puts "You need to configure your environment for Java 1.6."
132 | puts "If you're on OS X, just export the following environment variables:"
133 | puts ' JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/1.6/Home"'
134 | puts ' PATH="/System/Library/Frameworks/JavaVM.framework/Versions/1.6/Home/bin:$PATH"'
135 | exit(1)
136 | end
137 | end
138 |
139 | namespace :data do
140 | desc "Reset test data"
141 | task :reset do
142 | puts "Resetting test data"
143 | sh("rm -rf #{File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}", 'data')}")
144 | end
145 |
146 | desc "Load test data structures."
147 | task :load do
148 | unless CASSANDRA_VERSION == '0.6'
149 |
150 | schema_path = "#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}/schema.txt"
151 | puts "Loading test data structures."
152 | Dir.chdir(File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")) do
153 | begin
154 | sh("bin/cassandra-cli --host localhost --batch < #{schema_path}")
155 | rescue
156 | puts "Schema already loaded."
157 | end
158 | end
159 | end
160 | end
161 | end
162 |
163 | task :test => 'data:load'
164 |
165 | # desc "Regenerate thrift bindings for Cassandra" # Dev only
166 | task :thrift do
167 | puts "Generating Thrift bindings"
168 | FileUtils.mkdir_p "vendor/#{CASSANDRA_VERSION}"
169 |
170 | system(
171 | "cd vendor/#{CASSANDRA_VERSION} &&
172 | rm -rf gen-rb &&
173 | thrift -gen rb #{File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")}/interface/cassandra.thrift")
174 | end
175 |
176 | task :fix_perms do
177 | chmod_R 0755, './'
178 | end
179 |
180 | task :pkg => [:fix_perms]
181 |
182 | Rake::ExtensionTask.new('cassandra_native') do |ext|
183 | ext.ext_dir = 'ext'
184 | end
185 |
186 | Rake::TestTask.new do |t|
187 | t.test_files = FileList['test/*.rb']
188 | end
189 |
190 | task :default => :test
191 | task :test => :compile
192 |
--------------------------------------------------------------------------------
/bin/cassandra_helper:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 |
3 | require 'rubygems'
4 | require 'rake'
5 | require 'cassandra'
6 |
7 | gem_path = $LOAD_PATH.last.sub(/lib$/, "")
8 |
9 | Dir.chdir(gem_path) do
10 | if !ENV["CASSANDRA_INCLUDE"]
11 | puts "Set the CASSANDRA_INCLUDE environment variable to use a non-default cassandra.in.sh and friends."
12 | end
13 |
14 | ARGV << "-T" if ARGV.empty?
15 | exec("env FROM_BIN_CASSANDRA_HELPER=1 rake #{ARGV.join(' ')}")
16 | end
--------------------------------------------------------------------------------
/cassandra.gemspec:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 |
3 | Gem::Specification.new do |s|
4 | s.name = "cassandra"
5 | s.version = "0.23.0"
6 |
7 | s.required_rubygems_version = Gem::Requirement.new(">= 0.8") if s.respond_to? :required_rubygems_version=
8 | s.authors = ["Evan Weaver, Ryan King"]
9 | s.description = "A Ruby client for the Cassandra distributed database."
10 | s.email = ""
11 | s.license = "Apache 2.0"
12 | s.executables = ["cassandra_helper"]
13 | s.extensions = ["ext/extconf.rb"]
14 | s.extra_rdoc_files = ["CHANGELOG", "LICENSE", "README.md", "bin/cassandra_helper", "ext/cassandra_native.c", "ext/extconf.rb", "lib/cassandra.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/0.8.rb", "lib/cassandra/0.8/cassandra.rb", "lib/cassandra/0.8/columns.rb", "lib/cassandra/0.8/protocol.rb", "lib/cassandra/1.0.rb", "lib/cassandra/1.0/cassandra.rb", "lib/cassandra/1.0/columns.rb", "lib/cassandra/1.0/protocol.rb", "lib/cassandra/1.1.rb", "lib/cassandra/1.1/cassandra.rb", "lib/cassandra/1.1/columns.rb", "lib/cassandra/1.1/protocol.rb", "lib/cassandra/array.rb", "lib/cassandra/batch.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/column_family.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/composite.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/dynamic_composite.rb", "lib/cassandra/helpers.rb", "lib/cassandra/keyspace.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/protocol.rb", "lib/cassandra/time.rb"]
15 | s.files = ["CHANGELOG", "Gemfile", "LICENSE", "Manifest", "README.md", "Rakefile", "bin/cassandra_helper", "conf/0.6/cassandra.in.sh", "conf/0.6/log4j.properties", "conf/0.6/schema.json", "conf/0.6/storage-conf.xml", "conf/0.7/cassandra.in.sh", "conf/0.7/cassandra.yaml", "conf/0.7/log4j-server.properties", "conf/0.7/schema.json", "conf/0.7/schema.txt", "conf/0.8/cassandra.in.sh", "conf/0.8/cassandra.yaml", "conf/0.8/log4j-server.properties", "conf/0.8/schema.json", "conf/0.8/schema.txt", "conf/1.0/cassandra.in.sh", "conf/1.0/cassandra.yaml", "conf/1.0/log4j-server.properties", "conf/1.0/schema.json", "conf/1.0/schema.txt", "conf/1.1/cassandra.in.sh", "conf/1.1/cassandra.yaml", "conf/1.1/log4j-server.properties", "conf/1.1/schema.json", "conf/1.1/schema.txt", "ext/cassandra_native.c", "ext/extconf.rb", "lib/cassandra.rb", "lib/cassandra/0.6.rb", "lib/cassandra/0.6/cassandra.rb", "lib/cassandra/0.6/columns.rb", "lib/cassandra/0.6/protocol.rb", "lib/cassandra/0.7.rb", "lib/cassandra/0.7/cassandra.rb", "lib/cassandra/0.7/columns.rb", "lib/cassandra/0.7/protocol.rb", "lib/cassandra/0.8.rb", "lib/cassandra/0.8/cassandra.rb", "lib/cassandra/0.8/columns.rb", "lib/cassandra/0.8/protocol.rb", "lib/cassandra/1.0.rb", "lib/cassandra/1.0/cassandra.rb", "lib/cassandra/1.0/columns.rb", "lib/cassandra/1.0/protocol.rb", "lib/cassandra/1.1.rb", "lib/cassandra/1.1/cassandra.rb", "lib/cassandra/1.1/columns.rb", "lib/cassandra/1.1/protocol.rb", "lib/cassandra/array.rb", "lib/cassandra/batch.rb", "lib/cassandra/cassandra.rb", "lib/cassandra/column_family.rb", "lib/cassandra/columns.rb", "lib/cassandra/comparable.rb", "lib/cassandra/composite.rb", "lib/cassandra/constants.rb", "lib/cassandra/debug.rb", "lib/cassandra/dynamic_composite.rb", "lib/cassandra/helpers.rb", "lib/cassandra/keyspace.rb", "lib/cassandra/long.rb", "lib/cassandra/mock.rb", "lib/cassandra/ordered_hash.rb", "lib/cassandra/protocol.rb", "lib/cassandra/time.rb", "test/cassandra_client_test.rb", "test/cassandra_mock_test.rb", "test/cassandra_test.rb", "test/comparable_types_test.rb", "test/composite_type_test.rb", "test/eventmachine_test.rb", "test/ordered_hash_test.rb", "test/test_helper.rb", "vendor/0.6/gen-rb/cassandra.rb", "vendor/0.6/gen-rb/cassandra_constants.rb", "vendor/0.6/gen-rb/cassandra_types.rb", "vendor/0.7/gen-rb/cassandra.rb", "vendor/0.7/gen-rb/cassandra_constants.rb", "vendor/0.7/gen-rb/cassandra_types.rb", "vendor/0.8/gen-rb/cassandra.rb", "vendor/0.8/gen-rb/cassandra_constants.rb", "vendor/0.8/gen-rb/cassandra_types.rb", "vendor/1.0/gen-rb/cassandra.rb", "vendor/1.0/gen-rb/cassandra_constants.rb", "vendor/1.0/gen-rb/cassandra_types.rb", "vendor/1.1/gen-rb/cassandra.rb", "vendor/1.1/gen-rb/cassandra_constants.rb", "vendor/1.1/gen-rb/cassandra_types.rb", "cassandra.gemspec", "vendor/1.2/gen-rb", "vendor/1.2/gen-rb/cassandra.rb", "vendor/1.2/gen-rb/cassandra_constants.rb", "vendor/1.2/gen-rb/cassandra_types.rb", "conf/1.2/cassandra.in.sh", "conf/1.2/cassandra.yaml", "conf/1.2/log4j-server.properties", "conf/1.2/schema.json", "conf/1.2/schema.txt", "lib/cassandra/1.2.rb", "lib/cassandra/1.2/cassandra.rb", "lib/cassandra/1.2/columns.rb", "lib/cassandra/1.2/protocol.rb"]
16 | s.homepage = "http://github.com/cassandra-rb/cassandra"
17 | s.rdoc_options = ["--line-numbers", "--inline-source", "--title", "Cassandra", "--main", "README.md"]
18 | s.require_paths = ["lib", "ext"]
19 | s.rubyforge_project = "cassandra"
20 | s.rubygems_version = "1.8.17"
21 | s.summary = "A Ruby client for the Cassandra distributed database."
22 | s.test_files = ["test/cassandra_client_test.rb", "test/cassandra_mock_test.rb", "test/cassandra_test.rb", "test/comparable_types_test.rb", "test/composite_type_test.rb", "test/eventmachine_test.rb", "test/ordered_hash_test.rb", "test/test_helper.rb"]
23 |
24 | s.add_runtime_dependency('thrift_client', ['< 0.9', '~> 0.7'])
25 | s.add_runtime_dependency('json', '>= 0')
26 | s.add_runtime_dependency('rake', '>= 0')
27 | s.add_runtime_dependency('simple_uuid', '~> 0.3')
28 | s.add_development_dependency('rake-compiler', '~> 0.8.0')
29 | end
30 |
--------------------------------------------------------------------------------
/conf/0.6/cassandra.in.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # The directory where Cassandra's configs live (required)
18 | CASSANDRA_CONF=$CASSANDRA_CONF
19 |
20 | # This can be the path to a jar file, or a directory containing the
21 | # compiled classes. NOTE: This isn't needed by the startup script,
22 | # it's just used here in constructing the classpath.
23 | cassandra_bin=$CASSANDRA_HOME/build/classes
24 |
25 | # The java classpath (required)
26 | CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
27 |
28 | for jar in $CASSANDRA_HOME/lib/*.jar $CASSANDRA_HOME/build/lib/jars/*.jar; do
29 | CLASSPATH=$CLASSPATH:$jar
30 | done
31 |
32 | # Arguments to pass to the JVM
33 | JVM_OPTS=" \
34 | -ea \
35 | -Xms128M \
36 | -Xmx1G \
37 | -XX:TargetSurvivorRatio=90 \
38 | -XX:+AggressiveOpts \
39 | -XX:+UseParNewGC \
40 | -XX:+UseConcMarkSweepGC \
41 | -XX:+CMSParallelRemarkEnabled \
42 | -XX:+HeapDumpOnOutOfMemoryError \
43 | -XX:SurvivorRatio=128 \
44 | -XX:MaxTenuringThreshold=0 \
45 | -Dcom.sun.management.jmxremote.port=8080 \
46 | -Dcom.sun.management.jmxremote.ssl=false \
47 | -Dcom.sun.management.jmxremote.authenticate=false"
48 |
--------------------------------------------------------------------------------
/conf/0.6/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # for production, you should probably set the root to INFO
18 | # and the pattern to %c instead of %l. (%l is slower.)
19 |
20 | # output messages into a rolling log file as well as stdout
21 | log4j.rootLogger=DEBUG,stdout,R
22 |
23 | # stdout
24 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
25 | log4j.appender.stdout.layout=org.apache.log4j.SimpleLayout
26 |
27 | # rolling log file ("system.log")
28 | log4j.appender.R=org.apache.log4j.DailyRollingFileAppender
29 | log4j.appender.R.DatePattern='.'yyyy-MM-dd-HH
30 | log4j.appender.R.layout=org.apache.log4j.PatternLayout
31 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
32 | # Edit the next line to point to your logs directory
33 | log4j.appender.R.File=data/logs/system.log
34 |
35 | # Application logging options
36 | #log4j.logger.com.facebook=DEBUG
37 | #log4j.logger.com.facebook.infrastructure.gms=DEBUG
38 | #log4j.logger.com.facebook.infrastructure.db=DEBUG
39 |
--------------------------------------------------------------------------------
/conf/0.6/schema.json:
--------------------------------------------------------------------------------
1 | {"Twitter":{
2 | "Users":{
3 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
4 | "Type":"Standard"},
5 | "UserAudits":{
6 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
7 | "Type":"Standard"},
8 | "UserRelationships":{
9 | "CompareSubcolumnsWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
10 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
11 | "Type":"Super"},
12 | "Usernames":{
13 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
14 | "Type":"Standard"},
15 | "Statuses":{
16 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
17 | "Type":"Standard"},
18 | "StatusAudits":{
19 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
20 | "Type":"Standard"},
21 | "StatusRelationships":{
22 | "CompareSubcolumnsWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
23 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
24 | "Type":"Super"},
25 | "Indexes":{
26 | "CompareWith":"org.apache.cassandra.db.marshal.UTF8Type",
27 | "Type":"Super"},
28 | "TimelinishThings":{
29 | "CompareWith":"org.apache.cassandra.db.marshal.BytesType",
30 | "Type":"Standard"}
31 | },
32 | "Multiblog":{
33 | "Blogs":{
34 | "CompareWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
35 | "Type":"Standard"},
36 | "Comments":{
37 | "CompareWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
38 | "Type":"Standard"}
39 | },
40 | "MultiblogLong":{
41 | "Blogs":{
42 | "CompareWith":"org.apache.cassandra.db.marshal.LongType",
43 | "Type":"Standard"},
44 | "Comments":{
45 | "CompareWith":"org.apache.cassandra.db.marshal.LongType",
46 | "Type":"Standard"}
47 | },
48 | "TypeConversions":{
49 | "UUIDColumnConversion":{
50 | "CompareWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
51 | "Type":"Standard"},
52 | "SuperUUID":{
53 | "CompareSubcolumnsWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
54 | "CompareWith":"org.apache.cassandra.db.marshal.TimeUUIDType",
55 | "Type":"Super"}
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
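
To show how the column families defined above are addressed from Ruby, here is a minimal sketch (not taken from the repository) assuming a 0.6 node has been started with this configuration and is listening on the default Thrift port:

    require 'cassandra/0.6'   # version-specific entry point under lib/cassandra/
    require 'simple_uuid'

    twitter = Cassandra.new('Twitter', '127.0.0.1:9160')

    # Standard column family compared with UTF8Type
    twitter.insert(:Users, 'bob', { 'screen_name' => 'bob', 'location' => 'somewhere' })
    p twitter.get(:Users, 'bob')              # => ordered hash of the row's columns

    # Super column family whose subcolumns are compared with TimeUUIDType
    timeline_entry = { SimpleUUID::UUID.new => 'status-1' }
    twitter.insert(:UserRelationships, 'bob', { 'user_timeline' => timeline_entry })
    p twitter.get(:UserRelationships, 'bob', 'user_timeline')
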
/conf/0.6/storage-conf.xml:
--------------------------------------------------------------------------------
[storage-conf.xml: the XML markup was lost in extraction and only element values remain.
 Recoverable settings: ClusterName "Test"; AutoBootstrap false; the test keyspaces, each
 using RackUnawareStrategy with ReplicationFactor 1 and an EndPointSnitch; Authenticator
 AllowAllAuthenticator; Partitioner RandomPartitioner; commitlog, data, callouts, staging
 and saved_caches directories under data/; seed 127.0.0.1; RpcTimeoutInMillis 5000;
 CommitLogRotationThresholdInMB 128; ListenAddress localhost with StoragePort 7000;
 ThriftAddress localhost with ThriftPort 9160 and framed transport disabled;
 DiskAccessMode auto; CommitLogSync periodic every 10000 ms; GCGraceSeconds 864000.]
--------------------------------------------------------------------------------
/conf/0.7/cassandra.in.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | if [ "x$CASSANDRA_HOME" = "x" ]; then
18 | CASSANDRA_HOME=`dirname $0`/..
19 | fi
20 |
21 | # The directory where Cassandra's configs live (required)
22 | if [ "x$CASSANDRA_CONF" = "x" ]; then
23 | CASSANDRA_CONF=$CASSANDRA_HOME/conf
24 | fi
25 |
26 | # This can be the path to a jar file, or a directory containing the
27 | # compiled classes. NOTE: This isn't needed by the startup script,
28 | # it's just used here in constructing the classpath.
29 | cassandra_bin=$CASSANDRA_HOME/build/classes
30 | #cassandra_bin=$CASSANDRA_HOME/build/cassandra.jar
31 |
32 | # JAVA_HOME can optionally be set here
33 | #JAVA_HOME=/usr/local/jdk6
34 |
35 | # The java classpath (required)
36 | CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
37 |
38 | for jar in $CASSANDRA_HOME/lib/*.jar; do
39 | CLASSPATH=$CLASSPATH:$jar
40 | done
41 |
42 | # Arguments to pass to the JVM
43 | JVM_OPTS=" \
44 | -ea \
45 | -Xms128M \
46 | -Xmx1G"
47 |
--------------------------------------------------------------------------------
/conf/0.7/cassandra.yaml:
--------------------------------------------------------------------------------
1 | # Cassandra storage config YAML
2 |
3 | # NOTE:
4 | # See http://wiki.apache.org/cassandra/StorageConfiguration for
5 | # full explanations of configuration directives
6 | # /NOTE
7 |
8 | # The name of the cluster. This is mainly used to prevent machines in
9 | # one logical cluster from joining another.
10 | cluster_name: 'Test Cluster'
11 |
12 | # You should always specify InitialToken when setting up a production
13 | # cluster for the first time, and often when adding capacity later.
14 | # The principle is that each node should be given an equal slice of
15 | # the token ring; see http://wiki.apache.org/cassandra/Operations
16 | # for more details.
17 | #
18 | # If blank, Cassandra will request a token bisecting the range of
19 | # the heaviest-loaded existing node. If there is no load information
20 | # available, such as is the case with a new cluster, it will pick
21 | # a random token, which will lead to hot spots.
22 | initial_token:
23 |
24 | # Set to true to make new [non-seed] nodes automatically migrate data
25 | # to themselves from the pre-existing nodes in the cluster. Defaults
26 | # to false because you can only bootstrap N machines at a time from
27 | # an existing cluster of N, so if you are bringing up a cluster of
28 | # 10 machines with 3 seeds you would have to do it in stages. Leaving
29 | # this off for the initial start simplifies that.
30 | auto_bootstrap: false
31 |
32 | # See http://wiki.apache.org/cassandra/HintedHandoff
33 | hinted_handoff_enabled: true
34 | # this defines the maximum amount of time a dead host will have hints
35 | # generated. After it has been dead this long, hints will be dropped.
36 | max_hint_window_in_ms: 3600000 # one hour
37 | # Sleep this long after delivering each row or row fragment
38 | hinted_handoff_throttle_delay_in_ms: 50
39 |
40 | # authentication backend, implementing IAuthenticator; used to identify users
41 | authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
42 |
43 | # authorization backend, implementing IAuthority; used to limit access/provide permissions
44 | authority: org.apache.cassandra.auth.AllowAllAuthority
45 |
46 | # The partitioner is responsible for distributing rows (by key) across
47 | # nodes in the cluster. Any IPartitioner may be used, including your
48 | # own as long as it is on the classpath. Out of the box, Cassandra
49 | # provides org.apache.cassandra.dht.RandomPartitioner
50 | # org.apache.cassandra.dht.ByteOrderedPartitioner,
51 | # org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
52 | # and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
53 | # (deprecated).
54 | #
55 | # - RandomPartitioner distributes rows across the cluster evenly by md5.
56 | # When in doubt, this is the best option.
57 | # - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
58 | # scanning rows in key order, but the ordering can generate hot spots
59 | # for sequential insertion workloads.
60 | # - OrderPreservingPartitioner is an obsolete form of BOP, that stores
61 | #   keys in a less-efficient format and only works with keys that are
62 | #   UTF8-encoded Strings.
63 | # - CollatingOPP collates according to EN,US rules rather than lexical byte
64 | # ordering. Use this as an example if you need custom collation.
65 | #
66 | # See http://wiki.apache.org/cassandra/Operations for more on
67 | # partitioners and token selection.
68 | partitioner: org.apache.cassandra.dht.RandomPartitioner
69 |
70 | # directories where Cassandra should store data on disk.
71 | data_file_directories:
72 | - data/data
73 |
74 | # commit log
75 | commitlog_directory: data/commitlog
76 |
77 | # saved caches
78 | saved_caches_directory: data/saved_caches
79 |
80 | # Size to allow commitlog to grow to before creating a new segment
81 | commitlog_rotation_threshold_in_mb: 128
82 |
83 | # commitlog_sync may be either "periodic" or "batch."
84 | # When in batch mode, Cassandra won't ack writes until the commit log
85 | # has been fsynced to disk. It will wait up to
86 | # CommitLogSyncBatchWindowInMS milliseconds for other writes, before
87 | # performing the sync.
88 | commitlog_sync: periodic
89 |
90 | # the other option is "periodic" where writes may be acked immediately
91 | # and the CommitLog is simply synced every commitlog_sync_period_in_ms
92 | # milliseconds.
93 | commitlog_sync_period_in_ms: 10000
94 |
95 | # emergency pressure valve: each time heap usage after a full (CMS)
96 | # garbage collection is above this fraction of the max, Cassandra will
97 | # flush the largest memtables.
98 | #
99 | # Set to 1.0 to disable. Setting this lower than
100 | # CMSInitiatingOccupancyFraction is not likely to be useful.
101 | #
102 | # RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
103 | # it is most effective under light to moderate load, or read-heavy
104 | # workloads; under truly massive write load, it will often be too
105 | # little, too late.
106 | flush_largest_memtables_at: 0.75
107 |
108 | # emergency pressure valve #2: the first time heap usage after a full
109 | # (CMS) garbage collection is above this fraction of the max,
110 | # Cassandra will reduce cache maximum _capacity_ to the given fraction
111 | # of the current _size_. Should usually be set substantially above
112 | # flush_largest_memtables_at, since that will have less long-term
113 | # impact on the system.
114 | #
115 | # Set to 1.0 to disable. Setting this lower than
116 | # CMSInitiatingOccupancyFraction is not likely to be useful.
117 | reduce_cache_sizes_at: 0.85
118 | reduce_cache_capacity_to: 0.6
119 |
120 | # Addresses of hosts that are deemed contact points.
121 | # Cassandra nodes use this list of hosts to find each other and learn
122 | # the topology of the ring. You must change this if you are running
123 | # multiple nodes!
124 | seeds:
125 | - 127.0.0.1
126 |
127 | # Access mode. mmapped i/o is substantially faster, but only practical on
128 | # a 64bit machine (which notably does not include EC2 "small" instances)
129 | # or relatively small datasets. "auto", the safe choice, will enable
130 | # mmapping on a 64bit JVM. Other values are "mmap", "mmap_index_only"
131 | # (which may allow you to get part of the benefits of mmap on a 32bit
132 | # machine by mmapping only index files) and "standard".
133 | # (The buffer size settings that follow only apply to standard,
134 | # non-mmapped i/o.)
135 | disk_access_mode: auto
136 |
137 | # For workloads with more data than can fit in memory, Cassandra's
138 | # bottleneck will be reads that need to fetch data from
139 | # disk. "concurrent_reads" should be set to (16 * number_of_drives) in
140 | # order to allow the operations to enqueue low enough in the stack
141 | # that the OS and drives can reorder them.
142 | #
143 | # On the other hand, since writes are almost never IO bound, the ideal
144 | # number of "concurrent_writes" is dependent on the number of cores in
145 | # your system; (8 * number_of_cores) is a good rule of thumb.
146 | concurrent_reads: 32
147 | concurrent_writes: 32
148 |
149 | # This sets the number of memtable flush writer threads. These will
150 | # be blocked by disk io, and each one will hold a memtable in memory
151 | # while blocked. If you have a large heap and many data directories,
152 | # you can increase this value for better flush performance.
153 | # By default this will be set to the amount of data directories defined.
154 | #memtable_flush_writers: 1
155 |
156 | # Buffer size to use when performing contiguous column slices.
157 | # Increase this to the size of the column slices you typically perform
158 | sliced_buffer_size_in_kb: 64
159 |
160 | # TCP port, for commands and data
161 | storage_port: 7000
162 |
163 | # Address to bind to and tell other Cassandra nodes to connect to. You
164 | # _must_ change this if you want multiple nodes to be able to
165 | # communicate!
166 | #
167 | # Leaving it blank leaves it up to InetAddress.getLocalHost(). This
168 | # will always do the Right Thing *if* the node is properly configured
169 | # (hostname, name resolution, etc), and the Right Thing is to use the
170 | # address associated with the hostname (it might not be).
171 | #
172 | # Setting this to 0.0.0.0 is always wrong.
173 | listen_address: localhost
174 |
175 | # The address to bind the Thrift RPC service to -- clients connect
176 | # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
177 | # you want Thrift to listen on all interfaces.
178 | #
179 | # Leaving this blank has the same effect it does for ListenAddress,
180 | # (i.e. it will be based on the configured hostname of the node).
181 | rpc_address: localhost
182 | # port for Thrift to listen for clients on
183 | rpc_port: 9160
184 |
185 | # enable or disable keepalive on rpc connections
186 | rpc_keepalive: true
187 |
188 | # Cassandra uses thread-per-client for client RPC. This can
189 | # be expensive in memory used for thread stack for a large
190 | # enough number of clients. (Hence, connection pooling is
191 | # very, very strongly recommended.)
192 | #
193 | # Uncomment rpc_min|max|thread to set request pool size.
194 | # You would primarily set max as a safeguard against misbehaved
195 | # clients; if you do hit the max, Cassandra will block until
196 | # one disconnects before accepting more. The defaults are
197 | # min of 16 and max unlimited.
198 | #
199 | # rpc_min_threads: 16
200 | # rpc_max_threads: 2048
201 |
202 | # uncomment to set socket buffer sizes on rpc connections
203 | # rpc_send_buff_size_in_bytes:
204 | # rpc_recv_buff_size_in_bytes:
205 |
206 | # Frame size for thrift (maximum field length).
207 | # 0 disables TFramedTransport in favor of TSocket. This option
208 | # is deprecated; we strongly recommend using Framed mode.
209 | thrift_framed_transport_size_in_mb: 15
210 |
211 | # The max length of a thrift message, including all fields and
212 | # internal thrift overhead.
213 | thrift_max_message_length_in_mb: 16
214 |
215 | # Set to true to have Cassandra create a hard link to each sstable
216 | # flushed or streamed locally in a backups/ subdirectory of the
217 | # Keyspace data. Removing these links is the operator's
218 | # responsibility.
219 | incremental_backups: false
220 |
221 | # Whether or not to take a snapshot before each compaction. Be
222 | # careful using this option, since Cassandra won't clean up the
223 | # snapshots for you. Mostly useful if you're paranoid when there
224 | # is a data format change.
225 | snapshot_before_compaction: false
226 |
227 | # change this to increase the compaction thread's priority. In java, 1 is the
228 | # lowest priority and that is our default.
229 | # compaction_thread_priority: 1
230 |
231 | # Add column indexes to a row after its contents reach this size.
232 | # Increase if your column values are large, or if you have a very large
233 | # number of columns. The competing causes are, Cassandra has to
234 | # deserialize this much of the row to read a single column, so you want
235 | # it to be small - at least if you do many partial-row reads - but all
236 | # the index data is read for each access, so you don't want to generate
237 | # that wastefully either.
238 | column_index_size_in_kb: 64
239 |
240 | # Size limit for rows being compacted in memory. Larger rows will spill
241 | # over to disk and use a slower two-pass compaction process. A message
242 | # will be logged specifying the row key.
243 | in_memory_compaction_limit_in_mb: 64
244 |
245 | # Track cached row keys during compaction, and re-cache their new
246 | # positions in the compacted sstable. Disable if you use really large
247 | # key caches.
248 | compaction_preheat_key_cache: true
249 |
250 | # Time to wait for a reply from other nodes before failing the command
251 | rpc_timeout_in_ms: 10000
252 |
253 | # phi value that must be reached for a host to be marked down.
254 | # most users should never need to adjust this.
255 | # phi_convict_threshold: 8
256 |
257 | # endpoint_snitch -- Set this to a class that implements
258 | # IEndpointSnitch, which will let Cassandra know enough
259 | # about your network topology to route requests efficiently.
260 | # Out of the box, Cassandra provides
261 | # - org.apache.cassandra.locator.SimpleSnitch:
262 | # Treats Strategy order as proximity. This improves cache locality
263 | # when disabling read repair, which can further improve throughput.
264 | # - org.apache.cassandra.locator.RackInferringSnitch:
265 | # Proximity is determined by rack and data center, which are
266 | # assumed to correspond to the 3rd and 2nd octet of each node's
267 | # IP address, respectively
268 | # - org.apache.cassandra.locator.PropertyFileSnitch:
269 | #   Proximity is determined by rack and data center, which are
270 | #   explicitly configured in cassandra-topology.properties.
271 | endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
272 |
273 | # dynamic_snitch -- This boolean controls whether the above snitch is
274 | # wrapped with a dynamic snitch, which will monitor read latencies
275 | # and avoid reading from hosts that have slowed (due to compaction,
276 | # for instance)
277 | dynamic_snitch: true
278 | # controls how often to perform the more expensive part of host score
279 | # calculation
280 | dynamic_snitch_update_interval_in_ms: 100
281 | # controls how often to reset all host scores, allowing a bad host to
282 | # possibly recover
283 | dynamic_snitch_reset_interval_in_ms: 600000
284 | # if set greater than zero and read_repair_chance is < 1.0, this will allow
285 | # 'pinning' of replicas to hosts in order to increase cache capacity.
286 | # The badness threshold will control how much worse the pinned host has to be
287 | # before the dynamic snitch will prefer other replicas over it. This is
288 | # expressed as a double which represents a percentage. Thus, a value of
289 | # 0.2 means Cassandra would continue to prefer the static snitch values
290 | # until the pinned host was 20% worse than the fastest.
291 | dynamic_snitch_badness_threshold: 0.0
292 |
293 | # request_scheduler -- Set this to a class that implements
294 | # RequestScheduler, which will schedule incoming client requests
295 | # according to the specific policy. This is useful for multi-tenancy
296 | # with a single Cassandra cluster.
297 | # NOTE: This is specifically for requests from the client and does
298 | # not affect inter node communication.
299 | # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
300 | # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
301 | # client requests to a node with a separate queue for each
302 | # request_scheduler_id. The scheduler is further customized by
303 | # request_scheduler_options as described below.
304 | request_scheduler: org.apache.cassandra.scheduler.NoScheduler
305 |
306 | # Scheduler Options vary based on the type of scheduler
307 | # NoScheduler - Has no options
308 | # RoundRobin
309 | # - throttle_limit -- The throttle_limit is the number of in-flight
310 | # requests per client. Requests beyond
311 | # that limit are queued up until
312 | # running requests can complete.
313 | # The value of 80 here is twice the number of
314 | # concurrent_reads + concurrent_writes.
315 | # - default_weight -- default_weight is optional and allows for
316 | # overriding the default which is 1.
317 | # - weights -- Weights are optional and will default to 1 or the
318 | # overridden default_weight. The weight translates into how
319 | # many requests are handled during each turn of the
320 | # RoundRobin, based on the scheduler id.
321 | #
322 | # request_scheduler_options:
323 | # throttle_limit: 80
324 | # default_weight: 5
325 | # weights:
326 | # Keyspace1: 1
327 | # Keyspace2: 5
328 |
329 | # request_scheduler_id -- An identifier based on which to perform
330 | # the request scheduling. Currently the only valid option is keyspace.
331 | # request_scheduler_id: keyspace
332 |
333 | # The Index Interval determines how large the sampling of row keys
334 | # is for a given SSTable. The larger the sampling, the more effective
335 | # the index is at the cost of space.
336 | index_interval: 128
337 |
--------------------------------------------------------------------------------
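
On the client side, the settings above that matter most are rpc_address and rpc_port (what Cassandra.new connects to), the framed Thrift transport (matching thrift_framed_transport_size_in_mb > 0) and rpc_timeout_in_ms, which client timeouts should stay below. A hedged sketch follows; the options after the server list are passed through to thrift_client, and the exact option names are assumptions worth checking against that gem:

    require 'cassandra/0.7'

    # Options after the server list are forwarded to thrift_client.
    client = Cassandra.new('Twitter', '127.0.0.1:9160',
      :retries         => 3,   # retry a failed request against the server list
      :connect_timeout => 1,   # seconds allowed for the TCP connect
      :timeout         => 5)   # per-request timeout, kept well under rpc_timeout_in_ms

    p client.keyspaces         # keyspaces known to the node, e.g. ["Twitter", ...]
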
/conf/0.7/log4j-server.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # for production, you should probably set pattern to %c instead of %l.
18 | # (%l is slower.)
19 |
20 | # output messages into a rolling log file as well as stdout
21 | log4j.rootLogger=DEBUG,stdout,R
22 |
23 | # stdout
24 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
25 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
26 | log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
27 |
28 | # rolling log file
29 | log4j.appender.R=org.apache.log4j.RollingFileAppender
30 | log4j.appender.R.maxFileSize=20MB
31 | log4j.appender.R.maxBackupIndex=50
32 | log4j.appender.R.layout=org.apache.log4j.PatternLayout
33 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
34 | # Edit the next line to point to your logs directory
35 | log4j.appender.R.File=data/logs/system.log
36 |
37 | # Application logging options
38 | #log4j.logger.org.apache.cassandra=DEBUG
39 | #log4j.logger.org.apache.cassandra.db=DEBUG
40 | #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
41 |
42 |
--------------------------------------------------------------------------------
/conf/0.7/schema.json:
--------------------------------------------------------------------------------
1 | {"Twitter":{
2 | "Users":{
3 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
4 | "column_type":"Standard"},
5 | "UserAudits":{
6 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
7 | "column_type":"Standard"},
8 | "UserRelationships":{
9 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
10 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
11 | "column_type":"Super"},
12 | "Usernames":{
13 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
14 | "column_type":"Standard"},
15 | "Statuses":{
16 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
17 | "column_type":"Standard"},
18 | "StatusAudits":{
19 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
20 | "column_type":"Standard"},
21 | "StatusRelationships":{
22 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
23 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
24 | "column_type":"Super"},
25 | "Indexes":{
26 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
27 | "column_type":"Super"},
28 | "TimelinishThings":{
29 | "comparator_type":"org.apache.cassandra.db.marshal.BytesType",
30 | "column_type":"Standard"}
31 | },
32 | "Multiblog":{
33 | "Blogs":{
34 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
35 | "column_type":"Standard"},
36 | "Comments":{
37 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
38 | "column_type":"Standard"}
39 | },
40 | "MultiblogLong":{
41 | "Blogs":{
42 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
43 | "column_type":"Standard"},
44 | "Comments":{
45 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
46 | "column_type":"Standard"}
47 | },
48 | "TypeConversions":{
49 | "UUIDColumnConversion":{
50 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
51 | "column_type":"Standard"},
52 | "SuperUUID":{
53 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
54 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
55 | "column_type":"Super"}
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/conf/0.7/schema.txt:
--------------------------------------------------------------------------------
1 | create keyspace Twitter with
2 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
3 | replication_factor = 1;
4 | use Twitter;
5 | create column family Users with comparator = 'UTF8Type';
6 | create column family UserAudits with comparator = 'UTF8Type';
7 | create column family UserRelationships with
8 | comparator = 'UTF8Type' and
9 | column_type = 'Super' and
10 | subcomparator = 'TimeUUIDType';
11 | create column family Usernames with comparator = 'UTF8Type';
12 | create column family Statuses with comparator = 'UTF8Type';
13 | create column family StatusAudits with comparator = 'UTF8Type';
14 | create column family StatusRelationships with
15 | comparator = 'UTF8Type' and
16 | column_type = 'Super' and
17 | subcomparator = 'TimeUUIDType';
18 | create column family Indexes with
19 | comparator = 'UTF8Type' and
20 | column_type = 'Super';
21 | create column family TimelinishThings with
22 | comparator = 'BytesType';
23 |
24 | create keyspace Multiblog with
25 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
26 | replication_factor = 1;
27 | use Multiblog;
28 | create column family Blogs with comparator = 'TimeUUIDType';
29 | create column family Comments with comparator = 'TimeUUIDType';
30 |
31 |
32 | create keyspace MultiblogLong with
33 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
34 | replication_factor = 1;
35 | use MultiblogLong;
36 | create column family Blogs with comparator = 'LongType';
37 | create column family Comments with comparator = 'LongType';
38 |
39 | create keyspace TypeConversions with
40 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
41 | replication_factor = 1;
42 | use TypeConversions;
43 | create column family UUIDColumnConversion with comparator = TimeUUIDType;
44 | create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
45 |
46 |
--------------------------------------------------------------------------------
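
The same schema can also be created programmatically through the client's schema-management calls. This is a sketch, assuming the gem's Cassandra::Keyspace and Cassandra::ColumnFamily wrappers accept the underlying Thrift KsDef/CfDef fields shown here (field names vary slightly between Cassandra versions); the 'Scratch' and 'Drafts' names are purely illustrative.

    require 'cassandra/0.7'

    client = Cassandra.new('Twitter', '127.0.0.1:9160')

    # Add a brand-new keyspace (name is illustrative)
    client.add_keyspace(Cassandra::Keyspace.new(
      :name               => 'Scratch',
      :strategy_class     => 'org.apache.cassandra.locator.SimpleStrategy',
      :replication_factor => 1,
      :cf_defs            => []))

    # Add a column family to the keyspace the client is currently bound to
    client.add_column_family(Cassandra::ColumnFamily.new(
      :keyspace        => 'Twitter',
      :name            => 'Drafts',
      :comparator_type => 'UTF8Type'))
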
/conf/0.8/cassandra.in.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | if [ "x$CASSANDRA_HOME" = "x" ]; then
18 | CASSANDRA_HOME=`dirname $0`/..
19 | fi
20 |
21 | # The directory where Cassandra's configs live (required)
22 | if [ "x$CASSANDRA_CONF" = "x" ]; then
23 | CASSANDRA_CONF=$CASSANDRA_HOME/conf
24 | fi
25 |
26 | # This can be the path to a jar file, or a directory containing the
27 | # compiled classes. NOTE: This isn't needed by the startup script,
28 | # it's just used here in constructing the classpath.
29 | cassandra_bin=$CASSANDRA_HOME/build/classes/main
30 | cassandra_bin=$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift
31 | #cassandra_bin=$CASSANDRA_HOME/build/cassandra.jar
32 |
33 | # JAVA_HOME can optionally be set here
34 | #JAVA_HOME=/usr/local/jdk6
35 |
36 | # The java classpath (required)
37 | CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
38 |
39 | for jar in $CASSANDRA_HOME/lib/*.jar; do
40 | CLASSPATH=$CLASSPATH:$jar
41 | done
42 |
--------------------------------------------------------------------------------
/conf/0.8/cassandra.yaml:
--------------------------------------------------------------------------------
1 | # Cassandra storage config YAML
2 | cluster_name: 'Test'
3 | initial_token:
4 | auto_bootstrap: false
5 | hinted_handoff_enabled: true
6 | max_hint_window_in_ms: 3600000 # one hour
7 | hinted_handoff_throttle_delay_in_ms: 50
8 | authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
9 | authority: org.apache.cassandra.auth.AllowAllAuthority
10 | partitioner: org.apache.cassandra.dht.RandomPartitioner
11 |
12 | # directories where Cassandra should store data on disk.
13 | data_file_directories:
14 | - data/data
15 | commitlog_directory: data/commitlog
16 |
17 | # saved caches
18 | saved_caches_directory: data/saved_caches
19 |
20 | commitlog_rotation_threshold_in_mb: 128
21 | commitlog_sync: periodic
22 | commitlog_sync_period_in_ms: 10000
23 | seed_provider:
24 | - class_name: org.apache.cassandra.locator.SimpleSeedProvider
25 | parameters:
26 | - seeds: "127.0.0.1"
27 | flush_largest_memtables_at: 0.75
28 | reduce_cache_sizes_at: 0.85
29 | reduce_cache_capacity_to: 0.6
30 | concurrent_reads: 32
31 | concurrent_writes: 32
32 | memtable_flush_queue_size: 4
33 | sliced_buffer_size_in_kb: 64
34 | storage_port: 7000
35 | listen_address: localhost
36 | rpc_address: localhost
37 | rpc_port: 9160
38 | rpc_keepalive: true
39 | thrift_framed_transport_size_in_mb: 15
40 | thrift_max_message_length_in_mb: 16
41 | incremental_backups: false
42 | snapshot_before_compaction: false
43 | column_index_size_in_kb: 64
44 | in_memory_compaction_limit_in_mb: 64
45 | concurrent_compactors: 1
46 | compaction_throughput_mb_per_sec: 16
47 | compaction_preheat_key_cache: true
48 | rpc_timeout_in_ms: 10000
49 | endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
50 | dynamic_snitch: true
51 | dynamic_snitch_update_interval_in_ms: 100
52 | dynamic_snitch_reset_interval_in_ms: 600000
53 | dynamic_snitch_badness_threshold: 0.0
54 | request_scheduler: org.apache.cassandra.scheduler.NoScheduler
55 | index_interval: 128
56 | encryption_options:
57 | internode_encryption: none
58 | keystore: conf/.keystore
59 | keystore_password: cassandra
60 | truststore: conf/.truststore
61 | truststore_password: cassandra
62 |
--------------------------------------------------------------------------------
/conf/0.8/log4j-server.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # for production, you should probably set pattern to %c instead of %l.
18 | # (%l is slower.)
19 |
20 | # output messages into a rolling log file as well as stdout
21 | log4j.rootLogger=INFO,stdout,R
22 |
23 | # stdout
24 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
25 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
26 | log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
27 |
28 | # rolling log file
29 | log4j.appender.R=org.apache.log4j.RollingFileAppender
30 | log4j.appender.R.maxFileSize=20MB
31 | log4j.appender.R.maxBackupIndex=50
32 | log4j.appender.R.layout=org.apache.log4j.PatternLayout
33 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
34 | # Edit the next line to point to your logs directory
35 | log4j.appender.R.File=data/logs/system.log
36 |
37 | # Application logging options
38 | #log4j.logger.org.apache.cassandra=DEBUG
39 | #log4j.logger.org.apache.cassandra.db=DEBUG
40 | #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
41 |
--------------------------------------------------------------------------------
/conf/0.8/schema.json:
--------------------------------------------------------------------------------
1 | {"Twitter":{
2 | "Users":{
3 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
4 | "column_type":"Standard"},
5 | "UserAudits":{
6 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
7 | "column_type":"Standard"},
8 | "UserCounters":{
9 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
10 | "column_type":"Standard",
11 | "default_validation_class":"CounterColumnType"},
12 | "UserCounterAggregates":{
13 | "subcomparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
14 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
15 | "column_type":"Super",
16 | "default_validation_class":"CounterColumnType"},
17 | "UserRelationships":{
18 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
19 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
20 | "column_type":"Super"},
21 | "Usernames":{
22 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
23 | "column_type":"Standard"},
24 | "Statuses":{
25 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
26 | "column_type":"Standard"},
27 | "StatusAudits":{
28 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
29 | "column_type":"Standard"},
30 | "StatusRelationships":{
31 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
32 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
33 | "column_type":"Super"},
34 | "Indexes":{
35 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
36 | "column_type":"Super"},
37 | "TimelinishThings":{
38 | "comparator_type":"org.apache.cassandra.db.marshal.BytesType",
39 | "column_type":"Standard"}
40 | },
41 | "Multiblog":{
42 | "Blogs":{
43 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
44 | "column_type":"Standard"},
45 | "Comments":{
46 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
47 | "column_type":"Standard"}
48 | },
49 | "MultiblogLong":{
50 | "Blogs":{
51 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
52 | "column_type":"Standard"},
53 | "Comments":{
54 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
55 | "column_type":"Standard"}
56 | },
57 | "TypeConversions":{
58 | "UUIDColumnConversion":{
59 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
60 | "column_type":"Standard"},
61 | "SuperUUID":{
62 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
63 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
64 | "column_type":"Super"},
65 | "CompositeColumnConversion":{
66 | "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
67 | "column_type":"Standard"},
68 | "DynamicComposite":{
69 | "comparator_type":"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType,i=>org.apache.cassandra.db.marshal.IntegerType)",
70 | "column_type":"Standard"}
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/conf/0.8/schema.txt:
--------------------------------------------------------------------------------
1 | create keyspace Twitter with
2 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
3 | strategy_options = [{replication_factor:1}];
4 | use Twitter;
5 | create column family Users with comparator = 'UTF8Type';
6 | create column family UserAudits with comparator = 'UTF8Type';
7 | create column family UserCounters with comparator = 'UTF8Type' and
8 | default_validation_class = CounterColumnType;
9 | create column family UserCounterAggregates with column_type = 'Super'
10 | and comparator = 'UTF8Type' and
11 | subcomparator = 'UTF8Type' and
12 | default_validation_class = CounterColumnType;
13 | create column family UserRelationships with
14 | comparator = 'UTF8Type' and
15 | column_type = 'Super' and
16 | subcomparator = 'TimeUUIDType';
17 | create column family Usernames with comparator = 'UTF8Type';
18 | create column family Statuses
19 | with comparator = 'UTF8Type'
20 | and column_metadata = [
21 | {column_name: 'tags', validation_class: 'BytesType', index_type: 'KEYS'}
22 | ];
23 | create column family StatusAudits with comparator = 'UTF8Type';
24 | create column family StatusRelationships with
25 | comparator = 'UTF8Type' and
26 | column_type = 'Super' and
27 | subcomparator = 'TimeUUIDType';
28 | create column family Indexes with
29 | comparator = 'UTF8Type' and
30 | column_type = 'Super';
31 | create column family TimelinishThings with
32 | comparator = 'BytesType';
33 |
34 | create keyspace Multiblog with
35 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
36 | strategy_options = [{replication_factor:1}];
37 | use Multiblog;
38 | create column family Blogs with comparator = 'TimeUUIDType';
39 | create column family Comments with comparator = 'TimeUUIDType';
40 |
41 |
42 | create keyspace MultiblogLong with
43 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
44 | strategy_options = [{replication_factor:1}];
45 | use MultiblogLong;
46 | create column family Blogs with comparator = 'LongType';
47 | create column family Comments with comparator = 'LongType';
48 |
49 | create keyspace TypeConversions with
50 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
51 | strategy_options = [{replication_factor:1}];
52 | use TypeConversions;
53 | create column family UUIDColumnConversion with comparator = TimeUUIDType;
54 | create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
55 | create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
56 | create column family DynamicComposite with comparator = 'DynamicCompositeType
57 | (a=>AsciiType,b=>BytesType,i=>IntegerType,x=>LexicalUUIDType,l=>LongType,t=>TimeUUIDType,s=>UTF8Type,u=>UUIDType)';
58 |
--------------------------------------------------------------------------------
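
The counter families introduced for 0.8 above (UserCounters and UserCounterAggregates, with default_validation_class CounterColumnType) are driven through the client's add call rather than insert. A minimal sketch against a node loaded with this schema:

    require 'cassandra/0.8'

    twitter = Cassandra.new('Twitter', '127.0.0.1:9160')

    twitter.add(:UserCounters, 'bob', 5, 'tweet_count')   # increment by 5
    twitter.add(:UserCounters, 'bob', 1, 'tweet_count')   # increment by 1

    p twitter.get(:UserCounters, 'bob', 'tweet_count')    # => 6
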
/conf/1.0/cassandra.in.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | if [ "x$CASSANDRA_HOME" = "x" ]; then
18 | CASSANDRA_HOME=`dirname $0`/..
19 | fi
20 |
21 | # The directory where Cassandra's configs live (required)
22 | if [ "x$CASSANDRA_CONF" = "x" ]; then
23 | CASSANDRA_CONF=$CASSANDRA_HOME/conf
24 | fi
25 |
26 | # This can be the path to a jar file, or a directory containing the
27 | # compiled classes. NOTE: This isn't needed by the startup script,
28 | # it's just used here in constructing the classpath.
29 | cassandra_bin=$CASSANDRA_HOME/build/classes/main
30 | cassandra_bin=$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift
31 | #cassandra_bin=$CASSANDRA_HOME/build/cassandra.jar
32 |
33 | # JAVA_HOME can optionally be set here
34 | #JAVA_HOME=/usr/local/jdk6
35 |
36 | # The java classpath (required)
37 | CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
38 |
39 | for jar in $CASSANDRA_HOME/lib/*.jar; do
40 | CLASSPATH=$CLASSPATH:$jar
41 | done
42 |
--------------------------------------------------------------------------------
/conf/1.0/log4j-server.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # for production, you should probably set pattern to %c instead of %l.
18 | # (%l is slower.)
19 |
20 | # output messages into a rolling log file as well as stdout
21 | log4j.rootLogger=INFO,stdout,R
22 |
23 | # stdout
24 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
25 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
26 | log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
27 |
28 | # rolling log file
29 | log4j.appender.R=org.apache.log4j.RollingFileAppender
30 | log4j.appender.R.maxFileSize=20MB
31 | log4j.appender.R.maxBackupIndex=50
32 | log4j.appender.R.layout=org.apache.log4j.PatternLayout
33 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
34 | # Edit the next line to point to your logs directory
35 | log4j.appender.R.File=data/logs/system.log
36 |
37 | # Application logging options
38 | #log4j.logger.org.apache.cassandra=DEBUG
39 | #log4j.logger.org.apache.cassandra.db=DEBUG
40 | #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
41 |
--------------------------------------------------------------------------------
/conf/1.0/schema.json:
--------------------------------------------------------------------------------
1 | {"Twitter":{
2 | "Users":{
3 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
4 | "column_type":"Standard"},
5 | "UserAudits":{
6 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
7 | "column_type":"Standard"},
8 | "UserCounters":{
9 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
10 | "column_type":"Standard",
11 | "default_validation_class":"CounterColumnType"},
12 | "UserCounterAggregates":{
13 | "subcomparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
14 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
15 | "column_type":"Super",
16 | "default_validation_class":"CounterColumnType"},
17 | "UserRelationships":{
18 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
19 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
20 | "column_type":"Super"},
21 | "Usernames":{
22 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
23 | "column_type":"Standard"},
24 | "Statuses":{
25 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
26 | "column_type":"Standard"},
27 | "StatusAudits":{
28 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
29 | "column_type":"Standard"},
30 | "StatusRelationships":{
31 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
32 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
33 | "column_type":"Super"},
34 | "Indexes":{
35 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
36 | "column_type":"Super"},
37 | "TimelinishThings":{
38 | "comparator_type":"org.apache.cassandra.db.marshal.BytesType",
39 | "column_type":"Standard"}
40 | },
41 | "Multiblog":{
42 | "Blogs":{
43 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
44 | "column_type":"Standard"},
45 | "Comments":{
46 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
47 | "column_type":"Standard"}
48 | },
49 | "MultiblogLong":{
50 | "Blogs":{
51 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
52 | "column_type":"Standard"},
53 | "Comments":{
54 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
55 | "column_type":"Standard"}
56 | },
57 | "TypeConversions":{
58 | "UUIDColumnConversion":{
59 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
60 | "column_type":"Standard"},
61 | "SuperUUID":{
62 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
63 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
64 | "column_type":"Super"},
65 | "CompositeColumnConversion":{
66 | "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
67 | "column_type":"Standard"},
68 | "DynamicComposite":{
69 | "comparator_type":"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType,i=>org.apache.cassandra.db.marshal.IntegerType)",
70 | "column_type":"Standard"}
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/conf/1.0/schema.txt:
--------------------------------------------------------------------------------
1 | create keyspace Twitter with
2 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
3 | strategy_options = {replication_factor:1};
4 | use Twitter;
5 | create column family Users with comparator = 'UTF8Type';
6 | create column family UserAudits with comparator = 'UTF8Type';
7 | create column family UserCounters with comparator = 'UTF8Type' and
8 | default_validation_class = CounterColumnType;
9 | create column family UserCounterAggregates with column_type = 'Super'
10 | and comparator = 'UTF8Type' and
11 | subcomparator = 'UTF8Type' and
12 | default_validation_class = CounterColumnType;
13 | create column family UserRelationships with
14 | comparator = 'UTF8Type' and
15 | column_type = 'Super' and
16 | subcomparator = 'TimeUUIDType';
17 | create column family Usernames with comparator = 'UTF8Type';
18 | create column family Statuses
19 | with comparator = 'UTF8Type'
20 | and column_metadata = [
21 | {column_name: 'tags', validation_class: 'BytesType', index_type: 'KEYS'}
22 | ];
23 | create column family StatusAudits with comparator = 'UTF8Type';
24 | create column family StatusRelationships with
25 | comparator = 'UTF8Type' and
26 | column_type = 'Super' and
27 | subcomparator = 'TimeUUIDType';
28 | create column family Indexes with
29 | comparator = 'UTF8Type' and
30 | column_type = 'Super';
31 | create column family TimelinishThings with
32 | comparator = 'BytesType';
33 |
34 | create keyspace Multiblog with
35 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
36 | strategy_options = {replication_factor:1};
37 | use Multiblog;
38 | create column family Blogs with comparator = 'TimeUUIDType';
39 | create column family Comments with comparator = 'TimeUUIDType';
40 |
41 |
42 | create keyspace MultiblogLong with
43 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
44 | strategy_options = {replication_factor:1};
45 | use MultiblogLong;
46 | create column family Blogs with comparator = 'LongType';
47 | create column family Comments with comparator = 'LongType';
48 |
49 | create keyspace TypeConversions with
50 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
51 | strategy_options = {replication_factor:1};
52 | use TypeConversions;
53 | create column family UUIDColumnConversion with comparator = TimeUUIDType;
54 | create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
55 | create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
56 | create column family DynamicComposite with comparator = 'DynamicCompositeType
57 | (a=>AsciiType,b=>BytesType,i=>IntegerType,x=>LexicalUUIDType,l=>LongType,t=>TimeUUIDType,s=>UTF8Type,u=>UUIDType)';
58 |
--------------------------------------------------------------------------------
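
The CompositeColumnConversion family above declares a CompositeType(IntegerType, UTF8Type) comparator; on the Ruby side such column names are built with Cassandra::Composite. A sketch, with the integer component packed big-endian (an assumption about how the IntegerType part should be encoded):

    require 'cassandra/1.0'

    client = Cassandra.new('TypeConversions', '127.0.0.1:9160')

    # Two-part composite column name: a packed integer followed by a UTF8 string
    name = Cassandra::Composite.new([1].pack('N'), 'elephant')
    client.insert(:CompositeColumnConversion, 'zoo', { name => 'grey' })

    p client.get(:CompositeColumnConversion, 'zoo')   # keys come back as Composite values
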
/conf/1.1/cassandra.in.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | if [ "x$CASSANDRA_HOME" = "x" ]; then
18 | CASSANDRA_HOME=`dirname $0`/..
19 | fi
20 |
21 | # The directory where Cassandra's configs live (required)
22 | if [ "x$CASSANDRA_CONF" = "x" ]; then
23 | CASSANDRA_CONF=$CASSANDRA_HOME/conf
24 | fi
25 |
26 | # This can be the path to a jar file, or a directory containing the
27 | # compiled classes. NOTE: This isn't needed by the startup script,
28 | # it's just used here in constructing the classpath.
29 | cassandra_bin=$CASSANDRA_HOME/build/classes/main
30 | cassandra_bin=$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift
31 | #cassandra_bin=$CASSANDRA_HOME/build/cassandra.jar
32 |
33 | # JAVA_HOME can optionally be set here
34 | #JAVA_HOME=/usr/local/jdk6
35 |
36 | # The java classpath (required)
37 | CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
38 |
39 | for jar in $CASSANDRA_HOME/lib/*.jar; do
40 | CLASSPATH=$CLASSPATH:$jar
41 | done
42 |
--------------------------------------------------------------------------------
/conf/1.1/log4j-server.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # for production, you should probably set pattern to %c instead of %l.
18 | # (%l is slower.)
19 |
20 | # output messages into a rolling log file as well as stdout
21 | log4j.rootLogger=INFO,stdout,R
22 |
23 | # stdout
24 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
25 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
26 | log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
27 |
28 | # rolling log file
29 | log4j.appender.R=org.apache.log4j.RollingFileAppender
30 | log4j.appender.R.maxFileSize=20MB
31 | log4j.appender.R.maxBackupIndex=50
32 | log4j.appender.R.layout=org.apache.log4j.PatternLayout
33 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
34 | # Edit the next line to point to your logs directory
35 | log4j.appender.R.File=/var/log/cassandra/system.log
36 |
37 | # Application logging options
38 | #log4j.logger.org.apache.cassandra=DEBUG
39 | #log4j.logger.org.apache.cassandra.db=DEBUG
40 | #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
41 |
42 | # Adding this to avoid thrift logging disconnect errors.
43 | log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
44 |
45 |
--------------------------------------------------------------------------------
/conf/1.1/schema.json:
--------------------------------------------------------------------------------
1 | {"Twitter":{
2 | "Users":{
3 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
4 | "column_type":"Standard"},
5 | "UserAudits":{
6 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
7 | "column_type":"Standard"},
8 | "UserCounters":{
9 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
10 | "column_type":"Standard",
11 | "default_validation_class":"CounterColumnType"},
12 | "UserCounterAggregates":{
13 | "subcomparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
14 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
15 | "column_type":"Super",
16 | "default_validation_class":"CounterColumnType"},
17 | "UserRelationships":{
18 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
19 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
20 | "column_type":"Super"},
21 | "Usernames":{
22 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
23 | "column_type":"Standard"},
24 | "Statuses":{
25 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
26 | "column_type":"Standard"},
27 | "StatusAudits":{
28 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
29 | "column_type":"Standard"},
30 | "StatusRelationships":{
31 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
32 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
33 | "column_type":"Super"},
34 | "Indexes":{
35 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
36 | "column_type":"Super"},
37 | "TimelinishThings":{
38 | "comparator_type":"org.apache.cassandra.db.marshal.BytesType",
39 | "column_type":"Standard"}
40 | },
41 | "Multiblog":{
42 | "Blogs":{
43 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
44 | "column_type":"Standard"},
45 | "Comments":{
46 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
47 | "column_type":"Standard"}
48 | },
49 | "MultiblogLong":{
50 | "Blogs":{
51 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
52 | "column_type":"Standard"},
53 | "Comments":{
54 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
55 | "column_type":"Standard"}
56 | },
57 | "TypeConversions":{
58 | "UUIDColumnConversion":{
59 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
60 | "column_type":"Standard"},
61 | "SuperUUID":{
62 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
63 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
64 | "column_type":"Super"},
65 | "CompositeColumnConversion":{
66 | "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
67 | "column_type":"Standard"},
68 | "DynamicComposite":{
69 | "comparator_type":"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType,i=>org.apache.cassandra.db.marshal.IntegerType)",
70 | "column_type":"Standard"}
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/conf/1.1/schema.txt:
--------------------------------------------------------------------------------
1 | create keyspace Twitter with
2 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
3 | strategy_options = {replication_factor:1};
4 | use Twitter;
5 | create column family Users with comparator = 'UTF8Type';
6 | create column family UserAudits with comparator = 'UTF8Type';
7 | create column family UserCounters with comparator = 'UTF8Type' and
8 | default_validation_class = CounterColumnType;
9 | create column family UserCounterAggregates with column_type = 'Super'
10 | and comparator = 'UTF8Type' and
11 | subcomparator = 'UTF8Type' and
12 | default_validation_class = CounterColumnType;
13 | create column family UserRelationships with
14 | comparator = 'UTF8Type' and
15 | column_type = 'Super' and
16 | subcomparator = 'TimeUUIDType';
17 | create column family Usernames with comparator = 'UTF8Type';
18 | create column family Statuses
19 | with comparator = 'UTF8Type'
20 | and column_metadata = [
21 | {column_name: 'tags', validation_class: 'BytesType', index_type: 'KEYS'}
22 | ];
23 | create column family StatusAudits with comparator = 'UTF8Type';
24 | create column family StatusRelationships with
25 | comparator = 'UTF8Type' and
26 | column_type = 'Super' and
27 | subcomparator = 'TimeUUIDType';
28 | create column family Indexes with
29 | comparator = 'UTF8Type' and
30 | column_type = 'Super';
31 | create column family TimelinishThings with
32 | comparator = 'BytesType';
33 |
34 | create keyspace Multiblog with
35 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
36 | strategy_options = {replication_factor:1};
37 | use Multiblog;
38 | create column family Blogs with comparator = 'TimeUUIDType';
39 | create column family Comments with comparator = 'TimeUUIDType';
40 |
41 |
42 | create keyspace MultiblogLong with
43 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
44 | strategy_options = {replication_factor:1};
45 | use MultiblogLong;
46 | create column family Blogs with comparator = 'LongType';
47 | create column family Comments with comparator = 'LongType';
48 |
49 | create keyspace TypeConversions with
50 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
51 | strategy_options = {replication_factor:1};
52 | use TypeConversions;
53 | create column family UUIDColumnConversion with comparator = TimeUUIDType;
54 | create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
55 | create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
56 | create column family DynamicComposite with comparator ='DynamicCompositeType
57 | (a=>AsciiType,b=>BytesType,i=>IntegerType,x=>LexicalUUIDType,l=>LongType,t=>TimeUUIDType,s=>UTF8Type,u=>UUIDType)';
58 |
--------------------------------------------------------------------------------
/conf/1.2/cassandra.in.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | if [ "x$CASSANDRA_HOME" = "x" ]; then
18 | CASSANDRA_HOME=`dirname $0`/..
19 | fi
20 |
21 | # The directory where Cassandra's configs live (required)
22 | if [ "x$CASSANDRA_CONF" = "x" ]; then
23 | CASSANDRA_CONF=$CASSANDRA_HOME/conf
24 | fi
25 |
26 | # This can be the path to a jar file, or a directory containing the
27 | # compiled classes. NOTE: This isn't needed by the startup script,
28 | # it's just used here in constructing the classpath.
29 | cassandra_bin=$CASSANDRA_HOME/build/classes/main
30 | cassandra_bin=$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift
31 | #cassandra_bin=$cassandra_home/build/cassandra.jar
32 |
33 | # JAVA_HOME can optionally be set here
34 | #JAVA_HOME=/usr/local/jdk6
35 |
36 | # The java classpath (required)
37 | CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
38 |
39 | for jar in $CASSANDRA_HOME/lib/*.jar; do
40 | CLASSPATH=$CLASSPATH:$jar
41 | done
42 |
--------------------------------------------------------------------------------
/conf/1.2/log4j-server.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # for production, you should probably set pattern to %c instead of %l.
18 | # (%l is slower.)
19 |
20 | # output messages into a rolling log file as well as stdout
21 | log4j.rootLogger=INFO,stdout,R
22 |
23 | # stdout
24 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender
25 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
26 | log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
27 |
28 | # rolling log file
29 | log4j.appender.R=org.apache.log4j.RollingFileAppender
30 | log4j.appender.R.maxFileSize=20MB
31 | log4j.appender.R.maxBackupIndex=50
32 | log4j.appender.R.layout=org.apache.log4j.PatternLayout
33 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
34 | # Edit the next line to point to your logs directory
35 | log4j.appender.R.File=/var/log/cassandra/system.log
36 |
37 | # Application logging options
38 | #log4j.logger.org.apache.cassandra=DEBUG
39 | #log4j.logger.org.apache.cassandra.db=DEBUG
40 | #log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
41 |
42 | # Adding this to avoid thrift logging disconnect errors.
43 | log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
44 |
45 |
--------------------------------------------------------------------------------
/conf/1.2/schema.json:
--------------------------------------------------------------------------------
1 | {"Twitter":{
2 | "Users":{
3 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
4 | "column_type":"Standard"},
5 | "UserAudits":{
6 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
7 | "column_type":"Standard"},
8 | "UserCounters":{
9 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
10 | "column_type":"Standard",
11 | "default_validation_class":"CounterColumnType"},
12 | "UserCounterAggregates":{
13 | "subcomparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
14 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
15 | "column_type":"Super",
16 | "default_validation_class":"CounterColumnType"},
17 | "UserRelationships":{
18 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
19 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
20 | "column_type":"Super"},
21 | "Usernames":{
22 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
23 | "column_type":"Standard"},
24 | "Statuses":{
25 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
26 | "column_type":"Standard"},
27 | "StatusAudits":{
28 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
29 | "column_type":"Standard"},
30 | "StatusRelationships":{
31 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
32 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
33 | "column_type":"Super"},
34 | "Indexes":{
35 | "comparator_type":"org.apache.cassandra.db.marshal.UTF8Type",
36 | "column_type":"Super"},
37 | "TimelinishThings":{
38 | "comparator_type":"org.apache.cassandra.db.marshal.BytesType",
39 | "column_type":"Standard"}
40 | },
41 | "Multiblog":{
42 | "Blogs":{
43 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
44 | "column_type":"Standard"},
45 | "Comments":{
46 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
47 | "column_type":"Standard"}
48 | },
49 | "MultiblogLong":{
50 | "Blogs":{
51 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
52 | "column_type":"Standard"},
53 | "Comments":{
54 | "comparator_type":"org.apache.cassandra.db.marshal.LongType",
55 | "column_type":"Standard"}
56 | },
57 | "TypeConversions":{
58 | "UUIDColumnConversion":{
59 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
60 | "column_type":"Standard"},
61 | "SuperUUID":{
62 | "subcomparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
63 | "comparator_type":"org.apache.cassandra.db.marshal.TimeUUIDType",
64 | "column_type":"Super"},
65 | "CompositeColumnConversion":{
66 | "comparator_type":"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.IntegerType,org.apache.cassandra.db.marshal.UTF8Type)",
67 | "column_type":"Standard"},
68 | "DynamicComposite":{
69 | "comparator_type":"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,s=>org.apache.cassandra.db.marshal.UTF8Type,b=>org.apache.cassandra.db.marshal.BytesType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType,i=>org.apache.cassandra.db.marshal.IntegerType)",
70 | "column_type":"Standard"}
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/conf/1.2/schema.txt:
--------------------------------------------------------------------------------
1 | create keyspace Twitter with
2 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
3 | strategy_options = {replication_factor:1};
4 | use Twitter;
5 | create column family Users with comparator = 'UTF8Type';
6 | create column family UserAudits with comparator = 'UTF8Type';
7 | create column family UserCounters with comparator = 'UTF8Type' and
8 | default_validation_class = CounterColumnType;
9 | create column family UserCounterAggregates with column_type = 'Super'
10 | and comparator = 'UTF8Type' and
11 | subcomparator = 'UTF8Type' and
12 | default_validation_class = CounterColumnType;
13 | create column family UserRelationships with
14 | comparator = 'UTF8Type' and
15 | column_type = 'Super' and
16 | subcomparator = 'TimeUUIDType';
17 | create column family Usernames with comparator = 'UTF8Type';
18 | create column family Statuses
19 | with comparator = 'UTF8Type'
20 | and column_metadata = [
21 | {column_name: 'tags', validation_class: 'BytesType', index_type: 'KEYS'}
22 | ];
23 | create column family StatusAudits with comparator = 'UTF8Type';
24 | create column family StatusRelationships with
25 | comparator = 'UTF8Type' and
26 | column_type = 'Super' and
27 | subcomparator = 'TimeUUIDType';
28 | create column family Indexes with
29 | comparator = 'UTF8Type' and
30 | column_type = 'Super';
31 | create column family TimelinishThings with
32 | comparator = 'BytesType';
33 |
34 | create keyspace Multiblog with
35 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
36 | strategy_options = {replication_factor:1};
37 | use Multiblog;
38 | create column family Blogs with comparator = 'TimeUUIDType';
39 | create column family Comments with comparator = 'TimeUUIDType';
40 |
41 |
42 | create keyspace MultiblogLong with
43 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
44 | strategy_options = {replication_factor:1};
45 | use MultiblogLong;
46 | create column family Blogs with comparator = 'LongType';
47 | create column family Comments with comparator = 'LongType';
48 |
49 | create keyspace TypeConversions with
50 | placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' AND
51 | strategy_options = {replication_factor:1};
52 | use TypeConversions;
53 | create column family UUIDColumnConversion with comparator = TimeUUIDType;
54 | create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
55 | create column family CompositeColumnConversion with comparator = 'CompositeType(IntegerType, UTF8Type)';
56 | create column family DynamicComposite with comparator ='DynamicCompositeType
57 | (a=>AsciiType,b=>BytesType,i=>IntegerType,x=>LexicalUUIDType,l=>LongType,t=>TimeUUIDType,s=>UTF8Type,u=>UUIDType)';
58 |
--------------------------------------------------------------------------------
/ext/cassandra_native.c:
--------------------------------------------------------------------------------
1 | #include <ruby.h>
2 | #include <arpa/inet.h>
3 |
4 | VALUE parts_ivar_id, types_ivar_id, hash_ivar_id;
5 |
6 | VALUE rb_cassandra_composite_fast_unpack(VALUE self, VALUE packed_string_value) {
7 | int index = 0;
8 | int message_length = RSTRING_LEN(packed_string_value);
9 | char *packed_string = (char *)RSTRING_PTR(packed_string_value);
10 |
11 | VALUE parts = rb_ary_new();
12 | while (index < message_length) {
13 | uint16_t length = ntohs(((uint16_t *)(packed_string+index))[0]);
14 | VALUE part = rb_str_new(packed_string+index+2, length);
15 | rb_ary_push(parts, part);
16 | index += length + 3;
17 | }
18 |
19 | rb_ivar_set(self, parts_ivar_id, parts);
20 | rb_ivar_set(self, hash_ivar_id, rb_funcall(packed_string_value, rb_intern("hash"), 0));
21 |
22 | return Qnil;
23 | }
24 |
25 | VALUE rb_cassandra_dynamic_composite_fast_unpack(VALUE self, VALUE packed_string_value) {
26 | int index = 0;
27 | int message_length = RSTRING_LEN(packed_string_value);
28 | char *packed_string = (char *)RSTRING_PTR(packed_string_value);
29 | uint16_t length;
30 |
31 | VALUE parts = rb_ary_new();
32 | VALUE types = rb_ary_new();
33 | while (index < message_length) {
34 | if (packed_string[index] & 0x80) {
35 | VALUE type = rb_str_new(packed_string + index + 1, 1);
36 | rb_ary_push(types, type);
37 | index += 2;
38 | } else {
39 | length = ntohs(((uint16_t *)(packed_string+index))[0]);
40 | VALUE type = rb_str_new(packed_string + index + 2, length);
41 | rb_ary_push(types, type);
42 | index += 2 + length;
43 | }
44 |
45 | length = ntohs(((uint16_t *)(packed_string+index))[0]);
46 | VALUE part = rb_str_new(packed_string + index + 2, length);
47 | rb_ary_push(parts, part);
48 | index += length + 3;
49 | }
50 |
51 | rb_ivar_set(self, parts_ivar_id, parts);
52 | rb_ivar_set(self, types_ivar_id, types);
53 | rb_ivar_set(self, hash_ivar_id, rb_funcall(packed_string_value, rb_intern("hash"), 0));
54 |
55 | return Qnil;
56 | }
57 |
58 | void Init_cassandra_native(void) {
59 | VALUE cassandra_module = rb_const_get(rb_cObject, rb_intern("Cassandra"));
60 | VALUE cassandra_composite_class = rb_define_class_under(cassandra_module, "Composite", rb_cObject);
61 | rb_define_method(cassandra_composite_class, "fast_unpack", rb_cassandra_composite_fast_unpack, 1);
62 |
63 | VALUE dynamic_composite = rb_const_get(cassandra_module, rb_intern("DynamicComposite"));
64 | rb_define_method(dynamic_composite, "fast_unpack", rb_cassandra_dynamic_composite_fast_unpack, 1);
65 |
66 | parts_ivar_id = rb_intern("@parts");
67 | types_ivar_id = rb_intern("@types");
68 | hash_ivar_id = rb_intern("@hash");
69 | }
70 |
--------------------------------------------------------------------------------
/ext/extconf.rb:
--------------------------------------------------------------------------------
1 | if (defined?(RUBY_ENGINE) && RUBY_ENGINE =~ /jruby/) or ENV['OS'] == "Windows_NT"
2 | File.open('Makefile', 'w'){|f| f.puts "all:\n\ninstall:\n" }
3 | else
4 | require 'mkmf'
5 |
6 | $CFLAGS = "-g -O2 -Wall -Werror"
7 |
8 | create_makefile 'cassandra_native'
9 | end
10 |
--------------------------------------------------------------------------------
/lib/.gitignore:
--------------------------------------------------------------------------------
1 | *.bundle
2 | *.so
3 |
--------------------------------------------------------------------------------
/lib/cassandra.rb:
--------------------------------------------------------------------------------
1 | require 'rubygems'
2 | gem 'thrift_client', '~> 0.7'
3 | require 'thrift_client'
4 | gem 'simple_uuid' , '~> 0.3'
5 | require 'simple_uuid'
6 |
7 | require 'json' unless defined?(JSON)
8 |
9 | here = File.expand_path(File.dirname(__FILE__))
10 |
11 | class Cassandra ; end
12 | unless Cassandra.respond_to?(:VERSION)
13 | require "#{here}/cassandra/0.8"
14 | end
15 |
16 | $LOAD_PATH << "#{here}/../vendor/#{Cassandra.VERSION}/gen-rb"
17 | require "#{here}/../vendor/#{Cassandra.VERSION}/gen-rb/cassandra"
18 |
19 | $LOAD_PATH << "#{here}"
20 |
21 | require 'cassandra/helpers'
22 | require 'cassandra/array'
23 | require 'cassandra/time'
24 | require 'cassandra/comparable'
25 | require 'cassandra/long'
26 | require 'cassandra/composite'
27 | require 'cassandra/dynamic_composite'
28 | require 'cassandra/ordered_hash'
29 | require 'cassandra/columns'
30 | require 'cassandra/protocol'
31 | require 'cassandra/batch'
32 | require "cassandra/#{Cassandra.VERSION}/columns"
33 | require "cassandra/#{Cassandra.VERSION}/protocol"
34 | require "cassandra/cassandra"
35 | require "cassandra/#{Cassandra.VERSION}/cassandra"
36 | unless Cassandra.VERSION.eql?("0.6")
37 | require "cassandra/column_family"
38 | require "cassandra/keyspace"
39 | end
40 | require 'cassandra/constants'
41 | require 'cassandra/debug' if ENV['DEBUG']
42 |
43 | begin
44 | require "cassandra_native"
45 | rescue LoadError
46 | puts "Unable to load cassandra_native extension. Defaulting to pure Ruby libraries."
47 | end
48 |
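The file above picks the protocol version at require time: if nothing has defined Cassandra.VERSION yet, it falls back to the 0.8 bindings. A minimal usage sketch (not part of the gem source; the keyspace name and server address are illustrative):

    require 'cassandra/1.2'   # defines Cassandra.VERSION before the 0.8 fallback runs
    client = Cassandra.new('Twitter', '127.0.0.1:9160')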
--------------------------------------------------------------------------------
/lib/cassandra/0.6.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | def self.VERSION
3 | "0.6"
4 | end
5 | end
6 |
7 | require "#{File.expand_path(File.dirname(__FILE__))}/../cassandra"
--------------------------------------------------------------------------------
/lib/cassandra/0.6/cassandra.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | def self.DEFAULT_TRANSPORT_WRAPPER
3 | Thrift::BufferedTransport
4 | end
5 |
6 | ##
7 | # Issues a login attempt using the username and password specified.
8 | #
9 | # * username
10 | # * password
11 | #
12 | def login!(username, password)
13 | @auth_request = CassandraThrift::AuthenticationRequest.new
14 | @auth_request.credentials = {'username' => username, 'password' => password}
15 | client.login(@keyspace, @auth_request)
16 | end
17 |
18 | def inspect
19 | "# #{hash['type'].inspect}"}.join(', ')
21 | }}, @servers=#{servers.inspect}>"
22 | end
23 |
24 | ##
25 | # Returns an array of available keyspaces.
26 | #
27 | def keyspaces
28 | @keyspaces ||= client.describe_keyspaces()
29 | end
30 |
31 | ##
32 | # Remove all rows in the column family you request.
33 | #
34 | # * column_family
35 | # * options
36 | # * consistency
37 | # * timestamp
38 | #
39 | def clear_column_family!(column_family, options = {})
40 | each_key(column_family) do |key|
41 | remove(column_family, key, options)
42 | end
43 | end
44 | alias truncate! clear_column_family!
45 |
46 | # Remove all rows in the keyspace. Supports options :consistency and
47 | # :timestamp.
48 | # FIXME May not currently delete all records without multiple calls. Waiting
49 | # for ranged remove support in Cassandra.
50 | def clear_keyspace!(options = {})
51 | schema.keys.each { |column_family| clear_column_family!(column_family, options) }
52 | end
53 |
54 | # Open a batch operation and yield self. Inserts and deletes will be queued
55 | # until the block closes, and then sent atomically to the server. Supports
56 | # the :consistency option, which overrides the consistency set in
57 | # the individual commands.
58 | def batch(options = {})
59 | _, _, _, options =
60 | extract_and_validate_params(schema.keys.first, "", [options], WRITE_DEFAULTS)
61 |
62 | @batch = []
63 | yield(self)
64 | compacted_map,seen_clevels = compact_mutations!
65 | clevel = if options[:consistency] != nil # Override any clevel from individual mutations if
66 | options[:consistency]
67 | elsif seen_clevels.length > 1 # Cannot choose which CLevel to use if there are several ones
68 | raise "Multiple consistency levels used in the batch, and no override...cannot pick one"
69 | else # if no consistency override has been provided but all the clevels in the batch are the same: use that one
70 | seen_clevels.first
71 | end
72 |
73 | _mutate(compacted_map,clevel)
74 | ensure
75 | @batch = nil
76 | end
77 |
78 | protected
79 |
80 | def schema(load=true)
81 | if !load && !@schema
82 | []
83 | else
84 | @schema ||= client.describe_keyspace(@keyspace)
85 | end
86 | end
87 |
88 | def client
89 | reconnect! if @client.nil?
90 | @client
91 | end
92 |
93 | def reconnect!
94 | @servers = all_nodes
95 | @client = new_client
96 | end
97 |
98 | def all_nodes
99 | if @auto_discover_nodes
100 | temp_client = new_client
101 | begin
102 | ips = ::JSON.parse(temp_client.get_string_property('token map')).values
103 | port = @servers.first.split(':').last
104 | ips.map{|ip| "#{ip}:#{port}" }
105 | ensure
106 | temp_client.disconnect!
107 | end
108 | else
109 | @servers
110 | end
111 | end
112 |
113 | end
114 |
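A sketch of the batch API defined above; the column family, keys, and values are illustrative, and QUORUM is one of the Thrift consistency levels exposed through Cassandra::Consistency:

    client.batch(:consistency => Cassandra::Consistency::QUORUM) do |c|
      c.insert(:Users, 'alice', 'screen_name' => 'alice')
      c.remove(:Users, 'bob')
    end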
--------------------------------------------------------------------------------
/lib/cassandra/0.6/columns.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | # A bunch of crap, mostly related to introspecting on column types
3 | module Columns #:nodoc:
4 | private
5 |
6 | def is_super(column_family)
7 | @is_super[column_family] ||= column_family_property(column_family, 'Type') == "Super"
8 | end
9 |
10 | def column_name_class(column_family)
11 | @column_name_class[column_family] ||= column_name_class_for_key(column_family, "CompareWith")
12 | end
13 |
14 | def sub_column_name_class(column_family)
15 | @sub_column_name_class[column_family] ||= column_name_class_for_key(column_family, "CompareSubcolumnsWith")
16 | end
17 |
18 | def column_family_property(column_family, key)
19 | unless schema[column_family]
20 | raise AccessError, "Invalid column family \"#{column_family}\""
21 | end
22 | schema[column_family][key]
23 | end
24 |
25 | def _standard_insert_mutation(column_family, column_name, value, timestamp, _=nil)
26 | CassandraThrift::Mutation.new(
27 | :column_or_supercolumn => CassandraThrift::ColumnOrSuperColumn.new(
28 | :column => CassandraThrift::Column.new(
29 | :name => column_name_class(column_family).new(column_name).to_s,
30 | :value => value,
31 | :timestamp => timestamp
32 | )
33 | )
34 | )
35 | end
36 |
37 | def _super_insert_mutation(column_family, super_column_name, sub_columns, timestamp, _=nil)
38 | CassandraThrift::Mutation.new(:column_or_supercolumn =>
39 | CassandraThrift::ColumnOrSuperColumn.new(
40 | :super_column => CassandraThrift::SuperColumn.new(
41 | :name => column_name_class(column_family).new(super_column_name).to_s,
42 | :columns => sub_columns.collect { |sub_column_name, sub_column_value|
43 | CassandraThrift::Column.new(
44 | :name => sub_column_name_class(column_family).new(sub_column_name).to_s,
45 | :value => sub_column_value.to_s,
46 | :timestamp => timestamp
47 | )
48 | }
49 | )
50 | )
51 | )
52 | end
53 |
54 | # General info about a deletion object within a mutation
55 | # timestamp - required. If this is the only param, it will cause deletion of the whole key at that TS
56 | # supercolumn - opt. If passed, the deletes will only occur within that supercolumn (only subcolumns
57 | # will be deleted). Otherwise the normal columns will be deleted.
58 | # predicate - opt. Defines how to match the columns to delete. if supercolumn passed, the slice will
59 | # be scoped to subcolumns of that supercolumn.
60 |
61 | # Deletes a single column from the containing key/CF (and possibly supercolumn), at a given timestamp.
62 | # Although mutations (as opposed to 'remove' calls) support deleting slices and lists of columns in one shot, this is not implemented here.
63 | # The main reason being that the batch function takes removes, but removes don't have that capability...so we'd need to change the remove
64 | # methods to use delete mutation calls...although that might have performance implications. We'll leave that refactoring for later.
65 | def _delete_mutation(cf, column, subcolumn, timestamp, options={})
66 | deletion_hash = {:timestamp => timestamp}
67 | if is_super(cf)
68 | deletion_hash[:super_column] = column if column
69 | deletion_hash[:predicate] = CassandraThrift::SlicePredicate.new(:column_names => [subcolumn]) if subcolumn
70 | else
71 | deletion_hash[:predicate] = CassandraThrift::SlicePredicate.new(:column_names => [column]) if column
72 | end
73 | CassandraThrift::Mutation.new(
74 | :deletion => CassandraThrift::Deletion.new(deletion_hash)
75 | )
76 | end
77 | end
78 | end
79 |
--------------------------------------------------------------------------------
/lib/cassandra/0.6/protocol.rb:
--------------------------------------------------------------------------------
1 |
2 | class Cassandra
3 | # Inner methods for actually doing the Thrift calls
4 | module Protocol #:nodoc:
5 | private
6 |
7 | def _mutate(mutation_map, consistency_level)
8 | client.batch_mutate(@keyspace, mutation_map, consistency_level)
9 | end
10 |
11 | def _remove(key, column_path, timestamp, consistency_level)
12 | client.remove(@keyspace, key, column_path, timestamp, consistency_level)
13 | end
14 |
15 | def _count_columns(column_family, key, super_column, start, stop, count, consistency)
16 | client.get_count(@keyspace, key,
17 | CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => super_column),
18 | consistency
19 | )
20 | end
21 |
22 | # FIXME: add support for start, stop, count functionality
23 | def _get_columns(column_family, key, columns, sub_columns, consistency)
24 | result = if is_super(column_family)
25 | if sub_columns
26 | columns_to_hash(column_family, client.get_slice(@keyspace, key,
27 | CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => columns),
28 | CassandraThrift::SlicePredicate.new(:column_names => sub_columns),
29 | consistency))
30 | else
31 | columns_to_hash(column_family, client.get_slice(@keyspace, key,
32 | CassandraThrift::ColumnParent.new(:column_family => column_family),
33 | CassandraThrift::SlicePredicate.new(:column_names => columns),
34 | consistency))
35 | end
36 | else
37 | columns_to_hash(column_family, client.get_slice(@keyspace, key,
38 | CassandraThrift::ColumnParent.new(:column_family => column_family),
39 | CassandraThrift::SlicePredicate.new(:column_names => columns),
40 | consistency))
41 | end
42 |
43 | klass = column_name_class(column_family)
44 | (sub_columns || columns).map { |name| result[klass.new(name)] }
45 | end
46 |
47 | def _multiget(column_family, keys, column, sub_column, count, start, finish, reversed, consistency)
48 | # Single values; count and range parameters have no effect
49 | if is_super(column_family) and sub_column
50 | column_path = CassandraThrift::ColumnPath.new(:column_family => column_family, :super_column => column, :column => sub_column)
51 | multi_column_to_hash!(client.multiget(@keyspace, keys, column_path, consistency))
52 | elsif !is_super(column_family) and column
53 | column_path = CassandraThrift::ColumnPath.new(:column_family => column_family, :column => column)
54 | multi_column_to_hash!(client.multiget(@keyspace, keys, column_path, consistency))
55 |
56 | # Slices
57 | else
58 | predicate = CassandraThrift::SlicePredicate.new(:slice_range =>
59 | CassandraThrift::SliceRange.new(
60 | :reversed => reversed,
61 | :count => count,
62 | :start => start,
63 | :finish => finish))
64 |
65 | if is_super(column_family) and column
66 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => column)
67 | multi_sub_columns_to_hash!(column_family, client.multiget_slice(@keyspace, keys, column_parent, predicate, consistency))
68 | else
69 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
70 | multi_columns_to_hash!(column_family, client.multiget_slice(@keyspace, keys, column_parent, predicate, consistency))
71 | end
72 | end
73 | end
74 |
75 | def _get_range(column_family, start_key, finish_key, key_count, columns, start, finish, count, consistency, reversed=false)
76 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
77 | predicate = if columns
78 | CassandraThrift::SlicePredicate.new(:column_names => columns)
79 | else
80 | CassandraThrift::SlicePredicate.new(:slice_range =>
81 | CassandraThrift::SliceRange.new(
82 | :start => start,
83 | :finish => finish,
84 | :count => count,
85 | :reversed => reversed))
86 | end
87 | range = CassandraThrift::KeyRange.new(:start_key => start_key, :end_key => finish_key, :count => key_count)
88 | client.get_range_slices(@keyspace, column_parent, predicate, range, consistency)
89 | end
90 | end
91 | end
92 |
--------------------------------------------------------------------------------
/lib/cassandra/0.7.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | def self.VERSION
3 | "0.7"
4 | end
5 | end
6 |
7 | require "#{File.expand_path(File.dirname(__FILE__))}/../cassandra"
--------------------------------------------------------------------------------
/lib/cassandra/0.7/cassandra.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | end
3 |
--------------------------------------------------------------------------------
/lib/cassandra/0.7/columns.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | module Columns #:nodoc:
3 | end
4 | end
5 |
--------------------------------------------------------------------------------
/lib/cassandra/0.7/protocol.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | # Inner methods for actually doing the Thrift calls
3 | module Protocol #:nodoc:
4 | end
5 | end
6 |
--------------------------------------------------------------------------------
/lib/cassandra/0.8.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | def self.VERSION
3 | "0.8"
4 | end
5 | end
6 |
7 | require "#{File.expand_path(File.dirname(__FILE__))}/../cassandra"
8 |
--------------------------------------------------------------------------------
/lib/cassandra/0.8/cassandra.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 |
3 | ## Counters
4 |
5 | # Add a value to the counter in cf:key:super column:column
6 | def add(column_family, key, value, *columns_and_options)
7 | column_family, column, sub_column, options = extract_and_validate_params(column_family, key, columns_and_options, WRITE_DEFAULTS)
8 |
9 | mutation_map = if is_super(column_family)
10 | {
11 | key => {
12 | column_family => [_super_counter_mutation(column_family, column, sub_column, value)]
13 | }
14 | }
15 | else
16 | {
17 | key => {
18 | column_family => [_standard_counter_mutation(column_family, column, value)]
19 | }
20 | }
21 | end
22 |
23 | @batch ? @batch << [mutation_map, options[:consistency]] : _mutate(mutation_map, options[:consistency])
24 | end
25 | end
26 |
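A short sketch of the counter helper above, run against the UserCounters and UserCounterAggregates column families declared in conf/0.8/schema.json; keys and column names are illustrative:

    client.add(:UserCounters, 'alice', 1, 'tweet_count')                  # standard counter CF
    client.add(:UserCounterAggregates, 'alice', 5, 'daily', '2013-01-01') # super CF: column, then sub-column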
--------------------------------------------------------------------------------
/lib/cassandra/0.8/columns.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | module Columns #:nodoc:
3 | def _standard_counter_mutation(column_family, column_name, value)
4 | CassandraThrift::Mutation.new(
5 | :column_or_supercolumn => CassandraThrift::ColumnOrSuperColumn.new(
6 | :counter_column => CassandraThrift::CounterColumn.new(
7 | :name => column_name_class(column_family).new(column_name).to_s,
8 | :value => value
9 | )
10 | )
11 | )
12 | end
13 |
14 | def _super_counter_mutation(column_family, super_column_name, sub_column, value)
15 | CassandraThrift::Mutation.new(:column_or_supercolumn =>
16 | CassandraThrift::ColumnOrSuperColumn.new(
17 | :counter_super_column => CassandraThrift::SuperColumn.new(
18 | :name => column_name_class(column_family).new(super_column_name).to_s,
19 | :columns => [CassandraThrift::CounterColumn.new(
20 | :name => sub_column_name_class(column_family).new(sub_column).to_s,
21 | :value => value
22 | )]
23 | )
24 | )
25 | )
26 | end
27 | end
28 | end
29 |
--------------------------------------------------------------------------------
/lib/cassandra/0.8/protocol.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | # Inner methods for actually doing the Thrift calls
3 | module Protocol #:nodoc:
4 | private
5 |
6 | def _remove_counter(key, column_path, consistency_level)
7 | client.remove_counter(key, column_path, consistency_level)
8 | end
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/lib/cassandra/1.0.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | def self.VERSION
3 | "1.0"
4 | end
5 | end
6 |
7 | require "#{File.expand_path(File.dirname(__FILE__))}/../cassandra"
8 |
--------------------------------------------------------------------------------
/lib/cassandra/1.0/cassandra.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../0.8/cassandra"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.0/columns.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../0.8/columns"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.0/protocol.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../0.8/protocol"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.1.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | def self.VERSION
3 | "1.1"
4 | end
5 | end
6 |
7 | require "#{File.expand_path(File.dirname(__FILE__))}/../cassandra"
8 |
--------------------------------------------------------------------------------
/lib/cassandra/1.1/cassandra.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../1.0/cassandra"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.1/columns.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../1.0/columns"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.1/protocol.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../1.0/protocol"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.2.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | def self.VERSION
3 | "1.2"
4 | end
5 | end
6 |
7 | require "#{File.expand_path(File.dirname(__FILE__))}/../cassandra"
8 |
--------------------------------------------------------------------------------
/lib/cassandra/1.2/cassandra.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../1.1/cassandra"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.2/columns.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../1.1/columns"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/1.2/protocol.rb:
--------------------------------------------------------------------------------
1 | require "#{File.expand_path(File.dirname(__FILE__))}/../1.1/protocol"
2 |
--------------------------------------------------------------------------------
/lib/cassandra/array.rb:
--------------------------------------------------------------------------------
1 |
2 | class Array
3 | def _flatten_once
4 | result = []
5 | each { |el| result.concat(Array(el)) }
6 | result
7 | end
8 | end
9 |
--------------------------------------------------------------------------------
/lib/cassandra/batch.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | class Batch
3 | include Enumerable
4 |
5 | def initialize(cassandra, options)
6 | @queue_size = options.delete(:queue_size) || 0
7 | @cassandra = cassandra
8 | @options = options
9 | @batch_queue = []
10 | end
11 |
12 | ##
13 | # Append mutation to the batch queue
14 | # Flush the batch queue if full
15 | #
16 | def <<(mutation)
17 | @batch_queue << mutation
18 | if @queue_size > 0 and @batch_queue.length >= @queue_size
19 | begin
20 | @cassandra.flush_batch(@options)
21 | ensure
22 | @batch_queue = []
23 | end
24 | end
25 | end
26 |
27 | ##
28 | # Implement each method (required by Enumerable)
29 | #
30 | def each(&block)
31 | @batch_queue.each(&block)
32 | end
33 |
34 | ##
35 | # Queue size
36 | #
37 | def length
38 | @batch_queue.length
39 | end
40 | end
41 | end
42 |
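A sketch of how the :queue_size option handled above is meant to be used (with the default of 0, nothing is flushed until the surrounding block finishes), assuming the Cassandra#batch and #flush_batch entry points that drive this class; the column family and rows are illustrative:

    client.batch(:queue_size => 100) do |c|
      rows.each { |key, attrs| c.insert(:UserAudits, key, attrs) }
    end  # any mutations still queued are flushed when the block exits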
--------------------------------------------------------------------------------
/lib/cassandra/column_family.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | class ColumnFamily < CassandraThrift::CfDef ; end
3 | end
--------------------------------------------------------------------------------
/lib/cassandra/columns.rb:
--------------------------------------------------------------------------------
1 |
2 | class Cassandra
3 | # A bunch of crap, mostly related to introspecting on column types
4 | module Columns #:nodoc:
5 | private
6 |
7 | def is_super(column_family)
8 | @is_super[column_family] ||= column_family_property(column_family, 'column_type') == "Super"
9 | end
10 |
11 | def column_name_class(column_family)
12 | @column_name_class[column_family] ||= column_name_class_for_key(column_family, "comparator_type")
13 | end
14 |
15 | def sub_column_name_class(column_family)
16 | @sub_column_name_class[column_family] ||= column_name_class_for_key(column_family, "subcomparator_type")
17 | end
18 |
19 | def column_name_maker(column_family)
20 | @column_name_maker[column_family] ||=
21 | begin
22 | klass = column_name_class(column_family)
23 | if klass == Composite || klass == DynamicComposite
24 | lambda {|name| klass.new_from_packed(name) }
25 | else
26 | lambda {|name| klass.new(name) }
27 | end
28 | end
29 | end
30 |
31 | def sub_column_name_maker(column_family)
32 | @sub_column_name_maker[column_family] ||=
33 | begin
34 | klass = sub_column_name_class(column_family)
35 | if klass == Composite || klass == DynamicComposite
36 | lambda {|name| klass.new_from_packed(name) }
37 | else
38 | lambda {|name| klass.new(name) }
39 | end
40 | end
41 | end
42 |
43 | def column_name_class_for_key(column_family, comparator_key)
44 | property = column_family_property(column_family, comparator_key)
45 | property =~ /[^(]*\.(.*?)$/
46 | case $1
47 | when "LongType" then Long
48 | when "LexicalUUIDType", "TimeUUIDType" then SimpleUUID::UUID
49 | when /^DynamicCompositeType\(/ then DynamicComposite
50 | when /^CompositeType\(/ then Composite
51 | else
52 | String # UTF8, Ascii, Bytes, anything else
53 | end
54 | end
55 |
56 | def column_family_property(column_family, key)
57 | cfdef = schema.cf_defs.find {|cfdef| cfdef.name == column_family }
58 | unless cfdef
59 | raise AccessError, "Invalid column family \"#{column_family}\""
60 | end
61 | cfdef.send(key)
62 | end
63 |
64 | def multi_key_slices_to_hash(column_family, array, return_empty_rows = false)
65 | ret = OrderedHash.new
66 | array.each do |value|
67 | next if return_empty_rows == false && value.columns.length == 0
68 | ret[value.key] = columns_to_hash(column_family, value.columns)
69 | end
70 | ret
71 | end
72 |
73 | def multi_column_to_hash!(hash)
74 | hash.each do |key, column_or_supercolumn|
75 | hash[key] = (column_or_supercolumn.column.value if column_or_supercolumn.column)
76 | end
77 | end
78 |
79 | def multi_columns_to_hash!(column_family, hash)
80 | hash.each do |key, columns|
81 | hash[key] = columns_to_hash(column_family, columns)
82 | end
83 | end
84 |
85 | def multi_sub_columns_to_hash!(column_family, hash)
86 | hash.each do |key, sub_columns|
87 | hash[key] = sub_columns_to_hash(column_family, sub_columns)
88 | end
89 | end
90 |
91 | def columns_to_hash(column_family, columns)
92 | columns_to_hash_for_classes(columns, column_name_maker(column_family), sub_column_name_maker(column_family))
93 | end
94 |
95 | def sub_columns_to_hash(column_family, columns)
96 | columns_to_hash_for_classes(columns, sub_column_name_maker(column_family))
97 | end
98 |
99 | def columns_to_hash_for_classes(columns, column_name_maker, sub_column_name_maker = nil)
100 | hash = OrderedHash.new
101 | Array(columns).each do |c|
102 | c = c.super_column || c.column || c.counter_column || c.counter_super_column if c.is_a?(CassandraThrift::ColumnOrSuperColumn)
103 | case c
104 | when CassandraThrift::SuperColumn
105 | hash.[]=(column_name_maker.call(c.name), columns_to_hash_for_classes(c.columns, sub_column_name_maker)) # Pop the class stack, and recurse
106 | when CassandraThrift::Column
107 | hash.[]=(column_name_maker.call(c.name), c.value, c.timestamp)
108 | when CassandraThrift::CounterColumn
109 | hash.[]=(column_name_maker.call(c.name), c.value, 0)
110 | when CassandraThrift::CounterSuperColumn
111 | hash.[]=(column_name_maker.call(c.name), columns_to_hash_for_classes(c.columns, sub_column_name_maker)) # Pop the class stack, and recurse
112 | end
113 | end
114 | hash
115 | end
116 |
117 | def _standard_insert_mutation(column_family, column_name, value, timestamp, ttl = nil)
118 | CassandraThrift::Mutation.new(
119 | :column_or_supercolumn => CassandraThrift::ColumnOrSuperColumn.new(
120 | :column => CassandraThrift::Column.new(
121 | :name => column_name_class(column_family).new(column_name).to_s,
122 | :value => value,
123 | :timestamp => timestamp,
124 | :ttl => ttl
125 | )
126 | )
127 | )
128 | end
129 |
130 | def _super_insert_mutation(column_family, super_column_name, sub_columns, timestamp, ttl = nil)
131 | CassandraThrift::Mutation.new(:column_or_supercolumn =>
132 | CassandraThrift::ColumnOrSuperColumn.new(
133 | :super_column => CassandraThrift::SuperColumn.new(
134 | :name => column_name_class(column_family).new(super_column_name).to_s,
135 | :columns => sub_columns.collect { |sub_column_name, sub_column_value|
136 | CassandraThrift::Column.new(
137 | :name => sub_column_name_class(column_family).new(sub_column_name).to_s,
138 | :value => sub_column_value.to_s,
139 | :timestamp => timestamp,
140 | :ttl => ttl
141 | )
142 | }
143 | )
144 | )
145 | )
146 | end
147 |
148 | # General info about a deletion object within a mutation
149 | # timestamp - required. If this is the only param, it will cause deletion of the whole key at that TS
150 | # supercolumn - opt. If passed, the deletes will only occur within that supercolumn (only subcolumns
151 | # will be deleted). Otherwise the normal columns will be deleted.
152 | # predicate - opt. Defines how to match the columns to delete. if supercolumn passed, the slice will
153 | # be scoped to subcolumns of that supercolumn.
154 |
155 | # Deletes a single column from the containing key/CF (and possibly supercolumn), at a given timestamp.
156 | # Although mutations (as opposed to 'remove' calls) support deleting slices and lists of columns in one shot, this is not implemented here.
157 | # The main reason being that the batch function takes removes, but removes don't have that capability...so we'd need to change the remove
158 | # methods to use delete mutation calls...although that might have performance implications. We'll leave that refactoring for later.
159 | def _delete_mutation(cf, column, subcolumn, timestamp, options={})
160 | deletion_hash = {:timestamp => timestamp}
161 | if is_super(cf)
162 | deletion_hash[:super_column] = column if column
163 | deletion_hash[:predicate] = CassandraThrift::SlicePredicate.new(:column_names => [subcolumn]) if subcolumn
164 | else
165 | deletion_hash[:predicate] = CassandraThrift::SlicePredicate.new(:column_names => [column]) if column
166 | end
167 | CassandraThrift::Mutation.new(
168 | :deletion => CassandraThrift::Deletion.new(deletion_hash)
169 | )
170 | end
171 | end
172 | end
173 |
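For reference, what column_name_class_for_key above resolves for the comparators used in conf/*/schema.json (inferred from the case statement; not an exhaustive mapping):

    # "org.apache.cassandra.db.marshal.LongType"                  -> Cassandra::Long
    # "org.apache.cassandra.db.marshal.TimeUUIDType"              -> SimpleUUID::UUID
    # "org.apache.cassandra.db.marshal.CompositeType(...)"        -> Cassandra::Composite
    # "org.apache.cassandra.db.marshal.DynamicCompositeType(...)" -> Cassandra::DynamicComposite
    # "org.apache.cassandra.db.marshal.UTF8Type" (and others)     -> String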
--------------------------------------------------------------------------------
/lib/cassandra/comparable.rb:
--------------------------------------------------------------------------------
1 |
2 | class Cassandra
3 | # Abstract base class for comparable numeric column name types
4 | class Comparable
5 | class TypeError < ::TypeError #:nodoc:
6 | end
7 |
8 | def <=>(other)
9 | self.to_i <=> other.to_i
10 | end
11 |
12 | def hash
13 | @bytes.hash
14 | end
15 |
16 | def eql?(other)
17 | other.is_a?(Comparable) and @bytes == other.to_s
18 | end
19 |
20 | def ==(other)
21 | other.respond_to?(:to_i) && self.to_i == other.to_i
22 | end
23 |
24 | def to_s
25 | @bytes
26 | end
27 | end
28 | end
29 |
--------------------------------------------------------------------------------
/lib/cassandra/composite.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | class Composite
3 | include ::Comparable
4 | attr_reader :parts
5 | attr_reader :column_slice
6 |
7 | def initialize(*parts)
8 | return if parts.empty?
9 |
10 | options = {}
11 | if parts.last.is_a?(Hash)
12 | options = parts.pop
13 | end
14 |
15 | if parts.length == 1 && parts[0].instance_of?(self.class)
16 | make_from_parts(parts[0].parts, :slice => parts[0].column_slice)
17 | elsif parts.length == 1 && parts[0].instance_of?(String) && @column_slice.nil? && try_packed_composite(parts[0])
18 | @hash = parts[0].hash
19 | else
20 | make_from_parts(parts, options)
21 | end
22 | end
23 |
24 | def self.new_from_packed(packed)
25 | obj = new
26 | obj.fast_unpack(packed)
27 | return obj
28 | end
29 |
30 | def self.new_from_parts(parts, args={})
31 | obj = new
32 | obj.make_from_parts(parts, args)
33 |
34 | return obj
35 | end
36 |
37 | def [](*args)
38 | return @parts[*args]
39 | end
40 |
41 | def pack
42 | packed = @parts.map do |part|
43 | [part.length].pack('n') + part + "\x00"
44 | end
45 | if @column_slice
46 | part = @parts[-1]
47 | packed[-1] = [part.length].pack('n') + part + slice_end_of_component
48 | end
49 | return packed.join('')
50 | end
51 |
52 | def to_s
53 | return pack
54 | end
55 |
56 | def <=>(other)
57 | if !other.instance_of?(self.class)
58 | return @parts.first <=> other
59 | end
60 | eoc = slice_end_of_component.unpack('c')[0]
61 | other_eoc = other.slice_end_of_component.unpack('c')[0]
62 | @parts.zip(other.parts).each do |a, b|
63 | next if a == b
64 | if a.nil? && b.nil?
65 | return eoc <=> other_eoc
66 | end
67 |
68 | if a.nil?
69 | return @column_slice == :after ? 1 : -1
70 | end
71 | if b.nil?
72 | return other.column_slice == :after ? -1 : 1
73 | end
74 | return -1 if a < b
75 | return 1 if a > b
76 | end
77 | return 0
78 | end
79 |
80 | def inspect
81 | return "#<#{self.class}:#{@column_slice} #{@parts.inspect}>"
82 | end
83 |
84 | def slice_end_of_component
85 | ret = "\x00"
86 | ret = "\x01" if @column_slice == :after
87 | ret = "\xFF" if @column_slice == :before
88 |
89 | ret.force_encoding('BINARY') if ret.respond_to?(:force_encoding)
90 | return ret
91 | end
92 |
93 | def fast_unpack(packed_string)
94 | @hash = packed_string.hash
95 |
96 | @parts = []
97 | end_of_component = packed_string.slice(packed_string.length-1, 1)
98 | while packed_string.length > 0
99 | length = packed_string.unpack('n')[0]
100 | @parts << packed_string.slice(2, length)
101 |
102 | packed_string.slice!(0, length+3)
103 | end
104 |
105 | @column_slice = :after if end_of_component == "\x01"
106 | @column_slice = :before if end_of_component == "\xFF"
107 | end
108 |
109 | def make_from_parts(parts, args)
110 | @parts = parts
111 | @column_slice = args[:slice]
112 | raise ArgumentError if @column_slice != nil && ![:before, :after].include?(@column_slice)
113 | end
114 |
115 | private
116 | def try_packed_composite(packed_string)
117 | parts = []
118 | end_of_component = nil
119 | while packed_string.length > 0
120 | length = packed_string.slice(0, 2).unpack('n')[0]
121 | return false if length.nil? || length + 3 > packed_string.length
122 |
123 | parts << packed_string.slice(2, length)
124 | end_of_component = packed_string.slice(2 + length, 1)
125 | if length + 3 != packed_string.length
126 | return false if end_of_component != "\x00"
127 | end
128 |
129 | packed_string = packed_string.slice(3 + length, packed_string.length)
130 | end
131 |
132 | @column_slice = :after if end_of_component == "\x01"
133 | @column_slice = :before if end_of_component == "\xFF"
134 | @parts = parts
135 |
136 | return true
137 | end
138 |
139 | def hash
140 | return @hash ||= pack.hash
141 | end
142 |
143 | def eql?(other)
144 | return to_s == other.to_s
145 | end
146 | end
147 | end
148 |
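A worked round trip through the pack/fast_unpack pair above: each part is framed as a two-byte big-endian length, the raw bytes, and a one-byte end-of-component flag, so the two parts 'a' and 'bc' serialize to nine bytes:

    c = Cassandra::Composite.new('a', 'bc')
    c.pack.unpack('H*').first                           # => "000161000002626300"
    Cassandra::Composite.new_from_packed(c.pack).parts  # => ["a", "bc"]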
--------------------------------------------------------------------------------
/lib/cassandra/constants.rb:
--------------------------------------------------------------------------------
1 |
2 | class Cassandra
3 | # A helper module you can include in your own class. Makes it easier
4 | # to work with Cassandra subclasses.
5 | module Constants
6 | include Cassandra::Consistency
7 |
8 | Long = Cassandra::Long
9 | OrderedHash = Cassandra::OrderedHash
10 | end
11 | end
12 |
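A sketch of the intended use (the class and method names here are hypothetical): including the module gives bare access to the consistency levels and helper classes:

    class TimelineWriter
      include Cassandra::Constants   # brings in QUORUM, Long, OrderedHash, ...

      def write(client, key, body)
        client.insert(:TimelinishThings, key, {'body' => body}, :consistency => QUORUM)
      end
    end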
--------------------------------------------------------------------------------
/lib/cassandra/debug.rb:
--------------------------------------------------------------------------------
1 |
2 | require 'pp'
3 |
4 | class CassandraThrift::Cassandra::Client
5 | def send_message(*args)
6 | pp args
7 | super
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/lib/cassandra/dynamic_composite.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | class DynamicComposite < Composite
3 | attr_accessor :types
4 |
5 | def initialize(*parts)
6 | return if parts.empty?
7 |
8 | options = {}
9 | if parts.last.is_a?(Hash)
10 | options = parts.pop
11 | end
12 | @column_slice = options[:slice]
13 | raise ArgumentError if @column_slice != nil && ![:before, :after].include?(@column_slice)
14 |
15 | if parts.length == 1 && parts[0].instance_of?(self.class)
16 | @column_slice = parts[0].column_slice
17 | @parts = parts[0].parts
18 | @types = parts[0].types
19 | elsif parts.length == 1 && parts[0].instance_of?(String) && @column_slice.nil? && try_packed_composite(parts[0])
20 | @hash = parts[0].hash
21 | else
22 | @types, @parts = parts.transpose
23 | end
24 | end
25 |
26 | def pack
27 | packed_parts = @parts.map do |part|
28 | [part.length].pack('n') + part + "\x00"
29 | end
30 |
31 | if @column_slice
32 | part = @parts[-1]
33 | packed_parts[-1] = [part.length].pack('n') + part + slice_end_of_component
34 | end
35 |
36 | packed_types = @types.map do |type|
37 | if type.length == 1
38 | [0x8000 | type[0].ord].pack('n')
39 | else
40 | [type.length].pack('n') + type
41 | end
42 | end
43 |
44 | return packed_types.zip(packed_parts).flatten.join('')
45 | end
46 |
47 | def fast_unpack(packed_string)
48 | @hash = packed_string.hash
49 |
50 | @types = []
51 | @parts = []
52 |
53 | offset = 0
54 | length = nil
55 | while offset < packed_string.length
56 | if packed_string[offset].ord & 0x80 != 0
57 | @types << packed_string[offset+1]
58 | offset += 2
59 | else
60 | length = packed_string.slice(offset, 2).unpack('n')[0]
61 | offset += 2
62 | @types << packed_string.slice(offset, length)
63 | offset += length
64 | end
65 | length = packed_string.slice(offset, 2).unpack('n')[0]
66 | offset += 2
67 | @parts << packed_string.slice(offset, length)
68 | offset += length + 1
69 | end
70 |
71 | @column_slice = :after if packed_string[-1] == "\x01"
72 | @column_slice = :before if packed_string[-1] == "\xFF"
73 | end
74 |
75 | private
76 | def try_packed_composite(packed_string)
77 | types = []
78 | parts = []
79 | end_of_component = nil
80 | offset = 0
81 |
82 | read_bytes = proc do |length|
83 | return false if offset + length > packed_string.length
84 | out = packed_string.slice(offset, length)
85 | offset += length
86 | out
87 | end
88 |
89 | while offset < packed_string.length
90 | header = read_bytes.call(2).unpack('n')[0]
91 | is_alias = header & 0x8000 != 0
92 | if is_alias
93 | alias_char = (header & 0xFF).chr
94 | types << alias_char
95 | else
96 | length = header
97 | return false if length.nil? || length + offset > packed_string.length
98 | type = read_bytes.call(length)
99 | types << type
100 | end
101 | length = read_bytes.call(2).unpack('n')[0]
102 | return false if length.nil? || length + offset > packed_string.length
103 | parts << read_bytes.call(length)
104 | end_of_component = read_bytes.call(1)
105 | if offset < packed_string.length
106 | return false if end_of_component != "\x00"
107 | end
108 | end
109 | @column_slice = :after if end_of_component == "\x01"
110 | @column_slice = :before if end_of_component == "\xFF"
111 | @types = types
112 | @parts = parts
113 | @hash = packed_string.hash
114 |
115 | return true
116 | end
117 | end
118 | end
119 |
--------------------------------------------------------------------------------
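A sketch of the DynamicComposite encoding above, where each part carries its own comparator either as a full class name or as a one-byte alias (the tests pair 'i', 's' and 't' with IntegerType, UTF8Type and TimeUUIDType). Assumes the cassandra gem is loaded:

    require 'cassandra'

    col = Cassandra::DynamicComposite.new(
      ['IntegerType', [5].pack('N')],   # full comparator name: 2-byte length + name
      ['s',           'zebra']          # one-byte alias, encoded as 0x8000 | byte
    )

    packed = col.pack
    col2 = Cassandra::DynamicComposite.new_from_packed(packed)
    puts col2[1]             # => "zebra"
    puts col2.types.inspect  # => ["IntegerType", "s"]
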
/lib/cassandra/helpers.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | module Helpers
3 | def extract_and_validate_params(column_family, keys, args, options)
4 | options = options.dup
5 | column_family = column_family.to_s
6 | # Keys
7 | [keys].flatten.each do |key|
8 | raise ArgumentError, "Key #{key.inspect} must be a String for #{caller[2].inspect}." unless key.is_a?(String)
9 | end
10 |
11 | # Options
12 | if args.last.is_a?(Hash)
13 | extras = args.last.keys - options.keys
14 | raise ArgumentError, "Invalid options #{extras.inspect[1..-2]} for #{caller[1]}" if extras.any?
15 | options.merge!(args.pop)
16 | end
17 |
18 | # Ranges
19 | column, sub_column = args[0], args[1]
20 | raise ArgumentError, "Invalid arguments: subcolumns specified for a non-supercolumn family" if sub_column && !is_super(column_family)
21 | klass, sub_klass = column_name_class(column_family), sub_column_name_class(column_family)
22 | range_class = column ? sub_klass : klass
23 |
24 | [:start, :finish].each do |opt|
25 | options[opt] = options[opt] ? range_class.new(options[opt]).to_s : ''
26 | end
27 |
28 | [column_family, s_map(column, klass), s_map(sub_column, sub_klass), options]
29 | end
30 |
31 |     # Convert a column name, an array of names, or nil to strings using the given comparator class.
32 | def s_map(el, klass)
33 | case el
34 | when Array then el.map { |i| s_map(i, klass) }
35 | when NilClass then nil
36 | else
37 | klass.new(el).to_s
38 | end
39 | end
40 | end
41 | end
42 |
--------------------------------------------------------------------------------
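A tiny sketch of s_map's contract: it normalizes a single column name, an array of names, or nil using the column family's comparator class. SimpleName below is a hypothetical stand-in for such a comparator:

    require 'cassandra'

    class SimpleName
      def initialize(el); @el = el; end
      def to_s; @el.to_s; end
    end

    include Cassandra::Helpers

    puts s_map(:body, SimpleName)             # => "body"
    puts s_map([:a, :b], SimpleName).inspect  # => ["a", "b"]
    puts s_map(nil, SimpleName).inspect       # => nil
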
/lib/cassandra/keyspace.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | class Keyspace < CassandraThrift::KsDef ; end
3 | end
--------------------------------------------------------------------------------
/lib/cassandra/long.rb:
--------------------------------------------------------------------------------
1 |
2 | class Cassandra
3 | # A temporally-ordered Long class for use in Cassandra column names
4 | class Long < Comparable
5 |
6 | # FIXME Should unify with or subclass Cassandra::UUID
7 | def initialize(bytes = nil)
8 | case bytes
9 | when self.class # Long
10 | @bytes = bytes.to_s
11 | when String
12 | case bytes.size
13 | when 8 # Raw byte array
14 | @bytes = bytes
15 | when 18 # Human-readable UUID-like representation; inverse of #to_guid
16 | elements = bytes.split("-")
17 | raise TypeError, "Expected #{bytes.inspect} to cast to a #{self.class} (malformed UUID-like representation)" if elements.size != 3
18 | @bytes = [elements.join].pack('H32')
19 | else
20 | raise TypeError, "Expected #{bytes.inspect} to cast to a #{self.class} (invalid bytecount)"
21 | end
22 | when Integer
23 | raise TypeError, "Expected #{bytes.inspect} to cast to a #{self.class} (integer out of range)" if bytes < 0 or bytes > 2**64
24 | @bytes = [bytes >> 32, bytes % 2**32].pack("NN")
25 | when NilClass, Time
26 |         # Time.stamp is 52 bits, so we have 12 bits of entropy left over
27 | int = ((bytes || Time).stamp << 12) + rand(2**12)
28 | @bytes = [int >> 32, int % 2**32].pack("NN")
29 | else
30 | raise TypeError, "Expected #{bytes.inspect} to cast to a #{self.class} (unknown source class)"
31 | end
32 | end
33 |
34 | def to_i
35 | @to_i ||= begin
36 | ints = @bytes.unpack("NN")
37 | (ints[0] << 32) +
38 | ints[1]
39 | end
40 | end
41 |
42 | def to_guid
43 | "%08x-%04x-%04x" % @bytes.unpack("Nnn")
44 | end
45 |
46 | def inspect
47 | "> 12) / 1_000_000).utc.inspect
49 | }, usecs: #{
50 | (to_i >> 12) % 1_000_000
51 | }, jitter: #{
52 | to_i % 2**12
53 | }, guid: #{
54 | to_guid
55 | }>"
56 | end
57 | end
58 | end
59 |
--------------------------------------------------------------------------------
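A quick sketch of the Long column name type defined above, assuming the cassandra gem is loaded. With no argument it packs the current Time.stamp plus 12 bits of jitter, so freshly created Longs sort chronologically (mirroring test/comparable_types_test.rb further down):

    require 'cassandra'

    ary = Array.new(3) { Cassandra::Long.new }
    puts(ary.sort == ary)                            # => true: temporally ordered

    long = Cassandra::Long.new(42)
    puts long.to_i                                   # => 42
    puts long.to_guid                                # => "00000000-0000-002a"
    puts(Cassandra::Long.new(long.to_guid) == long)  # => true: the forms round-trip
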
/lib/cassandra/ordered_hash.rb:
--------------------------------------------------------------------------------
1 | # OrderedHash is namespaced to prevent conflicts with other implementations
2 | class Cassandra
3 | class OrderedHashInt < Hash #:nodoc:
4 | def initialize(*args, &block)
5 | super
6 | @keys = []
7 | end
8 |
9 | def self.[](*args)
10 | ordered_hash = new
11 |
12 | if (args.length == 1 && args.first.is_a?(Array))
13 | args.first.each do |key_value_pair|
14 | next unless (key_value_pair.is_a?(Array))
15 | ordered_hash[key_value_pair[0]] = key_value_pair[1]
16 | end
17 |
18 | return ordered_hash
19 | end
20 |
21 | unless (args.size % 2 == 0)
22 | raise ArgumentError.new("odd number of arguments for Hash")
23 | end
24 |
25 | args.each_with_index do |val, ind|
26 | next if (ind % 2 != 0)
27 | ordered_hash[val] = args[ind + 1]
28 | end
29 |
30 | ordered_hash
31 | end
32 |
33 | def initialize_copy(other)
34 | super
35 |       # copy the key ordering (OrderedHashInt#keys already returns a dup)
36 | @keys = other.keys
37 | end
38 |
39 | def []=(key, value)
40 | @keys << key if !has_key?(key)
41 | super
42 | end
43 |
44 | def delete(key)
45 | if has_key? key
46 | index = @keys.index(key)
47 | @keys.delete_at index
48 | end
49 | super
50 | end
51 |
52 | def delete_if
53 | super
54 | sync_keys!
55 | self
56 | end
57 |
58 | def reject!
59 | super
60 | sync_keys!
61 | self
62 | end
63 |
64 | def reject(&block)
65 | dup.reject!(&block)
66 | end
67 |
68 | def keys
69 | @keys.dup
70 | end
71 |
72 | def values
73 | @keys.collect { |key| self[key] }
74 | end
75 |
76 | def to_hash
77 | self
78 | end
79 |
80 | def to_a
81 | @keys.map { |key| [ key, self[key] ] }
82 | end
83 |
84 | def each_key
85 | @keys.each { |key| yield key }
86 | self
87 | end
88 |
89 | def each_value
90 | @keys.each { |key| yield self[key]}
91 | self
92 | end
93 |
94 | def each
95 | @keys.each {|key| yield [key, self[key]]}
96 | self
97 | end
98 |
99 | alias_method :each_pair, :each
100 |
101 | def clear
102 | super
103 | @keys.clear
104 | self
105 | end
106 |
107 | def shift
108 | k = @keys.first
109 | v = delete(k)
110 | [k, v]
111 | end
112 |
113 | def merge!(other_hash)
114 | other_hash.each {|k,v| self[k] = v }
115 | self
116 | end
117 |
118 | def merge(other_hash)
119 | dup.merge!(other_hash)
120 | end
121 |
122 |     # When replacing with another hash, the initial order of our keys must come from the other hash, ordered or not.
123 | def replace(other)
124 | super
125 | @keys = other.keys
126 | self
127 | end
128 |
129 | def reverse
130 | OrderedHashInt[self.to_a.reverse]
131 | end
132 |
133 | private
134 |
135 | def sync_keys!
136 | @keys.delete_if {|k| !has_key?(k)}
137 | end
138 | end
139 |
140 | class OrderedHash < OrderedHashInt #:nodoc:
141 | def initialize(*args, &block)
142 | @timestamps = OrderedHashInt.new
143 | super
144 | end
145 |
146 | def initialize_copy(other)
147 | @timestamps = other.timestamps
148 | super
149 | end
150 |
151 | def []=(key, value, timestamp = nil)
152 | @timestamps[key] = timestamp
153 | super(key, value)
154 | end
155 |
156 | def delete(key)
157 | @timestamps.delete(key)
158 | super
159 | end
160 |
161 | def delete_if(&block)
162 | @timestamps.delete_if(&block)
163 | super
164 | end
165 |
166 | def reject!(&block)
167 | @timestamps.reject!(&block)
168 | super
169 | end
170 |
171 | def timestamps
172 | @timestamps.dup
173 | end
174 |
175 | def clear
176 | @timestamps.clear
177 | super
178 | end
179 |
180 | def shift
181 | k, v = super
182 | @timestamps.delete(k)
183 | [k, v]
184 | end
185 |
186 | def replace(other)
187 | @timestamps = other.timestamps
188 | super
189 | end
190 |
191 | def inspect
192 | "#"
193 | end
194 | end
195 | end
196 |
--------------------------------------------------------------------------------
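A small sketch of the timestamp-aware OrderedHash above, assuming the cassandra gem is loaded. Column results come back from the client in this form: insertion-ordered, with a per-key timestamp side table kept in sync by delete, shift and clear:

    require 'cassandra'

    row = Cassandra::OrderedHash.new
    row.[]=('body', 'hello', 1234567890)   # optional third argument is the timestamp
    row.[]=('tags', 'intro', 1234567891)

    puts row.keys.inspect                  # => ["body", "tags"]  (insertion order)
    puts row.timestamps['body']            # => 1234567890
    puts row.shift.inspect                 # => ["body", "hello"]
    puts row.timestamps.keys.inspect       # => ["tags"]          (still in sync)
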
/lib/cassandra/protocol.rb:
--------------------------------------------------------------------------------
1 | class Cassandra
2 | # Inner methods for actually doing the Thrift calls
3 | module Protocol #:nodoc:
4 | private
5 |
6 | def _mutate(mutation_map, consistency_level)
7 | client.batch_mutate(mutation_map, consistency_level)
8 | end
9 |
10 | def _remove(key, column_path, timestamp, consistency_level)
11 | client.remove(key, column_path, timestamp, consistency_level)
12 | end
13 |
14 | def _count_columns(column_family, key, super_column, start, stop, count, consistency)
15 | client.get_count(key,
16 | CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => super_column),
17 | CassandraThrift::SlicePredicate.new(:slice_range =>
18 | CassandraThrift::SliceRange.new(
19 | :start => start || '',
20 | :finish => stop || '',
21 | :count => count || 100
22 | )),
23 | consistency
24 | )
25 | end
26 |
27 | # FIXME: Add support for start, stop, count
28 | def _get_columns(column_family, key, columns, sub_columns, consistency)
29 | result = if is_super(column_family)
30 | if sub_columns
31 | columns_to_hash(column_family, client.get_slice(key,
32 | CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => columns),
33 | CassandraThrift::SlicePredicate.new(:column_names => sub_columns),
34 | consistency))
35 | else
36 | columns_to_hash(column_family, client.get_slice(key,
37 | CassandraThrift::ColumnParent.new(:column_family => column_family),
38 | CassandraThrift::SlicePredicate.new(:column_names => columns),
39 | consistency))
40 | end
41 | else
42 | columns_to_hash(column_family, client.get_slice(key,
43 | CassandraThrift::ColumnParent.new(:column_family => column_family),
44 | CassandraThrift::SlicePredicate.new(:column_names => columns),
45 | consistency))
46 | end
47 |
48 | klass = column_name_class(column_family)
49 | (sub_columns || columns).map { |name| result[klass.new(name)] }
50 | end
51 |
52 | def _multi_get_columns(column_family, keys, columns, sub_columns, consistency)
53 | result = if is_super(column_family) and sub_columns
54 | predicate = CassandraThrift::SlicePredicate.new(:column_names => sub_columns)
55 | column_parent = CassandraThrift::ColumnParent.new(
56 | :column_family => column_family,
57 | :super_column => columns.kind_of?(Array) ? columns[0] : columns )
58 | multi_sub_columns_to_hash!(column_family, client.multiget_slice(keys, column_parent, predicate, consistency))
59 | else
60 | predicate = CassandraThrift::SlicePredicate.new(:column_names => columns)
61 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
62 | multi_columns_to_hash!(column_family, client.multiget_slice(keys, column_parent, predicate, consistency))
63 | end
64 |
65 | klass = column_name_class(column_family)
66 | OrderedHash[result.keys.map { |key| [key, (sub_columns || columns).map { |column| result[key][klass.new(column)] }] }]
67 | end
68 |
69 | def _multiget(column_family, keys, column, sub_column, count, start, finish, reversed, consistency)
70 | # Single values; count and range parameters have no effect
71 | if is_super(column_family) and sub_column
72 | predicate = CassandraThrift::SlicePredicate.new(:column_names => [sub_column])
73 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => column)
74 | column_hash = multi_sub_columns_to_hash!(column_family, client.multiget_slice(keys, column_parent, predicate, consistency))
75 |
76 | klass = sub_column_name_class(column_family)
77 | keys.inject({}){|hash, key| hash[key] = column_hash[key][klass.new(sub_column)]; hash}
78 | elsif !is_super(column_family) and column
79 | predicate = CassandraThrift::SlicePredicate.new(:column_names => [column])
80 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
81 | column_hash = multi_columns_to_hash!(column_family, client.multiget_slice(keys, column_parent, predicate, consistency))
82 |
83 | klass = column_name_class(column_family)
84 | keys.inject({}){|hash, key| hash[key] = column_hash[key][klass.new(column)]; hash}
85 |
86 | # Slices
87 | else
88 | predicate = CassandraThrift::SlicePredicate.new(:slice_range =>
89 | CassandraThrift::SliceRange.new(
90 | :reversed => reversed,
91 | :count => count,
92 | :start => start,
93 | :finish => finish))
94 |
95 | if is_super(column_family) and column
96 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family, :super_column => column)
97 | multi_sub_columns_to_hash!(column_family, client.multiget_slice(keys, column_parent, predicate, consistency))
98 | else
99 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
100 | multi_columns_to_hash!(column_family, client.multiget_slice(keys, column_parent, predicate, consistency))
101 | end
102 | end
103 | end
104 |
105 | def _get_range(column_family, start_key, finish_key, key_count, columns, start, finish, count, consistency, reversed=false)
106 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
107 | predicate = if columns
108 | CassandraThrift::SlicePredicate.new(:column_names => columns)
109 | else
110 | CassandraThrift::SlicePredicate.new(:slice_range =>
111 | CassandraThrift::SliceRange.new(
112 | :start => start,
113 | :finish => finish,
114 | :count => count,
115 | :reversed => reversed))
116 | end
117 | range = CassandraThrift::KeyRange.new(:start_key => start_key, :end_key => finish_key, :count => key_count)
118 | client.get_range_slices(column_parent, predicate, range, consistency)
119 | end
120 |
121 | # TODO: Supercolumn support
122 | def _get_indexed_slices(column_family, index_clause, column, count, start, finish, reversed, consistency)
123 | column_parent = CassandraThrift::ColumnParent.new(:column_family => column_family)
124 | if column
125 | predicate = CassandraThrift::SlicePredicate.new(:column_names => [column])
126 | else
127 | predicate = CassandraThrift::SlicePredicate.new(:slice_range =>
128 | CassandraThrift::SliceRange.new(
129 | :reversed => reversed,
130 | :count => count,
131 | :start => start,
132 | :finish => finish))
133 | end
134 | client.get_indexed_slices(column_parent, index_clause, predicate, consistency)
135 | end
136 | end
137 | end
138 |
--------------------------------------------------------------------------------
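These private helpers wrap the raw Thrift calls; application code goes through the public client API instead. A minimal sketch, assuming a local node with the bundled Twitter schema (started via rake cassandra, as the test helper below suggests):

    require 'cassandra'

    twitter = Cassandra.new('Twitter', '127.0.0.1:9160')

    twitter.insert(:Statuses, 'row1', { 'body' => 'hello' })  # funnels into _mutate
    puts twitter.get(:Statuses, 'row1')['body']               # => "hello"
    puts twitter.count_columns(:Statuses, 'row1')             # => 1 (_count_columns)
    twitter.remove(:Statuses, 'row1')                         # _remove
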
/lib/cassandra/time.rb:
--------------------------------------------------------------------------------
1 |
2 | class Time
3 | def self.stamp
4 | Time.now.stamp
5 | end
6 |
7 | def stamp
8 | to_i * 1_000_000 + usec
9 | end
10 | end
11 |
12 |
--------------------------------------------------------------------------------
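Time.stamp, added above, is the microsecond-precision integer the library uses for column timestamps. A quick check of the arithmetic:

    require 'cassandra'        # loads this Time extension

    t = Time.at(10, 500)       # 10 seconds and 500 microseconds after the epoch
    puts t.stamp               # => 10000500, i.e. to_i * 1_000_000 + usec
    puts Time.stamp            # current time in the same microsecond encoding
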
/test/cassandra_client_test.rb:
--------------------------------------------------------------------------------
1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper')
2 |
3 | class CassandraClientTest < Test::Unit::TestCase
4 | include Cassandra::Constants
5 |
6 | def setup
7 | @twitter = Cassandra.new('Twitter', "127.0.0.1:9160", :retries => 2, :exception_classes => [])
8 | end
9 |
10 | def test_client_method_is_called
11 | assert_nil @twitter.instance_variable_get(:@client)
12 | @twitter.insert(:Statuses, key, {'1' => 'v', '2' => 'v', '3' => 'v'})
13 | assert_not_nil @twitter.instance_variable_get(:@client)
14 | end
15 |
16 | def key
17 | caller.first[/`(.*?)'/, 1]
18 | end
19 |
20 | end
--------------------------------------------------------------------------------
/test/cassandra_mock_test.rb:
--------------------------------------------------------------------------------
1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper')
2 | require File.expand_path(File.dirname(__FILE__) + '/cassandra_test')
3 | require 'cassandra/mock'
4 | require 'json'
5 |
6 | class CassandraMockTest < CassandraTest
7 | include Cassandra::Constants
8 |
9 | def setup
10 | @test_schema = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)), '..','conf', CASSANDRA_VERSION, 'schema.json')))
11 | @twitter = Cassandra::Mock.new('Twitter', @test_schema)
12 | @twitter.clear_keyspace!
13 |
14 | @blogs = Cassandra::Mock.new('Multiblog', @test_schema)
15 | @blogs.clear_keyspace!
16 |
17 | @blogs_long = Cassandra::Mock.new('MultiblogLong', @test_schema)
18 | @blogs_long.clear_keyspace!
19 |
20 | @type_conversions = Cassandra::Mock.new('TypeConversions', @test_schema)
21 | @type_conversions.clear_keyspace!
22 |
23 | @uuids = (0..6).map {|i| SimpleUUID::UUID.new(Time.at(2**(24+i))) }
24 | @longs = (0..6).map {|i| Long.new(Time.at(2**(24+i))) }
25 | @composites = [
26 | Cassandra::Composite.new([5].pack('N'), "zebra"),
27 | Cassandra::Composite.new([5].pack('N'), "aardvark"),
28 | Cassandra::Composite.new([1].pack('N'), "elephant"),
29 | Cassandra::Composite.new([10].pack('N'), "kangaroo"),
30 | ]
31 | @dynamic_composites = [
32 | Cassandra::DynamicComposite.new(['i', [5].pack('N')], ['UTF8Type', "zebra"]),
33 | Cassandra::DynamicComposite.new(['i', [5].pack('N')], ['UTF8Type', "aardvark"]),
34 | Cassandra::DynamicComposite.new(['IntegerType', [1].pack('N')], ['s', "elephant"]),
35 | Cassandra::DynamicComposite.new(['IntegerType', [10].pack('N')], ['s', "kangaroo"]),
36 | ]
37 | end
38 |
39 | def test_setup
40 | assert @twitter
41 | assert @blogs
42 | assert @blogs_long
43 | end
44 |
45 | def test_schema_for_keyspace
46 | data = @test_schema['Twitter']
47 | stuff = @twitter.send(:schema_for_keyspace, 'Twitter')
48 | data.keys.each do |k|
49 | assert_equal data[k], stuff[k], k
50 | end
51 | end
52 |
53 | def test_sorting_row_keys
54 | @twitter.insert(:Statuses, 'b', {:text => 'foo'})
55 | @twitter.insert(:Statuses, 'a', {:text => 'foo'})
56 | assert_equal ['a'], @twitter.get_range(:Statuses, :key_count => 1).keys
57 | end
58 |
59 | def test_get_range_reversed
60 | data = 3.times.map { |i| ["body-#{i.to_s}", "v"] }
61 | hash = Cassandra::OrderedHash[data]
62 | reversed_hash = Cassandra::OrderedHash[data.reverse]
63 |
64 | @twitter.insert(:Statuses, "all-keys", hash)
65 |
66 | columns = @twitter.get_range(:Statuses, :reversed => true)["all-keys"]
67 | columns.each do |column|
68 | assert_equal reversed_hash.shift, column
69 | end
70 | end
71 |
72 | def test_get_range_reversed_slice
73 | data = 4.times.map { |i| ["body-#{i.to_s}", "v"] }
74 | hash = Cassandra::OrderedHash[data]
75 | sliced_hash = Cassandra::OrderedHash[data.reverse[1..-1]]
76 |
77 | @twitter.insert(:Statuses, "all-keys", hash)
78 |
79 | columns = @twitter.get_range(
80 | :Statuses,
81 | :start => sliced_hash.keys.first,
82 | :reversed => true
83 | )["all-keys"]
84 |
85 | columns.each do |column|
86 | assert_equal sliced_hash.shift, column
87 | end
88 | end
89 |
90 | def test_get_range_count
91 | data = 3.times.map { |i| ["body-#{i.to_s}", "v"] }
92 | hash = Cassandra::OrderedHash[data]
93 |
94 | @twitter.insert(:Statuses, "all-keys", hash)
95 |
96 | columns = @twitter.get_range(:Statuses, :count => 2)["all-keys"]
97 | assert_equal 2, columns.count
98 | end
99 |
100 | def test_inserting_array_for_indices
101 | @twitter.insert(:TimelinishThings, 'a', ['1','2'])
102 | row = @twitter.get(:TimelinishThings, 'a')
103 | assert_equal({'1' => nil, '2' => nil}, row)
104 |
105 | assert_raises(ArgumentError) {
106 | @twitter.insert(:UserRelationships, 'a', ['u1','u2'])
107 | }
108 | end
109 |
110 | def test_column_timestamps
111 | base_time = Time.now
112 | @twitter.insert(:Statuses, "time-key", { "body" => "value" })
113 |
114 | results = @twitter.get(:Statuses, "time-key")
115 | assert(results.timestamps["body"] / 1000000 >= base_time.to_i)
116 | end
117 |
118 | def test_supercolumn_timestamps
119 | base_time = Time.now
120 | @twitter.insert(:StatusRelationships, "time-key", { "super" => { @uuids[1] => "value" }})
121 |
122 | results = @twitter.get(:StatusRelationships, "time-key")
123 | assert_nil(results.timestamps["super"])
124 |
125 | columns = results["super"]
126 | assert(columns.timestamps[@uuids[1]] / 1000000 >= base_time.to_i)
127 | end
128 | end
129 |
--------------------------------------------------------------------------------
/test/comparable_types_test.rb:
--------------------------------------------------------------------------------
1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper')
2 |
3 | class ComparableTypesTest < Test::Unit::TestCase
4 | include Cassandra::Constants
5 |
6 | def test_long_sort
7 | ary = []
8 | 10.times { ary << Long.new }
9 | assert_equal ary.sort, ary
10 | end
11 |
12 | def test_long_equality
13 | long = Long.new
14 | assert_equal long, Long.new(long)
15 | assert_equal long, Long.new(long.to_s)
16 | assert_equal long, Long.new(long.to_i)
17 | assert_equal long, Long.new(long.to_guid)
18 | end
19 |
20 | def test_long_error
21 | assert_raises(Cassandra::Comparable::TypeError) do
22 | Long.new("bogus")
23 | end
24 | end
25 |
26 | def test_types_behave_well
27 | assert !(Long.new() == false)
28 | end
29 |
30 | def test_casting_unknown_class
31 | assert_raises(Cassandra::Comparable::TypeError) do
32 | Cassandra::Long.new({})
33 | end
34 | end
35 |
36 | def test_long_inspect
37 | obj = Long.new("\000\000\000\000\000\000\000\000")
38 | if RUBY_VERSION < '1.9'
39 |       assert_equal "<Cassandra::Long##{obj.object_id} time: Thu Jan 01 00:00:00 UTC 1970, usecs: 0, jitter: 0, guid: 00000000-0000-0000>", obj.inspect
40 | else
41 |       assert_equal "<Cassandra::Long##{obj.object_id} time: 1970-01-01 00:00:00 UTC, usecs: 0, jitter: 0, guid: 00000000-0000-0000>", obj.inspect
42 | end
43 | end
44 |
45 | end
--------------------------------------------------------------------------------
/test/composite_type_test.rb:
--------------------------------------------------------------------------------
1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper')
2 |
3 | class CompositeTypesTest < Test::Unit::TestCase
4 | include Cassandra::Constants
5 |
6 | def setup
7 | @col_parts = [[363].pack('N'), 'extradites-mulling', SimpleUUID::UUID.new().bytes]
8 | @col = Cassandra::Composite.new(*@col_parts)
9 |
10 | @part0_length = 2 + 4 + 1 # size + int + end_term
11 | @part1_length = 2 + @col_parts[1].length + 1 # size + string_len + end_term
12 | @part2_length = 2 + @col_parts[2].length + 1 # size + uuid_bytes + end_term
13 |
14 | @types = ['IntegerType', 'UTF8Type', 'TimeUUIDType']
15 |     @type_aliases = ['i', 's', 't']
16 |
17 | @dycol = Cassandra::DynamicComposite.new(*@types.zip(@col_parts))
18 |     @dycol_alias = Cassandra::DynamicComposite.new(*@type_aliases.zip(@col_parts))
19 | end
20 |
21 | def test_creation_from_parts
22 | (0..2).each do |i|
23 | assert_equal(@col_parts[i], @col[i])
24 | assert_equal(@col_parts[i], @dycol[i])
25 | assert_equal(@col_parts[i], @dycol_alias[i])
26 | end
27 |
28 | col2 = Cassandra::Composite.new_from_parts(@col_parts)
29 | (0..2).each do |i|
30 | assert_equal(@col_parts[i], col2[i].to_s)
31 | end
32 | end
33 |
34 | def test_packing_and_unpacking
35 | assert_equal(@part0_length + @part1_length + @part2_length, @col.pack.length)
36 |
37 | col2 = Cassandra::Composite.new_from_packed(@col.pack)
38 | assert_equal(@col_parts[0], col2[0])
39 | assert_equal(@col_parts[1], col2[1])
40 | assert_equal(@col_parts[2], col2[2])
41 | assert_equal(@col, col2)
42 | # make sure we set this while we have the packed string handy
43 | assert_equal(@col.pack.hash, col2.instance_variable_get(:@hash))
44 |
45 | col2 = Cassandra::Composite.new(@col.pack)
46 | assert_equal(@col_parts[0], col2[0])
47 | assert_equal(@col_parts[1], col2[1])
48 | assert_equal(@col_parts[2], col2[2])
49 | assert_equal(@col, col2)
50 | end
51 |
52 | def test_packing_and_unpacking_dynamic_columns
53 | part0_length = @part0_length + 2 + @types[0].length
54 | part1_length = @part1_length + 2 + @types[1].length
55 | part2_length = @part2_length + 2 + @types[2].length
56 | assert_equal(part0_length + part1_length + part2_length, @dycol.pack.length)
57 |
58 | col2 = Cassandra::DynamicComposite.new_from_packed(@dycol.pack)
59 | assert_equal(@col_parts[0], col2[0])
60 | assert_equal(@col_parts[1], col2[1])
61 | assert_equal(@col_parts[2], col2[2])
62 | assert_equal(@dycol, col2)
63 | # make sure we set this while we have the packed string handy
64 | assert_equal(@dycol.pack.hash, col2.instance_variable_get(:@hash))
65 |
66 | col2 = Cassandra::DynamicComposite.new(@dycol.pack)
67 | assert_equal(@col_parts[0], col2[0])
68 | assert_equal(@col_parts[1], col2[1])
69 | assert_equal(@col_parts[2], col2[2])
70 | assert_equal(@dycol, col2)
71 | end
72 |
73 | def test_packing_and_unpacking_dynamic_columns_with_aliases
74 | part0_length = @part0_length + 2
75 | part1_length = @part1_length + 2
76 | part2_length = @part2_length + 2
77 | assert_equal(part0_length + part1_length + part2_length, @dycol_alias.pack.length)
78 |
79 | col2 = Cassandra::DynamicComposite.new_from_packed(@dycol_alias.pack)
80 | assert_equal(@col_parts[0], col2[0])
81 | assert_equal(@col_parts[1], col2[1])
82 | assert_equal(@col_parts[2], col2[2])
83 | assert_equal(@dycol_alias, col2)
84 |
85 | col2 = Cassandra::DynamicComposite.new(@dycol_alias.pack)
86 | assert_equal(@col_parts[0], col2[0])
87 | assert_equal(@col_parts[1], col2[1])
88 | assert_equal(@col_parts[2], col2[2])
89 | assert_equal(@dycol_alias, col2)
90 | end
91 | end
92 |
--------------------------------------------------------------------------------
/test/eventmachine_test.rb:
--------------------------------------------------------------------------------
1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper')
2 |
3 | if RUBY_VERSION < '1.9' || CASSANDRA_VERSION == '0.6'
4 | puts "Skipping EventMachine test"
5 | else
6 |
7 | require 'thrift_client/event_machine'
8 |
9 | class EventmachineTest < Test::Unit::TestCase
10 |
11 | def test_twitter
12 | @twitter = Cassandra.new('Twitter', "127.0.0.1:9160", :retries => 2, :exception_classes => [], :transport => Thrift::EventMachineTransport, :transport_wrapper => nil)
13 | @twitter.clear_keyspace!
14 | end
15 |
16 | private
17 |
18 | def em_test(name)
19 | EM.run do
20 | Fiber.new do
21 | begin
22 | send("raw_#{name}".to_sym)
23 | ensure
24 | EM.stop
25 | end
26 | end.resume
27 | end
28 | end
29 |
30 | def self.wrap_tests
31 | self.public_instance_methods.select { |m| m =~ /^test_/ }.each do |meth|
32 | alias_method :"raw_#{meth}", meth
33 | define_method(meth) do
34 | em_test(meth)
35 | end
36 | end
37 | end
38 |
39 | wrap_tests
40 |
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/test/ordered_hash_test.rb:
--------------------------------------------------------------------------------
1 | require File.expand_path(File.dirname(__FILE__) + '/test_helper')
2 |
3 | class OrderedHashTestInt < Test::Unit::TestCase
4 | def setup
5 | @keys = %w( blue green red pink orange )
6 | @values = %w( 000099 009900 aa0000 cc0066 cc6633 )
7 | @hash = Hash.new
8 | @ordered_hash = Cassandra::OrderedHash.new
9 |
10 | @keys.each_with_index do |key, index|
11 | @hash[key] = @values[index]
12 | @ordered_hash[key] = @values[index]
13 | end
14 | end
15 |
16 | def test_order
17 | assert_equal @keys, @ordered_hash.keys
18 | assert_equal @values, @ordered_hash.values
19 | end
20 |
21 | def test_access
22 | assert @hash.all? { |k, v| @ordered_hash[k] == v }
23 | end
24 |
25 | def test_assignment
26 | key, value = 'purple', '5422a8'
27 |
28 | @ordered_hash[key] = value
29 | assert_equal @keys.length + 1, @ordered_hash.length
30 | assert_equal key, @ordered_hash.keys.last
31 | assert_equal value, @ordered_hash.values.last
32 | assert_equal value, @ordered_hash[key]
33 | end
34 |
35 | def test_delete
36 | key, value = 'white', 'ffffff'
37 | bad_key = 'black'
38 |
39 | @ordered_hash[key] = value
40 | assert_equal @keys.length + 1, @ordered_hash.length
41 | assert_equal @ordered_hash.keys.length, @ordered_hash.length
42 |
43 | assert_equal value, @ordered_hash.delete(key)
44 | assert_equal @keys.length, @ordered_hash.length
45 | assert_equal @ordered_hash.keys.length, @ordered_hash.length
46 |
47 | assert_nil @ordered_hash.delete(bad_key)
48 | end
49 |
50 | def test_to_hash
51 | assert_same @ordered_hash, @ordered_hash.to_hash
52 | end
53 |
54 | def test_to_a
55 | assert_equal @keys.zip(@values), @ordered_hash.to_a
56 | end
57 |
58 | def test_has_key
59 | assert_equal true, @ordered_hash.has_key?('blue')
60 | assert_equal true, @ordered_hash.key?('blue')
61 | assert_equal true, @ordered_hash.include?('blue')
62 | assert_equal true, @ordered_hash.member?('blue')
63 |
64 | assert_equal false, @ordered_hash.has_key?('indigo')
65 | assert_equal false, @ordered_hash.key?('indigo')
66 | assert_equal false, @ordered_hash.include?('indigo')
67 | assert_equal false, @ordered_hash.member?('indigo')
68 | end
69 |
70 | def test_has_value
71 | assert_equal true, @ordered_hash.has_value?('000099')
72 | assert_equal true, @ordered_hash.value?('000099')
73 | assert_equal false, @ordered_hash.has_value?('ABCABC')
74 | assert_equal false, @ordered_hash.value?('ABCABC')
75 | end
76 |
77 | def test_each_key
78 | keys = []
79 | @ordered_hash.each_key { |k| keys << k }
80 | assert_equal @keys, keys
81 | end
82 |
83 | def test_each_value
84 | values = []
85 | @ordered_hash.each_value { |v| values << v }
86 | assert_equal @values, values
87 | end
88 |
89 | def test_each
90 | values = []
91 | @ordered_hash.each {|key, value| values << value}
92 | assert_equal @values, values
93 | end
94 |
95 | def test_each_with_index
96 | @ordered_hash.each_with_index { |pair, index| assert_equal [@keys[index], @values[index]], pair}
97 | end
98 |
99 | def test_each_pair
100 | values = []
101 | keys = []
102 | @ordered_hash.each_pair do |key, value|
103 | keys << key
104 | values << value
105 | end
106 | assert_equal @values, values
107 | assert_equal @keys, keys
108 | end
109 |
110 | def test_delete_if
111 | copy = @ordered_hash.dup
112 | copy.delete('pink')
113 | assert_equal copy, @ordered_hash.delete_if { |k, _| k == 'pink' }
114 | assert !@ordered_hash.keys.include?('pink')
115 | end
116 |
117 | def test_reject!
118 | (copy = @ordered_hash.dup).delete('pink')
119 | @ordered_hash.reject! { |k, _| k == 'pink' }
120 | assert_equal copy, @ordered_hash
121 | assert !@ordered_hash.keys.include?('pink')
122 | end
123 |
124 | def test_reject
125 | copy = @ordered_hash.dup
126 | new_ordered_hash = @ordered_hash.reject { |k, _| k == 'pink' }
127 | assert_equal copy, @ordered_hash
128 | assert !new_ordered_hash.keys.include?('pink')
129 | assert @ordered_hash.keys.include?('pink')
130 | end
131 |
132 | def test_clear
133 | @ordered_hash.clear
134 | assert_equal [], @ordered_hash.keys
135 | end
136 |
137 | def test_merge
138 | other_hash = Cassandra::OrderedHash.new
139 | other_hash['purple'] = '800080'
140 | other_hash['violet'] = 'ee82ee'
141 | merged = @ordered_hash.merge other_hash
142 | assert_equal merged.length, @ordered_hash.length + other_hash.length
143 | assert_equal @keys + ['purple', 'violet'], merged.keys
144 |
145 | @ordered_hash.merge! other_hash
146 | assert_equal @ordered_hash, merged
147 | assert_equal @ordered_hash.keys, merged.keys
148 | end
149 |
150 | def test_shift
151 | pair = @ordered_hash.shift
152 | assert_equal [@keys.first, @values.first], pair
153 | assert !@ordered_hash.keys.include?(pair.first)
154 | end
155 |
156 | def test_keys
157 | original = @ordered_hash.keys.dup
158 | @ordered_hash.keys.pop
159 | assert_equal original, @ordered_hash.keys
160 | end
161 |
162 | def test_inspect
163 | assert @ordered_hash.inspect.include?(@hash.inspect)
164 | end
165 |
166 | def test_alternate_initialization_with_splat
167 | alternate = Cassandra::OrderedHash[1,2,3,4]
168 | assert_kind_of Cassandra::OrderedHash, alternate
169 | assert_equal [1, 3], alternate.keys
170 | end
171 |
172 | def test_alternate_initialization_with_array
173 | alternate = Cassandra::OrderedHash[ [
174 | [1, 2],
175 | [3, 4],
176 | "bad key value pair",
177 | [ 'missing value' ]
178 | ]]
179 |
180 | assert_kind_of Cassandra::OrderedHash, alternate
181 | assert_equal [1, 3, 'missing value'], alternate.keys
182 | assert_equal [2, 4, nil ], alternate.values
183 | end
184 |
185 | def test_alternate_initialization_raises_exception_on_odd_length_args
186 | begin
187 | alternate = Cassandra::OrderedHash[1,2,3,4,5]
188 | flunk "Hash::[] should have raised an exception on initialization " +
189 | "with an odd number of parameters"
190 | rescue
191 | assert_equal "odd number of arguments for Hash", $!.message
192 | end
193 | end
194 |
195 | def test_replace_updates_keys
196 | @other_ordered_hash = Cassandra::OrderedHash[:black, '000000', :white, '000000']
197 | original = @ordered_hash.replace(@other_ordered_hash)
198 | assert_same original, @ordered_hash
199 | assert_equal @other_ordered_hash.keys, @ordered_hash.keys
200 | end
201 |
202 | def test_reverse
203 | assert_equal @keys.reverse, @ordered_hash.reverse.keys
204 | assert_equal @values.reverse, @ordered_hash.reverse.values
205 | end
206 | end
207 |
208 | class OrderedHashTest < Test::Unit::TestCase
209 | def setup
210 | @keys = %w( blue green red pink orange )
211 | @values = %w( 000099 009900 aa0000 cc0066 cc6633 )
212 | @timestamps = %w( 12 34 56 78 90 )
213 | @hash = Hash.new
214 | @timestamps_hash = Hash.new
215 | @ordered_hash = Cassandra::OrderedHash.new
216 |
217 | @keys.each_with_index do |key, index|
218 | @hash[key] = @values[index]
219 | @timestamps_hash[key] = @timestamps[index]
220 | @ordered_hash.[]=(key, @values[index], @timestamps[index])
221 | end
222 | end
223 |
224 | def test_order
225 | assert_equal @keys, @ordered_hash.keys
226 | assert_equal @values, @ordered_hash.values
227 | assert_equal @timestamps_hash, @ordered_hash.timestamps
228 | end
229 |
230 | def test_access
231 | assert @hash.all? { |k, v| @ordered_hash[k] == v }
232 | assert @timestamps_hash.all? { |k, v| @ordered_hash.timestamps[k] == v }
233 | end
234 |
235 | def test_assignment
236 | key, value, timestamp = 'purple', '5422a8', '1234'
237 |
238 | @ordered_hash.[]=(key, value, timestamp)
239 |
240 | assert_equal @keys.length + 1, @ordered_hash.length
241 | assert_equal key, @ordered_hash.keys.last
242 | assert_equal value, @ordered_hash.values.last
243 | assert_equal value, @ordered_hash[key]
244 |
245 | assert_equal @keys.length + 1, @ordered_hash.timestamps.length
246 | assert_equal key, @ordered_hash.timestamps.keys.last
247 | assert_equal timestamp, @ordered_hash.timestamps.values.last
248 | assert_equal timestamp, @ordered_hash.timestamps[key]
249 | end
250 |
251 | def test_delete
252 | key, value, timestamp = 'white', 'ffffff', '999'
253 | bad_key = 'black'
254 |
255 | @ordered_hash.[]=(key, value, timestamp)
256 | assert_equal @keys.length + 1, @ordered_hash.length
257 | assert_equal @ordered_hash.keys.length, @ordered_hash.length
258 |
259 | assert_equal value, @ordered_hash.delete(key)
260 | assert_equal @keys.length, @ordered_hash.length
261 | assert_equal @ordered_hash.keys.length, @ordered_hash.length
262 |
263 | assert_nil @ordered_hash.delete(bad_key)
264 |
265 | @ordered_hash.[]=(key, value, timestamp)
266 | assert_equal @keys.length + 1, @ordered_hash.timestamps.length
267 | assert_equal @ordered_hash.keys.length, @ordered_hash.timestamps.length
268 |
269 | assert_equal value, @ordered_hash.delete(key)
270 | assert_equal @keys.length, @ordered_hash.timestamps.length
271 | assert_equal @ordered_hash.keys.length, @ordered_hash.timestamps.length
272 |
273 | assert_nil @ordered_hash.delete(bad_key)
274 | end
275 |
276 | def test_to_a
277 | assert_equal @keys.zip(@timestamps).sort, @ordered_hash.timestamps.sort.to_a
278 | end
279 |
280 | def test_has_key
281 | assert_equal true, @ordered_hash.timestamps.has_key?('blue')
282 | assert_equal true, @ordered_hash.timestamps.key?('blue')
283 | assert_equal true, @ordered_hash.timestamps.include?('blue')
284 | assert_equal true, @ordered_hash.timestamps.member?('blue')
285 |
286 | assert_equal false, @ordered_hash.timestamps.has_key?('indigo')
287 | assert_equal false, @ordered_hash.timestamps.key?('indigo')
288 | assert_equal false, @ordered_hash.timestamps.include?('indigo')
289 | assert_equal false, @ordered_hash.timestamps.member?('indigo')
290 | end
291 |
292 | def test_has_value
293 | assert_equal true, @ordered_hash.timestamps.has_value?('12')
294 | assert_equal true, @ordered_hash.timestamps.value?('12')
295 | assert_equal false, @ordered_hash.timestamps.has_value?('99')
296 | assert_equal false, @ordered_hash.timestamps.value?('99')
297 | end
298 |
299 | def test_each_key
300 | keys = []
301 | @ordered_hash.timestamps.each_key { |k| keys << k }
302 | assert_equal @keys.sort, keys.sort
303 | end
304 |
305 | def test_each_value
306 | values = []
307 | @ordered_hash.timestamps.each_value { |v| values << v }
308 | assert_equal @timestamps.sort, values.sort
309 | end
310 |
311 | def test_each
312 | values = []
313 | @ordered_hash.timestamps.each {|key, value| values << value}
314 | assert_equal @timestamps.sort, values.sort
315 | end
316 |
317 | def test_delete_if
318 | copy = @ordered_hash.dup
319 | copy.delete('pink')
320 | assert_equal copy, @ordered_hash.delete_if { |k, _| k == 'pink' }
321 | assert !@ordered_hash.timestamps.keys.include?('pink')
322 | end
323 |
324 | def test_reject!
325 | (copy = @ordered_hash.dup).delete('pink')
326 | @ordered_hash.reject! { |k, _| k == 'pink' }
327 | assert_equal copy, @ordered_hash
328 | assert !@ordered_hash.keys.include?('pink')
329 | assert !@ordered_hash.timestamps.keys.include?('pink')
330 | end
331 |
332 | def test_reject
333 | copy = @ordered_hash.dup
334 | new_ordered_hash = @ordered_hash.reject { |k, _| k == 'pink' }
335 | assert_equal copy, @ordered_hash
336 | assert !new_ordered_hash.timestamps.keys.include?('pink')
337 | assert @ordered_hash.timestamps.keys.include?('pink')
338 | end
339 |
340 | def test_clear
341 | @ordered_hash.clear
342 | assert_equal [], @ordered_hash.timestamps.keys
343 | end
344 |
345 | def test_merge
346 | other_hash = Cassandra::OrderedHash.new
347 | other_hash['purple'] = '800080'
348 | other_hash['violet'] = 'ee82ee'
349 | merged = @ordered_hash.merge other_hash
350 | assert_equal merged.timestamps.length, @ordered_hash.timestamps.length + other_hash.timestamps.length
351 | assert_equal (@keys + ['purple', 'violet']).sort, merged.timestamps.keys.sort
352 |
353 | @ordered_hash.merge! other_hash
354 | assert_equal @ordered_hash.timestamps, merged.timestamps
355 | assert_equal @ordered_hash.timestamps.keys.sort, merged.timestamps.keys.sort
356 | end
357 |
358 | def test_shift
359 | pair = @ordered_hash.shift
360 | assert_equal [@keys.first, @values.first], pair
361 | assert !@ordered_hash.timestamps.keys.include?(pair.first)
362 | end
363 |
364 | def test_keys
365 | original = @ordered_hash.keys.dup
366 | @ordered_hash.keys.pop
367 | assert_equal original.sort, @ordered_hash.timestamps.keys.sort
368 | end
369 |
370 | def test_inspect
371 | assert @ordered_hash.timestamps.sort.inspect.include?(@timestamps_hash.sort.inspect)
372 | end
373 |
374 | def test_alternate_initialization_with_splat
375 | alternate = Cassandra::OrderedHash[1,2,3,4]
376 | assert_kind_of Cassandra::OrderedHash, alternate
377 | assert_equal [1, 3], alternate.timestamps.keys
378 | end
379 |
380 | def test_replace_updates_keys
381 | @other_ordered_hash = Cassandra::OrderedHash[:black, '000000', :white, '000000']
382 | original = @ordered_hash.replace(@other_ordered_hash)
383 | assert_equal original.timestamps, @ordered_hash.timestamps
384 | assert_equal @other_ordered_hash.timestamps.keys, @ordered_hash.timestamps.keys
385 | end
386 | end
387 |
--------------------------------------------------------------------------------
/test/test_helper.rb:
--------------------------------------------------------------------------------
1 | CASSANDRA_VERSION = ENV['CASSANDRA_VERSION'] || '0.8' unless defined?(CASSANDRA_VERSION)
2 |
3 | require 'test/unit'
4 | require "#{File.expand_path(File.dirname(__FILE__))}/../lib/cassandra/#{CASSANDRA_VERSION}"
5 | begin; require 'ruby-debug'; rescue LoadError; end
6 |
7 | begin
8 | @test_client = Cassandra.new('Twitter', 'localhost:9160', :thrift_client_options => {
9 | :retries => 3,
10 | :timeout => 5,
11 | :connect_timeout => 1
12 | })
13 | rescue Thrift::TransportException => e
14 | #FIXME Make server automatically start if not running
15 | if e.message =~ /Could not connect/
16 | puts "*** Please start the Cassandra server by running 'rake cassandra'. ***"
17 | exit 1
18 | end
19 | end
20 |
--------------------------------------------------------------------------------
/vendor/0.6/gen-rb/cassandra_constants.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Autogenerated by Thrift
3 | #
4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
5 | #
6 |
7 | require 'cassandra_types'
8 |
9 | module CassandraThrift
10 | VERSION = %q"2.1.0"
11 |
12 | end
13 |
--------------------------------------------------------------------------------
/vendor/0.7/gen-rb/cassandra_constants.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Autogenerated by Thrift
3 | #
4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
5 | #
6 |
7 | require 'cassandra_types'
8 |
9 | module CassandraThrift
10 | VERSION = %q"19.4.0"
11 |
12 | end
13 |
--------------------------------------------------------------------------------
/vendor/0.8/gen-rb/cassandra_constants.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Autogenerated by Thrift
3 | #
4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
5 | #
6 |
7 | require 'cassandra_types'
8 |
9 | module CassandraThrift
10 | VERSION = %q"19.16.0"
11 |
12 | end
13 |
--------------------------------------------------------------------------------
/vendor/1.0/gen-rb/cassandra_constants.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Autogenerated by Thrift
3 | #
4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
5 | #
6 |
7 | require 'cassandra_types'
8 |
9 | module CassandraThrift
10 | VERSION = %q"19.17.0"
11 |
12 | end
13 |
--------------------------------------------------------------------------------
/vendor/1.1/gen-rb/cassandra_constants.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Autogenerated by Thrift Compiler (0.8.0)
3 | #
4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
5 | #
6 |
7 | require 'cassandra_types'
8 |
9 | module CassandraThrift
10 | VERSION = %q"19.32.0"
11 |
12 | end
13 |
--------------------------------------------------------------------------------
/vendor/1.2/gen-rb/cassandra_constants.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Autogenerated by Thrift Compiler (0.9.0)
3 | #
4 | # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
5 | #
6 |
7 | require 'thrift'
8 | require 'cassandra_types'
9 |
10 | module CassandraThrift
11 | VERSION = %q"19.35.0"
12 |
13 | end
14 |
--------------------------------------------------------------------------------