├── .gitignore
├── LICENSE
├── README.md
├── Rakefile
├── bin
│   └── sbt-launch-0.7.1.jar
├── config
│   ├── cassandra.in.sh
│   ├── log4j.properties
│   └── storage-conf.xml
├── lib
│   ├── apache-cassandra-0.6.0-rc1.jar
│   ├── libthrift-r917130.jar
│   └── specs-1.6.2.jar
├── project
│   ├── build.properties
│   └── build
│       └── Scalandra.scala
└── src
    ├── main
    │   └── scala
    │       └── com
    │           └── nodeta
    │               └── scalandra
    │                   ├── Client.scala
    │                   ├── Connection.scala
    │                   ├── ConnectionProvider.scala
    │                   ├── ConsistencyLevels.scala
    │                   ├── Order.scala
    │                   ├── Path.scala
    │                   ├── Range.scala
    │                   ├── Serialization.scala
    │                   ├── SlicePredicate.scala
    │                   ├── client
    │                   │   ├── Base.scala
    │                   │   ├── Read.scala
    │                   │   └── Write.scala
    │                   ├── map
    │                   │   ├── Base.scala
    │                   │   ├── CassandraMap.scala
    │                   │   ├── ColumnFamily.scala
    │                   │   ├── Keyspace.scala
    │                   │   └── Record.scala
    │                   ├── pool
    │                   │   ├── Factory.scala
    │                   │   ├── Pool.scala
    │                   │   └── PoolWrapper.scala
    │                   └── serializer
    │                       ├── LongSerializer.scala
    │                       ├── Serializer.scala
    │                       ├── StringSerializer.scala
    │                       └── UUIDSerializer.scala
    └── test
        └── scala
            └── com
                └── nodeta
                    └── scalandra
                        ├── ClientTest.scala
                        ├── MappingTest.scala
                        ├── PathTest.scala
                        └── PoolTest.scala
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | target/*
3 | reports/*
4 | dist/*
5 | build/*
6 | lib_managed/*
7 | project/boot
8 | project/build/target
9 | *~
10 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2009 Nodeta Oy
2 |
3 | Permission is hereby granted, free of charge, to any person
4 | obtaining a copy of this software and associated documentation
5 | files (the "Software"), to deal in the Software without
6 | restriction, including without limitation the rights to use,
7 | copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | copies of the Software, and to permit persons to whom the
9 | Software is furnished to do so, subject to the following
10 | conditions:
11 |
12 | The above copyright notice and this permission notice shall be
13 | included in all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 | OTHER DEALINGS IN THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Scalandra
2 | =========
3 |
4 | Scalandra is a Scala wrapper for Cassandra's Thrift API. We currently target Cassandra 0.6 (the repository bundles apache-cassandra-0.6.0-rc1).
5 |
6 | Data in Cassandra is essentially a huge multi-dimensional map. Scalandra aims to provide a map-like interface to Cassandra with all the bells and whistles supported by the Cassandra API.
7 |
8 | Scaladoc is located at http://nodeta.github.com/scalandra/.
9 |
10 | Features
11 | --------
12 |
13 | * works with Cassandra 0.6
14 | * treat Cassandra as a huge Scala Map
15 | * connection pool for efficient connectivity
16 | * (de)serialization API for easy manipulation
17 |
18 | Example
19 | -------
20 |
21 | ### Cassandra access and manipulation
22 |
23 |
24 | import com.nodeta.scalandra._
25 | import com.nodeta.scalandra.serializer.StringSerializer
26 |
27 | val serialization = new Serialization(
28 | StringSerializer,
29 | StringSerializer,
30 | StringSerializer
31 | )
32 | val cassandra = new Client(
33 | Connection("127.0.0.1", 9162),
34 | "Keyspace1",
35 | serialization,
36 | ConsistencyLevels.one
37 | )
38 |
39 | cassandra.ColumnFamily("Standard1")("row")("column1") = "value"
40 | cassandra.ColumnFamily("Standard1")("row")("column2") = "value"
41 | cassandra.ColumnFamily("Standard1")("row")("column3") = "value"
42 | // or just cassandra.ColumnFamily("Standard1")("row") = Map("column1" -> "value", ...)
43 |
44 | cassandra.ColumnFamily("Standard1")("row")("column1")
45 | // => "value"
46 |
47 | val range = Range(Some("column2"), None, Ascending, 100)
48 | cassandra.ColumnFamily("Standard1")("row").slice(range)
49 | // => Map("column2" -> "value", "column3" -> "value")
50 |
51 | cassandra.ColumnFamily("Standard1")("row").slice(List("column1", "column2"))
52 | // => Map("column1" -> "value", "column2" -> "value")
53 |
54 |
55 | ### Connection Pool
56 |
57 | For more complicated applications, connection pooling is usually necessary. Scalandra provides a simple and type-safe connection pool based on Apache Commons Pool.
58 |
59 |
60 | import com.nodeta.scalandra.ConnectionProvider
61 | import com.nodeta.scalandra.pool.StackPool
62 | val pool = StackPool(ConnectionProvider("127.0.0.1", 9160))
63 | pool { connection =>
64 | val client = new Client(
65 | connection,
66 | "Keyspace1",
67 | serialization,
68 | ConsistencyLevels.default
69 | )
70 | // do something
71 | }
72 |
73 |
74 | Running tests
75 | -------------
76 |
77 | Cassandra tests can be run using `rake test`, which sets up and runs a suitable Cassandra instance for testing purposes.
78 |
79 | Future development
80 | ------------------
81 |
82 | * API for batch mutations
83 | * Scala 2.8 support
84 | * Add support for multiple hosts in connection pool
85 |
86 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | require 'open-uri'
2 | require 'uri'
3 |
4 | task :default => [:test]
5 |
6 | def cassandra_running?
7 | File.exists?(Dir.pwd + "/cassandra/cassandra.pid")
8 | end
9 |
10 |
11 | namespace :cassandra do
12 | version = "0.6.0"
13 | subversion = "-rc1"
14 | url = "http://www.nic.funet.fi/pub/mirrors/apache.org/cassandra/#{version}/apache-cassandra-#{version}#{subversion}-bin.tar.gz"
15 |
16 | desc "Setup Cassandra"
17 | task :setup do
18 | raise "Cassandra is already installed" if File.exists?(Dir.pwd + "/cassandra")
19 | puts url
20 | sh "tar -zxf #{URI.parse(url).open.path}"
21 | sh "mv apache-cassandra-#{version}#{subversion} cassandra"
22 | sh "mv cassandra/conf cassandra/default_conf"
23 | sh "ln -nfs #{Dir.pwd}/config cassandra/conf"
24 | end
25 |
26 | desc "Cleanup cassandra files"
27 | task :cleanup do
28 | FileUtils.rm_r(Dir.pwd + '/data') if File.exists?(Dir.pwd + "/data")
29 | FileUtils.rm_r(Dir.pwd + '/log') if File.exists?(Dir.pwd + "/log")
30 | end
31 |
32 | desc "Start Cassandra"
33 | task :start do
34 | raise "Cassandra already running" if cassandra_running?
35 | Rake::Task["cassandra:setup"].execute unless File.exists?(Dir.pwd + "/cassandra")
36 | ENV["CASSANDRA_INCLUDE"] = "./config/cassandra.in.sh"
37 | sh "./cassandra/bin/cassandra -p #{Dir.pwd + "/cassandra/cassandra.pid"}"
38 | end
39 |
40 | desc "Stop Cassandra"
41 | task :stop do
42 | raise "Cassandra not running" unless cassandra_running?
43 | sh "kill #{File.open("cassandra/cassandra.pid").read}"
44 | end
45 | end
46 |
47 | desc "Invoke SBT"
48 | task :sbt, :command do |task, command|
49 | c = command.class <= String ? command : command["command"]
50 | sh "java -Xmx512M -jar bin/sbt-launch-0.7.1.jar #{c}"
51 | end
52 |
53 | desc "Compile Scalandra"
54 | task :compile do
55 | Rake::Task["sbt"].execute("compile")
56 | end
57 |
58 | desc "Fetch dependencies"
59 | task :dependencies do
60 | Rake::Task["sbt"].execute("update")
61 | end
62 |
63 | desc "Test scalandra"
64 | task :test => :dependencies do
65 | begin
66 | Rake::Task["cassandra:stop"].execute if cassandra_running?
67 | Rake::Task["cassandra:cleanup"].execute
68 | Rake::Task["cassandra:start"].execute
69 | Rake::Task["sbt"].execute("test")
70 | ensure
71 | Rake::Task["cassandra:stop"].execute
72 | end
73 | end
--------------------------------------------------------------------------------
/bin/sbt-launch-0.7.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nodeta/scalandra/3d9df2f158243fab2da66e7b9f59c73b6d4f9b05/bin/sbt-launch-0.7.1.jar
--------------------------------------------------------------------------------
/config/cassandra.in.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 |
18 | cassandra_home=`dirname $0`/..
19 |
20 | # The directory where Cassandra's configs live (required)
21 | CASSANDRA_CONF=$cassandra_home/conf
22 |
23 | # This can be the path to a jar file, or a directory containing the
24 | # compiled classes. NOTE: This isn't needed by the startup script,
25 | # it's just used here in constructing the classpath.
26 | cassandra_bin=$cassandra_home/build/classes
27 | #cassandra_bin=$cassandra_home/build/cassandra.jar
28 |
29 | # JAVA_HOME can optionally be set here
30 | #JAVA_HOME=/usr/local/jdk6
31 |
32 | # The java classpath (required)
33 | CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
34 |
35 | for jar in $cassandra_home/lib/*.jar; do
36 | CLASSPATH=$CLASSPATH:$jar
37 | done
38 |
39 | # Arguments to pass to the JVM
40 | JVM_OPTS=" \
41 | -ea \
42 | -Xms128M \
43 | -Xmx1G \
44 | -XX:SurvivorRatio=8 \
45 | -XX:TargetSurvivorRatio=90 \
46 | -XX:+AggressiveOpts \
47 | -XX:+UseParNewGC \
48 | -XX:+UseConcMarkSweepGC \
49 | -XX:+CMSParallelRemarkEnabled \
50 | -XX:+HeapDumpOnOutOfMemoryError \
51 | -XX:MaxTenuringThreshold=0"
--------------------------------------------------------------------------------
/config/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # for production, you should probably set the root to INFO
18 | # and the pattern to %c instead of %l. (%l is slower.)
19 |
20 | # output messages into a rolling log file as well as stdout
21 | log4j.rootLogger=DEBUG,R
22 |
23 | # rolling log file
24 | log4j.appender.R=org.apache.log4j.RollingFileAppender
25 | log4j.appender.R.maxFileSize=20MB
26 | log4j.appender.R.maxBackupIndex=50
27 | log4j.appender.R.layout=org.apache.log4j.PatternLayout
28 | log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
29 | # Edit the next line to point to your logs directory
30 | log4j.appender.R.File=./log/system.log
31 |
32 | # Application logging options
33 | #log4j.logger.com.facebook=DEBUG
34 | #log4j.logger.com.facebook.infrastructure.gms=DEBUG
35 | #log4j.logger.com.facebook.infrastructure.db=DEBUG
36 |
--------------------------------------------------------------------------------
/config/storage-conf.xml:
--------------------------------------------------------------------------------
1 | <Storage>
2 |   <ClusterName>Test Cluster</ClusterName>
3 |   <AutoBootstrap>false</AutoBootstrap>
4 |
5 |   <!-- Keyspaces: keyspace and column family definitions omitted -->
6 |
7 |   <ReplicaPlacementStrategy>org.apache.cassandra.locator.RackUnawareStrategy</ReplicaPlacementStrategy>
8 |   <ReplicationFactor>1</ReplicationFactor>
9 |   <EndPointSnitch>org.apache.cassandra.locator.EndPointSnitch</EndPointSnitch>
10 |   <Authenticator>org.apache.cassandra.auth.AllowAllAuthenticator</Authenticator>
11 |   <Partitioner>org.apache.cassandra.dht.OrderPreservingPartitioner</Partitioner>
12 |
13 |   <CommitLogDirectory>./data/commitlog</CommitLogDirectory>
14 |   <DataFileDirectories>
15 |     <DataFileDirectory>./data/data</DataFileDirectory>
16 |   </DataFileDirectories>
17 |
18 |   <Seeds>
19 |     <Seed>127.0.0.1</Seed>
20 |   </Seeds>
21 |
22 |   <RpcTimeoutInMillis>10000</RpcTimeoutInMillis>
23 |   <CommitLogRotationThresholdInMB>128</CommitLogRotationThresholdInMB>
24 |
25 |   <ListenAddress>localhost</ListenAddress>
26 |   <StoragePort>7003</StoragePort>
27 |   <ControlPort>7004</ControlPort>
28 |   <ThriftAddress>localhost</ThriftAddress>
29 |   <ThriftPort>9162</ThriftPort>
30 |   <ThriftFramedTransport>false</ThriftFramedTransport>
31 |
32 |   <DiskAccessMode>auto</DiskAccessMode>
33 |   <RowWarningThresholdInMB>512</RowWarningThresholdInMB>
34 |   <SlicedBufferSizeInKB>64</SlicedBufferSizeInKB>
35 |   <FlushDataBufferSizeInMB>32</FlushDataBufferSizeInMB>
36 |   <FlushIndexBufferSizeInMB>8</FlushIndexBufferSizeInMB>
37 |   <ColumnIndexSizeInKB>64</ColumnIndexSizeInKB>
38 |   <MemtableThroughputInMB>64</MemtableThroughputInMB>
39 |   <BinaryMemtableThroughputInMB>256</BinaryMemtableThroughputInMB>
40 |   <MemtableOperationsInMillions>0.3</MemtableOperationsInMillions>
41 |   <MemtableFlushAfterMinutes>60</MemtableFlushAfterMinutes>
42 |   <ConcurrentReads>8</ConcurrentReads>
43 |   <ConcurrentWrites>32</ConcurrentWrites>
44 |   <CommitLogSync>periodic</CommitLogSync>
45 |   <CommitLogSyncPeriodInMS>10000</CommitLogSyncPeriodInMS>
46 |   <GCGraceSeconds>864000</GCGraceSeconds>
47 | </Storage>
--------------------------------------------------------------------------------
/lib/apache-cassandra-0.6.0-rc1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nodeta/scalandra/3d9df2f158243fab2da66e7b9f59c73b6d4f9b05/lib/apache-cassandra-0.6.0-rc1.jar
--------------------------------------------------------------------------------
/lib/libthrift-r917130.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nodeta/scalandra/3d9df2f158243fab2da66e7b9f59c73b6d4f9b05/lib/libthrift-r917130.jar
--------------------------------------------------------------------------------
/lib/specs-1.6.2.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nodeta/scalandra/3d9df2f158243fab2da66e7b9f59c73b6d4f9b05/lib/specs-1.6.2.jar
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | #
2 | #Tue Feb 23 23:17:46 EET 2010
3 | project.name=scalandra
4 | project.organization=com.nodeta
5 | scala.version=2.7.7
6 | project.version=0.2.0
7 | sbt.version=0.7.1
8 | def.scala.version=2.7.7
9 | build.scala.versions=2.7.7
10 | project.initialize=false
11 |
--------------------------------------------------------------------------------
/project/build/Scalandra.scala:
--------------------------------------------------------------------------------
1 | import sbt._
2 |
3 | class ScalandraProject(info: ProjectInfo) extends DefaultProject(info) {
4 | val ibiblioRepo = "iBiblio Maven 2 Repository" at "http://www.ibiblio.org/maven2"
5 | val commonsPool = "commons-pool" % "commons-pool" % "1.5.4"
6 | val slf4j = "org.slf4j" % "slf4j-simple" % "1.5.11"
7 | }
8 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/Client.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | import serializer.{Serializer, NonSerializer}
4 | import map.{ColumnFamily => Fam, StandardColumnFamily => CF, SuperColumnFamily => SCF}
5 |
6 | import org.apache.cassandra.{thrift => cassandra}
7 | import java.lang.IllegalArgumentException
8 |
9 | /**
10 | * This class is a lightweight wrapper for the Thrift client. It supports three levels of
11 | * serialization.
12 | *
13 | * @author Ville Lautanala
14 | * @param connection An open connection to cassandra
15 | * @param keyspace Keyspace in which all actions are performed
16 | * @param serializer Serialization parameters
17 | * @param consistency ConsistencyLevels to use
18 | *
19 | * @see com.nodeta.scalandra.client.Read
20 | * @see com.nodeta.scalandra.client.Write
21 | */
22 | class Client[A, B, C](
23 | val connection : Connection,
24 | val keyspace : String,
25 | val serializer : Serialization[A, B, C],
26 | val consistency : ConsistencyLevels
27 | ) extends client.Base[A, B, C] with client.Read[A, B, C] with client.Write[A, B, C] with map.Keyspace[A, B, C] {
28 | def this(c : Connection, keyspace : String, serialization : Serialization[A, B, C]) = {
29 | this(c, keyspace, serialization, ConsistencyLevels())
30 | }
31 | protected val client = this
32 | protected val cassandra = connection.client
33 |
34 | case class Path(columnFamily : String) extends scalandra.Path[A, B] {
35 | protected val serializer = client.serializer
36 | }
37 |
38 | case class ColumnParent(columnFamily : String, superColumn : Option[A]) extends scalandra.ColumnParent[A, B] {
39 | protected val serializer = client.serializer
40 | }
41 |
42 | case class ColumnPath(columnFamily : String, superColumn : Option[A], column : B) extends scalandra.ColumnPath[A, B] {
43 | protected val serializer = client.serializer
44 | }
45 | }
46 |
47 | object Client {
48 | def apply(connection : Connection, keyspace : String) : Client[Array[Byte], Array[Byte], Array[Byte]] = {
49 | new Client(connection, keyspace, Serialization(NonSerializer, NonSerializer, NonSerializer), ConsistencyLevels())
50 | }
51 |
52 | def apply[A, B, C](connection : Connection, keyspace : String, serialization : Serialization[A, B, C]) : Client[A, B, C] = {
53 | new Client(connection, keyspace, serialization, ConsistencyLevels())
54 | }
55 |
56 | def apply[A, B, C](connection : Connection, keyspace : String, serialization : Serialization[A, B, C], consistency : ConsistencyLevels) : Client[A, B, C] = {
57 | new Client(connection, keyspace, serialization, consistency)
58 | }
59 |
60 | def apply[T](connection : Connection, keyspace : String, serializer : Serializer[T]) : Client[T, T, T] = {
61 | new Client(connection, keyspace, Serialization(serializer, serializer, serializer), ConsistencyLevels())
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
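
A hedged usage sketch of the companion overloads above (assumes a local node on port 9162 with the test cluster's Keyspace1/Standard1 schema):

    import com.nodeta.scalandra._
    import com.nodeta.scalandra.serializer.StringSerializer

    // the single-serializer overload yields Client[String, String, String]
    // with ConsistencyLevels.default
    val client = Client(Connection("127.0.0.1", 9162), "Keyspace1", StringSerializer)
    client("row", client.ColumnPath("Standard1", None, "column")) = "value"
    client.get("row", client.ColumnPath("Standard1", None, "column")) // => Some("value")
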
/src/main/scala/com/nodeta/scalandra/Connection.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | import org.apache.thrift.protocol.TBinaryProtocol
4 | import org.apache.thrift.transport.TSocket
5 | import org.apache.cassandra.thrift.Cassandra
6 |
7 | import java.io.{Closeable, Flushable}
8 |
9 |
10 | /**
11 | * Wrapper for a Cassandra socket connection. Automatically opens a connection
12 | * when an object is instantiated.
13 | *
14 | * @author Ville Lautanala
15 | * @param host Hostname or IP address of Cassandra server
16 | * @param port Port to connect to
17 | */
18 | class Connection(host : String, port : Int, timeout : Int) extends Closeable with Flushable {
19 | def this() = this("127.0.0.1", 9160, 0)
20 | def this(host : String) = this(host, 9160, 0)
21 | def this(port : Int) = this("127.0.0.1", port, 0)
22 | def this(host : String, port : Int) = this(host, port, 0)
23 |
24 | private val socket = new TSocket(host, port, timeout)
25 |
26 | /**
27 | * Unwrapped cassandra client
28 | */
29 | val client = new Cassandra.Client(new TBinaryProtocol(socket))
30 | socket.open()
31 |
32 | /**
33 | * (Re-)Open socket to cassandra if connection is not already opened
34 | */
35 | def open() {
36 | if (!isOpen) socket.open()
37 | }
38 |
39 | /**
40 | * Close connection to cassandra
41 | */
42 | def close() {
43 | socket.close()
44 | }
45 |
46 | /**
47 | * Check connection status
48 | */
49 | def isOpen() : Boolean = {
50 | socket.isOpen()
51 | }
52 |
53 | def flush() {
54 | socket.flush()
55 | }
56 | }
57 |
58 | object Connection {
59 | def apply() : Connection = new Connection()
60 | def apply(host : String) : Connection = new Connection(host)
61 | def apply(port : Int) : Connection = new Connection(port)
62 | def apply(host : String, port : Int) : Connection = new Connection(host, port)
63 | def apply(host : String, port : Int, timeout : Int) : Connection = new Connection(host, port, timeout)
64 | }
65 |
--------------------------------------------------------------------------------
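
A minimal lifecycle sketch for the class above; note that the constructor opens the socket immediately:

    val connection = Connection("127.0.0.1", 9160, 5000) // 5000 ms socket timeout
    try {
      // connection.client exposes the raw Cassandra.Client for direct Thrift calls
      connection.isOpen // => true right after construction
    } finally {
      connection.close()
    }
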
/src/main/scala/com/nodeta/scalandra/ConnectionProvider.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | /**
4 | * Factory for Pooled Connection objects.
5 | *
6 | * @author Ville Lautanala
7 | */
8 | case class ConnectionProvider(host : String, port : Int) extends pool.Factory[Connection] {
9 | def build() = {
10 | new Connection(host, port)
11 | }
12 |
13 | def destroy(c : Connection) = c.close
14 | def validate(c : Connection) = c.isOpen
15 | def activate(c : Connection) = { if (!c.isOpen) c.open }
16 | def passivate(c : Connection) = c.flush
17 | }
18 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/ConsistencyLevels.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | import org.apache.cassandra.thrift.ConsistencyLevel
4 | import org.apache.cassandra.thrift.ConsistencyLevel._
5 |
6 | case class ConsistencyLevels(read : ConsistencyLevel, write : ConsistencyLevel) {}
7 |
8 | object ConsistencyLevels extends (() => ConsistencyLevels) {
9 | lazy val default = { ConsistencyLevels(ONE, ZERO) }
10 | lazy val one = { ConsistencyLevels(ONE, ONE) }
11 | lazy val quorum = { ConsistencyLevels(QUORUM, QUORUM) }
12 | lazy val all = { ConsistencyLevels(ALL, ALL) }
13 |
14 | def apply() : ConsistencyLevels = default
15 | }
16 |
--------------------------------------------------------------------------------
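
The factory values above cover the common read/write pairings; mixed levels can also be built directly (a sketch using the Thrift enum already imported by this file):

    import org.apache.cassandra.thrift.ConsistencyLevel._

    ConsistencyLevels()              // default: read ONE, write ZERO
    ConsistencyLevels.quorum         // QUORUM for both reads and writes
    ConsistencyLevels(ONE, QUORUM)   // custom: read ONE, write QUORUM
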
/src/main/scala/com/nodeta/scalandra/Order.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | /**
4 | * Interface to determine sort order when doing range queries.
5 | */
6 | trait Order {
7 | def toBoolean : Boolean
8 | }
9 |
10 | /**
11 | * Ascending sort order
12 | */
13 | case object Ascending extends Order {
14 | def toBoolean : Boolean = false
15 | }
16 |
17 | /**
18 | * Descending sort order
19 | */
20 | case object Descending extends Order {
21 | def toBoolean : Boolean = true
22 | }
23 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/Path.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | import org.apache.cassandra.{thrift => cassandra}
4 | import org.apache.cassandra.thrift.ThriftGlue
5 |
6 | trait Path[A, B] {
7 | protected def serializer : Serialization[A, B, _]
8 | def columnFamily : String
9 | def /(_sC : Option[A]) = {
10 | val parent = this
11 | new ColumnParent[A, B] {
12 | protected def serializer = parent.serializer
13 | def superColumn = _sC
14 | def columnFamily = parent.columnFamily
15 | }
16 | }
17 |
18 | def /(c : B) : ColumnPath[A, B] = {
19 | val parent = this
20 | new ColumnPath[A, B] {
21 | protected def serializer = parent.serializer
22 | def columnFamily = parent.columnFamily
23 | def superColumn = None
24 | val column = c
25 | }
26 | }
27 |
28 | def toColumnParent : cassandra.ColumnParent = {
29 | ThriftGlue.createColumnParent(columnFamily, null)
30 | }
31 |
32 | def toColumnPath : cassandra.ColumnPath = {
33 | ThriftGlue.createColumnPath(columnFamily, null, null)
34 | }
35 | override def toString = {
36 | "Path(" + columnFamily + ")"
37 | }
38 | }
39 |
40 | trait ColumnParent[A, B] extends Path[A, B] {
41 | def superColumn : Option[A]
42 |
43 | override def /(c : B) : ColumnPath[A, B] = {
44 | val parent = this
45 | new ColumnPath[A, B] {
46 | protected def serializer = parent.serializer
47 | def columnFamily = parent.columnFamily
48 | def superColumn = parent.superColumn
49 | val column = c
50 | }
51 | }
52 |
53 | lazy protected val _superColumn : Array[Byte] = {
54 | superColumn.map(serializer.superColumn.serialize(_)).getOrElse(null)
55 | }
56 |
57 | override def toColumnParent : cassandra.ColumnParent = {
58 | ThriftGlue.createColumnParent(columnFamily, _superColumn)
59 | }
60 |
61 | override def toColumnPath : cassandra.ColumnPath = {
62 | ThriftGlue.createColumnPath(columnFamily, _superColumn, null)
63 | }
64 |
65 | override def toString = {
66 | "ColumnParent(" + columnFamily + "," + superColumn + ")"
67 | }
68 | }
69 |
70 | trait ColumnPath[A, B] extends ColumnParent[A, B] {
71 | def column : B
72 |
73 | lazy protected val _column : Array[Byte] = {
74 | serializer.column.serialize(column)
75 | }
76 |
77 | override def toColumnPath : cassandra.ColumnPath = {
78 | ThriftGlue.createColumnPath(columnFamily, _superColumn, _column)
79 | }
80 |
81 | override def toString = {
82 | "ColumnPath(" + columnFamily + "," + superColumn + "," + column + ")"
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
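
A sketch of the `/` builders above, assuming `client : Client[String, String, String]`; paths are normally created through a Client instance, whose Path/ColumnParent/ColumnPath case classes bind the client's serializer automatically:

    val path = client.Path("Super1")          // Path
    val parent = path / Some("superColumn")   // ColumnParent
    val columnPath = parent / "column"        // ColumnPath
    columnPath.toString // => "ColumnPath(Super1,Some(superColumn),column)"
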
/src/main/scala/com/nodeta/scalandra/Range.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | /**
4 | * Range class used in range based limiting
5 | *
6 | * @author Ville Lautanala
7 | */
8 | case class Range[T](start : Option[T], finish : Option[T], order : Order, count : Int) {}
9 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/Serialization.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | import serializer.Serializer
4 |
5 | case class Serialization[A, B, C](superColumn : Serializer[A],
6 | column : Serializer[B],
7 | value : Serializer[C]) {}
8 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/SlicePredicate.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra
2 |
3 | /**
4 | * Predicate used to restrict results.
5 | *
6 | * @author Ville Lautanala
7 | */
8 | trait SlicePredicate[T] {
9 | val columns : Iterable[T]
10 | val range : Option[Range[T]]
11 | }
12 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/client/Base.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.client
2 |
3 | import org.apache.cassandra.thrift.Cassandra
4 | import java.lang.IllegalArgumentException
5 | import com.nodeta.scalandra.serializer.Serializer
6 |
7 | /**
8 | * Base interface for all client actions.
9 | *
10 | * @author Ville Lautanala
11 | */
12 | trait Base[A, B, C] {
13 | private val self = this
14 | protected val cassandra : Cassandra.Client
15 | protected val keyspace : String
16 | def consistency : ConsistencyLevels
17 |
18 | protected val serializer : Serialization[A, B, C]
19 |
20 | class InvalidPathException(reason : String) extends IllegalArgumentException(reason) {}
21 |
22 | case class StandardSlice(columns : Iterable[B], range : Option[Range[B]]) extends SlicePredicate[B] {
23 | def this(columns : Iterable[B]) = this(columns, None)
24 | def this(range : Range[B]) = this(Nil, Some(range))
25 | }
26 |
27 | object StandardSlice {
28 | def apply(columns : Iterable[B]) : StandardSlice = apply(columns, None)
29 | def apply(range : Range[B]) : StandardSlice = apply(Nil, Some(range))
30 | }
31 |
32 | case class SuperSlice(columns : Iterable[A], range : Option[Range[A]]) extends SlicePredicate[A] {
33 | def this(columns : Iterable[A]) = this(columns, None)
34 | def this(range : Range[A]) = this(Nil, Some(range))
35 | }
36 |
37 | object SuperSlice {
38 | def apply(columns : Iterable[A]) : SuperSlice = apply(columns, None)
39 | def apply(range : Range[A]) : SuperSlice = apply(Nil, Some(range))
40 | }
41 |
42 | }
43 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/client/Read.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.client
2 |
3 | import com.nodeta.scalandra.serializer.{Serializer, NonSerializer}
4 |
5 | import org.apache.cassandra.thrift
6 | import org.apache.cassandra.thrift.{NotFoundException, ThriftGlue}
7 |
8 | import java.util.{List => JavaList}
9 | import scala.collection.jcl.{ArrayList, Conversions, Map => JavaMap}
10 | import scala.collection.immutable.ListMap
11 |
12 | import scalandra.{ColumnPath, ColumnParent}
13 |
14 | /**
15 | * This mixin contains all read-only actions
16 | *
17 | * @author Ville Lautanala
18 | */
19 | trait Read[A, B, C] { this : Base[A, B, C] =>
20 | private def convert[T](predicate : SlicePredicate[T], serializer : Serializer[T]) : thrift.SlicePredicate = {
21 | val items = predicate.columns match {
22 | case Nil =>
23 | if (predicate.range.isDefined) null else Nil
24 | case columns => columns.map(serializer.serialize(_))
25 | }
26 |
27 | val range = predicate.range match {
28 | case None => null
29 | case Some(r) =>
30 | new thrift.SliceRange(
31 | r.start.map(serializer.serialize(_)).getOrElse(serializer.empty),
32 | r.finish.map(serializer.serialize(_)).getOrElse(serializer.empty),
33 | r.order.toBoolean,
34 | r.count
35 | )
36 | }
37 |
38 | ThriftGlue.createSlicePredicate(items, range)
39 | }
40 |
41 | private def convert(s : StandardSlice) : thrift.SlicePredicate = {
42 | convert(s, serializer.column)
43 | }
44 |
45 | private def convert(s : SuperSlice) : thrift.SlicePredicate = {
46 | convert(s, serializer.superColumn)
47 | }
48 |
49 | /**
50 | * Number of columns with specified column path
51 | */
52 | def count(key : String, path : ColumnParent[A, B]) : Int = {
53 | cassandra.get_count(keyspace, key, path.toColumnParent, consistency.read)
54 | }
55 |
56 | /**
57 | * Get description of keyspace columnfamilies
58 | */
59 | def describe() : Map[String, Map[String, String]] = {
60 | def convertMap[T](m : java.util.Map[T, java.util.Map[T, T]]) : Map[T, Map[T, T]] = {
61 | Map.empty ++ Conversions.convertMap(m).map { case(columnFamily, description) =>
62 | (columnFamily -> (Map.empty ++ Conversions.convertMap(description)))
63 | }
64 | }
65 |
66 | convertMap(cassandra.describe_keyspace(keyspace))
67 | }
68 |
69 | def apply(key : String, path : ColumnPath[A, B]) = get(key, path)
70 | def apply(key : String, path : ColumnParent[A, B]) = get(key, path)
71 |
72 | /* Get multiple columns from StandardColumnFamily */
73 | def get(keys : Iterable[String], path : ColumnPath[A, B]) : Map[String, Option[(B, C)]] = {
74 | (ListMap() ++ multigetAny(keys, path).map { case(k, v) =>
75 | (k, getColumn(v))
76 | })
77 | }
78 |
79 | /* Get multiple super columns from SuperColumnFamily */
80 | def get(keys : Iterable[String], path : ColumnParent[A, B]) : Map[String, Option[(A , Map[B, C])]] = {
81 | (ListMap() ++ multigetAny(keys, path).map { case(k, v) =>
82 | (k, getSuperColumn(v))
83 | })
84 | }
85 |
86 | private def multigetAny(keys : Iterable[String], path : Path[A, B]) : JavaMap[String, thrift.ColumnOrSuperColumn]= {
87 | JavaMap(cassandra.multiget(keyspace, keys, path.toColumnPath, consistency.read))
88 | }
89 |
90 | /**
91 | * Get single column
92 | * @param key Row key
93 | * @param path Path to column
94 | */
95 | def get(key : String, path : ColumnPath[A, B]) : Option[C] = {
96 | try {
97 | cassandra.get(
98 | keyspace,
99 | key,
100 | path.toColumnPath,
101 | consistency.read
102 | ).column match {
103 | case null => None
104 | case x : thrift.Column => Some(serializer.value.deserialize(x.value))
105 | }
106 | } catch {
107 | case e : NotFoundException => None
108 | }
109 | }
110 |
111 | /**
112 | * Get supercolumn
113 | * @param key Row key
114 | * @param path Path to super column
115 | */
116 | def get(key : String, path : ColumnParent[A, B]) : Option[Map[B, C]] = {
117 | try {
118 | getSuperColumn(cassandra.get(keyspace, key, path.toColumnPath, consistency.read)).map(_._2)
119 | } catch {
120 | case e : NotFoundException => None
121 | }
122 | }
123 |
124 | /**
125 | * Slice columns
126 | * @param path Path to record or super column
127 | * @param predicate Search conditions and limits
128 | */
129 | def get(key : String, path : Path[A, B], predicate : StandardSlice) : Map[B, C] = {
130 | ListMap[B, C](cassandra.get_slice(
131 | keyspace,
132 | key,
133 | path.toColumnParent,
134 | convert(predicate),
135 | consistency.read
136 | ).map(getColumn(_).getOrElse({
137 | throw new NotFoundException()
138 | })) : _*)
139 | }
140 |
141 | /**
142 | * Slice super columns
143 | * @param path Path to record
144 | * @param predicate Search conditions and limits
145 | */
146 | def get(key : String, path : Path[A, B], predicate : SuperSlice) : Map[A, Map[B, C]] = {
147 | ListMap(cassandra.get_slice(
148 | keyspace,
149 | key,
150 | path.toColumnParent,
151 | convert(predicate),
152 | consistency.read
153 | ).map(getSuperColumn(_).get) : _*)
154 | }
155 |
156 | /**
157 | * Slice multiple standard column family records
158 | */
159 | def get(keys : Iterable[String], path : Path[A, B], predicate : StandardSlice) : Map[String, Map[B, C]] = {
160 | val result = cassandra.multiget_slice(keyspace, keys, path.toColumnParent, convert(predicate), consistency.read)
161 | ListMap() ++ Conversions.convertMap(result).map { case(key, value) =>
162 | key -> (ListMap() ++ value.map(getColumn(_).get))
163 | }
164 | }
165 |
166 | /**
167 | * Slice multiple super column family records
168 | */
169 | def get(keys : Iterable[String], path : Path[A, B], predicate : SuperSlice) : Map[String, Map[A, Map[B, C]]] = {
170 | val result = cassandra.multiget_slice(keyspace, keys, path.toColumnParent, convert(predicate), consistency.read)
171 | ListMap() ++ Conversions.convertMap(result).map { case(key, value) =>
172 | key -> (ListMap() ++ value.map(getSuperColumn(_).get))
173 | }
174 | }
175 |
176 | /**
177 | * List keys in single keyspace/columnfamily pair
178 | */
179 | def keys(columnFamily : String, start : Option[String], finish : Option[String], count : Int) : List[String] = {
180 | val slice = ThriftGlue.createSlicePredicate(
181 | null,
182 | new thrift.SliceRange(serializer.value.empty, serializer.value.empty, true, 1)
183 | )
184 |
185 | val parent = ThriftGlue.createColumnParent(columnFamily, null)
186 |
187 | cassandra.get_range_slice(keyspace, parent, slice, start.getOrElse(""), finish.getOrElse(""), count, consistency.read).map(_.key)
188 | }
189 |
190 | /**
191 | * Get slice range for super column family
192 | */
193 | def get(path : Path[A, B], predicate : SuperSlice, start : Option[String], finish : Option[String], count : Int) : Map[String, Map[A, Map[B, C]]] = {
194 | val result = cassandra.get_range_slice(keyspace, path.toColumnParent, convert(predicate), start.getOrElse(""), finish.getOrElse(""), count, consistency.read)
195 | ListMap(result.map { keySlice =>
196 | (keySlice.key -> ListMap(keySlice.columns.map(getSuperColumn(_).get) : _*))
197 | } : _*)
198 | }
199 |
200 | /**
201 | * Get slice range for standard column family
202 | */
203 | def get(path : Path[A, B], predicate : StandardSlice, start : Option[String], finish : Option[String], count : Int) : Map[String, Map[B, C]] = {
204 | val result = cassandra.get_range_slice(keyspace, path.toColumnParent, convert(predicate), start.getOrElse(""), finish.getOrElse(""), count, consistency.read)
205 | ListMap(result.map { keySlice =>
206 | (keySlice.key -> ListMap(keySlice.columns.map(getColumn(_).get) : _*))
207 | } : _*)
208 | }
209 |
210 | private def resultMap(results : JavaList[thrift.Column]) : Map[B, C] = {
211 | val r : List[thrift.Column] = results // Implicit conversion
212 | ListMap(r.map(c => (serializer.column.deserialize(c.name) -> serializer.value.deserialize(c.value))).toSeq : _*)
213 | }
214 |
215 | private def superResultMap(results : JavaList[thrift.SuperColumn]) : Map[A, Map[B, C]] = {
216 | val r : List[thrift.SuperColumn] = results // Implicit conversion
217 | ListMap(r.map(c => (serializer.superColumn.deserialize(c.name) -> resultMap(c.columns))).toSeq : _*)
218 | }
219 |
220 | private def getSuperColumn(c : thrift.ColumnOrSuperColumn) : Option[Pair[A, Map[B, C]]] = {
221 | c.super_column match {
222 | case null => None
223 | case x : thrift.SuperColumn => Some(serializer.superColumn.deserialize(x.name) -> resultMap(x.columns))
224 | }
225 | }
226 |
227 | private def getColumn(c : thrift.ColumnOrSuperColumn) : Option[Pair[B, C]] = {
228 | c.column match {
229 | case null => None
230 | case x : thrift.Column => Some(serializer.column.deserialize(x.name) -> serializer.value.deserialize(x.value))
231 | }
232 | }
233 |
234 | implicit private def convertList[T](list : JavaList[T]) : List[T] = {
235 | List[T]() ++ Conversions.convertList[T](list)
236 | }
237 |
238 | implicit private def convertCollection[T](list : Iterable[T]) : JavaList[T] = {
239 | if (list eq null) null else
240 | (new ArrayList() ++ list).underlying
241 | }
242 | }
243 |
--------------------------------------------------------------------------------
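
A hedged sketch of the slicing overloads above, again assuming `client : Client[String, String, String]`:

    import com.nodeta.scalandra.{Range, Ascending}

    val path = client.Path("Standard1")
    // single-row slice: at most 10 columns starting from "a"
    client.get("row", path, client.StandardSlice(Range(Some("a"), None, Ascending, 10)))
    // multi-row slice restricted to named columns
    client.get(List("row1", "row2"), path, client.StandardSlice(List("column1")))
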
/src/main/scala/com/nodeta/scalandra/client/Write.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.client
2 |
3 | import org.apache.cassandra.thrift
4 | import org.apache.cassandra.thrift.ThriftGlue
5 | import java.util.{List => JavaList}
6 | import scala.collection.jcl.{ArrayList, LinkedHashMap}
7 |
8 | /**
9 | * This mixin contains all write-only actions
10 | *
11 | * @author Ville Lautanala
12 | */
13 | trait Write[A, B, C] { this : Base[A, B, C] =>
14 | /**
15 | * Insert or update value of single column
16 | */
17 | def update(key : String, path : ColumnPath[A, B], value : C) {
18 | cassandra.insert(keyspace, key, path.toColumnPath, this.serializer.value.serialize(value), System.currentTimeMillis, consistency.write)
19 | }
20 |
21 | def update(key : String, path : ColumnParent[A, B], value : Iterable[Pair[B, C]]) {
22 | path.superColumn match {
23 | case Some(sc) => insertSuper(key, path / None, Map(sc -> value))
24 | case None => insertNormal(key, path, value)
25 | }
26 | }
27 |
28 | def update(key : String, path : Path[A, B], data : Iterable[Pair[A, Iterable[Pair[B, C]]]]) {
29 | insertSuper(key, path, data)
30 | }
31 |
32 | private def insert(key : String, path : Path[A, B], data : java.util.List[thrift.ColumnOrSuperColumn]) {
33 | val mutation = new LinkedHashMap[String, JavaList[thrift.ColumnOrSuperColumn]]
34 | mutation(path.columnFamily) = data
35 |
36 | cassandra.batch_insert(keyspace, key, mutation.underlying, consistency.write)
37 | }
38 |
39 | /**
40 | * Insert collection of values in a standard column family/key pair
41 | */
42 | def insertNormal(key : String, path : Path[A, B], data : Iterable[Pair[B, C]]) {
43 | def convert(data : Iterable[Pair[B, C]]) : java.util.List[thrift.ColumnOrSuperColumn] = {
44 | (new ArrayList() ++ data.map { case(k, v) =>
45 | ThriftGlue.createColumnOrSuperColumn_Column(
46 | new thrift.Column(serializer.column.serialize(k), this.serializer.value.serialize(v), System.currentTimeMillis))
47 | }).underlying
48 | }
49 | insert(key, path, convert(data))
50 | }
51 |
52 | private def convertToColumnList(data : Iterable[Pair[B, C]]) : JavaList[thrift.Column] = {
53 | (new ArrayList[thrift.Column] ++ data.map { case(k, v) =>
54 | new thrift.Column(serializer.column.serialize(k), serializer.value.serialize(v), System.currentTimeMillis)
55 | }).underlying
56 | }
57 |
58 |
59 | /**
60 | * Insert collection of values in a super column family/key pair
61 | */
62 | def insertSuper(key : String, path : Path[A, B], data : Iterable[Pair[A, Iterable[Pair[B, C]]]]) {
63 | val cfm = new LinkedHashMap[String, JavaList[thrift.ColumnOrSuperColumn]]
64 |
65 | val list = (new ArrayList() ++ data.map { case(key, value) =>
66 | ThriftGlue.createColumnOrSuperColumn_SuperColumn(
67 | new thrift.SuperColumn(serializer.superColumn.serialize(key), convertToColumnList(value))
68 | )
69 | }).underlying
70 |
71 | insert(key, path, list)
72 | }
73 |
74 | /**
75 | * Remove all values from a path
76 | *
77 | * @param path Path to be removed
78 | */
79 | def remove(key : String, path : Path[A, B]) {
80 | cassandra.remove(keyspace, key, path.toColumnPath, System.currentTimeMillis, consistency.write)
81 | }
82 | }
83 |
--------------------------------------------------------------------------------
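
A hedged sketch of the write API above (`client : Client[String, String, String]`):

    // insert a whole row into a standard column family, then remove the row
    client("row", client.ColumnParent("Standard1", None)) = Map(
      "column1" -> "value1",
      "column2" -> "value2"
    )
    client.remove("row", client.Path("Standard1"))
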
/src/main/scala/com/nodeta/scalandra/map/Base.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.map
2 |
3 | import serializer.Serializer
4 |
5 | trait Base[A, B, C] {
6 | protected val client : Client[A, B, C]
7 | }
8 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/map/CassandraMap.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.map
2 |
3 | trait CassandraMap[A, B] extends scala.collection.Map[A, B] {
4 | def slice(r : Range[A]) : CassandraMap[A, B]
5 |
6 | def slice(l : Iterable[A]) : CassandraMap[A, B]
7 |
8 | def remove(key : A) : CassandraMap[A, B]
9 |
10 | def update(key : A, value : B) : CassandraMap[A, B]
11 | }
12 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/map/ColumnFamily.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.map
2 |
3 | import org.apache.cassandra.thrift.InvalidRequestException
4 |
5 | class UnsupportedActionException(s : String) extends Exception(s) {}
6 |
7 | trait ColumnFamily[A] extends CassandraMap[String, A] { this : Base[_, _, _] =>
8 | protected val path : Path[_, _]
9 |
10 | protected def build(key : String) : A
11 |
12 | protected sealed class RecordIterator(iter : Iterator[String]) extends Iterator[(String, A)] {
13 | def hasNext = iter.hasNext
14 | def next() = {
15 | val key = iter.next()
16 | (key -> build(key))
17 | }
18 | }
19 |
20 | protected sealed class KeyIterator extends Iterator[String] {
21 | var start : Option[String] = None
22 | var buffer : Iterator[String] = Nil.elements
23 | var end = false
24 |
25 | private def updateBuffer() {
26 | if (end) return
27 | val keys = client.keys(path.columnFamily, start, None, 100)
28 | if (keys.isEmpty || keys.size < 100) end = true
29 | buffer = keys.elements
30 | if (!keys.isEmpty) start = Some(keys.last + ' ')
31 | }
32 |
33 | def hasNext : Boolean = {
34 | if (buffer.hasNext) return true
35 | updateBuffer()
36 | buffer.hasNext
37 | }
38 | def next : String = buffer.next
39 | }
40 |
41 | def elements : Iterator[(String, A)] = new RecordIterator(keys)
42 |
43 | override def keys : Iterator[String] = new KeyIterator()
44 |
45 | def get(key : String) = Some(build(key))
46 | lazy val size = { elements.toList.size }
47 |
48 | }
49 |
50 | class StandardColumnFamily[A, B, C](protected val path : Path[A, B], protected val client : Client[A, B, C]) extends ColumnFamily[StandardRecord[A, B, C]] with Base[A, B, C] {
51 | def this(columnFamily : String, client : Client[A, B, C]) = this(client.Path(columnFamily), client)
52 |
53 | protected def build(key : String) = {
54 | new StandardRecord(key, path / None, client)
55 | }
56 |
57 | sealed protected trait ListPredicate extends StandardColumnFamily[A, B, C] {
58 | def constraint : Iterable[String]
59 | override def keys = constraint.elements
60 | }
61 |
62 | sealed protected trait RangePredicate extends StandardColumnFamily[A, B, C] {
63 | def constraint : Range[String]
64 | override def keys = {
65 | this.client.get(path, this.client.StandardSlice(Nil), this.constraint.start, this.constraint.finish, this.constraint.count).keys
66 | }
67 | }
68 |
69 | def slice(r : Range[String]) = {
70 | new StandardColumnFamily(path, client) with RangePredicate {
71 | val constraint = r
72 | }
73 | }
74 |
75 | def slice(r : Iterable[String]) = {
76 | new StandardColumnFamily(path, client) with ListPredicate {
77 | val constraint = r
78 | }
79 | }
80 |
81 | def map(column : B) : Map[String, C] = {
82 | multiget(path / column)
83 | }
84 |
85 | protected def multiget(x : ColumnPath[A, B]) : Map[String, C] = {
86 | val r = client.get(path, client.StandardSlice(List(x.column)), None, None, 20000000)
87 | scala.collection.immutable.ListMap() ++ r.flatMap { case(key, value) =>
88 | value.get(x.column) match {
89 | case Some(column) => List((key, column))
90 | case None => Nil
91 | }
92 | }
93 | }
94 |
95 | def remove(key : String) = {
96 | client.remove(key, path)
97 | this
98 | }
99 |
100 | def update(key : String, value : StandardRecord[A, B, C]) = {
101 | client(key, path / None) = value
102 | this
103 | }
104 | def update(key : String, value : Iterable[(B, C)]) = {
105 | client(key, path / None) = value
106 | this
107 | }
108 | }
109 |
110 | class SuperColumnFamily[A, B, C](protected val path : Path[A, B], protected val client : Client[A, B, C]) extends ColumnFamily[SuperRecord[A, B, C]] with Base[A, B, C] {
111 | def this(columnFamily : String, client : Client[A, B, C]) = this(client.Path(columnFamily), client)
112 | protected def build(key : String) = {
113 | val parent = this
114 | new SuperRecord[A, B, C](key, path, client)
115 | }
116 |
117 | sealed protected trait ListPredicate extends SuperColumnFamily[A, B, C] {
118 | def constraint : Iterable[String]
119 | override def keys = constraint.elements
120 | }
121 |
122 | sealed protected trait RangePredicate extends SuperColumnFamily[A, B, C] {
123 | def constraint : Range[String]
124 | override def keys = {
125 | this.client.get(path, this.client.SuperSlice(Nil), this.constraint.start, this.constraint.finish, this.constraint.count).keys
126 | }
127 | }
128 |
129 |
130 |
131 | def slice(r : Range[String]) = {
132 | new SuperColumnFamily(path, client) with RangePredicate {
133 | val constraint = r
134 | }
135 | }
136 |
137 | def slice(r : Iterable[String]) = {
138 | new SuperColumnFamily(path, client) with ListPredicate {
139 | val constraint = r
140 | }
141 | }
142 |
143 | def remove(key : String) = {
144 | client.remove(key, path)
145 | this
146 | }
147 |
148 | def update(key : String, value : SuperRecord[A, B, C]) = {
149 | client(key, path) = value
150 | this
151 | }
152 | def update(key : String, value : Iterable[(A, Iterable[(B, C)])]) = {
153 | client(key, path) = value
154 | this
155 | }
156 | }
157 |
--------------------------------------------------------------------------------
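
A sketch of the column family maps above (`client : Client[String, String, String]`, schema assumed to define Standard1):

    val cf = new StandardColumnFamily("Standard1", client)
    cf("row")("column") = "value"        // write through the row's Record
    cf.slice(List("row1", "row2")).keys  // iterate only the listed keys
    cf.map("column")                     // one column across all rows: Map[String, String]
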
/src/main/scala/com/nodeta/scalandra/map/Keyspace.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.map
2 |
3 | import map.{ColumnFamily => CF, SuperColumnFamily => SCF}
4 |
5 |
6 | trait Keyspace[A, B, C] extends scala.collection.Map[String, ColumnFamily[_]] with Base[A, B, C] {
7 | /**
8 | * ColumnFamily map instantiated using client instance
9 | */
10 | case class ColumnFamily(columnFamily : String) extends StandardColumnFamily[A, B, C](client.Path(columnFamily), client) {}
11 |
12 | /**
13 | * SuperColumnFamily map instantiated using client instance
14 | */
15 | case class SuperColumnFamily(columnFamily : String) extends SCF[A, B, C](client.Path(columnFamily), client) {}
16 | val keyspace : String
17 |
18 | lazy private val schema = { client.describe }
19 | lazy private val columnFamilies = {
20 | schema.map { case(name, description) =>
21 | (name -> buildColumnFamily(name))
22 | }
23 | }
24 |
25 | def get(columnFamily : String) : Option[CF[_]] = {
26 | schema.get(columnFamily) match {
27 | case None => None
28 | case Some(cF) => Some(buildColumnFamily(columnFamily))
29 | }
30 | }
31 |
32 | def elements = columnFamilies.elements
33 |
34 | def size = schema.size
35 |
36 | private def buildColumnFamily(columnFamily : String) : CF[_] = {
37 | schema(columnFamily)("Type") match {
38 | case "Super" => SuperColumnFamily(columnFamily)
39 | case "Standard" => ColumnFamily(columnFamily)
40 | }
41 | }
42 |
43 | override def toString() = {
44 | "Keyspace(" + columnFamilies.map { case (name, instance) =>
45 | name + " -> " + instance.getClass.getSimpleName
46 | }.mkString(",") + ")"
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
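
Because Client mixes in this trait, the keyspace schema itself behaves like a Map (a small sketch):

    client.get("Standard1").isDefined // => true when the schema defines Standard1
    client.elements.toList            // (name, column family map) pairs from describe
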
/src/main/scala/com/nodeta/scalandra/map/Record.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.map
2 |
3 | import scala.collection.immutable.ListMap
4 | import scala.collection.mutable.{Map => MMap}
5 |
6 | trait Record[A, B] extends CassandraMap[A, B] {
7 | val key : String
8 | val path : Path[_, _]
9 |
10 | def size = {
11 | elements.toList.size
12 | }
13 | }
14 |
15 | class StandardRecord[A, B, C](val key : String, val path : ColumnParent[A, B], protected val client : Client[A, B, C]) extends Record[B, C] with Base[A, B, C] {
16 | lazy private val defaultRange = {
17 | Range[B](None, None, Ascending, 2147483647)
18 | }
19 |
20 | sealed protected trait ListPredicate extends StandardRecord[A, B, C] {
21 | def constraint : Iterable[B]
22 | override def elements = {
23 | this.client.get(this.key, this.path, this.client.StandardSlice(constraint)).elements
24 | }
25 | }
26 |
27 | sealed protected trait RangePredicate extends StandardRecord[A, B, C] {
28 | def constraint : Range[B]
29 | override def elements = {
30 | this.client.get(this.key, this.path, this.client.StandardSlice(this.constraint)).elements
31 | }
32 | }
33 |
34 | def elements = {
35 | client.get(key, path, client.StandardSlice(defaultRange)).elements
36 | }
37 |
38 | def get(column : B) : Option[C] = {
39 | client.get(key, path / column)
40 | }
41 |
42 | def slice(r : Range[B]) = {
43 | new StandardRecord(key, path, client) with RangePredicate {
44 | val constraint = r
45 | }
46 | }
47 |
48 | def slice(r : Iterable[B]) = {
49 | new StandardRecord(key, path, client) with ListPredicate {
50 | val constraint = r
51 | }
52 | }
53 |
54 | def remove(column : B) = {
55 | client.remove(key, path / column)
56 | this
57 | }
58 |
59 | def update(column : B, value : C) = {
60 | client(key, path / column) = value
61 | this
62 | }
63 | }
64 |
65 | class SuperRecord[A, B, C](val key : String, val path : Path[A, B], protected val client : Client[A, B, C]) extends Record[A, scala.collection.Map[B, C]] with Base[A, B, C] {
66 | lazy private val defaultRange = {
67 | Range[A](None, None, Ascending, 2147483647)
68 | }
69 |
70 | sealed protected trait ListPredicate extends SuperRecord[A, B, C] {
71 | def constraint : Iterable[A]
72 | override def elements = {
73 | this.client.get(this.key, this.path, this.client.SuperSlice(constraint)).elements
74 | }
75 | }
76 |
77 | sealed protected trait RangePredicate extends SuperRecord[A, B, C] {
78 | def constraint : Range[A]
79 | override def elements = {
80 | this.client.get(this.key, this.path, this.client.SuperSlice(this.constraint)).elements
81 | }
82 | }
83 |
84 | def elements = {
85 | client.get(key, path, client.SuperSlice(defaultRange)).elements
86 | }
87 |
88 | def get(column : A) : Option[scala.collection.Map[B, C]] = {
89 | Some(new StandardRecord(key, path / Some(column), client))
90 | }
91 |
92 | def slice(r : Range[A]) = {
93 | new SuperRecord(key, path, client) with RangePredicate {
94 | val constraint = r
95 | }
96 | }
97 |
98 | def slice(r : Iterable[A]) = {
99 | new SuperRecord(key, path, client) with ListPredicate {
100 | val constraint = r
101 | }
102 | }
103 |
104 | def remove(column : A) = {
105 | client.remove(key, path / Some(column))
106 | this
107 | }
108 |
109 | def update(column : A, value : scala.collection.Map[B, C]) = {
110 | updated(column, value)
111 | this
112 | }
113 |
114 | def update(column : A, value : Iterable[(B, C)]) = {
115 | updated(column, value)
116 | this
117 | }
118 |
119 | private def updated(column : A, value : Iterable[(B, C)]) {
120 | client(key, path / Some(column)) = value
121 | }
122 | }
123 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/pool/Factory.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.pool
2 |
3 | /**
4 | * A type-safe interface for object factories.
5 | *
6 | * @author Ville Lautanala
7 | */
8 | trait Factory[T] {
9 | def build() : T
10 | def destroy(t : T) : Unit
11 | def validate(t : T) : Boolean
12 | def activate(t : T) : Unit
13 | def passivate(t : T) : Unit
14 | }
15 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/pool/Pool.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.pool
2 |
3 | /**
4 | * A type-safe interface used for pool implementations.
5 | *
6 | * @author Ville Lautanala
7 | */
8 | trait Pool[T] extends java.io.Closeable {
9 | def borrow() : T
10 | def restore(t : T) : Unit
11 | def invalidate(t : T) : Unit
12 | def add() : Unit
13 | def idle() : Int
14 | def active() : Int
15 | def clear() : Unit
16 |
17 | def apply[R](f : T => R) = {
18 | val item = this.borrow()
19 | try {
20 | f(item)
21 | } finally {
22 | this.restore(item)
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/pool/PoolWrapper.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.pool
2 |
3 | import org.apache.commons.pool._
4 | import org.apache.commons.pool.impl._
5 |
6 | /**
7 | * This class wraps an Apache Commons ObjectPool in a type-safe Scala interface.
8 | *
9 | * @author Ville Lautanala
10 | */
11 | class PoolWrapper[T](pool : ObjectPool) extends Pool[T] {
12 | def borrow() : T = pool.borrowObject.asInstanceOf[T]
13 | def restore(t : T) = pool.returnObject(t)
14 | def invalidate(t : T) = pool.invalidateObject(t)
15 | def add() = pool.addObject
16 | def idle() : Int = pool.getNumIdle
17 | def active() : Int = pool.getNumActive
18 | def clear() : Unit = pool.clear
19 | def close() : Unit = pool.close
20 | }
21 |
22 |
23 | /**
24 | * Pool factory. Uses a soft reference based pool implementation
25 | */
26 | object SoftReferencePool {
27 | /**
28 | * Build a new Pool using given factory.
29 | */
30 | def apply[T](f : Factory[T]) : PoolWrapper[T] = {
31 | new PoolWrapper(new SoftReferenceObjectPool(PoolFactoryWrapper(f)))
32 | }
33 | }
34 |
35 | /**
36 | * Pool factory. Uses a stack based implementation
37 | */
38 | object StackPool {
39 | /**
40 | * Build a new Pool using given factory.
41 | */
42 | def apply[T](f : Factory[T]) : PoolWrapper[T] = {
43 | new PoolWrapper(new StackObjectPool(PoolFactoryWrapper(f)))
44 | }
45 | }
46 |
47 | /**
48 | * This class adapts the Scala Factory interface to Apache's PoolableObjectFactory.
49 | * It is used when instantiating Apache pools.
50 | *
51 | * @author Ville Lautanala
52 | * @param factory Object factory used to create instances
53 | */
54 | case class PoolFactoryWrapper[T](factory : Factory[T]) extends PoolableObjectFactory {
55 | def makeObject : Object = factory.build.asInstanceOf[Object]
56 | def destroyObject(o : Object) : Unit = factory.destroy(o.asInstanceOf[T])
57 | def validateObject(o : Object) : Boolean = factory.validate(o.asInstanceOf[T])
58 | def activateObject(o : Object) : Unit = factory.activate(o.asInstanceOf[T])
59 | def passivateObject(o : Object) : Unit = factory.passivate(o.asInstanceOf[T])
60 | }
61 |
--------------------------------------------------------------------------------
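
A pooled-connection sketch tying the pieces together: ConnectionProvider is the Factory, StackPool wraps it, and Pool.apply supplies the loan pattern:

    import com.nodeta.scalandra.ConnectionProvider
    import com.nodeta.scalandra.pool.StackPool

    val pool = StackPool(ConnectionProvider("127.0.0.1", 9160))
    pool { connection =>
      // use the connection; it is restored to the pool afterwards
    }
    pool.close()
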
/src/main/scala/com/nodeta/scalandra/serializer/LongSerializer.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.serializer
2 |
3 | /**
4 | * This serializer serializes 64-bit integers (Long) to byte arrays in
5 | * Big-Endian order.
6 | */
7 | object LongSerializer extends Serializer[Long] {
8 | import java.nio.ByteBuffer
9 |
10 | def serialize(l : Long) = {
11 | val bytes = new Array[Byte](8)
12 | ByteBuffer.wrap(bytes).asLongBuffer.put(l)
13 | bytes
14 | }
15 |
16 | def deserialize(a : Array[Byte]) = {
17 | ByteBuffer.wrap(a).asLongBuffer.get
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/src/main/scala/com/nodeta/scalandra/serializer/Serializer.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.serializer
2 |
3 | /**
4 | * Interface for serializers.
5 | *
6 | * These are used to serialize and deserialize data going to and coming from
7 | * Cassandra.
8 | *
9 | * @author Ville Lautanala
10 | */
11 | trait Serializer[T] {
12 | /**
13 | * Serialize value to byte array
14 | */
15 | def serialize(t : T) : Array[Byte]
16 |
17 | /**
18 | * Deserialize value from byte array
19 | */
20 | def deserialize(a : Array[Byte]) : T
21 |
22 | def apply(t : T) : Array[Byte] = serialize(t)
23 | def unapply(a : Array[Byte]) : Option[T] = Some(deserialize(a))
24 |
25 | /**
26 | * Empty value for this serializer
27 | *
28 | * @return An empty byte array, or a value representing the smallest
29 | * possible value
30 | */
31 | val empty : Array[Byte] = new Array[Byte](0)
32 |
33 | def << (t : T) : Array[Byte] = serialize(t)
34 | def >> (t : Array[Byte]) : T = deserialize(t)
35 | }
36 |
37 | /**
38 | * This serializer is used when raw data is handled; it passes byte arrays through unchanged.
39 | */
40 | object NonSerializer extends Serializer[Array[Byte]] {
41 | def serialize(a : Array[Byte]) = a
42 | def deserialize(a : Array[Byte]) = a
43 | }
44 |
--------------------------------------------------------------------------------
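
Implementing the trait only requires serialize and deserialize; apply, unapply and the << and >> aliases come for free. A minimal sketch of an Int serializer in the same big-endian style as the LongSerializer above:

    import com.nodeta.scalandra.serializer.Serializer
    import java.nio.ByteBuffer

    object IntSerializer extends Serializer[Int] {
      def serialize(i : Int) = {
        val bytes = new Array[Byte](4)
        ByteBuffer.wrap(bytes).putInt(i) // big-endian by default
        bytes
      }
      def deserialize(a : Array[Byte]) = ByteBuffer.wrap(a).getInt
    }

    IntSerializer << 42                 // alias for serialize(42)
    IntSerializer.serialize(42) match { // unapply makes the object an extractor
      case IntSerializer(n) => n        // => 42
    }
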
/src/main/scala/com/nodeta/scalandra/serializer/StringSerializer.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.serializer
2 |
3 | /**
4 | * This serializer handles UTF-8 encoded strings
5 | */
6 | object StringSerializer extends Serializer[String] {
7 | def serialize(s : String) = {
8 | if (s ne null)
9 | s.getBytes("UTF-8")
10 | else
11 | empty
12 | }
13 | def deserialize(a : Array[Byte]) = new String(a, "UTF-8")
14 | }
15 |
--------------------------------------------------------------------------------
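
Note the null guard: a null string serializes to the trait's empty byte array instead of throwing. For example:

    import com.nodeta.scalandra.serializer.StringSerializer

    StringSerializer.serialize("föö") // 5 UTF-8 bytes: each ö encodes as two
    StringSerializer.serialize(null)  // => empty Array[Byte]
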
/src/main/scala/com/nodeta/scalandra/serializer/UUIDSerializer.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.serializer
2 |
3 | import java.util.UUID
4 |
5 | /**
6 | * This serializer serializes a UUID to byte arrays in
7 | * Big-Endian order.
8 | */
9 | object UUIDSerializer extends Serializer[UUID] {
10 | import java.nio.ByteBuffer
11 |
12 | def serialize(u : UUID) = {
13 | val bytes = new Array[Byte](16)
14 | val longBuffer = ByteBuffer.wrap(bytes).asLongBuffer
15 | longBuffer.put(u.getMostSignificantBits).put(u.getLeastSignificantBits)
16 | bytes
17 | }
18 |
19 | def deserialize(a : Array[Byte]) = {
20 | val longBuffer = ByteBuffer.wrap(a).asLongBuffer
21 | new UUID(longBuffer.get, longBuffer.get)
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
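
The 16-byte layout is simply the two 64-bit halves of the UUID, most significant half first. A roundtrip sketch:

    import com.nodeta.scalandra.serializer.UUIDSerializer
    import java.util.UUID

    val id = UUID.randomUUID
    val bytes = UUIDSerializer.serialize(id) // 16 bytes, big-endian halves
    assert(UUIDSerializer.deserialize(bytes) == id)
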
/src/test/scala/com/nodeta/scalandra/ClientTest.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.tests
2 |
3 | import org.specs._
4 | import com.nodeta.scalandra.serializer.StringSerializer
5 |
6 | class ClientTest extends Specification {
7 | shareVariables()
8 | val connection = Connection(9162)
9 | val cassandra = new Client(connection, "Keyspace1", Serialization(StringSerializer, StringSerializer, StringSerializer), ConsistencyLevels.quorum)
10 | import cassandra.{StandardSlice, SuperSlice, ColumnParent, ColumnPath}
11 |
12 | doLast {
13 | connection.close
14 | }
15 |
16 | "data modification" should {
17 | setSequential()
18 | /* Data to be inserted */
19 | val jsmith = Map("first" -> "John", "last" -> "Smith", "age" -> "53", "foo" -> "bar")
20 |
21 | // Location for data in standard CF
22 | val path = cassandra.ColumnParent("Standard1", None)
23 | // Location for data in super CF
24 | val superPath = cassandra.ColumnParent("Super1", None)
25 |
26 | // Row key
27 | val key = "test/jsmith"
28 |
29 | // Index data, for SCF
30 | val index = Map("1" -> Map("foo" -> null, "bar" -> null), "2" -> Map("blah" -> "meh"), "3" -> Map("nothing" -> "here"))
31 |
32 | "insert" in {
33 | "be able to add and get data to a normal column family" in {
34 | // Given: John is inserted to Cassandra
35 | jsmith("first") must equalTo("John")
36 | cassandra(key, path) = jsmith
37 |
38 | // Then: It should still have its old values.
39 | def result = { cassandra.get(key, path, StandardSlice(Range[String](None, None, Ascending, 1000))) }
40 | result("first") must equalTo(jsmith("first")).eventually
41 | result("last") must equalTo(jsmith("last")).eventually
42 | result("age") must equalTo(jsmith("age")).eventually
43 | }
44 |
45 | "be able to add and get data to a super column family" in {
46 | // Given: Data is inserted to Cassandra
47 | cassandra(key, superPath) = index
48 |
49 | // Then: It should still have its old values.
50 | def result = { cassandra.get(key, superPath, SuperSlice(Range[String](None, None, Ascending, 1000))) }
51 | result.keySet must containAll(List("1", "2", "3")).eventually
52 | result("2")("blah") must equalTo("meh").eventually
53 | }
54 |
55 | "should be able to insert single value" in {
56 | val value = (Math.random * 10000).toInt.toString
57 | val path = cassandra.ColumnPath("Standard1", None, "Random")
58 | val key = "random-test"
59 | // Given: Value is inserted to Cassandra
60 | cassandra(key, path) = value
61 |
62 | // Then: It should be readable from Cassandra.
63 | cassandra.get(key, path) must beSomething.which(_ must equalTo(value)).eventually
64 | }
65 |
66 | }
67 |
68 | "remove" in {
69 | "be able to remove single column" in {
70 | // Given that age column is defined and the column is removed
71 | cassandra.get(key, path, StandardSlice(Range[String](None, None, Ascending, 1000))) must haveKey("age")
72 | cassandra.remove(key, path / "age")
73 |
74 | // Then: age column should not have value
75 | cassandra.get(key, path / "age") must eventually(beNone)
76 | }
77 |
78 | "be able to remove entire row" in {
79 | cassandra.get(key, cassandra.ColumnPath("Standard1", None, "first")) must beSomething
80 | // Given: John is removed from Cassandra
81 | cassandra.remove(key, cassandra.ColumnParent("Standard1", None))
82 | // Then: It should not return anything
83 | cassandra.get(key, cassandra.ColumnPath("Standard1", None, "first")) must eventually(beNone)
84 | }
85 | }
86 | }
87 |
88 | "count" should {
89 | val path = ColumnParent("Standard1", None)
90 | val superPath = ColumnParent("Super1", None)
91 | val data = Map("lol" -> "cat", "cheez" -> "burger")
92 | doFirst {
93 | cassandra("count", path) = data
94 | cassandra("count", superPath) = Map("internet" -> data)
95 | }
96 |
97 | "should be able to count columns in a row" in {
98 | cassandra.count("count", path) must eventually(equalTo(data.size))
99 | }
100 | "should be able to count supercolumns" in {
101 | cassandra.count("count", superPath) must eventually(equalTo(1))
102 | }
103 | "should be able to count columns in a supercolumn" in {
104 | cassandra.count("count", superPath / Some("internet")) must eventually(equalTo(data.size))
105 | }
106 |
107 | doLast {
108 | cassandra.remove("count", path)
109 | cassandra.remove("count", superPath)
110 | }
111 | }
112 |
113 | "column slicing" should {
114 | // Paths for data
115 | val path = cassandra.ColumnParent("Standard1", None)
116 | // Row key
117 | val key = "slicing"
118 |
119 | doFirst { // Insert data
120 | val jsmith = Map("first" -> "John", "last" -> "Smith", "age" -> "53")
121 | cassandra(key, path) = jsmith
122 | }
123 |
124 | "return columns using column list as filter" in {
125 | val result = cassandra.get(key, path, StandardSlice(List("first", "age")))
126 | result must haveSize(2)
127 | result must not have the key("last")
128 | }
129 |
130 | "be able to filter columns using start and finish parameter" in {
131 | val result = cassandra.get(key, path, StandardSlice(Range(Some("first"), Some("last"), Ascending, 1000)))
132 | result must eventually(haveSize(2))
133 | result.keySet must containAll(List("first", "last"))
134 | }
135 |
136 | "be able to filter columns using only start parameter" in {
137 | val result = cassandra.get(key, path, StandardSlice(Range(Some("first"), None, Ascending, 1000)))
138 | result must eventually(haveSize(2))
139 | result.keySet must containAll(List("first", "last"))
140 | }
141 |
142 | "be able to filter columns using only finish parameter" in {
143 | val result = cassandra.get(key, path, StandardSlice(Range(None, Some("last"), Ascending, 2)))
144 | result.size must be(2)
145 | result.keySet must containAll(List("age", "first"))
146 | }
147 |
148 | "use sort parameter to sort columns" in {
149 | val result = cassandra.get(key, path, StandardSlice(Range[String](None, None, Descending, 1)))
150 | result must eventually(haveKey("last"))
151 | }
152 |
153 | "be able to limit results" in {
154 | val result = cassandra.get(key, path, StandardSlice(Range[String](None, None, Descending, 2)))
155 | result must eventually(haveSize(2))
156 | }
157 |
158 | doLast {
159 | cassandra.remove(key, path)
160 | }
161 | }
162 |
163 | "super column slicing" should {
164 | val superPath = cassandra.ColumnParent("Super1", None)
165 | val key = "slicing"
166 | doFirst {
167 | val index = Map("1" -> Map("foo" -> null, "bar" -> null), "2" -> Map("blah" -> "meh"), "3" -> Map("nothing" -> "here"))
168 | cassandra(key, superPath) = index
169 | }
170 |
171 | "work using collection of super columns" in {
172 | val result = cassandra.get(key, superPath, SuperSlice(List("2", "3")))
173 | result must eventually(haveSize(2))
174 | result must not have the key("1")
175 | }
176 |
177 | "contain standard column data" in {
178 | val result = cassandra.get(key, superPath, SuperSlice(List("1", "3")))
179 | result must eventually(haveSize(2))
180 | result("3")("nothing") must eventually(equalTo("here"))
181 | }
182 |
183 | "be able to filter columns using start and finish parameter" in {
184 | val result = cassandra.get(key, superPath, SuperSlice(Range(Some("1"), Some("2"), Ascending, 1000)))
185 | result must eventually(haveSize(2))
186 | result must eventually(haveKey("1"))
187 | result must eventually(haveKey("2"))
188 | }
189 |
190 | "work on subcolumns" in {
191 | val result = cassandra.get(key, superPath / Some("1"), StandardSlice(Range[String](None, None, Descending, 1)))
192 | result must eventually(haveKey("foo"))
193 | }
194 |
195 | doLast {
196 | cassandra.remove(key, superPath)
197 | }
198 | }
199 |
200 | "single column fetching" should {
201 | val path = ColumnParent("Super1", None)
202 | val key = "singleColumnFetch"
203 | val data = Map("1" -> Map("2" -> "3", "4" -> "5"))
204 | doFirst {
205 | cassandra(key, path) = data
206 | cassandra(key, ColumnParent("Standard1", None)) = data("1")
207 | }
208 |
209 | "be able to get super column using path" in {
210 | cassandra.get(key, path / Some("1")) must eventually(beSomething)
211 | cassandra.get(key, path / Some("1")).get.keySet must eventually(containAll(data("1").keySet))
212 | }
213 |
214 | "return None when column is not found" in {
215 | cassandra.get(key, path / Some("doesntexist")) must eventually(beNone)
216 | }
217 |
218 | "be able to get column from super column using path" in {
219 | cassandra.get(key, (path / Some("1") / "2")) must eventually(equalTo(Some("3")))
220 | }
221 |
222 | "be able to get column from standard column family using path" in {
223 | val result = cassandra.get(key, ColumnPath("Standard1", None, "4"))
224 | result must equalTo(Some(data("1")("4")))
225 | }
226 |
227 | doLast {
228 | cassandra.remove(key, path)
229 | cassandra.remove(key, ColumnParent("Standard1", None))
230 | }
231 | }
232 |
233 | "multiple record fetching" should {
234 | doFirst {
235 | for(i <- (0 until 5)) {
236 | cassandra("multiget:" + i.toString, ColumnParent("Standard1", None)) = Map("test" -> "data", "foo" -> "bar")
237 | }
238 | }
239 |
240 | "find all existing records" in {
241 | val result = cassandra.get(List("multiget:1", "multiget:3", "multiget:6"), ColumnPath("Standard1", None, "foo"))
242 |
243 | result("multiget:1") must beSomething
244 | result("multiget:3") must beSomething
245 | result("multiget:6") must beNone
246 | result must not have the key("multiget:2")
247 | }
248 |
249 | doLast {
250 | for(i <- (0 until 5)) {
251 | cassandra.remove("multiget:" + i.toString, ColumnParent("Standard1", None))
252 | }
253 | }
254 | }
255 |
256 | "multiple record slicing" should {
257 | doFirst {
258 | val data = Map("a" -> "b", "c" -> "d")
259 | val sdata = Map("a" -> Map("b" ->"c"), "d" -> Map("e" -> "f"))
260 |
261 | for(i <- (0 until 5)) {
262 | cassandra("multi:" + i.toString, ColumnParent("Standard1", None)) = data
263 | cassandra("multi:" + i.toString, ColumnParent("Super1", None)) = sdata
264 | }
265 | }
266 |
267 | "using key range" in {
268 | "find values from standard column family" in {
269 | val r = cassandra.get(ColumnParent("Standard1", None), StandardSlice(List("a", "c")), Some("multi:2"), None, 3)
270 | r.size must be(3)
271 | r must haveKey("multi:2")
272 | r("multi:2") must haveKey("a")
273 | }
274 | "find values from super column family" in {
275 | val r = cassandra.get(ColumnParent("Super1", None), SuperSlice(List("a", "d")), Some("multi:3"), Some("multi:4"), 3)
276 | r.size must be(2)
277 | r must haveKey("multi:3")
278 | r must haveKey("multi:4")
279 | r("multi:4") must haveKey("a")
280 | }
281 | }
282 |
283 | "using list of keys" in {
284 | val keys = List("multi:1", "multi:3")
285 | "find values from standard column family" in {
286 | val r = cassandra.get(keys, ColumnParent("Standard1", None), StandardSlice(List("a", "e")))
287 | r must haveKey("multi:1")
288 | r("multi:1") must haveKey("a")
289 | r("multi:3") must notHaveKey("b")
290 | }
291 | "find values from super column family" in {
292 | val r = cassandra.get(keys, ColumnParent("Super1", None), SuperSlice(Range(Some("a"), Some("d"), Ascending, 100)))
293 | r("multi:1") must haveKey("a")
294 | r("multi:1") must notHaveKey("b")
295 | r("multi:3") must haveKey("d")
296 | r("multi:3")("d") must haveKey("e")
297 | }
298 |
299 | doLast {
300 | for(i <- (0 until 5)) {
301 | cassandra.remove("multi:" + i.toString, ColumnParent("Standard1", None))
302 | cassandra.remove("multi:" + i.toString, ColumnParent("Super1", None))
303 | }
304 | }
305 | }
306 | }
307 |
308 | "order preservation" should {
309 |
310 | // Zero-pad numbers so keys sort lexicographically in numeric order
311 | def pad(s : Int, length : Int) : String = {
312 | ("0" * (length - s.toString.length)) + s.toString
313 | }
314 |
315 | "work on columns in super column family" in {
316 | val superPath = cassandra.ColumnParent("Super1", None)
317 | val key = "ordering-test"
318 | val randomizer = new scala.util.Random(10)
319 | cassandra.remove(key, superPath)
320 | val superData = (0 until 50).map(pad(_, 3) ->(randomizer.nextInt.toString)).toList
321 |
322 | superData.map { case(key, data) =>
323 | cassandra(key, superPath) = Map("1" -> Map(key -> data))
324 | }
325 |
326 | cassandra(key, superPath / None) = Map("1" -> superData)
327 |
328 | cassandra.get(key, superPath / Some("1")).get.keys.toList must eventually(containInOrder(superData.map(_._1).toList))
329 | }
330 |
331 | "work on standard columns" in {
332 | val path = cassandra.ColumnParent("Standard1", None)
333 | val key = "ordering-test"
334 | cassandra.remove(key, path)
335 | val data = (0 until 25).map { i =>
336 | (('z' - i).toChar.toString -> i.toString)
337 | }.toList
338 |
339 | cassandra(key, path) = data
340 | cassandra.get(key, path, StandardSlice(Range[String](None, None, Descending, 1000))) must eventually(containInOrder(data))
341 | }
342 | }
343 |
344 | "key ranges" should {
345 | doFirst {
346 | // Insert data
347 | val p1 = ColumnPath("Super1", Some("superColumn"), "column")
348 | cassandra("range1", p1) = "foo"
349 | cassandra("range2", p1) = "bar"
350 |
351 | val p2 = ColumnPath("Standard1", None, "column")
352 | cassandra("range1", p2) = "foo"
353 | cassandra("range2", p2) = "foo"
354 | }
355 |
356 | "be able to list key ranges" in {
357 | val path = ColumnParent("Super1", None)
358 | val r = cassandra.get(path, SuperSlice(List("superColumn")), Some("range"), Some("range3"), 100)
359 | r must haveKey("range1")
360 | r must haveKey("range2")
361 | }
362 |
363 | "contain super column data" in {
364 | val path = ColumnParent("Super1", None)
365 | val r = cassandra.get(path, SuperSlice(List("superColumn")), Some("range"), Some("range3"), 100)
366 | r must haveKey("range1")
367 | r("range1") must haveKey("superColumn")
368 | }
369 |
370 | "contain column data in super column" in {
371 | val path = ColumnParent("Super1", Some("superColumn"))
372 | val r = cassandra.get(path, StandardSlice(List("column")), Some("range"), Some("range3"), 100)
373 |
374 | r must haveKey("range1")
375 | r("range1") must haveKey("column")
376 | }
377 |
378 | "contain column data from standard column family" in {
379 | val path = ColumnParent("Standard1", None)
380 | val r = cassandra.get(path, StandardSlice(List("column")), Some("range"), Some("range3"), 100)
381 |
382 | r must haveKey("range1")
383 | r("range1") must haveKey("column")
384 | }
385 |
386 | "not contain any data if empty slice is given" in {
387 | val path = ColumnParent("Standard1", None)
388 | val r = cassandra.get(path, StandardSlice(Nil), Some("range"), Some("range3"), 100)
389 |
390 | r must haveKey("range1")
391 | r("range1") must notHaveKey("column")
392 | }
393 |
394 | doLast {
395 | // Remove data
396 | cassandra.remove("range1", ColumnParent("Super1", None))
397 | cassandra.remove("range2", ColumnParent("Super1", None))
398 |
399 | cassandra.remove("range1", ColumnParent("Standard1", None))
400 | cassandra.remove("range2", ColumnParent("Standard1", None))
401 | }
402 | }
403 | }
404 |
--------------------------------------------------------------------------------
/src/test/scala/com/nodeta/scalandra/MappingTest.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.tests
2 |
3 | import org.specs._
4 | import com.nodeta.scalandra.map._
5 | import com.nodeta.scalandra.serializer._
6 |
7 | class MappingTest extends Specification {
8 | def client(c : Connection) : Client[String, String, String] = {
9 | new Client(c, "Keyspace1", Serialization(StringSerializer, StringSerializer, StringSerializer), ConsistencyLevels.quorum)
10 | }
11 |
12 | def cassandraMap[A, B](map : CassandraMap[String, A], data : scala.collection.Map[String, B]) = {
13 | val keys = map.keySet.toList.sort(_.compareTo(_) < 0)
14 | "be able to slice by names" in {
15 | val l = List(keys.first, keys.last)
16 | map.slice(l).keySet must containAll(l)
17 | }
18 | "be able to slice by range" in {
19 | val l = keys.drop(1).dropRight(1)
20 |
21 | val r = map.slice(Range(Some(l.first), Some(l.last), Ascending, l.size))
22 | r.keySet must haveTheSameElementsAs(l)
23 | }
24 | }
25 |
26 | def record[A, B](map : CassandraMap[String, A], data : scala.collection.Map[String, B], lazyFetch : Boolean) = {
27 | cassandraMap(map, data)
28 | "be able to remove column" in {
29 | val key = map.keySet.toList.last
30 | map.slice(List(key)) must haveSize(1) // .get may be lazy, so slice is used instead
31 | map.remove(key)
32 | Thread.sleep(25)
33 | map.slice(List(key)) must haveSize(0)
34 | }
35 |
36 | "be able to list its columns" in {
37 | map.keySet must haveSize(data.size)
38 | }
39 |
40 |
41 | "column access" in {
42 | "existing column" in {
43 | "get returns column in option" in {
44 | map.get(data.keys.next) must beSomething
45 | }
46 |
47 | "apply returns column value" in {
48 | val key = data.keys.next
49 | map(key) must equalTo(data(key))
50 | }
51 | }
52 |
53 | if (!lazyFetch) {
54 | "nonexisting column" in {
55 | "get returns none" in {
56 | map.get("zomg, doesn't exist") must beNone
57 | }
58 |
59 | "apply raises exception" in {
60 | map("zomg, doesn't exist") must throwA[java.util.NoSuchElementException]
61 | }
62 | }
63 | }
64 | }
65 | }
66 |
67 |
68 | val connection = Connection(9162)
69 | val cassandra = client(connection)
70 |
71 | doAfter {
72 | connection.close()
73 | }
74 |
75 | doLast {
76 | val connection = Connection(9162)
77 | val cassandra = client(connection)
78 | 0.until(20).foreach { i =>
79 | cassandra.remove("row-" + i, cassandra.ColumnParent("Standard1", None))
80 | cassandra.remove("row-" + i, cassandra.ColumnParent("Super1", None))
81 | }
82 | }
83 |
84 | "Keyspace" should {
85 | val keyspace = new Keyspace[String, String, String] {
86 | protected val client = cassandra
87 | val keyspace = "Keyspace1"
88 | }
89 |
90 | "be able to list its ColumnFamilies" in {
91 | keyspace.keySet must containAll(List("Standard1", "Standard2"))
92 | }
93 | }
94 |
95 | "ColumnFamily" should {
96 | val data = insertStandardData()
97 |
98 | "list its rows" in {
99 | cassandra.ColumnFamily("Standard1").keySet must containAll(data.keySet)
100 | }
101 | }
102 |
103 | "StandardColumnFamily" should {
104 | val data = insertStandardData()
105 | val cf = new StandardColumnFamily("Standard1", cassandra)
106 |
107 | "provide cassandra map functionality" in cassandraMap(cf, data)
108 |
109 | "multiget values" in {
110 | val key = data.values.next.keys.next
111 | cf.map(key) must notBeEmpty
112 | }
113 |
114 | "row modification" in {
115 | val key = Math.random.toString.substring(2)
116 |
117 | "insert new data" in {
118 | cf(key) = Map("test" -> "value")
119 | cassandra.get(key, cassandra.ColumnPath("Standard1", None, "test")) must beSomething
120 | cassandra.remove(key, cassandra.Path("Standard1"))
121 | }
122 |
123 | "remove data" in {
124 | cassandra(key, cassandra.ColumnPath("Standard1", None, "test2")) = "lolz"
125 | cf.remove(key)
126 |
127 | Thread.sleep(25)
128 | cassandra.get(key, cassandra.ColumnPath("Standard1", None, "test2")) must beNone
129 | }
130 | }
131 |
132 | "be able to create row instances without any requests" in {
133 | connection.close() // Connection should not be needed
134 | try {
135 | val cf = new StandardColumnFamily("Standard1", cassandra)
136 |
137 | val r = cf("Row")
138 | cf.get("RowFooasoafso")
139 | } catch {
140 | case _ => fail("Thrift was called")
141 | }
142 | connection.isOpen must be(false)
143 | }
144 | }
145 |
146 | "SuperColumnFamily" should {
147 | val data = insertSuperData()
148 | val cf = new SuperColumnFamily("Super1", cassandra)
149 |
150 | "provide cassandra map functionality" in cassandraMap(cf, data)
151 |
152 | "row modification" in {
153 | val key = Math.random.toString.substring(2)
154 |
155 | "insert new data" in {
156 | cf(key) = Map("lol" -> Map("test" -> "value"))
157 | Thread.sleep(25)
158 | cassandra.get(key, cassandra.ColumnPath("Super1", Some("lol"), "test")) must beSomething
159 | cassandra.remove(key, cassandra.Path("Super1"))
160 | }
161 |
162 | "remove data" in {
163 | cassandra(key, cassandra.ColumnPath("Super1", Some("cat"), "test2")) = "lolz"
164 | cf.remove(key)
165 |
166 | Thread.sleep(25)
167 | cassandra.get(key, cassandra.ColumnPath("Super1", Some("cat"), "test2")) must beNone
168 | }
169 | }
170 | }
171 |
172 | "StandardRecord" should {
173 | val Pair(key, data) = insertStandardData().elements.next
174 | val row = new StandardRecord(key, cassandra.ColumnParent("Standard1", None), cassandra)
175 |
176 | "provide cassandra map functionality" in record(row, data, false)
177 |
178 | "load data lazily from cassandra" in {
179 | connection.close()
180 | "while creating new instance" in {
181 | try {
182 | new StandardRecord("row-test", cassandra.ColumnParent("Standard1", None), cassandra)
183 | } catch {
184 | case _ => fail("Request is made")
185 | }
186 | connection.isOpen must equalTo(false)
187 | }
188 | }
189 | }
190 |
191 | "SuperColumn" should {
192 | val Pair(key, row) = insertSuperData().elements.next
193 | val Pair(superColumn, data) = row.elements.next
194 |
195 | val r = new StandardRecord(key, cassandra.ColumnParent("Super1", Some(superColumn)), cassandra)
196 |
197 | "provide cassandra map functionality" in record(r, data, false)
198 |
199 | "load data lazily from cassandra" in {
200 | connection.close()
201 | "while creating new instance" in {
202 | try {
203 | new StandardRecord("row-test", cassandra.ColumnParent("Super1", Some(superColumn)), cassandra)
204 | } catch {
205 | case _ => fail("Request is made")
206 | }
207 | connection.isOpen must equalTo(false)
208 | }
209 | }
210 | }
211 |
212 |
213 | "SuperRecord" should {
214 | val Pair(key, data) = insertSuperData().elements.next
215 | val row = new SuperRecord(key, cassandra.Path("Super1"), cassandra)
216 |
217 | "provide cassandra map functionality" in record(row, data, true)
218 |
219 | "load data lazily from cassandra" in {
220 | connection.close()
221 | "while creating new instance" in {
222 | try {
223 | new SuperRecord("row-test", cassandra.ColumnParent("Super1", None), cassandra)
224 | } catch {
225 | case _ => fail("Request is made")
226 | }
227 | connection.isOpen must equalTo(false)
228 | }
229 | "while accessing sub column" in {
230 | try {
231 | new SuperRecord("row-test", cassandra.ColumnParent("Super1", None), cassandra).get("LOL")
232 | } catch {
233 | case _ => fail("Request is made")
234 | }
235 | connection.isOpen must equalTo(false)
236 | }
237 | }
238 | }
239 |
240 | def insertStandardData() : Map[String, scala.collection.Map[String, String]] = {
241 | val connection = Connection(9162)
242 | val cassandra = client(connection)
243 |
244 | val row = Map((0 until 50).map { i =>
245 | val s = ('a' + i).toChar.toString
246 | (("column-" + s) -> s)
247 | } : _*)
248 |
249 | try {
250 | Map(0.until(20).map {
251 | i => cassandra("row-" + i, cassandra.ColumnParent("Standard1", None)) = row
252 | ("row-" + i -> row)
253 | } : _*)
254 | } finally {
255 | Thread.sleep(50)
256 | connection.close()
257 | }
258 | }
259 |
260 | def insertSuperData() : Map[String, scala.collection.Map[String, scala.collection.Map[String, String]]] = {
261 | val connection = Connection(9162)
262 | val cassandra = client(connection)
263 |
264 | def buildMap(n : Int) : Map[String, String] = {
265 | Map((0 until n).map { i =>
266 | val s = ('a' + i).toChar.toString
267 | (("column-" + s) -> s)
268 | } : _*)
269 | }
270 |
271 | val row = Map((0 until 50).map { i =>
272 | val s = ('a' + i).toChar.toString
273 | (("supercolumn-" + s) -> buildMap(i+1))
274 | } : _*)
275 |
276 | try {
277 | Map(0.until(20).map { i =>
278 | cassandra("row-" + i, cassandra.Path("Super1")) = row
279 | (("row-" + i) -> row)
280 | } : _*)
281 | } finally {
282 | Thread.sleep(25)
283 | connection.close()
284 | }
285 | }
286 | }
287 |
--------------------------------------------------------------------------------
/src/test/scala/com/nodeta/scalandra/PathTest.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.tests
2 |
3 | import org.specs._
4 | import com.nodeta.scalandra.serializer._
5 |
6 | class PathTest extends Specification {
7 | "Path" should {
8 | val _serializer = Serialization(StringSerializer, StringSerializer, StringSerializer)
9 | val path = new scalandra.Path[String, String] {
10 | val serializer = _serializer
11 | val columnFamily = "test"
12 | }
13 |
14 | "Path should create ColumnParent with only column family defined" in {
15 | val parent = path.toColumnParent
16 | parent.column_family must equalTo("test")
17 | parent.super_column must beNull
18 | }
19 | "Path should create ColumnPath with only column family defined" in {
20 | val parent = path.toColumnPath
21 | parent.column_family must equalTo("test")
22 | parent.super_column must beNull
23 | parent.column must beNull
24 | }
25 |
26 | "ColumnParent should not have column" in {
27 | val path = new ColumnParent[String, String] {
28 | val serializer = _serializer
29 | val columnFamily = "test"
30 | val superColumn = Some("test")
31 | }
32 | path.toColumnPath.column must beNull
33 | }
34 |
35 | "ColumnPath should have column" in {
36 | val path = new ColumnPath[String, String] {
37 | val serializer = _serializer
38 | val columnFamily = "test"
39 | val superColumn = Some("test")
40 | val column = "test"
41 | }
42 | path.toColumnPath.column must notBeNull
43 | }
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/src/test/scala/com/nodeta/scalandra/PoolTest.scala:
--------------------------------------------------------------------------------
1 | package com.nodeta.scalandra.tests
2 |
3 | import org.specs._
4 | import com.nodeta.scalandra.ConnectionProvider
5 | import com.nodeta.scalandra.pool.StackPool
6 |
7 | class PoolTest extends Specification {
8 | "Connection pool" should {
9 | val pool = StackPool(ConnectionProvider("localhost", 9162))
10 | "expose connections to blocks of code" in {
11 | pool { connection =>
12 | connection.isOpen must equalTo(true)
13 | "foo"
14 | } must equalTo("foo")
15 | }
16 | "not create excess connections" in {
17 | pool { connection =>
18 | pool.active must equalTo(1)
19 | }
20 |
21 | pool.idle must equalTo(1)
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------