├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── LICENSE.md ├── Makefile ├── README.md ├── config └── config.exs ├── lib ├── cassandra.ex ├── cassandra │ ├── cache.ex │ ├── cluster.ex │ ├── cluster │ │ ├── schema │ │ │ ├── fetcher.ex │ │ │ ├── fetcher │ │ │ │ └── v3_0_x.ex │ │ │ ├── partitioner.ex │ │ │ ├── partitioner │ │ │ │ └── murmur3.ex │ │ │ ├── replication_strategy.ex │ │ │ └── replication_strategy │ │ │ │ ├── local.ex │ │ │ │ ├── none.ex │ │ │ │ └── simple.ex │ │ └── watcher.ex │ ├── connection.ex │ ├── connection_error.ex │ ├── host.ex │ ├── keyspace.ex │ ├── load_balancing.ex │ ├── load_balancing │ │ ├── policy.ex │ │ ├── round_robin.ex │ │ └── token_aware.ex │ ├── murmur3.ex │ ├── session.ex │ ├── session │ │ ├── connection_manager.ex │ │ ├── executor.ex │ │ └── worker.ex │ ├── statement.ex │ └── uuid.ex ├── cql.ex └── cql │ ├── batch.ex │ ├── batch_query.ex │ ├── consistency.ex │ ├── data_types.ex │ ├── data_types │ ├── date.ex │ ├── decoder.ex │ ├── encoder.ex │ ├── time.ex │ └── timestamp.ex │ ├── error.ex │ ├── event.ex │ ├── execute.ex │ ├── frame.ex │ ├── lz4.ex │ ├── metadata.ex │ ├── options.ex │ ├── prepare.ex │ ├── query.ex │ ├── query_params.ex │ ├── ready.ex │ ├── register.ex │ ├── request.ex │ ├── result.ex │ ├── result │ ├── prepared.ex │ ├── rows.ex │ ├── schema_change.ex │ ├── set_keyspace.ex │ └── void.ex │ ├── startup.ex │ └── supported.ex ├── mix.exs ├── mix.lock ├── native └── murmur_nif.c └── test ├── cassandra ├── cache_test.exs ├── cluster_test.exs ├── murmur3_test.exs ├── schema │ └── partitioner │ │ └── murmur3_test.exs └── session_test.exs ├── cql ├── frame_test.exs └── requests_test.exs ├── data_types_test.exs ├── integration └── data_types_test.exs └── test_helper.exs /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 
5 | /cover 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps 9 | 10 | # Where 3rd-party dependencies like ExDoc output generated docs. 11 | /doc 12 | 13 | # If the VM crashes, it generates a dump, let's ignore it too. 14 | erl_crash.dump 15 | 16 | # Also ignore archive artifacts (built via "mix archive.build"). 17 | *.ez 18 | 19 | /priv/*.so 20 | 21 | /.elixir_ls -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: elixir 2 | sudo: true 3 | services: 4 | - cassandra 5 | elixir: 6 | - 1.4 7 | - 1.5 8 | otp_release: 9 | - 19.3 10 | - 20.0 11 | addons: 12 | apt: 13 | packages: 14 | - build-essential 15 | git: 16 | depth: 1 17 | env: 18 | - MIX_ENV=test 19 | before_install: 20 | - mix local.rebar --force 21 | - mix local.hex --force 22 | - mix deps.get 23 | - rm deps/lz4/c_src/*.o 24 | script: mix coveralls.travis 25 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 1.0.1 4 | 5 | * Freeze version `1.0` features and release stable version 6 | * Upgrade deps 7 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # License 2 | 3 | Copyright (c) 2016 CafeBazaar 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 
7 | You may obtain a copy of the License at 8 | [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ERLANG_PATH = $(shell erl -eval 'io:format("~s", [lists:concat([code:root_dir(), "/erts-", erlang:system_info(version), "/include"])])' -s init stop -noshell) 2 | CFLAGS = -O3 -std=c99 -fPIC -Wall -Wextra -Wno-implicit-fallthrough -I$(ERLANG_PATH) 3 | CC = gcc 4 | 5 | KERNEL_NAME = $(shell uname -s) 6 | ifeq ($(KERNEL_NAME),Linux) 7 | CFLAGS += -shared 8 | endif 9 | ifeq ($(KERNEL_NAME),Darwin) 10 | CFLAGS += -undefined dynamic_lookup -dynamiclib 11 | endif 12 | 13 | all: murmur_nif.so 14 | 15 | murmur_nif.so: 16 | mkdir -p priv 17 | $(CC) $(CFLAGS) -o priv/murmur_nif.so native/murmur_nif.c 18 | 19 | clean: 20 | rm priv/murmur_nif.so 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Cassandra 2 | 3 | [![Build Status](https://travis-ci.org/cafebazaar/elixir-cassandra.svg?branch=master)](https://travis-ci.org/cafebazaar/elixir-cassandra) 4 | [![Hex.pm](https://img.shields.io/hexpm/v/cassandra.svg?maxAge=2592000)](https://hex.pm/packages/cassandra) 5 | [![Hex.pm](https://img.shields.io/hexpm/l/cassandra.svg?maxAge=2592000)](https://github.com/cafebazaar/elixir-cassandra/blob/master/LICENSE.md) 6 | [![Coverage 
Status](https://coveralls.io/repos/github/cafebazaar/elixir-cassandra/badge.svg?branch=master)](https://coveralls.io/github/cafebazaar/elixir-cassandra?branch=master) 7 | 8 | An Elixir driver for Apache Cassandra. 9 | 10 | This driver works with Cassandra Query Language version 3 (CQL3) and Cassandra's native protocol v4. 11 | 12 | ## Features 13 | 14 | * Automatic peer discovery 15 | * Automatic connection managment (reconnect on connection loss and discover new nodes) 16 | * Configurable load-balancing/reconnection policies 17 | * Asynchronous execution through Tasks 18 | * Prepared statements with named and position based values 19 | * Token based load-balancing policy 20 | * Automatic prepare and cache prepared statements per host 21 | * Result paging 22 | 23 | ## Installation 24 | 25 | Add `cassandra` to your list of dependencies in `mix.exs`: 26 | 27 | ```elixir 28 | def deps do 29 | [{:cassandra, "~> 1.0.0-beta.5"}] 30 | end 31 | ``` 32 | 33 | ## Quick Start 34 | 35 | ```elixir 36 | defmodule Repo do 37 | use Cassandra 38 | end 39 | 40 | {:ok, _} = Repo.start_link 41 | # uses "127.0.0.1:9042" as contact point by default 42 | # discovers other nodes on first connection 43 | 44 | Repo.execute """ 45 | CREATE KEYSPACE IF NOT EXISTS test 46 | WITH replication = {'class':'SimpleStrategy','replication_factor':1}; 47 | """, consistency: :all 48 | 49 | Repo.execute """ 50 | CREATE TABLE IF NOT EXISTS test.users ( 51 | id timeuuid, 52 | name varchar, 53 | age int, 54 | PRIMARY KEY (id) 55 | ); 56 | """, consistency: :all 57 | 58 | insert = "INSERT INTO test.users (id, name, age) VALUES (?, ?, ?);" 59 | 60 | users = [ 61 | %{name: "Bilbo", age: 50}, 62 | %{name: "Frodo", age: 33}, 63 | %{name: "Gandolf", age: 2019}, 64 | ] 65 | 66 | users 67 | |> Task.async_stream(&Repo.execute(insert, values: [Cassandra.UUID.v1, &1.name, &1.age])) 68 | |> Enum.to_list 69 | 70 | Repo.execute("SELECT * FROM test.users;") 71 | 72 | # %CQL.Result.Rows{ 73 | # columns: ["id", "age", 
"name"], 74 | # rows_count: 3, 75 | # rows: [ 76 | # ["831e5df2-a0e1-11e6-b9af-6d2c86545d91", 2019, "Gandolf"], 77 | # ["831e5df1-a0e1-11e6-b9af-6d2c86545d91", 33, "Frodo"], 78 | # ["831e5df0-a0e1-11e6-b9af-6d2c86545d91", 50, "Bilbo"] 79 | # ] 80 | # } 81 | ``` 82 | 83 | ## Todo 84 | 85 | * [ ] Compression 86 | * [ ] Batch statement 87 | * [ ] Authentication and SSL encryption 88 | * [ ] User Defined Types 89 | 90 | -------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | use Mix.Config 2 | 3 | config :cassandra, Repo, 4 | adapter: Cassandra.Ecto.Adapter, 5 | keyspace: "test", 6 | contact_points: ["172.17.0.2"] 7 | 8 | -------------------------------------------------------------------------------- /lib/cassandra.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra do 2 | @moduledoc """ 3 | Is a helper to create a session on a Cassandra cluster 4 | 5 | ## Example 6 | 7 | ```elixir 8 | defmodule Repo do 9 | use Cassandra, keyspace: "test" 10 | end 11 | 12 | # Start the repo 13 | {:ok, _} = Repo.start_link 14 | 15 | # Execute statements 16 | {:ok, rows} = Repo.execute("SELECT * FROM users;") 17 | ``` 18 | """ 19 | 20 | use Application 21 | use Supervisor 22 | 23 | alias Cassandra.{Cluster, Session} 24 | 25 | defmacro __using__(opts \\ []) do 26 | quote do 27 | use Supervisor 28 | 29 | @cluster __MODULE__.Cassandra.Cluster 30 | @session __MODULE__.Cassandra.Session 31 | @cache __MODULE__.Cassandra.Session.Cache 32 | @connection_manager __MODULE__.Cassandra.Session.ConnectionManager 33 | 34 | def start_link(options \\ []) do 35 | Supervisor.start_link(__MODULE__, options) 36 | end 37 | 38 | def init(options) do 39 | opts = unquote(opts) 40 | config = case Keyword.fetch(opts, :otp_app) do 41 | {:ok, app} -> Application.get_env(app, __MODULE__, []) 42 | :error -> opts 43 | end 44 | 45 | 
options = 46 | options 47 | |> Keyword.merge(config) 48 | |> Keyword.merge([ 49 | cluster: @cluster, 50 | session: @session, 51 | cache: @cache, 52 | connection_manager: @connection_manager, 53 | ]) 54 | 55 | children = [ 56 | worker(Cluster, [options]), 57 | supervisor(Session, [@cluster, options]), 58 | ] 59 | 60 | supervise(children, strategy: :rest_for_one) 61 | end 62 | 63 | def execute(query, options \\ []) do 64 | Session.execute(@session, query, options) 65 | end 66 | 67 | def run_stream(query, func, options \\ []) do 68 | Session.run_stream(@session, query, func, options) 69 | end 70 | end 71 | end 72 | 73 | ### Application Callbacks ### 74 | 75 | def start(_type, options) do 76 | Supervisor.start_link(__MODULE__, options) 77 | end 78 | 79 | ### Supervisor Callbacks ### 80 | 81 | def init(_options) do 82 | children = [ 83 | worker(Cassandra.UUID, []), 84 | ] 85 | 86 | supervise(children, strategy: :one_for_one) 87 | end 88 | end 89 | -------------------------------------------------------------------------------- /lib/cassandra/cache.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cache do 2 | @moduledoc false 3 | 4 | def new(nil), do: :error 5 | def new(name) do 6 | options = [ 7 | :set, 8 | :public, 9 | :named_table, 10 | read_concurrency: true, 11 | ] 12 | case :ets.new(name, options) do 13 | ^name -> {:ok, name} 14 | _ -> :error 15 | end 16 | end 17 | 18 | def put(cache, key, value) do 19 | true = :ets.insert(cache, {key, value}) 20 | value 21 | end 22 | 23 | def put_new_lazy(cache, key, func) do 24 | with :error <- fetch(cache, key), 25 | {:ok, value} <- func.() 26 | do 27 | put(cache, key, value) 28 | else 29 | {:ok, value} -> value 30 | :error -> :error 31 | {:error, reason} -> {:error, reason} 32 | error -> {:error, error} 33 | end 34 | end 35 | 36 | def fetch(cache, key) do 37 | case :ets.lookup(cache, key) do 38 | [{^key, value}] -> {:ok, value} 39 | _ -> :error 40 | end 41 | end 42 | 
43 | def delete(cache, key) do 44 | true = :ets.delete(cache, key) 45 | :ok 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /lib/cassandra/cluster.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster do 2 | @moduledoc """ 3 | Represents a cassandra cluster. It serves as a Session factory and a collection of metadata. 4 | 5 | It always keeps a control connection open to one of cluster hosts to get notified about 6 | topological and status changes in the cluster, and keeps its metadata is sync. 7 | """ 8 | 9 | use GenServer 10 | 11 | require Logger 12 | 13 | alias Cassandra.{Connection, ConnectionError, Host, Cache} 14 | alias Cassandra.Cluster.{Schema, Watcher} 15 | 16 | @defaults [ 17 | fetcher: Schema.Fetcher.V3_0_x, 18 | cluster_name: nil, 19 | data_center: nil, 20 | contact_points: [{127, 0, 0, 1}], 21 | port: 9042, 22 | connection_timeout: 1000, 23 | timeout: 5000, 24 | ] 25 | 26 | @valid_options Keyword.keys(@defaults) ++ [:retry, :cache] 27 | 28 | @max_tries 5 29 | 30 | ### Client API ### 31 | 32 | @doc """ 33 | Starts a Cluster process without links (outside of a supervision tree). 34 | 35 | See start_link/1 for more information. 36 | """ 37 | def start(options \\ []) do 38 | server_options = case Keyword.get(options, :cluster) do 39 | nil -> [] 40 | name -> [name: name] 41 | end 42 | GenServer.start(__MODULE__, options, server_options) 43 | end 44 | 45 | @doc """ 46 | Starts a Cluster process linked to the current process. 47 | 48 | `options` is the keyword list of options: 49 | 50 | * `:contact_points` - The initial list host of addresses. Note that the entire list 51 | of cluster members will be discovered automatically once a connection to any 52 | hosts from the original list is successful. 
(default: `["127.0.0.1"]`) 53 | * `:port` - Cassandra native protocol port (default: `9042`) 54 | * `:connection_timeout` - connection timeout in milliseconds (defult: `5000`) 55 | * `:timeout` - request execution timeout in milliseconds (default: `:infinity`) 56 | 57 | ## Return values 58 | 59 | It returns `{:ok, pid}` when connection to one of `contact_points` established and metadata fetched, 60 | on any error it returns `{:error, reason}`. 61 | """ 62 | def start_link(options \\ []) do 63 | server_options = case Keyword.get(options, :cluster) do 64 | nil -> [] 65 | name -> [name: name] 66 | end 67 | GenServer.start_link(__MODULE__, options, server_options) 68 | end 69 | 70 | @doc """ 71 | Returns replications containing `partition_key` of `keyspace` 72 | """ 73 | def find_replicas(cluster, keyspace, partition_key) do 74 | GenServer.call(cluster, {:find_replicas, keyspace, partition_key}) 75 | end 76 | 77 | @doc """ 78 | Returns list of `cluster`s `Cassandra.Host`s 79 | """ 80 | def hosts(cluster) do 81 | GenServer.call(cluster, :hosts) 82 | end 83 | 84 | @doc """ 85 | Returns list of `cluster`s up `Cassandra.Host`s 86 | """ 87 | def up_hosts(cluster) do 88 | GenServer.call(cluster, :up_hosts) 89 | end 90 | 91 | @doc """ 92 | Returns list of cluster hosts matching given list if `ips` 93 | """ 94 | def host(cluster, ips) do 95 | GenServer.call(cluster, {:host, ips}) 96 | end 97 | 98 | @doc false 99 | def register(cluster) do 100 | register(cluster, self()) 101 | end 102 | 103 | @doc false 104 | def register(cluster, pid) do 105 | GenServer.call(cluster, {:register, pid}) 106 | end 107 | 108 | ### GenServer Callbacks ### 109 | 110 | @doc false 111 | def init(options) do 112 | options = 113 | @defaults 114 | |> Keyword.merge(options) 115 | |> Keyword.take(@valid_options) 116 | 117 | with {socket, supported, local_data} <- select_socket(options), 118 | {:ok, schema} <- Schema.Fetcher.fetch(local_data, socket, options[:fetcher]), 119 | {:ok, watcher} <- 
Watcher.start_link(options) 120 | do 121 | cache = 122 | case Cache.new(Keyword.get(options, :cache, nil)) do 123 | {:ok, name} -> name 124 | :error -> nil 125 | end 126 | 127 | initial_state = %{ 128 | cache: cache, 129 | socket: socket, 130 | options: options, 131 | fetcher: options[:fetcher], 132 | watcher: watcher, 133 | supported: supported, 134 | local_data: local_data, 135 | listeners: [], 136 | } 137 | 138 | state = 139 | initial_state 140 | |> Map.merge(schema) 141 | |> refresh_schema 142 | 143 | {:ok, state} 144 | else 145 | error = %ConnectionError{} -> {:stop, error} 146 | error = {:error, _reason} -> {:stop, error} 147 | :error -> {:stop, :no_cache_name} 148 | end 149 | end 150 | 151 | @doc false 152 | def handle_call(:hosts, _from, state) do 153 | hosts = Map.values(state.hosts) 154 | 155 | {:reply, hosts, state} 156 | end 157 | 158 | @doc false 159 | def handle_call(:up_hosts, _from, state) do 160 | up_hosts = 161 | state.hosts 162 | |> Map.values 163 | |> Enum.filter(&Host.up?/1) 164 | 165 | {:reply, up_hosts, state} 166 | end 167 | 168 | @doc false 169 | def handle_call({:host, ips}, _from, state) when is_list(ips) do 170 | hosts = 171 | state.hosts 172 | |> Map.take(ips) 173 | |> Map.values 174 | 175 | {:reply, hosts, state} 176 | end 177 | 178 | @doc false 179 | def handle_call({:host, ip}, _from, state) do 180 | {:reply, state.hosts[ip], state} 181 | end 182 | 183 | @doc false 184 | def handle_call({:find_replicas, keyspace, partition_key}, _from, state) do 185 | token = state.partitioner.create_token(partition_key) 186 | replication_token = insertion_point(state.token_ring, token) 187 | hosts = 188 | case get_in(state, [:keyspaces, keyspace]) do 189 | nil -> [] 190 | keyspace -> 191 | case List.keyfind(keyspace.replications, replication_token, 0) do 192 | nil -> [] 193 | {_, hosts} -> hosts 194 | end 195 | end 196 | {:reply, hosts, state} 197 | end 198 | 199 | @doc false 200 | def handle_call({:register, pid}, _from, state) do 201 | if 
Process.alive?(pid) and not pid in state.listeners do 202 | Process.monitor(pid) 203 | {:reply, :ok, Map.update(state, :listeners, [pid], &[pid | &1])} 204 | else 205 | {:reply, :error, state} 206 | end 207 | end 208 | 209 | @doc false 210 | def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do 211 | {:noreply, Map.update(state, :listeners, [], &List.delete(&1, pid))} 212 | end 213 | 214 | @doc false 215 | def handle_info({:host, :found, {ip, _}}, state) do 216 | Logger.info("new host found: #{inspect ip}") 217 | args = [ip, state.data_center, state.parser] 218 | 219 | state = 220 | case schema(:fetch_peer, args, state) do 221 | {{:ok, host}, state} -> 222 | Enum.each(state.listeners, &send(&1, {:host, :found, host})) 223 | put_in(state, [:hosts, ip], host) 224 | 225 | _ -> state 226 | end 227 | 228 | {:noreply, refresh_schema(state)} 229 | end 230 | 231 | @doc false 232 | def handle_info({:host, :lost, {ip, _}}, state) do 233 | Logger.warn("host lost: #{inspect ip}") 234 | {host, state} = pop_in(state, [:hosts, ip]) 235 | unless is_nil(host) do 236 | Enum.each(state.listeners, &send(&1, {:host, :lost, host})) 237 | end 238 | {:noreply, refresh_schema(state)} 239 | end 240 | 241 | @doc false 242 | def handle_info({:host, status, {ip, _}}, state) do 243 | Logger.info("host #{status} #{inspect ip}") 244 | {_, state} = get_and_update_in state, [:hosts, ip], fn 245 | nil -> :pop 246 | host -> 247 | host = Host.toggle(host, status) 248 | Enum.each(state.listeners, &send(&1, {:host, status, host})) 249 | {:ok, host} 250 | end 251 | {:noreply, state} 252 | end 253 | 254 | @doc false 255 | def handle_info({:keyspace, :dropped, keyspace}, state) do 256 | Logger.info("Keyspace dropped: #{keyspace}") 257 | {_, state} = pop_in(state, [:keyspaces, keyspace]) 258 | {:noreply, state} 259 | end 260 | 261 | @doc false 262 | def handle_info({:keyspace, change, name}, state) do 263 | Logger.info("Keyspace #{change}: #{name}") 264 | state = 265 | case 
schema(:fetch_keyspace, [name], state) do 266 | {{:ok, keyspace}, state} -> 267 | put_in(state, [:keyspaces, name], keyspace) 268 | 269 | _ -> state 270 | end 271 | {:noreply, refresh_schema(state)} 272 | end 273 | 274 | @doc false 275 | def handle_info(:connected, state) do 276 | with {{:ok, schema}, state} <- schema(:fetch, [state.local_data], state) do 277 | state = 278 | state 279 | |> Map.merge(schema) 280 | |> refresh_schema 281 | 282 | Enum.each(state.listeners, &send(&1, :refresh)) 283 | 284 | {:noreply, state} 285 | else 286 | error -> {:stop, error, state} 287 | end 288 | end 289 | 290 | @doc false 291 | def handle_info({:tcp_closed, socket}, %{socket: socket} = state) do 292 | with {socket, supported, local_data} <- select_socket(state.options) do 293 | {:noreply, %{state | socket: socket, supported: supported, local_data: local_data}} 294 | else 295 | _ -> {:noreply, %{state | socket: nil}} 296 | end 297 | end 298 | 299 | @doc false 300 | def handle_info({:table, _change, {_keyspace, _table}}, state) do 301 | {:noreply, state} 302 | end 303 | 304 | @doc false 305 | def select_socket(options) do 306 | options 307 | |> Connection.stream 308 | |> Stream.flat_map(&fetch_local_data(&1, options)) 309 | |> Enum.take(1) 310 | |> case do 311 | [result] -> result 312 | [] -> ConnectionError.new("select contact point", "not available") 313 | end 314 | end 315 | 316 | ### Helpers ### 317 | 318 | defp schema(_, _, %{socket: nil}, @max_tries) do 319 | ConnectionError.new("reconnection", "faield") 320 | end 321 | 322 | defp schema(func, args, %{socket: nil} = state, tries) do 323 | with {socket, supported, local_data} <- select_socket(state.options) do 324 | schema(func, args, %{state | socket: socket, supported: supported, local_data: local_data}) 325 | else 326 | _ -> schema(func, args, state, tries + 1) 327 | end 328 | end 329 | 330 | defp schema(func, args, state) do 331 | case apply(Schema.Fetcher, func, args ++ [state.socket, state.fetcher]) do 332 | 
%ConnectionError{reason: :closed} -> 333 | schema(func, args, %{state | socket: nil}, 0) 334 | result -> 335 | {result, state} 336 | end 337 | end 338 | 339 | defp refresh_schema(schema) do 340 | host_tokens = Enum.map(schema.hosts, fn {ip, host} -> {ip, host.tokens} end) 341 | token_hosts = token_hosts(host_tokens) 342 | token_ring = token_ring(host_tokens, token_hosts) 343 | keyspaces = put_replications(schema.keyspaces, token_ring, schema) 344 | 345 | Map.merge(schema, %{ 346 | keyspaces: keyspaces, 347 | token_ring: token_ring, 348 | }) 349 | end 350 | 351 | defp token_ring(host_tokens, token_hosts) do 352 | list = 353 | host_tokens 354 | |> Keyword.values 355 | |> Enum.concat 356 | |> Enum.sort 357 | |> Enum.uniq 358 | 359 | [head | _] = list 360 | Enum.map(list ++ [head], &{&1, Map.get(token_hosts, &1)}) 361 | end 362 | 363 | defp token_hosts(host_tokens) do 364 | host_tokens 365 | |> Enum.flat_map(fn {ip, tokens} -> Enum.map(tokens, &{&1, ip}) end) 366 | |> Enum.into(%{}) 367 | end 368 | 369 | defp put_replications(keyspaces, token_ring, schema) do 370 | keyspaces_with_hash = Enum.map keyspaces, fn {_, keyspace} -> 371 | {keyspace, :erlang.phash2(keyspace.replication)} 372 | end 373 | 374 | replications = 375 | keyspaces_with_hash 376 | |> Enum.uniq_by(&elem(&1, 1)) 377 | |> Enum.map(fn {keyspace, hash} -> 378 | reps = Schema.ReplicationStrategy.replications(keyspace.replication, token_ring, schema) 379 | {hash, reps} 380 | end) 381 | |> Enum.into(%{}) 382 | 383 | keyspaces_with_hash 384 | |> Enum.map(fn {keyspace, hash} -> {keyspace.name, Map.put(keyspace, :replications, replications[hash])} end) 385 | |> Enum.into(%{}) 386 | end 387 | 388 | defp insertion_point([{a,_}], _), do: a 389 | defp insertion_point([{a,_}, {b,_} | _], item) when a < item and item <= b, do: b 390 | defp insertion_point([_ | tail], item), do: insertion_point(tail, item) 391 | 392 | defp fetch_local_data({_host, socket, supported}, options) do 393 | with {:ok, local_data} <- 
Schema.Fetcher.fetch_local(socket, options[:fetcher]), 394 | true <- bootstrapped?(local_data), 395 | true <- in_data_center?(local_data, options[:data_center]), 396 | true <- named?(local_data, options[:cluster_name]) 397 | do 398 | [{socket, supported, local_data}] 399 | else 400 | _ -> [] 401 | end 402 | end 403 | 404 | defp bootstrapped?(local_data) do 405 | Map.get(local_data, "bootstrapped") == "COMPLETED" 406 | end 407 | 408 | defp named?(_, nil), do: true 409 | defp named?(local_data, name) do 410 | Map.get(local_data, "cluster_name") == name 411 | end 412 | 413 | defp in_data_center?(_, nil), do: true 414 | defp in_data_center?(local_data, data_center) do 415 | Map.get(local_data, "data_center") == data_center 416 | end 417 | end 418 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/fetcher.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.Fetcher do 2 | alias Cassandra.{Connection, Host, Keyspace} 3 | alias Cassandra.Cluster.Schema 4 | 5 | @type cql :: binary 6 | @type ip :: :inet.ip_address 7 | @type keyspace_name :: String.t 8 | 9 | @callback select_local() :: cql 10 | @callback select_peers() :: cql 11 | @callback select_peer(ip) :: cql 12 | @callback select_keyspace(keyspace_name) :: cql 13 | @callback select_keyspaces() :: cql 14 | @callback select_tables() :: cql 15 | @callback select_columns() :: cql 16 | @callback select_indexes() :: cql 17 | 18 | def fetch(local_data, connection, version \\ Schema.Fetcher.V3_0_x) do 19 | data_center = Map.get(local_data, "data_center") 20 | cluster_name = Map.get(local_data, "cluster_name") 21 | partitioner = Schema.Partitioner.partitioner(local_data) 22 | parser = &partitioner.parse_token/1 23 | local = Host.new(local_data, :up, data_center, parser) 24 | 25 | with {:ok, peers} <- fetch_peers(data_center, parser, connection, version), 26 | {:ok, keyspaces} <- 
fetch_keyspaces(connection, version) 27 | do 28 | hosts_map = 29 | [local | peers] 30 | |> Enum.map(&{&1.ip, &1}) 31 | |> Enum.into(%{}) 32 | 33 | keyspaces_map = 34 | keyspaces 35 | |> Enum.map(&{&1.name, &1}) 36 | |> Enum.into(%{}) 37 | 38 | schema = %{ 39 | local: local, 40 | hosts: hosts_map, 41 | keyspaces: keyspaces_map, 42 | partitioner: partitioner, 43 | data_center: data_center, 44 | cluster_name: cluster_name, 45 | parser: parser, 46 | } 47 | 48 | {:ok, schema} 49 | end 50 | end 51 | 52 | def fetch_local(connection, version) do 53 | connection 54 | |> Connection.query(version.select_local) 55 | |> one 56 | end 57 | 58 | def fetch_peers(data_center, parser, connection, version) do 59 | with %CQL.Result.Rows{} = rows <- Cassandra.Connection.query(connection, version.select_peers) do 60 | peers = 61 | rows 62 | |> CQL.Result.Rows.to_map 63 | |> Enum.map(&Host.new(&1, :get, data_center, parser)) 64 | |> Enum.reject(&is_nil/1) 65 | 66 | {:ok, peers} 67 | end 68 | end 69 | 70 | def fetch_peer(ip, data_center, parser, connection, version) do 71 | connection 72 | |> Connection.query(version.select_peer(ip)) 73 | |> one(&Host.new(&1, :down, data_center, parser)) 74 | end 75 | 76 | def fetch_keyspaces(connection, version) do 77 | with %CQL.Result.Rows{} = rows <- Cassandra.Connection.query(connection, version.select_keyspaces) do 78 | keyspaces = 79 | rows 80 | |> CQL.Result.Rows.to_map 81 | |> Enum.map(&Keyspace.new/1) 82 | |> Enum.reject(&is_nil/1) 83 | 84 | {:ok, keyspaces} 85 | end 86 | end 87 | 88 | def fetch_keyspace(name, connection, version) do 89 | connection 90 | |> Connection.query(version.select_keyspace(name)) 91 | |> one(&Keyspace.new/1) 92 | end 93 | 94 | defp one(%CQL.Result.Rows{} = rows) do 95 | case CQL.Result.Rows.to_map(rows) do 96 | [] -> {:error, :not_found} 97 | [one] -> {:ok, one} 98 | [_|_] -> {:error, :many} 99 | end 100 | end 101 | defp one(error), do: error 102 | 103 | defp one(result, mapper) do 104 | case one(result) do 105 | {:ok, 
value} -> {:ok, mapper.(value)} 106 | error -> error 107 | end 108 | end 109 | end 110 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/fetcher/v3_0_x.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.Fetcher.V3_0_x do 2 | @behaviour Cassandra.Cluster.Schema.Fetcher 3 | 4 | @select_local CQL.encode!(%CQL.Query{query: "SELECT * FROM system.local;"}) 5 | @select_peers CQL.encode!(%CQL.Query{query: "SELECT * FROM system.peers;"}) 6 | 7 | def select_local, do: @select_local 8 | def select_peers, do: @select_peers 9 | 10 | def select_peer(ip) do 11 | CQL.encode!(%CQL.Query{query: "SELECT * FROM system.peers WHERE peer='#{ip_to_string(ip)}';"}) 12 | end 13 | 14 | def select_keyspace(name) do 15 | CQL.encode!(%CQL.Query{query: "SELECT * FROM system_schema.keyspaces WHERE keyspace_name='#{name}';"}) 16 | end 17 | 18 | def select_keyspaces, do: select_schema("keyspaces") 19 | def select_tables, do: select_schema("tables") 20 | def select_columns, do: select_schema("columns") 21 | def select_indexes, do: select_schema("indexes") 22 | 23 | defp select_schema(name) do 24 | CQL.encode!(%CQL.Query{query: "SELECT * FROM system_schema.#{name};"}) 25 | end 26 | 27 | defp ip_to_string({_, _, _, _} = ip) do 28 | ip 29 | |> Tuple.to_list 30 | |> Enum.join(".") 31 | end 32 | 33 | defp ip_to_string({_, _, _, _, _, _} = ip) do 34 | ip 35 | |> Tuple.to_list 36 | |> Enum.map(&Integer.to_string(&1, 16)) 37 | |> Enum.join(":") 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/partitioner.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.Partitioner do 2 | @type partition_key :: term 3 | @type token :: term 4 | 5 | @callback create_token(partition_key) :: token 6 | @callback parse_token(String.t) :: 
token 7 | 8 | def partitioner(%{"partitioner" => "org.apache.cassandra.dht.Murmur3Partitioner"}) do 9 | Cassandra.Cluster.Schema.Partitioner.Murmur3 10 | end 11 | 12 | def partitioner(_) do 13 | nil 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/partitioner/murmur3.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.Partitioner.Murmur3 do 2 | @behaviour Cassandra.Cluster.Schema.Partitioner 3 | 4 | @long_min -2 |> :math.pow(63) |> trunc 5 | @long_max 2 |> :math.pow(63) |> trunc |> Kernel.-(1) 6 | 7 | def create_token(partition_key) do 8 | case Cassandra.Murmur3.x64_128(partition_key) do 9 | @long_min -> @long_max 10 | hash -> hash 11 | end 12 | end 13 | 14 | def parse_token(token_string) do 15 | String.to_integer(token_string) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/replication_strategy.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.ReplicationStrategy do 2 | alias Cassandra.Cluster.Schema.ReplicationStrategy.{Simple, Local, None} 3 | 4 | @default_strategy None 5 | @strategies %{ 6 | "org.apache.cassandra.locator.SimpleStrategy" => Simple, 7 | "org.apache.cassandra.locator.LocalStrategy" => Local, 8 | } 9 | 10 | def replications(replication, token_ring, schema) do 11 | strategy(replication).replications(replication, token_ring, schema) 12 | end 13 | 14 | def strategy(replication) do 15 | class = Map.get(replication, "class") 16 | Map.get(@strategies, class, @default_strategy) 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/replication_strategy/local.ex: -------------------------------------------------------------------------------- 1 | defmodule 
Cassandra.Cluster.Schema.ReplicationStrategy.Local do 2 | def replications(_replications, token_ring, schema) do 3 | Enum.map(token_ring, fn {token, _host} -> {token, [schema.local.ip]} end) 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/replication_strategy/none.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.ReplicationStrategy.None do 2 | def replications(_replications, token_ring, _schema) do 3 | Enum.map(token_ring, fn {token, host} -> {token, [host]} end) 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/schema/replication_strategy/simple.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.ReplicationStrategy.Simple do 2 | def replications(replication, token_ring, _schema) do 3 | size = Enum.count(token_ring) 4 | factor = Map.get(replication, "replication_factor", "1") 5 | factor = case Integer.parse(factor) do 6 | {x, ""} when size < x -> size 7 | {factor, ""} -> factor 8 | _ -> 1 9 | end 10 | 11 | token_ring 12 | |> Enum.with_index 13 | |> Enum.map(fn {{token, _host}, i} -> 14 | hosts = 15 | token_ring 16 | |> Stream.cycle 17 | |> Stream.drop(i) 18 | |> Stream.take(size) 19 | |> Stream.map(fn {_, host} -> host end) 20 | |> Stream.uniq 21 | |> Enum.take(factor) 22 | 23 | {token, hosts} 24 | end) 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /lib/cassandra/cluster/watcher.ex: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Watcher do 2 | use GenServer 3 | 4 | require Logger 5 | 6 | alias Cassandra.{Cluster, Connection, ConnectionError} 7 | 8 | @initial_backoff 1000 9 | @max_backoff 12_000 10 | @register_events CQL.encode!(%CQL.Register{}) 
defmodule Cassandra.Cluster.Watcher do
  @moduledoc """
  Listens on a dedicated native-protocol connection for Cassandra cluster
  events (topology, status and schema changes) and forwards them as
  messages to registered listener processes.
  """

  use GenServer

  require Logger

  alias Cassandra.{Cluster, Connection, ConnectionError}

  @initial_backoff 1000
  @max_backoff 12_000
  @register_events CQL.encode!(%CQL.Register{})

  @doc """
  Starts the watcher and registers the caller as the first listener.
  """
  def start_link(options) do
    with {:ok, pid} <- GenServer.start_link(__MODULE__, options, server_options(options)),
         :ok <- register(pid) do
      {:ok, pid}
    end
  end

  @doc "Registers the calling process to receive cluster events."
  def register(watcher) do
    GenServer.call(watcher, {:register, self()})
  end

  @doc "Registers `pid` to receive cluster events."
  def register(watcher, pid) do
    GenServer.call(watcher, {:register, pid})
  end

  ### GenServer Callbacks ###

  @doc false
  def init(options) do
    backoff = @initial_backoff

    case setup(options) do
      {:ok, socket} ->
        {:ok, %{socket: socket, options: options, listeners: [], backoff: backoff}}

      _error ->
        # Any setup failure (not only %ConnectionError{}, which was the only
        # case the original matched, crashing on other errors) schedules a
        # retry instead of refusing to start.
        Process.send_after(self(), :connect, backoff)
        {:ok, %{socket: nil, options: options, listeners: [], backoff: next_backoff(backoff)}}
    end
  end

  @doc false
  def handle_call({:register, pid}, _from, state) do
    if Process.alive?(pid) and pid not in state.listeners do
      # Monitor so dead listeners are pruned in the :DOWN handler below.
      Process.monitor(pid)
      {:reply, :ok, Map.update(state, :listeners, [pid], &[pid | &1])}
    else
      {:reply, :error, state}
    end
  end

  @doc false
  def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do
    {:noreply, Map.update(state, :listeners, [], &List.delete(&1, pid))}
  end

  def handle_info({:tcp, socket, data}, %{socket: socket} = state) do
    # Decode the pushed frame and fan the event out to all listeners;
    # undecodable frames and unknown events are silently dropped.
    with {:ok, %CQL.Frame{body: cql_event}} <- CQL.decode(data) do
      case event_type(cql_event) do
        :error -> :ok
        event -> Enum.each(state.listeners, &send(&1, event))
      end
    end

    {:noreply, state}
  end

  def handle_info(:connect, %{socket: nil, backoff: backoff} = state) do
    Logger.info("Cassandra watcher trying to connect ...")

    case setup(state.options) do
      {:ok, socket} ->
        Logger.info("Cassandra watcher connected")
        Enum.each(state.listeners, &send(&1, :connected))
        {:noreply, %{state | socket: socket, backoff: @initial_backoff}}

      _error ->
        Process.send_after(self(), :connect, backoff)
        {:noreply, %{state | socket: nil, backoff: next_backoff(backoff)}}
    end
  end

  def handle_info({:tcp_closed, socket}, %{socket: socket} = state) do
    Logger.warn("Cassandra watcher connection lost")
    handle_info(:connect, %{state | socket: nil})
  end

  ###

  # Picks a control socket, subscribes it to server push events and
  # switches it to active mode so frames arrive as messages.
  defp setup(options) do
    with {socket, _, _} <- Cluster.select_socket(options),
         :ok <- register_events(socket),
         :ok <- :inet.setopts(socket, active: true) do
      {:ok, socket}
    end
  end

  def register_events(socket) do
    socket
    |> Connection.query(@register_events)
    |> ready?
  end

  def ready?(%CQL.Ready{}), do: :ok
  def ready?(error), do: error

  # Exponential backoff with ~20% jitter; the final value is clamped so
  # jitter can no longer push the delay past @max_backoff.
  defp next_backoff(n) do
    base = min(n * 1.2, @max_backoff)
    jittered = trunc(0.2 * (:rand.uniform() + 1) * base + base)
    min(jittered, @max_backoff)
  end

  # Maps a CQL push event to the message sent to listeners; unknown change
  # strings now yield :error instead of crashing the watcher (the original
  # inner `case` had no fallback clause).
  defp event_type(%CQL.Event{type: type, info: %{change: change, address: address}})
       when type in ["TOPOLOGY_CHANGE", "STATUS_CHANGE"] do
    case change do
      "NEW_NODE" -> {:host, :found, address}
      "REMOVED_NODE" -> {:host, :lost, address}
      "UP" -> {:host, :up, address}
      "DOWN" -> {:host, :down, address}
      _ -> :error
    end
  end

  defp event_type(%CQL.Event{type: "SCHEMA_CHANGE", info: %{change: change, target: "KEYSPACE", options: %{keyspace: keyspace}}}) do
    case change do
      "CREATED" -> {:keyspace, :created, keyspace}
      "UPDATED" -> {:keyspace, :updated, keyspace}
      "DROPPED" -> {:keyspace, :dropped, keyspace}
      _ -> :error
    end
  end

  defp event_type(%CQL.Event{type: "SCHEMA_CHANGE", info: %{change: change, target: "TABLE", options: %{keyspace: keyspace, table: table}}}) do
    case change do
      "CREATED" -> {:table, :created, {keyspace, table}}
      "UPDATED" -> {:table, :updated, {keyspace, table}}
      "DROPPED" -> {:table, :dropped, {keyspace, table}}
      _ -> :error
    end
  end

  defp event_type(_event), do: :error

  # Name the server only when a :watcher name was configured.
  defp server_options(options) do
    case Keyword.get(options, :watcher) do
      nil -> []
      name -> [name: name]
    end
  end
end
defmodule Cassandra.Connection do
  @moduledoc false

  use DBConnection
  require Logger

  alias Cassandra.{Host, Statement, ConnectionError}
  alias CQL.Result.SetKeyspace

  @defaults [
    port: 9042,
    host: {127, 0, 0, 1},
    connect_timeout: 1000,
    timeout: 5000,
  ]

  @header_length 9
  @startup_request_lz4 CQL.encode!(%CQL.Startup{options: %{"CQL_VERSION" => "3.0.0", "COMPRESSION" => "lz4"}})
  @startup_request CQL.encode!(%CQL.Startup{options: %{"CQL_VERSION" => "3.0.0"}})
  @options_request CQL.encode!(%CQL.Options{})

  ### API ###

  @doc """
  Opens a short-lived connection to `host`, runs `query` on it and closes
  the socket. Returns the decoded result, a `%CQL.Error{}` or a
  `%ConnectionError{}`.
  """
  # NOTE: the `else` belongs to the `with` (before its `end`); in the
  # previous revision it appeared after the `with`'s `end`, which does not
  # parse.
  def run_query(host, query, options \\ []) do
    with {:ok, request} <- CQL.encode(%CQL.Query{query: query, params: CQL.QueryParams.new(options)}),
         {:ok, %{socket: socket}} <- connect(host, options) do
      result = query(socket, request)
      :gen_tcp.close(socket)
      result
    else
      {:error, %ConnectionError{} = error} -> error
      error -> error
    end
  end

  @doc """
  Sends an already encoded `request` on `socket` and returns the decoded
  response body (or the error it decoded to).
  """
  def query(socket, request, timeout \\ @defaults[:timeout]) do
    with :ok <- tcp_send(socket, request),
         {:ok, result} <- receive_response(socket, timeout) do
      result
    end
  end

  @doc """
  Lazily connects to each configured `:contact_points` host, yielding
  `{host, socket, supported_options}` tuples for the ones that succeed.
  """
  def stream(options) do
    options
    |> Keyword.get(:contact_points, ["127.0.0.1"])
    |> stream(options)
  end

  def stream(hosts, options) do
    hosts
    |> Stream.map(&{&1, connect(&1, options)})
    |> Stream.filter(&ok?/1)
    |> Stream.map(fn {host, {:ok, %{socket: socket, options: options}}} -> {host, socket, options} end)
  end

  # Accepts a {host, port} tuple, a %Host{} or a bare address.
  def connect({host, port}, options) do
    options
    |> Keyword.merge([host: host, port: port])
    |> connect
  end

  def connect(%Host{ip: ip}, options) do
    connect(ip, options)
  end

  def connect(host, options) do
    options
    |> Keyword.put(:host, host)
    |> connect
  end

  ### DBConnection Callbacks ###

  @doc false
  def connect(options) do
    options = Keyword.merge(@defaults, options)
    host = get_host(options)
    port = options[:port]
    timeout = options[:timeout]
    connect_timeout = options[:connect_timeout]
    keyspace = options[:keyspace]

    tcp_options = [
      :binary,
      {:active, false},
      {:keepalive, true},
      {:packet, :raw},
    ]

    # Handshake order: OPTIONS (to learn supported compression), STARTUP,
    # then optionally USE <keyspace>.
    with {:ok, socket} <- :gen_tcp.connect(host, port, tcp_options, connect_timeout),
         {:ok, options} <- fetch_options(socket, timeout),
         :ok <- handshake(socket, timeout, options),
         :ok <- set_keyspace(socket, keyspace, timeout) do
      {:ok, %{socket: socket, host: host, timeout: timeout, options: options, last_cursor: 0, cursors: %{}}}
    else
      {:error, reason} when is_atom(reason) -> {:error, ConnectionError.new("TCP Connect", reason)}
      error -> {:error, error}
    end
  end

  @doc false
  def checkin(state), do: {:ok, state}
  @doc false
  def checkout(state), do: {:ok, state}

  @doc false
  def disconnect(_error, %{socket: socket}) do
    :gen_tcp.close(socket)
  end

  @doc false
  def handle_close(statement, _options, state) do
    {:ok, statement, state}
  end

  @doc false
  def handle_execute(_statement, %CQL.Error{} = error, _options, state) do
    {:error, error, state}
  end

  def handle_execute(%Statement{}, {request, _params}, _options, state) do
    with {:ok, result} <- fetch(request, state.socket, state.timeout) do
      {:ok, result, state}
    else
      # Transport failures poison the socket; ask DBConnection to reconnect.
      error = %ConnectionError{} -> {:disconnect, error, state}
      error -> {:error, error, state}
    end
  end

  @doc false
  def handle_prepare(%CQL.Error{} = error, _options, state) do
    {:error, error, state}
  end

  def handle_prepare(%Statement{} = statement, _options, state) do
    with {:ok, frame} <- fetch(statement.request, state.socket, state.timeout) do
      {:ok, %{statement | response: frame}, state}
    else
      error = %ConnectionError{} -> {:disconnect, error, state}
      error -> {:error, error, state}
    end
  end

  @doc false
  def handle_declare(_statement, {_request, params}, _options, state) do
    cursor = next_cursor(state.last_cursor)
    {:ok, {cursor, params}, %{state | last_cursor: cursor}}
  end

  @doc false
  def handle_first(%Statement{} = statement, {cursor, params}, options, state) do
    handle_next(statement, {cursor, params}, options, state)
  end

  @doc false
  def handle_next(%Statement{} = statement, {cursor, params}, _options, state) do
    # Resume from the paging state remembered for this cursor (nil on first page).
    paging_state = Map.get(state.cursors, cursor)
    execute = %CQL.Execute{prepared: statement.prepared, params: %{params | paging_state: paging_state}}

    with {:ok, request} <- CQL.encode(execute),
         {:ok, frame} <- fetch(request, state.socket, state.timeout),
         %CQL.Result.Rows{paging_state: paging_state} = rows <- CQL.Result.Rows.decode_meta(frame) do
      if is_nil(paging_state) do
        # No more pages; let DBConnection deallocate the cursor.
        {:deallocate, rows, state}
      else
        next_state = Map.update!(state, :cursors, &Map.put(&1, cursor, paging_state))
        {:ok, rows, next_state}
      end
    else
      error = %ConnectionError{} -> {:disconnect, error, state}
      error -> {:error, error, state}
    end
  end

  @doc false
  def handle_deallocate(_statement, {cursor, _params}, _options, state) do
    next_state = Map.update(state, :cursors, %{}, &Map.delete(&1, cursor))
    {:ok, cursor, next_state}
  end

  @doc false
  def ping(%{socket: socket, timeout: timeout} = state) do
    # An OPTIONS round-trip doubles as a liveness check.
    with {:ok, _options} <- fetch_options(socket, timeout) do
      {:ok, state}
    else
      error -> {:disconnect, error, state}
    end
  end

  ### Helpers ###

  defp fetch(request, socket, timeout) do
    with :ok <- tcp_send(socket, request),
         {:ok, frame} <- receive_frame(socket, timeout) do
      {:ok, frame}
    end
  end

  defp tcp_send(socket, request) do
    with :ok <- :gen_tcp.send(socket, request) do
      :ok
    else
      {:error, reason} -> ConnectionError.new("TCP send", reason)
    end
  end

  defp tcp_receive(socket, bytes, timeout) do
    with {:ok, data} <- :gen_tcp.recv(socket, bytes, timeout) do
      {:ok, data}
    else
      {:error, reason} -> ConnectionError.new("TCP receive", reason)
    end
  end

  defp receive_response(socket, timeout) do
    with {:ok, frame} <- receive_frame(socket, timeout),
         {:ok, %CQL.Frame{body: body}} <- CQL.decode(frame) do
      {:ok, body}
    end
  end

  # Reads one full frame: fixed-size header first, then the body whose
  # length is announced in the header.
  defp receive_frame(socket, timeout) do
    with {:ok, header} <- tcp_receive(socket, @header_length, timeout),
         {:ok, body} <- receive_body(socket, header, timeout),
         {:ok, frame} <- CQL.decode_error(header <> body) do
      {:ok, frame}
    else
      error = %CQL.Error{} -> error
      error = %ConnectionError{} -> error
    end
  end

  defp receive_body(socket, header, timeout) do
    case CQL.Frame.body_length(header) do
      {:ok, 0} -> {:ok, <<>>}
      {:ok, n} -> tcp_receive(socket, n, timeout)
      error -> error
    end
  end

  defp set_keyspace(_, nil, _), do: :ok

  defp set_keyspace(socket, keyspace, timeout) do
    query = %CQL.Query{query: "USE #{keyspace}"}

    with {:ok, request} <- CQL.encode(query),
         :ok <- tcp_send(socket, request),
         {:ok, %SetKeyspace{name: ^keyspace}} <- receive_response(socket, timeout) do
      :ok
    else
      # Deliberate best-effort: a CQL-level failure to switch keyspace is
      # logged but does not prevent the connection from being used.
      %CQL.Error{} = error ->
        Logger.error(Exception.format_banner(:error, error, []))
        :ok

      error ->
        error
    end
  end

  defp fetch_options(socket, timeout) do
    with :ok <- tcp_send(socket, @options_request),
         {:ok, %CQL.Supported{options: options}} <- receive_response(socket, timeout) do
      {:ok, Enum.into(options, %{})}
    end
  end

  defp handshake(socket, timeout, options) do
    # Prefer lz4 compression when the server advertises it.
    startup_request =
      if "lz4" in Map.get(options, "COMPRESSION", []) do
        @startup_request_lz4
      else
        @startup_request
      end

    with :ok <- tcp_send(socket, startup_request),
         {:ok, %CQL.Ready{}} <- receive_response(socket, timeout) do
      :ok
    end
  end

  defp get_host(options) do
    case Keyword.get(options, :host) do
      nil -> @defaults[:host]
      %Cassandra.Host{ip: ip} -> ip
      address when is_bitstring(address) -> to_charlist(address)
      inet -> inet
    end
  end

  defp ok?({_, {:ok, _}}), do: true
  defp ok?(_), do: false

  # Cursor ids wrap around to keep the counter bounded.
  defp next_cursor(500_000), do: 1
  defp next_cursor(n), do: n + 1
end
defmodule Cassandra.ConnectionError do
  @moduledoc """
  Exception describing a failed network interaction with a Cassandra node.

  `action` names the operation that failed (e.g. `"TCP send"`) and
  `reason` is either a POSIX error atom or a human-readable binary.
  """

  defexception [:action, :reason]

  @doc "Builds a new connection error from an action name and a reason."
  def new(action, reason) do
    %__MODULE__{action: action, reason: reason}
  end

  @doc "Renders the error as `\"<action> <formatted reason>\"`."
  def message(%__MODULE__{action: action, reason: reason}) do
    "#{action} #{format(reason)}"
  end

  # POSIX atoms are expanded through :inet.format_error/1; atoms it does
  # not recognize are rendered with inspect/1 so the raw reason remains
  # visible in the message.
  defp format(reason) when is_atom(reason) do
    case :inet.format_error(reason) do
      'unknown POSIX error' -> inspect(reason)
      formatted -> List.to_string(formatted)
    end
  end

  defp format(reason) when is_binary(reason), do: reason
end
defmodule Cassandra.Host do
  @moduledoc """
  Represents a Cassandra host
  """

  defstruct [
    :ip,
    :id,
    :data_center,
    :rack,
    :distance,
    :release_version,
    :schema_version,
    :tokens,
    :status,
  ]

  @doc """
  Creates a new Host struct from given data came from Cassandra
  `system.peers` or `system.local` tables.

  `status` may be `:up`, `:down`, or anything else to probe the host over
  TCP. `parser` is applied to each token string. Returns `nil` when the
  row is missing the address or the expected columns.
  """
  def new(data, status \\ :down, data_center \\ nil, parser \\ fn x -> x end) do
    with {:ok, ip} <- peer_ip(data),
         {:ok, host} <- from_data(data, parser),
         distance <- distance(host, data_center) do
      status =
        case status do
          :up -> :up
          :down -> :down
          _ -> ping(ip)
        end

      %{host | ip: ip, status: status, distance: distance}
    else
      :error -> nil
    end
  end

  @doc """
  Returns the host's distance relative to `data_center`: `:ignore` when no
  data center is given, `:local` on a match, `:remote` otherwise.
  """
  def distance(host, data_center)
  def distance(%__MODULE__{data_center: _}, nil), do: :ignore
  def distance(%__MODULE__{data_center: x}, x), do: :local
  def distance(%__MODULE__{data_center: _}, _), do: :remote

  @doc """
  Checks whether the `host` status is up or not
  """
  def up?(host)
  def up?({_, host}), do: up?(host)
  def up?(%__MODULE__{} = host), do: host.status == :up

  @doc """
  Checks whether the `host` status is down or not
  """
  def down?(host)
  def down?({_, host}), do: down?(host)
  def down?(%__MODULE__{} = host), do: host.status == :down

  @doc """
  Toggles the `host` status to `status`.
  When `status` is `:get` it tries to get host status in network
  """
  def toggle(%__MODULE__{} = host, status)
      when status == :up or status == :down do
    %{host | status: status}
  end

  def toggle(%__MODULE__{ip: ip} = host, :get) do
    %{host | status: ping(ip)}
  end

  ### Helpers ###

  # Prefer the broadcast address, fall back to the peer address when
  # rpc_address is the wildcard or missing.
  defp peer_ip(%{"broadcast_address" => ip}) when not is_nil(ip), do: {:ok, ip}
  defp peer_ip(%{"rpc_address" => {0, 0, 0, 0}, "peer" => peer}), do: {:ok, peer}
  defp peer_ip(%{"rpc_address" => nil, "peer" => peer}), do: {:ok, peer}
  defp peer_ip(%{"rpc_address" => ip}) when not is_nil(ip), do: {:ok, ip}
  defp peer_ip(_), do: :error

  defp from_data(%{
         "host_id" => id,
         "data_center" => data_center,
         "rack" => rack,
         "release_version" => release_version,
         "schema_version" => schema_version,
         "tokens" => tokens,
       }, parser) do
    host = %__MODULE__{
      id: id,
      data_center: data_center,
      rack: rack,
      release_version: release_version,
      schema_version: schema_version,
      tokens: Enum.map(tokens, parser),
    }

    {:ok, host}
  end

  defp from_data(_, _), do: :error

  # The struct-accepting ping/1 clause previously collided with the
  # default-argument head `ping(ip, port \\ 9042, timeout \\ 100)` (which
  # also defines ping/1); defaults are now expanded explicitly.
  defp ping(%__MODULE__{ip: ip}), do: ping(ip)
  defp ping(ip), do: ping(ip, 9042, 100)

  defp ping(ip, port, timeout) do
    case :gen_tcp.connect(ip, port, [active: false], timeout) do
      {:ok, socket} ->
        :gen_tcp.close(socket)
        :up

      _ ->
        :down
    end
  end
end
defmodule Cassandra.LoadBalancing do
  @moduledoc false

  alias Cassandra.LoadBalancing.Policy
  alias Cassandra.Session.ConnectionManager

  @distances [:ignore, :local, :remote]

  # Compile-time list of valid host distances.
  defmacro distances, do: @distances

  # Picks the candidate connections for a statement. When :on_coordinator
  # is set, every open connection is eligible; otherwise delegate to the
  # configured policy.
  def plan(statement, balancer, cluster, connection_manager) do
    on_coordinator? = Keyword.get(statement.options, :on_coordinator, false)

    if on_coordinator? do
      %{statement | connections: ConnectionManager.connections(connection_manager)}
    else
      Policy.plan(balancer, statement, cluster, connection_manager)
    end
  end

  def count(balancer, host), do: Policy.count(balancer, host)

  # Takes `count` items from `items`, cycling when the list is shorter;
  # the empty-list clause avoids cycling an empty stream.
  def take([], _count), do: []

  def take(items, count) do
    items
    |> Stream.cycle()
    |> Enum.take(count)
  end
end
defmodule Cassandra.LoadBalancing.RoundRobin do
  @moduledoc """
  Round robin load balancing policy

  ## Acceptable args

  * `:num_connections` - number of connections to open for each host (default: `10`)
  * `:max_tries` - number of connections to try before failing a request (default: `3`)
  """

  # NOTE: the moduledoc previously claimed a default of 1 for
  # :num_connections while the struct default is 10; the doc now matches.
  defstruct [num_connections: 10, max_tries: 3]

  @doc "Builds a RoundRobin policy struct from a keyword list of args."
  def new(args) do
    struct(__MODULE__, args)
  end

  defimpl Cassandra.LoadBalancing.Policy do
    alias Cassandra.LoadBalancing
    alias Cassandra.Session.ConnectionManager

    # Shuffles the open connections and keeps up to `max_tries` of them,
    # cycling when fewer connections than tries are available.
    def plan(balancer, statement, _cluster, connection_manager) do
      connections =
        connection_manager
        |> ConnectionManager.connections
        |> Enum.shuffle
        |> LoadBalancing.take(balancer.max_tries)

      %{statement | connections: connections}
    end

    # Every host gets the same configured number of connections.
    def count(balancer, _) do
      balancer.num_connections
    end
  end
end
defmodule Cassandra.Murmur3 do
  @moduledoc false

  # Load the native hashing NIF as soon as this module's code is loaded.
  @on_load :load_nif

  @doc """
  Hashes `key` with MurmurHash3 (x64_128 variant) through the native NIF.

  Charlists are hashed directly; binaries are converted to charlists
  first, and any other term is serialized with `:erlang.term_to_binary/1`
  before hashing.
  """
  def x64_128(key, seed \\ 0)

  def x64_128(key, seed) when is_list(key), do: native_x64_128(key, seed)

  def x64_128(key, seed) when is_binary(key) do
    x64_128(:erlang.binary_to_list(key), seed)
  end

  def x64_128(key, seed) do
    x64_128(:erlang.term_to_binary(key), seed)
  end

  @doc false
  def load_nif do
    path = :filename.join(:code.priv_dir(:cassandra), 'murmur_nif')
    :ok = :erlang.load_nif(path, 0)
  end

  # Placeholder clobbered by the NIF; reached only if loading failed.
  defp native_x64_128(_key, _seed), do: exit(:nif_not_loaded)
end
defmodule Cassandra.Session do
  use Supervisor

  alias Cassandra.Session.{Executor, ConnectionManager}

  @default_balancer {Cassandra.LoadBalancing.TokenAware, []}

  @defaults [
    connection_manager: ConnectionManager,
    session: Cassandra.Session,
    pool: DBConnection.Poolboy,
    idle_timeout: 30_000,
    queue: false,
    executor_pool: [
      size: 10,
      max_overflow: 0,
      strategy: :lifo,
    ],
  ]

  def start_link(cluster, options \\ []) do
    Supervisor.start_link(__MODULE__, [cluster, options])
  end

  @doc """
  Executes `query` on a pooled executor and returns the result.
  """
  def execute(pool, query, options \\ []) when is_list(options) do
    timeout = Keyword.get(options, :timeout, 15_000)
    :poolboy.transaction(pool, &Executor.execute(&1, query, options), timeout)
  end

  @doc """
  Executes a query and streams chunks of the results.

  `func` must be a function of arity one which receives the stream as parameter.

  ### Options

  * `:page_size` - number of rows in each chunk (Cassandra recommends against using values below 100)

  ### Example

      Session.run_stream(session, "SELECT name, age FROM users;", &Enum.to_list/1, page_size: 2)

  """
  def run_stream(pool, query, func, options \\ []) when is_list(options) do
    timeout = Keyword.get(options, :timeout, 15_000)
    :poolboy.transaction(pool, &Executor.stream(&1, query, func, options), timeout)
  end

  @doc false
  def init([cluster, options]) do
    {balancer_policy, balancer_args} = Keyword.get(options, :balancer, @default_balancer)
    balancer = balancer_policy.new(balancer_args)

    options =
      @defaults
      |> Keyword.merge(options)
      |> Keyword.put(:balancer, balancer)

    {executor_pool_options, options} = Keyword.pop(options, :executor_pool)

    executor_pool_options = [
      name: {:local, Keyword.fetch!(options, :session)},
      strategy: Keyword.get(executor_pool_options, :strategy, :lifo),
      size: Keyword.get(executor_pool_options, :size, 10),
      max_overflow: max_overflow(executor_pool_options),
      worker_module: Executor,
    ]

    children = [
      worker(ConnectionManager, [cluster, options]),
      :poolboy.child_spec(Executor, executor_pool_options, [cluster, options]),
    ]

    supervise(children, strategy: :one_for_one)
  end

  # Bug fix: the overflow option was dead — defaults declared
  # `owerflow_size: 0` while init read `:owerflow`, so neither key ever
  # took effect. :max_overflow (poolboy's actual option name) is now the
  # primary key, with the historical typo keys accepted for compatibility.
  defp max_overflow(opts) do
    Keyword.get(opts, :max_overflow) ||
      Keyword.get(opts, :owerflow_size) ||
      Keyword.get(opts, :owerflow) ||
      0
  end
end
defmodule Cassandra.Session.ConnectionManager do
  @moduledoc """
  Tracks one DBConnection pool per up Cassandra host and keeps the set in
  sync with cluster events (host up/down/lost) from the cluster watcher.
  """

  use GenServer

  alias Cassandra.{Cluster, Connection, LoadBalancing}

  ### API ###

  def start_link(cluster, options) do
    with {:ok, balancer} <- Keyword.fetch(options, :balancer) do
      server_options =
        case Keyword.get(options, :connection_manager) do
          nil -> []
          name -> [name: name]
        end

      GenServer.start_link(__MODULE__, [cluster, balancer, options], server_options)
    else
      :error -> {:error, :missing_balancer}
    end
  end

  @doc "Returns all `{ip, pid}` connection pairs."
  def connections(manager) do
    GenServer.call(manager, :connections)
  end

  @doc "Returns the `{ip, pid}` connection pairs whose ip is in `ip_list`."
  def connections(manager, ip_list) do
    GenServer.call(manager, {:connections, ip_list})
  end

  ### GenServer Callbacks ###

  @doc false
  def init([cluster, balancer, options]) do
    state = %{
      cluster: cluster,
      balancer: balancer,
      options: options,
      connections: [],
    }

    # Subscribe to cluster events so host up/down changes arrive as messages.
    Cluster.register(cluster)

    connections = connect_to_up_hosts(state)

    {:ok, %{state | connections: connections}}
  end

  @doc false
  def handle_call(:connections, _from, %{connections: connections} = state) do
    {:reply, connections, state}
  end

  def handle_call({:connections, ips}, _from, %{connections: connections} = state) do
    ips = ips |> List.wrap |> MapSet.new
    reply = Enum.filter(connections, fn {ip, _pid} -> ip in ips end)

    {:reply, reply, state}
  end

  @doc false
  def handle_info({:host, :up, host}, state) do
    connections =
      if List.keymember?(state.connections, host.ip, 0) do
        # Bug fix: a repeated :up event for a known host no longer opens a
        # duplicate connection pool.
        state.connections
      else
        case start_connection(host, state.balancer, state.options) do
          {:ok, ip, pid} -> [{ip, pid} | state.connections]
          _ -> state.connections
        end
      end

    {:noreply, %{state | connections: connections}}
  end

  def handle_info({:host, status, host}, state)
      when status in [:down, :lost] do
    connections =
      case List.keyfind(state.connections, host.ip, 0) do
        nil ->
          state.connections

        {_, pid} = item ->
          GenServer.stop(pid)
          List.delete(state.connections, item)
      end

    {:noreply, %{state | connections: connections}}
  end

  def handle_info({:host, _status, _host}, state) do
    {:noreply, state}
  end

  def handle_info(:refresh, state) do
    # Only rebuild when every connection is gone; otherwise keep the
    # current set untouched.
    connections =
      case state.connections do
        [] -> connect_to_up_hosts(state)
        connections -> connections
      end

    {:noreply, %{state | connections: connections}}
  end

  def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do
    connections = List.keydelete(state.connections, pid, 1)
    handle_info(:refresh, %{state | connections: connections})
  end

  ### Helpers ###

  defp connect_to_up_hosts(state) do
    state.cluster
    |> Cluster.up_hosts
    |> Enum.map(&start_connection(&1, state.balancer, state.options))
    |> Enum.filter(&match?({:ok, _, _}, &1))
    |> Enum.map(fn {:ok, ip, pid} -> {ip, pid} end)
  end

  defp connection_options(host, count, options) do
    Keyword.merge(options, [
      host: host.ip,
      pool_size: count,
    ])
  end

  # Starts a DBConnection pool sized by the balancer, monitored (so :DOWN
  # triggers cleanup) but unlinked (so a pool crash cannot kill the manager).
  defp start_connection(host, balancer, options) do
    count = LoadBalancing.count(balancer, host)

    with {:ok, pid} <- DBConnection.start_link(Connection, connection_options(host, count, options)) do
      Process.monitor(pid)
      Process.unlink(pid)
      {:ok, host.ip, pid}
    end
  end
end
defmodule Cassandra.Session.Executor do
  use GenServer
  @behaviour :poolboy_worker

  require Logger

  alias Cassandra.{LoadBalancing, Statement, Cache}
  alias CQL.Result.Prepared

  ### API ###

  def start_link(cluster, options) do
    with {:ok, balancer} <- Keyword.fetch(options, :balancer),
         {:ok, connection_manager} <- Keyword.fetch(options, :connection_manager),
         {:ok, cache} <- Keyword.fetch(options, :cache) do
      options = Keyword.drop(options, [:balancer, :connection_manager])
      GenServer.start_link(__MODULE__, [cluster, balancer, connection_manager, cache, options])
    else
      :error -> {:error, :missing_param}
    end
  end

  # Runs `query` synchronously through this executor.
  def execute(executor, query, options, timeout \\ 15_000) when is_binary(query) and is_list(options) do
    GenServer.call(executor, {:execute, query, options}, timeout)
  end

  # Like execute/4 but hands the lazy result stream to `func`.
  def stream(executor, query, func, options, timeout \\ 15_000) when is_binary(query) and is_list(options) do
    options = Keyword.put(options, :streamer, func)
    GenServer.call(executor, {:execute, query, options}, timeout)
  end

  ### poolboy_worker callbacks ###

  def start_link([cluster, options]) do
    start_link(cluster, options)
  end

  ### GenServer Callbacks ###

  @doc false
  def init([cluster, balancer, connection_manager, cache, options]) do
    {:ok,
     %{
       cache: cache,
       options: options,
       cluster: cluster,
       balancer: balancer,
       connection_manager: connection_manager,
     }}
  end

  @doc false
  def handle_call({:execute, query, options}, from, state) do
    run_options = Keyword.put(state.options, :log, options[:log])

    # Build the statement, pick candidate connections via the balancer and
    # defer the actual work to a cast so the caller is not blocked on it.
    query
    |> Statement.new(options, state.options)
    |> Map.put(:streamer, Keyword.get(options, :streamer))
    |> LoadBalancing.plan(state.balancer, state.cluster, state.connection_manager)
    |> enqueue(run_options, from)

    {:noreply, state}
  end

  @doc false
  def handle_cast({:run, statement, options, from}, state) do
    case run(statement, options, state.cache) do
      {:stop, :no_connection} ->
        GenServer.reply(from, Cassandra.ConnectionError.new("execute", "no connection"))
        {:stop, :no_connection, state}

      result ->
        GenServer.reply(from, result)
        {:noreply, state}
    end
  end

  ### Helpers ###

  # Schedules the statement on this same process via cast.
  defp enqueue(statement, options, from) do
    GenServer.cast(self(), {:run, statement, options, from})
  end

  # Prepares the statement on `connection`, memoizing the prepared result
  # per (query, options, ip) in the cache. Any raise becomes {:error, e}.
  defp prepare_on(ip, connection, statement, options, cache) do
    do_prepare = fn ->
      DBConnection.prepare(connection, statement, Keyword.put(options, :for_cache, true))
    end

    with %Prepared{} = prepared <- Cache.put_new_lazy(cache, cache_key(statement, ip), do_prepare) do
      Statement.put_prepared(statement, prepared)
    end
  rescue
    error -> {:error, error}
  end

  defp cache_key(%Statement{query: query, options: options}, ip) do
    :erlang.phash2({query, options, ip})
  end

  # Executes the prepared statement, either directly or through the
  # caller-supplied streamer function.
  defp run_on(connection, %Statement{streamer: streamer} = statement, options) do
    case streamer do
      nil ->
        DBConnection.execute(connection, statement, statement.values, options)

      func ->
        DBConnection.run(connection, &func.(DBConnection.stream(&1, statement, statement.values, options)), options)
    end
  rescue
    error -> {:error, error}
  end

  # Tries each candidate connection in turn; gives up when none are left.
  defp run(%Statement{connections: []}, _options, _cache) do
    {:stop, :no_connection}
  end

  defp run(%Statement{connections: [{ip, connection} | rest]} = statement, options, cache) do
    outcome =
      with %Statement{} = prepared <- prepare_on(ip, connection, statement, options, cache) do
        run_on(connection, prepared, options)
      end

    case outcome do
      {:error, %CQL.Error{code: :unprepared}} ->
        # The server forgot this prepared id; evict and re-prepare on the
        # same connection.
        Cache.delete(cache, cache_key(statement, ip))
        run(statement, options, cache)

      {:error, %CQL.Error{} = error} ->
        error

      {:error, reason} ->
        Logger.warn("#{__MODULE__} got error: #{inspect reason}")
        run(%Statement{statement | connections: rest}, options, cache)

      {:ok, result} ->
        result

      result ->
        result
    end
  end
end
reason} ->
        # Transport-level failure: log it and fail over to the next connection.
        Logger.warn("#{__MODULE__} got error: #{inspect reason}")
        run(%Statement{statement | connections: connections}, options, cache)

      {:ok, result} ->
        result

      result ->
        result
    end
  end
end

--------------------------------------------------------------------------------
/lib/cassandra/session/worker.ex:
--------------------------------------------------------------------------------

defmodule Cassandra.Session.Worker do
  @moduledoc false

  require Logger

  @doc """
  Sends `request` over the given connections in order, replying to `from`
  with an execution profile (queue/query timings, connections tried, result).
  """
  def send_request(request, from, conns, retry) do
    send_request(request, from, conns, retry, %{start_time: :erlang.monotonic_time})
  end

  # Every connection has been tried (or none was given): reply with an error result.
  def send_request(_, from, [], _, profile) do
    waited = :erlang.monotonic_time - profile.start_time

    reply =
      profile
      |> Map.update(:queue_times, [waited], &[waited | &1])
      |> Map.put(:result, {:error, :no_more_connections})

    GenServer.reply(from, reply)
  end

  # Try the next connection; the retry policy decides whether to keep going
  # based on the request, the result and its own accumulated args.
  def send_request(request, from, [conn | conns], {retry?, args}, profile) do
    waited = :erlang.monotonic_time - profile.start_time
    {spent, result} = :timer.tc(Cassandra.Connection, :send, [conn, request, :infinity])

    profile =
      profile
      |> Map.update(:queue_times, [waited], &[waited | &1])
      |> Map.update(:query_times, [spent], &[spent | &1])
      |> Map.update(:connections, [conn], &[conn | &1])

    {again?, args} = apply(retry?, [request, result | args])

    if again? do
      send_request(request, from, conns, {retry?, args}, profile)
    else
      GenServer.reply(from, Map.put(profile, :result, result))
    end
  end
end

--------------------------------------------------------------------------------
/lib/cassandra/statement.ex:
--------------------------------------------------------------------------------

defmodule Cassandra.Statement do
  defstruct [
    :query,
    :options,
    :params,
:prepared,
    :request,
    :response,
    :keyspace,
    :partition_key,
    :partition_key_picker,
    :values,
    :connections,
    :streamer,
  ]

  @doc """
  Builds a statement from a raw `query` string.

  `:values` is extracted out of `options`; everything else is kept as the
  statement's options.
  """
  def new(query, options \\ []) do
    values = Keyword.get(options, :values, [])

    %__MODULE__{
      query: query,
      options: Keyword.delete(options, :values),
      values: values,
    }
  end

  @doc """
  Like `new/2`, but fills in a default `:consistency` derived from `defaults`
  (read vs write) when the caller did not specify one.
  """
  def new(query, options, defaults) do
    options = Keyword.put_new_lazy(options, :consistency, fn -> consistency(query, defaults) end)
    new(query, options)
  end

  @doc """
  Stores `values` on the statement and recomputes its partition key.
  """
  def put_values(statement, values) do
    %__MODULE__{statement | partition_key: partition_key(statement, values), values: values}
  end

  @doc """
  Recomputes the partition key from the statement's current values.
  """
  def update_pk(%__MODULE__{values: values} = statement) do
    put_values(statement, values || [])
  end

  @doc """
  Attaches a prepared result and refreshes derived state: per-execution fields
  are cleared and the partition-key picker and partition key are recomputed.
  """
  def put_prepared(statement, prepared) do
    %__MODULE__{statement | prepared: prepared}
    |> clean
    |> set_pk_picker
    |> update_pk
  end

  @doc """
  Drops per-execution state (encoded request, response, planned connections).
  """
  def clean(statement) do
    %__MODULE__{statement | request: nil, response: nil, connections: nil}
  end

  # Clause order matters: an already-set picker wins; otherwise derive one from
  # prepared metadata when there is exactly one partition-key index.
  def set_pk_picker(%__MODULE__{partition_key_picker: picker} = statement)
      when is_function(picker) do
    statement
  end

  def set_pk_picker(%__MODULE__{prepared: %{metadata: %{global_spec: %{keyspace: keyspace}, pk_indices: [index]}}} = statement) do
    %__MODULE__{statement | partition_key_picker: &Enum.at(&1, index), keyspace: keyspace}
  end

  def set_pk_picker(statement), do: statement

  defp partition_key(%__MODULE__{partition_key_picker: picker}, values)
       when is_function(picker) do
    picker.(values)
  end

  defp partition_key(_, _), do: nil

  # Reads default to :read_consistency, writes to :write_consistency;
  # :quorum when `defaults` specifies neither.
  defp consistency(query, defaults) do
    key = if read?(query), do: :read_consistency, else: :write_consistency
    Keyword.get(defaults, key, :quorum)
  end

  defp read?("SELECT" <> _), do:
true
  defp read?(_), do: false

  defimpl DBConnection.Query do
    alias Cassandra.Statement

    # Encodes bound `values` into an EXECUTE request for the prepared statement.
    # Returns {request_binary, params}; encoding failures fall through as-is.
    def encode(statement, values, options) do
      params =
        (statement.options || [])
        |> Keyword.merge(options)
        |> Keyword.put(:values, values)
        |> CQL.QueryParams.new

      execute = %CQL.Execute{prepared: statement.prepared, params: params}

      with {:ok, request} <- CQL.encode(execute) do
        {request, params}
      end
    end

    # Rows are decoded eagerly into their final shape.
    def decode(_statement, %CQL.Result.Rows{} = rows, _options) do
      CQL.Result.Rows.decode_rows(rows)
    end

    # Any other response is a raw frame: decode it and return its body.
    def decode(_statement, result, _options) do
      with {:ok, %CQL.Frame{body: body}} <- CQL.decode(result) do
        body
      end
    end

    # Decodes the PREPARED response; with :for_cache only the prepared result
    # is returned (for storing), otherwise it is merged into the statement.
    def describe(statement, options) do
      with {:ok, %CQL.Frame{body: %CQL.Result.Prepared{} = prepared}} <- CQL.decode(statement.response) do
        if options[:for_cache] do
          prepared
        else
          Statement.put_prepared(statement, prepared)
        end
      end
    end

    # Encodes the PREPARE request for the statement's query text.
    def parse(statement, _options) do
      with {:ok, request} <- CQL.encode(%CQL.Prepare{query: statement.query}) do
        %Statement{statement | request: request}
      end
    end
  end
end

--------------------------------------------------------------------------------
/lib/cassandra/uuid.ex:
--------------------------------------------------------------------------------

defmodule Cassandra.UUID do
  @moduledoc false
  use GenServer

  ### API ###

  def start_link do
    GenServer.start_link(__MODULE__, [], name: __MODULE__)
  end

  def v1, do: GenServer.call(__MODULE__, :v1)
  def v4, do: GenServer.call(__MODULE__, :v4)

  ### GenServer Callbacks ###

  def init(_) do
    {:ok, {get_clock_sequense(), get_node()}}
  end

  def handle_call(:v1, _from, {clock_sequense, node} = state) do
    {:reply, UUID.uuid1(clock_sequense, node),
state} 22 | end 23 | 24 | def handle_call(:v4, _from, state) do 25 | {:reply, UUID.uuid4, state} 26 | end 27 | 28 | ### Helpers ### 29 | 30 | defp get_node(), do: :inet.getifaddrs |> get_node 31 | defp get_node({:ok, list}), do: get_node(list) 32 | defp get_node([{_if_name, if_config} | rest]) do 33 | case :lists.keyfind(:hwaddr, 1, if_config) do 34 | :false -> 35 | get_node(rest) 36 | {:hwaddr, hw_addr} -> 37 | if Enum.all?(hw_addr, fn(n) -> n == 0 end) do 38 | get_node(rest) 39 | else 40 | :erlang.list_to_binary(hw_addr) 41 | end 42 | end 43 | end 44 | defp get_node(_) do 45 | <> = :crypto.strong_rand_bytes(6) 46 | <> 47 | end 48 | 49 | defp get_clock_sequense() do 50 | <> = :crypto.strong_rand_bytes(2) 51 | <> 52 | end 53 | end 54 | -------------------------------------------------------------------------------- /lib/cql.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL do 2 | @moduledoc false 3 | 4 | def decode(buffer) do 5 | with {:ok, frame} <- CQL.Frame.decode(buffer) do 6 | {:ok, decode_body(frame)} 7 | else 8 | _ -> CQL.Error.new("unexpected bytes") 9 | end 10 | end 11 | 12 | def decode_error(frame) do 13 | if CQL.Frame.is_error?(frame) do 14 | with {:ok, %CQL.Frame{body: error}} <- decode(frame) do 15 | error 16 | end 17 | else 18 | {:ok, frame} 19 | end 20 | end 21 | 22 | def decode_body(nil), do: nil 23 | 24 | def decode_body(%CQL.Frame{operation: operation, body: body} = frame) do 25 | body = case operation do 26 | :ERROR -> CQL.Error.decode(body) 27 | :READY -> CQL.Ready.decode(body) 28 | :RESULT -> CQL.Result.decode(body) 29 | :SUPPORTED -> CQL.Supported.decode(body) 30 | :EVENT -> CQL.Event.decode(body) 31 | _ -> body 32 | end 33 | 34 | %CQL.Frame{frame | body: body} 35 | end 36 | 37 | def encode(request, stream \\ 0) do 38 | with {operation, body} <- CQL.Request.encode(request) do 39 | frame = %CQL.Frame{operation: operation, body: body, stream: stream} 40 | cql = CQL.Frame.encode(frame) 41 | 
{:ok, cql}
    end
  end

  @doc """
  Like `encode/2` but raises on failure.
  """
  def encode!(request, stream \\ 0) do
    with {:ok, cql} <- encode(request, stream) do
      cql
    else
      error -> raise error
    end
  end

  defdelegate set_stream_id(request, id), to: CQL.Frame
end

--------------------------------------------------------------------------------
/lib/cql/batch.ex:
--------------------------------------------------------------------------------

defmodule CQL.Batch do
  @moduledoc """
  Represents a CQL batch statement
  """

  import CQL.DataTypes.Encoder

  require Bitwise

  alias CQL.{Request, Batch}

  defstruct [
    type: :logged,
    queries: [],
    consistency: :one,
    serial_consistency: nil,
    timestamp: nil,
  ]

  defimpl Request do
    @types %{
      logged: 0,
      unlogged: 1,
      counter: 2,
    }

    @flags %{
      :with_serial_consistency => 0x10,
      :with_default_timestamp => 0x20,
      :with_names => 0x40,
    }

    # Encodes a BATCH frame body:
    #   <type><n><queries...><consistency><flags>[<serial_consistency>][<timestamp>]
    # Returns :error when any subquery fails to encode.
    def encode(%Batch{} = b) do
      has_timestamp = is_integer(b.timestamp) and b.timestamp > 0

      flags =
        []
        |> prepend(:with_serial_consistency, b.serial_consistency)
        |> prepend(:with_default_timestamp, has_timestamp)
        |> names_to_flag(@flags)

      queries = Enum.map(b.queries, &CQL.BatchQuery.encode(&1))

      if Enum.any?(queries, &match?(:error, &1)) do
        :error
      else
        body =
          []
          |> prepend(byte(@types[b.type]))
          |> prepend(short(Enum.count(b.queries)))
          |> prepend(Enum.join(queries))
          |> prepend(consistency(b.consistency))
          |> prepend(byte(flags))
          |> prepend_not_nil(b.serial_consistency, :consistency)
          # BUGFIX: the optional default timestamp is a protocol [long];
          # previously the raw integer was prepended and Enum.join turned it
          # into its decimal string instead of an 8-byte big-endian value.
          |> prepend_not_nil(if(has_timestamp, do: b.timestamp), :long)
          |> Enum.reverse
          |> Enum.join

        {:BATCH, body}
      end
    end

    def encode(_), do: :error
  end
end

--------------------------------------------------------------------------------
/lib/cql/batch_query.ex:
--------------------------------------------------------------------------------

defmodule CQL.BatchQuery do
  @moduledoc """
  Represents a CQL batch statement subquery
  """

  import CQL.DataTypes.Encoder

  defstruct [:query, :values]

  @kind %{
    query: 0,
    prepared: 1,
  }

  @doc false
  # A prepared subquery is encoded as <kind=1><id><values>; value-encoding
  # errors (CQL.Error) fall through from `ok/1`.
  def encode(%__MODULE__{query: %CQL.Result.Prepared{id: id} = prepared, values: values}) do
    with {:ok, zipped} <- ok(zip(prepared.metadata.column_types, values)),
         {:ok, encoded_values} <- ok(values(zipped)) do
      byte(@kind[:prepared]) <> short_bytes(id) <> encoded_values
    end
  end

  # A plain query without values is encoded as <kind=0><query><n=0>.
  def encode(%__MODULE__{query: query, values: nil}) when is_binary(query) do
    byte(@kind[:query]) <> long_string(query) <> short(0)
  end

  def encode(_), do: CQL.Error.new("invalid request")
end

--------------------------------------------------------------------------------
/lib/cql/consistency.ex:
--------------------------------------------------------------------------------

defmodule CQL.Consistency do
  @moduledoc false

  # Consistency-level names and their wire codes.
  @codes %{
    :any => 0x00,
    :one => 0x01,
    :two => 0x02,
    :three => 0x03,
    :quorum => 0x04,
    :all => 0x05,
    :local_quorum => 0x06,
    :each_quorum => 0x07,
    :serial => 0x08,
    :local_serial => 0x09,
    :local_one => 0x0A,
  }

  # Reverse lookup, built once at compile time.
  @names Map.new(@codes, fn {name, code} -> {code, name} end)

  @doc "Returns the wire code for consistency `name`; raises on unknown names."
  def code(name), do: Map.fetch!(@codes, name)

  @doc "Returns the consistency name for wire `code`; raises on unknown codes."
  def name(code), do: Map.fetch!(@names, code)
end

--------------------------------------------------------------------------------
/lib/cql/data_types.ex:
--------------------------------------------------------------------------------

defmodule CQL.DataTypes do
  @moduledoc false

  @kinds %{
    0x01 => :ascii,
    0x02 => :bigint,
    0x03 =>
:blob, 8 | 0x04 => :boolean, 9 | 0x05 => :counter, 10 | 0x06 => :decimal, 11 | 0x07 => :double, 12 | 0x08 => :float, 13 | 0x09 => :int, 14 | 0x0B => :timestamp, 15 | 0x0C => :uuid, 16 | 0x0D => :varchar, 17 | 0x0E => :varint, 18 | 0x0F => :timeuuid, 19 | 0x10 => :inet, 20 | 0x11 => :date, 21 | 0x12 => :time, 22 | 0x13 => :smallint, 23 | 0x14 => :tinyint, 24 | 0x20 => :list, 25 | 0x21 => :map, 26 | 0x22 => :set, 27 | 0x30 => :udt, 28 | 0x31 => :tuple, 29 | } 30 | 31 | def kind({id, nil}), do: kind(id) 32 | def kind({id, value}), do: {kind(id), value} 33 | def kind(id) when is_integer(id) do 34 | Map.fetch!(@kinds, id) 35 | end 36 | 37 | defdelegate encode(sepc), to: CQL.DataTypes.Encoder 38 | defdelegate encode(value, type), to: CQL.DataTypes.Encoder 39 | 40 | defdelegate decode(spec), to: CQL.DataTypes.Decoder 41 | defdelegate decode(value, type), to: CQL.DataTypes.Decoder 42 | end 43 | -------------------------------------------------------------------------------- /lib/cql/data_types/date.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.DataTypes.Date do 2 | @moduledoc false 3 | 4 | @epoch :calendar.date_to_gregorian_days({1970, 1, 1}) - trunc(:math.pow(2, 31)) 5 | 6 | def decode(<>) do 7 | {:ok, date} = 8 | days + @epoch 9 | |> :calendar.gregorian_days_to_date 10 | |> Date.from_erl 11 | 12 | {date, rest} 13 | end 14 | 15 | def encode(%Date{} = date), do: date |> Date.to_erl |> encode 16 | def encode({_, _, _} = date) do 17 | days = :calendar.date_to_gregorian_days(date) 18 | n = days - @epoch 19 | <> 20 | end 21 | 22 | def encode(value) do 23 | CQL.DataTypes.Encoder.invalid(:date, value) 24 | end 25 | end 26 | -------------------------------------------------------------------------------- /lib/cql/data_types/decoder.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.DataTypes.Decoder do 2 | @moduledoc false 3 | 4 | require Bitwise 5 | require Logger 6 | 7 | 
def decode({type, buffer}), do: decode(buffer, type) 8 | def decode(buffer, type) do 9 | {value, ""} = dec(buffer, type) 10 | value 11 | end 12 | 13 | def byte(<>) do 14 | {n, rest} 15 | end 16 | 17 | def boolean(<<0::8, rest::bytes>>), do: {false, rest} 18 | def boolean(<<_::8, rest::bytes>>), do: {true, rest} 19 | 20 | def tinyint(<>) do 21 | {n, rest} 22 | end 23 | 24 | def short(<>) do 25 | {n, rest} 26 | end 27 | 28 | def int(<>) do 29 | {n, rest} 30 | end 31 | 32 | def long(<>) do 33 | {n, rest} 34 | end 35 | 36 | def float(<>) do 37 | {x, rest} 38 | end 39 | 40 | def double(<>) do 41 | {x, rest} 42 | end 43 | 44 | def string({len, buffer}) do 45 | <> = buffer 46 | {str, rest} 47 | end 48 | 49 | def string(buffer) do 50 | buffer |> short |> string 51 | end 52 | 53 | def long_string(buffer) do 54 | buffer |> int |> string 55 | end 56 | 57 | def uuid(<>) do 58 | {UUID.binary_to_string!(uuid), rest} 59 | end 60 | 61 | def string_list({n, buffer}) do 62 | ntimes(n, :string, buffer) 63 | end 64 | 65 | def string_list(buffer) do 66 | buffer |> short |> string_list 67 | end 68 | 69 | def bytes({len, buffer}) when is_integer(len) and len < 0 do 70 | {nil, buffer} 71 | end 72 | 73 | def bytes({len, buffer}) do 74 | <> = buffer 75 | {str, rest} 76 | end 77 | 78 | def bytes(buffer) do 79 | buffer |> int |> bytes 80 | end 81 | 82 | def short_bytes(buffer) do 83 | buffer |> short |> bytes 84 | end 85 | 86 | def inet(<>) do 87 | {{a, b, c, d}, ""} 88 | end 89 | 90 | def inet( 91 | <> 99 | ) do 100 | {{a, b, c, d, e, f, g, h}, ""} 101 | end 102 | 103 | def inet(<>) do 104 | {ip, _} = inet(data) 105 | {port, buffer} = int(buffer) 106 | {{ip, port}, buffer} 107 | end 108 | 109 | def string_map({n, buffer}) do 110 | key_value = fn buf -> 111 | {key, buf} = string(buf) 112 | {val, buf} = string(buf) 113 | {{key, val}, buf} 114 | end 115 | ntimes(n, key_value, buffer) 116 | end 117 | 118 | def string_map(buffer) do 119 | buffer |> short |> string_map 120 | end 121 | 122 | def 
string_multimap({n, buffer}) do 123 | key_value = fn buf -> 124 | {key, buf} = string(buf) 125 | {val, buf} = string_list(buf) 126 | {{key, val}, buf} 127 | end 128 | ntimes(n, key_value, buffer) 129 | end 130 | 131 | def string_multimap(buffer) do 132 | buffer |> short |> string_multimap 133 | end 134 | 135 | def bytes_map({n, buffer}) do 136 | key_value = fn buf -> 137 | {key, buf} = string(buf) 138 | {val, buf} = bytes(buf) 139 | {{key, val}, buf} 140 | end 141 | ntimes(n, key_value, buffer) 142 | end 143 | 144 | def bytes_map(buffer) do 145 | buffer |> short |> bytes_map 146 | end 147 | 148 | def list(buffer, type) do 149 | {n, buffer} = int(buffer) 150 | {list, rest} = ntimes(n, :bytes, buffer) 151 | {Enum.map(list, &decode(&1, type)), rest} 152 | end 153 | 154 | def map(buffer, {ktype, vtype}) do 155 | {n, buffer} = int(buffer) 156 | {list, rest} = ntimes(2 * n, :bytes, buffer) 157 | map = 158 | list 159 | |> Enum.chunk(2) 160 | |> Enum.map(fn [k, v] -> {decode(k, ktype), decode(v, vtype)} end) 161 | |> Enum.into(%{}) 162 | {map, rest} 163 | end 164 | 165 | def set(buffer, type) do 166 | {list, buffer} = list(buffer, type) 167 | {MapSet.new(list), buffer} 168 | end 169 | 170 | def tuple(buffer, types) do 171 | {n, buffer} = short(buffer) 172 | {list, rest} = ntimes(n, :bytes, buffer) 173 | tuple = 174 | list 175 | |> Enum.zip(types) 176 | |> Enum.map(fn {buf, t} -> decode(buf, t) end) 177 | |> List.to_tuple 178 | 179 | {tuple, rest} 180 | end 181 | 182 | def varint({size, buffer}) do 183 | size = size * 8 184 | <> = buffer 185 | {n, rest} 186 | end 187 | 188 | def varint(buffer) do 189 | buffer |> int |> varint 190 | end 191 | 192 | def decimal(buffer) do 193 | {scale, buffer} = int(buffer) 194 | {unscaled, buffer} = varint(buffer) 195 | {{unscaled, scale}, buffer} 196 | end 197 | 198 | def blob(buffer) do 199 | try do 200 | term = :erlang.binary_to_term(buffer) 201 | {term, ""} 202 | rescue 203 | ArgumentError -> {buffer, ""} 204 | end 205 | end 206 | 207 | 
def date(buffer), do: CQL.DataTypes.Date.decode(buffer) 208 | def time(buffer), do: CQL.DataTypes.Time.decode(buffer) 209 | def timestamp(b), do: CQL.DataTypes.Timestamp.decode(b) 210 | 211 | def consistency(buffer) do 212 | {code, buffer} = short(buffer) 213 | {CQL.Consistency.name(code), buffer} 214 | end 215 | 216 | ### Helpers ### 217 | 218 | def flag?(flag, flags) do 219 | Bitwise.band(flag, flags) == flag 220 | end 221 | 222 | def flag_to_names(flag, flags) do 223 | flags 224 | |> Enum.filter(fn {_, code} -> Bitwise.band(flag, code) == code end) 225 | |> Enum.map(fn {name, _} -> name end) 226 | end 227 | 228 | def ntimes(n, func, buffer) do 229 | ntimes(n, func, buffer, []) 230 | end 231 | 232 | def ntimes(0, _, buffer, items) do 233 | {Enum.reverse(items), buffer} 234 | end 235 | 236 | def ntimes(n, func, buffer, items) do 237 | {item, buffer} = ap(func, buffer) 238 | ntimes(n - 1, func, buffer, [item | items]) 239 | end 240 | 241 | def unpack(buffer, meta) do 242 | Enum.reduce(meta, {%{}, buffer}, &pick/2) 243 | end 244 | 245 | ### Utils ### 246 | 247 | defp pick({name, {func, key, predicate}}, {map, buffer}) do 248 | pick({name, {func, [when: predicate.(Map.get(map, key))]}}, {map, buffer}) 249 | end 250 | 251 | defp pick({_, {_, [when: false]}}, {map, buffer}) do 252 | {map, buffer} 253 | end 254 | 255 | defp pick({name, {func, [when: true]}}, {map, buffer}) do 256 | pick({name, func}, {map, buffer}) 257 | end 258 | 259 | defp pick({name, {func, [when: flag]}}, {map, buffer}) do 260 | pick({name, {func, [when: flag?(flag, map.flags)]}}, {map, buffer}) 261 | end 262 | 263 | defp pick({name, {func, [unless: boolean]}}, {map, buffer}) when is_boolean(boolean) do 264 | pick({name, {func, [when: !boolean]}}, {map, buffer}) 265 | end 266 | 267 | defp pick({name, func}, {map, buffer}) do 268 | {value, buffer} = ap(func, buffer) 269 | {Map.put(map, name, value), buffer} 270 | end 271 | 272 | defp ap(func, buffer) when is_atom(func) do 273 | apply(__MODULE__, 
func, [buffer]) 274 | end 275 | 276 | defp ap(func, buffer) when is_function(func) do 277 | func.(buffer) 278 | end 279 | 280 | defp dec(nil, _ ), do: {nil, ""} 281 | defp dec(buffer, :ascii ), do: {buffer, ""} 282 | defp dec(buffer, :bigint ), do: long(buffer) 283 | defp dec(buffer, :blob ), do: blob(buffer) 284 | defp dec(buffer, :boolean ), do: boolean(buffer) 285 | defp dec(buffer, :counter ), do: long(buffer) 286 | defp dec(buffer, :date ), do: date(buffer) 287 | defp dec(buffer, :decimal ), do: decimal(buffer) 288 | defp dec(buffer, :double ), do: double(buffer) 289 | defp dec(buffer, :float ), do: float(buffer) 290 | defp dec(buffer, :inet ), do: inet(buffer) 291 | defp dec(buffer, :int ), do: int(buffer) 292 | defp dec(buffer, :smallint ), do: short(buffer) 293 | defp dec(buffer, :text ), do: {buffer, ""} 294 | defp dec(buffer, :time ), do: time(buffer) 295 | defp dec(buffer, :timestamp), do: timestamp(buffer) 296 | defp dec(buffer, :timeuuid ), do: uuid(buffer) 297 | defp dec(buffer, :tinyint ), do: tinyint(buffer) 298 | defp dec(buffer, :uuid ), do: uuid(buffer) 299 | defp dec(buffer, :varchar ), do: {buffer, ""} 300 | defp dec(buffer, :varint ), do: varint(buffer) 301 | 302 | defp dec(buffer, {:list, type}), do: list(buffer, type) 303 | defp dec(buffer, {:map, type}), do: map(buffer, type) 304 | defp dec(buffer, {:set, type}), do: set(buffer, type) 305 | defp dec(buffer, {:tuple, types}), do: tuple(buffer, types) 306 | 307 | defp dec(_buffer, {_type, size}) when is_integer(size) and size < 0, do: nil 308 | defp dec(buffer, {type, _size}), do: dec(buffer, type) 309 | end 310 | -------------------------------------------------------------------------------- /lib/cql/data_types/encoder.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.DataTypes.Encoder do 2 | @moduledoc false 3 | 4 | require Bitwise 5 | require Logger 6 | 7 | def encode(nil), do: encode({nil, nil}) 8 | def encode(%NaiveDateTime{} = 
value), do: encode({:timestamp, value}) 9 | def encode(%DateTime{} = value), do: encode({:timestamp, value}) 10 | def encode(%Time{} = value), do: encode({:time, value}) 11 | def encode(%Date{} = value), do: encode({:date, value}) 12 | def encode(value) when is_integer(value), do: encode({:int, value}) 13 | def encode(value) when is_float(value), do: encode({:double, value}) 14 | def encode(value) when is_binary(value), do: encode({:text, value}) 15 | def encode(value) when is_boolean(value), do: encode({:boolean, value}) 16 | def encode({_,_,_,_} = value), do: encode({:inet, value}) 17 | def encode({_,_,_,_,_,_, _, _} = value), do: encode({:inet, value}) 18 | 19 | def encode({type, value}), do: encode(value, type) 20 | 21 | def encode(value, type), do: type |> enc(value) |> bytes 22 | 23 | def byte(n) when is_integer(n), do: <> 24 | def byte(x), do: invalid(:byte, x) 25 | 26 | def boolean(false), do: byte(0) 27 | def boolean(true), do: byte(1) 28 | def boolean(x), do: invalid(:boolean, x) 29 | 30 | def tinyint(n) when is_integer(n), do: <> 31 | def tinyint(x), do: invalid(:tinyint, x) 32 | 33 | def signed_short(n) when is_integer(n), do: <> 34 | def signed_short(x), do: invalid(:signed_short, x) 35 | 36 | def short(n) when is_integer(n), do: <> 37 | def short(x), do: invalid(:short, x) 38 | 39 | def int(n) when is_integer(n), do: <> 40 | def int(x), do: invalid(:int, x) 41 | 42 | def long(n) when is_integer(n), do: <> 43 | def long(x), do: invalid(:long, x) 44 | 45 | def float(x) when is_float(x), do: <> 46 | def float(x), do: invalid(:float, x) 47 | 48 | def double(x) when is_float(x), do: <> 49 | def double(x), do: invalid(:double, x) 50 | 51 | def string(str) when is_binary(str), do: (str |> String.length |> short) <> <> 52 | def string(x), do: invalid(:string, x) 53 | 54 | def long_string(str) when is_binary(str), do: (str |> String.length |> int) <> <> 55 | def long_string(x), do: invalid(:long_string, x) 56 | 57 | def uuid(str) when is_binary(str) do 58 | 
try do 59 | UUID.string_to_binary!(str) 60 | rescue 61 | ArgumentError -> invalid(:uuid, str) 62 | end 63 | end 64 | def uuid(x), do: invalid(:uuid, x) 65 | 66 | def string_list(list) when is_list(list) do 67 | if Enum.all?(list, &is_binary/1) do 68 | n = Enum.count(list) 69 | buffer = list |> Enum.map(&string/1) |> Enum.join 70 | short(n) <> <> 71 | else 72 | invalid(:string_list, list) 73 | end 74 | end 75 | def string_list(x), do: invalid(:string_list, x) 76 | 77 | def bytes(nil), do: int(-1) 78 | def bytes(bytes) when is_binary(bytes), do: int(byte_size(bytes)) <> <> 79 | def bytes(x), do: invalid(:bytes, x) 80 | 81 | def short_bytes(nil), do: int(-1) 82 | def short_bytes(bytes) when is_binary(bytes), do: short(byte_size(bytes)) <> <> 83 | def short_bytes(x), do: invalid(:short_bytes, x) 84 | 85 | 86 | def inet({_, _, _, _} = ip) do 87 | ip 88 | |> Tuple.to_list 89 | |> Enum.map(&byte/1) 90 | |> Enum.join() 91 | end 92 | 93 | def inet({_, _, _, _, _, _, _, _} = ip) do 94 | ip 95 | |> Tuple.to_list 96 | |> Enum.map(&short/1) 97 | |> Enum.join() 98 | end 99 | def inet(x), do: invalid(:inet, x) 100 | 101 | def string_map(map) when is_map(map) do 102 | if map |> Map.values |> Enum.all?(&is_binary/1) do 103 | n = Enum.count(map) 104 | buffer = map |> Enum.map(fn {k, v} -> string(k) <> string(v) end) |> Enum.join 105 | short(n) <> <> 106 | else 107 | invalid(:string_map, map) 108 | end 109 | end 110 | def string_map(x), do: invalid(:string_map, x) 111 | 112 | def string_multimap(map) when is_map(map) do 113 | map 114 | |> Enum.map(fn {k, v} -> {k, string_list(v)} end) 115 | |> string_map 116 | end 117 | def string_multimap(x), do: invalid(:string_multimap, x) 118 | 119 | def bytes_map(map) do 120 | size = Enum.count(map) 121 | buffer = 122 | map 123 | |> Enum.map(fn {k, v} -> string(k) <> bytes(v) end) 124 | |> Enum.join 125 | 126 | short(size) <> <> 127 | end 128 | 129 | def list(list, type) do 130 | size = Enum.count(list) 131 | buffer = 132 | list 133 | |> 
Enum.map(&encode(&1, type)) 134 | |> Enum.join 135 | 136 | int(size) <> <> 137 | end 138 | 139 | def map(map, {type}), do: map(map, {:text, type}) 140 | def map(map, {ktype, vtype}) do 141 | size = Enum.count(map) 142 | buffer = 143 | map 144 | |> Enum.map(fn {k, v} -> encode(k, ktype) <> encode(v, vtype) end) 145 | |> Enum.join 146 | 147 | int(size) <> <> 148 | end 149 | 150 | def set(set, type) do 151 | set |> MapSet.to_list |> list(type) 152 | end 153 | 154 | def tuple(tuple, types) when is_tuple(tuple) do 155 | list = Tuple.to_list(tuple) 156 | size = Enum.count(list) 157 | buffer = 158 | list 159 | |> Enum.zip(types) 160 | |> Enum.map(fn {v, t} -> encode(v, t) end) 161 | |> Enum.join 162 | 163 | short(size) <> <> 164 | end 165 | def tuple(x, _), do: invalid(:tuple, x) 166 | 167 | def varint(n) when is_integer(n) do 168 | bytes = int_bytes(n) 169 | bits = bytes * 8 170 | int(bytes) <> <> 171 | end 172 | def varint(x), do: invalid(:varint, x) 173 | 174 | def decimal({unscaled, scale}) do 175 | int(scale) <> varint(unscaled) 176 | end 177 | 178 | def text(value) when is_atom(value), do: Atom.to_string(value) 179 | def text(value) when is_binary(value), do: value 180 | def text(x), do: invalid(:text, x) 181 | 182 | def blob(value), do: :erlang.term_to_binary(value) 183 | 184 | def date(date), do: CQL.DataTypes.Date.encode(date) 185 | def time(time), do: CQL.DataTypes.Time.encode(time) 186 | def timestamp(t), do: CQL.DataTypes.Timestamp.encode(t) 187 | 188 | def consistency(name) when is_atom(name) do 189 | name |> CQL.Consistency.code |> short 190 | end 191 | def consistency(x), do: invalid(:consistency_atom, x) 192 | 193 | ### Helpers ### 194 | 195 | def prepend(list, item), do: [item | list] 196 | def prepend(list, _, false), do: list 197 | def prepend(list, item, true), do: [item | list] 198 | def prepend(list, _, nil), do: list 199 | def prepend(list, item, _), do: [item | list] 200 | def prepend_not_nil(list, nil, _func), do: list 201 | def 
prepend_not_nil(list, item, func), do: [apply(__MODULE__, func, [item]) | list] 202 | 203 | def ok(%CQL.Error{} = error), do: error 204 | def ok(value), do: {:ok, value} 205 | 206 | def names_to_flag(names, flags) do 207 | names 208 | |> Enum.map(&Map.fetch!(flags, &1)) 209 | |> Enum.reduce(0, &Bitwise.bor(&1, &2)) 210 | end 211 | 212 | def zip(types, values) when is_map(values) do 213 | zip(types, Enum.to_list(values)) 214 | end 215 | 216 | def zip(types, [{key, _} | _] = values) when is_list(values) and is_atom(key) do 217 | values = 218 | values 219 | |> Enum.map(fn {k, v} -> {Atom.to_string(k), v} end) 220 | |> Enum.into(%{}) 221 | 222 | Enum.map(types, fn {name, type} -> {type, values[name]} end) 223 | end 224 | 225 | def zip(types, values) when is_list(values) do 226 | types 227 | |> Keyword.values 228 | |> Enum.zip(values) 229 | end 230 | 231 | def zip(_, values) when is_nil(values), do: nil 232 | 233 | def zip(_, values), do: CQL.Error.new("invalid values", "Expected a list or a map for values found: #{inspect values}") 234 | 235 | def values(list) when is_list(list) do 236 | parts = Enum.map(list, &CQL.DataTypes.encode/1) 237 | 238 | if Enum.any?(parts, &match?(%CQL.Error{}, &1)) do 239 | [error] = 240 | parts 241 | |> Stream.filter(&match?(%CQL.Error{}, &1)) 242 | |> Enum.take(1) 243 | 244 | error 245 | else 246 | n = Enum.count(list) 247 | Enum.join([short(n) | parts]) 248 | end 249 | end 250 | 251 | def values(map) when is_map(map) do 252 | parts = Enum.flat_map map, fn {k, v} -> 253 | [string(to_string(k)), CQL.DataTypes.encode(v)] 254 | end 255 | 256 | if Enum.any?(parts, &match?(%CQL.Error{}, &1)) do 257 | [error] = 258 | parts 259 | |> Stream.filter(&match?(%CQL.Error{}, &1)) 260 | |> Enum.take(1) 261 | 262 | error 263 | else 264 | n = Enum.count(map) 265 | Enum.join([short(n) | parts]) 266 | end 267 | end 268 | 269 | def values(_), do: CQL.Error.new("invalid values", "Expected a list or a map for values") 270 | 271 | ### Utils ### 272 | 273 | defp 
int_bytes(x, acc \\ 0) 274 | defp int_bytes(x, acc) when x > 127 and x < 256, do: acc + 2 275 | defp int_bytes(x, acc) when x <= 127 and x >= -128, do: acc + 1 276 | defp int_bytes(x, acc) when x < -128 and x >= -256, do: acc + 2 277 | defp int_bytes(x, acc), do: int_bytes(Bitwise.bsr(x, 8), acc + 1) 278 | 279 | defp enc(:blob, value), do: blob(value) 280 | 281 | defp enc(_type, nil), do: int(-1) 282 | defp enc(_type, :not_set), do: int(-2) 283 | 284 | defp enc(:ascii, value), do: value 285 | defp enc(:bigint, value), do: long(value) 286 | defp enc(:boolean, true), do: byte(1) 287 | defp enc(:boolean, false), do: byte(0) 288 | defp enc(:counter, value), do: long(value) 289 | defp enc(:date, value), do: date(value) 290 | defp enc(:decimal, value), do: decimal(value) 291 | defp enc(:double, value), do: double(value) 292 | defp enc(:float, value), do: float(value) 293 | defp enc(:inet, value), do: inet(value) 294 | defp enc(:int, value), do: int(value) 295 | defp enc(:smallint, value), do: short(value) 296 | defp enc(:text, value), do: text(value) 297 | defp enc(:time, value), do: time(value) 298 | defp enc(:timestamp, value), do: timestamp(value) 299 | defp enc(:timeuuid, value), do: uuid(value) 300 | defp enc(:tinyint, value), do: tinyint(value) 301 | defp enc(:uuid, value), do: uuid(value) 302 | defp enc(:varchar, value), do: text(value) 303 | defp enc(:varint, value), do: varint(value) 304 | 305 | defp enc({:list, type}, value), do: list(value, type) 306 | defp enc({:map, type}, value), do: map(value, type) 307 | defp enc({:set, type}, value), do: set(value, type) 308 | defp enc({:tuple, types}, value), do: tuple(value, types) 309 | 310 | def invalid(_, %CQL.Error{} = error), do: error 311 | def invalid(type, value) do 312 | CQL.Error.new("invalid value", "Expected a '#{type}' found: #{inspect value}") 313 | end 314 | end 315 | -------------------------------------------------------------------------------- /lib/cql/data_types/time.ex: 
-------------------------------------------------------------------------------- 1 | defmodule CQL.DataTypes.Time do 2 | @moduledoc false 3 | 4 | def decode(<>) do 5 | seconds = nanoseconds |> div(1000_000_000) 6 | nano = nanoseconds |> rem(1000_000_000) |> div(1000) 7 | 8 | {:ok, time} = 9 | seconds 10 | |> :calendar.seconds_to_time 11 | |> Time.from_erl({nano, 6}) 12 | 13 | {time, rest} 14 | end 15 | 16 | def encode(%Time{microsecond: {microseconds, _}} = time) do 17 | seconds = 18 | time 19 | |> Time.to_erl 20 | |> :calendar.time_to_seconds 21 | 22 | micro = seconds * 1000_000 + microseconds 23 | nano = micro * 1000 24 | <> 25 | end 26 | 27 | def encode(value) do 28 | CQL.DataTypes.Encoder.invalid(:time, value) 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /lib/cql/data_types/timestamp.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.DataTypes.Timestamp do 2 | @moduledoc false 3 | 4 | @epoch :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}) 5 | 6 | def decode(<>) do 7 | seconds = div(milliseconds, 1000) 8 | micro = rem(milliseconds, 1000) * 1000 9 | 10 | {:ok, timestamp} = 11 | seconds + @epoch 12 | |> :calendar.gregorian_seconds_to_datetime 13 | |> NaiveDateTime.from_erl({micro, 3}) 14 | 15 | {timestamp, rest} 16 | end 17 | 18 | def encode(%DateTime{} = timestamp) do 19 | timestamp |> DateTime.to_naive |> encode 20 | end 21 | 22 | def encode(%NaiveDateTime{microsecond: {microseconds, _}} = timestamp) do 23 | seconds = 24 | timestamp 25 | |> NaiveDateTime.to_erl 26 | |> :calendar.datetime_to_gregorian_seconds 27 | 28 | milliseconds = div(microseconds, 1000) 29 | n = (seconds - @epoch) * 1000 + milliseconds 30 | <> 31 | end 32 | 33 | def encode(value) do 34 | CQL.DataTypes.Encoder.invalid(:timestamp, value) 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /lib/cql/error.ex: 
-------------------------------------------------------------------------------- 1 | defmodule CQL.Error do 2 | @moduledoc false 3 | 4 | import CQL.DataTypes.Decoder 5 | 6 | defexception [:code, :message, :info] 7 | 8 | @codes %{ 9 | 0x0000 => :server_error, 10 | 0x000A => :protocol_error, 11 | 0x0100 => :authentication_error, 12 | 0x1000 => :unavailable, 13 | 0x1001 => :overloaded, 14 | 0x1002 => :is_bootstrapping, 15 | 0x1003 => :truncate_error, 16 | 0x1100 => :write_timeout, 17 | 0x1200 => :read_timeout, 18 | 0x1300 => :read_failure, 19 | 0x2000 => :syntax_error, 20 | 0x2100 => :unauthorized, 21 | 0x2200 => :invalid, 22 | 0x2300 => :config_error, 23 | 0x2400 => :already_exists, 24 | 0x2500 => :unprepared, 25 | } 26 | 27 | def new(message, info \\ "") do 28 | %__MODULE__{code: :invalid, message: message, info: info} 29 | end 30 | 31 | def message(%__MODULE__{code: code, message: message, info: ""}) do 32 | "[#{code}] #{message}" 33 | end 34 | 35 | def message(%__MODULE__{code: code, message: message, info: info}) do 36 | "[#{code}] #{message}: #{inspect info}" 37 | end 38 | 39 | def decode(buffer) do 40 | {error, rest} = unpack buffer, 41 | code: :int, 42 | message: :string 43 | 44 | code = Map.get(@codes, error.code) 45 | 46 | {info, ""} = case code do 47 | :unavailable -> 48 | unpack rest, 49 | consistency: :consistency, 50 | required: :int, 51 | alive: :int 52 | 53 | :write_timeout -> 54 | unpack rest, 55 | consistency: :consistency, 56 | received: :int, 57 | blockfor: :int, 58 | write_type: :string 59 | 60 | :write_failure -> 61 | unpack rest, 62 | consistency: :consistency, 63 | received: :int, 64 | blockfor: :int, 65 | num_failures: :int, 66 | write_type: :string 67 | 68 | :read_timeout -> 69 | unpack rest, 70 | consistency: :consistency, 71 | received: :int, 72 | blockfor: :int, 73 | data_present: :boolean 74 | 75 | :read_failure -> 76 | unpack rest, 77 | consistency: :consistency, 78 | received: :int, 79 | blockfor: :int, 80 | num_failures: :int, 81 | 
data_present: :boolean 82 | 83 | :function_failure -> 84 | unpack rest, 85 | keyspace: :string, 86 | function: :string, 87 | arg_types: :string_list 88 | 89 | :already_exists -> 90 | unpack rest, 91 | keyspace: :string, 92 | table: :string 93 | 94 | :unprepared -> 95 | unpack rest, 96 | id: :short_bytes 97 | 98 | _any_other -> 99 | {rest, ""} 100 | end 101 | 102 | %__MODULE__{code: code, message: error.message, info: info} 103 | end 104 | end 105 | -------------------------------------------------------------------------------- /lib/cql/event.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Event do 2 | @moduledoc """ 3 | Represents a CQL event 4 | """ 5 | 6 | import CQL.DataTypes.Decoder 7 | 8 | defstruct [:type, :info] 9 | 10 | @doc false 11 | def decode(body) do 12 | {type, buffer} = string(body) 13 | 14 | info = case type do 15 | "TOPOLOGY_CHANGE" -> 16 | {info, ""} = unpack buffer, 17 | change: :string, 18 | address: :inet 19 | 20 | info 21 | 22 | "STATUS_CHANGE" -> 23 | {info, ""} = unpack buffer, 24 | change: :string, 25 | address: :inet 26 | 27 | info 28 | 29 | "SCHEMA_CHANGE" -> 30 | {info, buffer} = unpack buffer, 31 | change: :string, 32 | target: :string 33 | 34 | {options, ""} = case info.target do 35 | "KEYSPACE" -> 36 | unpack buffer, 37 | keyspace: :string 38 | 39 | "TABLE" -> 40 | unpack buffer, 41 | keyspace: :string, 42 | table: :string 43 | 44 | "TYPE" -> 45 | unpack buffer, 46 | keyspace: :string, 47 | type: :string 48 | 49 | "FUNCTION" -> 50 | unpack buffer, 51 | keyspace: :string, 52 | name: :string, 53 | args: :string_list 54 | 55 | "AGGREGATE" -> 56 | unpack buffer, 57 | keyspace: :string, 58 | name: :string, 59 | args: :string_list 60 | end 61 | 62 | Map.put(info, :options, options) 63 | end 64 | 65 | %__MODULE__{type: type, info: info} 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /lib/cql/execute.ex: 
-------------------------------------------------------------------------------- 1 | defmodule CQL.Execute do 2 | @moduledoc """ 3 | Represents a CQL execute statement 4 | """ 5 | 6 | import CQL.DataTypes.Encoder 7 | 8 | alias CQL.{Request, QueryParams} 9 | alias CQL.Result.Prepared 10 | 11 | defstruct [ 12 | :prepared, 13 | :params, 14 | ] 15 | 16 | defimpl Request do 17 | def encode(%CQL.Execute{prepared: %Prepared{id: id, metadata: %{column_types: column_types}}, params: %QueryParams{} = params}) do 18 | with {:ok, zipped} <- ok(zip(column_types, params.values)), 19 | {:ok, encoded_params} <- ok(QueryParams.encode(%{params | values: zipped})) 20 | do 21 | {:EXECUTE, short_bytes(id) <> encoded_params} 22 | end 23 | end 24 | 25 | def encode(_), do: CQL.Error.new("invalid execute request") 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /lib/cql/frame.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Frame do 2 | @moduledoc false 3 | 4 | require Logger 5 | 6 | import CQL.DataTypes.Encoder 7 | 8 | alias CQL.DataTypes.Decoder 9 | 10 | defstruct [ 11 | version: 0x04, 12 | flags: [], 13 | stream: 0, 14 | operation: 0, 15 | length: 0, 16 | warnings: [], 17 | tracing_id: nil, 18 | body: "", 19 | ] 20 | 21 | @operations %{ 22 | :ERROR => 0x00, 23 | :STARTUP => 0x01, 24 | :READY => 0x02, 25 | :AUTHENTICATE => 0x03, 26 | :OPTIONS => 0x05, 27 | :SUPPORTED => 0x06, 28 | :QUERY => 0x07, 29 | :RESULT => 0x08, 30 | :PREPARE => 0x09, 31 | :EXECUTE => 0x0A, 32 | :REGISTER => 0x0B, 33 | :EVENT => 0x0C, 34 | :BATCH => 0x0D, 35 | :AUTH_CHALLENGE => 0x0E, 36 | :AUTH_RESPONSE => 0x0F, 37 | :AUTH_SUCCESS => 0x10, 38 | } 39 | 40 | @operation_names @operations |> Enum.map(fn {k, v} -> {v, k} end) |> Enum.into(%{}) 41 | 42 | @flags %{ 43 | :compression => 0x01, 44 | :tracing => 0x02, 45 | :custom_payload => 0x04, 46 | :warning => 0x08, 47 | } 48 | 49 | def encode(%__MODULE__{} = f) 
do 50 | IO.iodata_to_binary [ 51 | byte(f.version), 52 | byte(names_to_flag(f.flags, @flags)), 53 | signed_short(f.stream), 54 | byte(Map.fetch!(@operations, f.operation)), 55 | int(byte_size(f.body)), 56 | f.body, 57 | ] 58 | end 59 | 60 | def body_length(<<_::40, length::integer-32>>), do: {:ok, length} 61 | def body_length(_), do: CQL.Error.new("invalid frame header") 62 | 63 | def is_error?(<<_::32, 0::integer-8, _::binary>>), do: true 64 | def is_error?(_), do: false 65 | 66 | def decode_header(<< 67 | version::integer-8, 68 | flags::integer-8, 69 | stream::signed-integer-16, 70 | opcode::integer-8, 71 | length::integer-32 72 | >>) 73 | do 74 | {:ok, %{version: version, 75 | flags: flags, 76 | stream: stream, 77 | opcode: opcode, 78 | length: length, 79 | }} 80 | end 81 | def decode_header(_), do: CQL.Error.new("invalid frame header") 82 | 83 | def decode(<< 84 | version::integer-8, 85 | flags::integer-8, 86 | stream::signed-integer-16, 87 | opcode::integer-8, 88 | length::integer-32, 89 | body::binary-size(length), 90 | >>) 91 | do 92 | flags = Decoder.flag_to_names(flags, @flags) 93 | 94 | body = 95 | if :compression in flags do 96 | with {:ok, uncompressed} <- CQL.LZ4.unpack(body) do 97 | uncompressed 98 | else 99 | {:error, reason} -> CQL.Error.new("frame: #{inspect reason}") 100 | end 101 | else 102 | body 103 | end 104 | 105 | {tracing_id, body} = 106 | if :tracing in flags do 107 | Decoder.uuid(body) 108 | else 109 | {nil, body} 110 | end 111 | 112 | {warnings, body} = 113 | if :warning in flags do 114 | Decoder.string_list(body) 115 | else 116 | {[], body} 117 | end 118 | 119 | Enum.each(warnings, &Logger.warn/1) 120 | 121 | frame = %__MODULE__{ 122 | version: version, 123 | flags: flags, 124 | warnings: warnings, 125 | tracing_id: tracing_id, 126 | stream: stream, 127 | operation: Map.fetch!(@operation_names, opcode), 128 | length: length, 129 | body: body, 130 | } 131 | 132 | {:ok, frame} 133 | end 134 | 135 | def decode(_), do: 
CQL.Error.new("invalid frame") 136 | 137 | def set_stream_id(<>, id) do 138 | {:ok, <>} 139 | end 140 | 141 | def set_stream_id(_, _), do: CQL.Error.new("invalid frame") 142 | end 143 | -------------------------------------------------------------------------------- /lib/cql/lz4.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.LZ4 do 2 | def unpack(<>) do 3 | :lz4.uncompress(binary, size) 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /lib/cql/metadata.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.MetaData do 2 | @moduledoc false 3 | 4 | import CQL.DataTypes.Decoder 5 | 6 | require Bitwise 7 | 8 | @flags %{ 9 | :global_spec => 0x01, 10 | :has_more_pages => 0x02, 11 | :no_metadata => 0x04, 12 | } 13 | 14 | def decode(buffer, pk_indices \\ false) do 15 | {meta, buffer} = unpack buffer, 16 | flags: :int, 17 | columns_count: :int, 18 | pk_indices: {&pk_indices/1, when: pk_indices}, 19 | paging_state: {:bytes, when: @flags.has_more_pages} 20 | 21 | no_meta? = flag?(@flags.no_metadata, meta.flags) 22 | global? 
= flag?(@flags.global_spec, meta.flags) 23 | 24 | case {no_meta?, global?} do 25 | {true, _} -> 26 | {meta, buffer} 27 | {false, true} -> 28 | {global_spec, buffer} = global_spec(buffer) 29 | {column_types, buffer} = ntimes(meta.columns_count, &column_type/1, buffer) 30 | {Map.merge(meta, %{column_types: column_types, global_spec: global_spec}), buffer} 31 | {false, false} -> 32 | {specs, buffer} = column_specs(meta.columns_count, buffer) 33 | {Map.merge(meta, specs), buffer} 34 | end 35 | end 36 | 37 | def pk_indices(buffer) do 38 | {pk_count, buffer} = int(buffer) 39 | ntimes(pk_count, :short, buffer) 40 | end 41 | 42 | def global_spec(buffer) do 43 | unpack buffer, 44 | keyspace: :string, 45 | table: :string 46 | end 47 | 48 | def column_specs(n, buffer) do 49 | {specs, buffer} = ntimes(n, &column_spec/1, buffer) 50 | {tables, types} = Enum.unzip(specs) 51 | {%{column_types: types, column_specs: tables}, buffer} 52 | end 53 | 54 | def column_spec(buffer) do 55 | {keyspace, buffer} = string(buffer) 56 | {table, buffer} = string(buffer) 57 | {name, buffer} = string(buffer) 58 | {type, buffer} = option(buffer) 59 | {{{keyspace, table}, {name, type}}, buffer} 60 | end 61 | 62 | def column_type(buffer) do 63 | {name, buffer} = string(buffer) 64 | {type, buffer} = option(buffer) 65 | {{name, type}, buffer} 66 | end 67 | 68 | def option(buffer) do 69 | {id, buffer} = short(buffer) 70 | {value, buffer} = case id do 71 | 0x00 -> string(buffer) 72 | 0x20 -> option(buffer) 73 | 0x21 -> options_pair(buffer) 74 | 0x22 -> option(buffer) 75 | 0x30 -> {nil, buffer} # TODO: UDT 76 | 0x31 -> options(buffer) 77 | _ -> {nil, buffer} 78 | end 79 | 80 | {CQL.DataTypes.kind({id, value}), buffer} 81 | end 82 | 83 | def options(buffer) do 84 | {n, buffer} = short(buffer) 85 | ntimes(n, &option/1, buffer) 86 | end 87 | 88 | def options_pair(buffer) do 89 | {option1, buffer} = option(buffer) 90 | {option2, buffer} = option(buffer) 91 | 92 | {{option1, option2}, buffer} 93 | end 94 | end 
95 | -------------------------------------------------------------------------------- /lib/cql/options.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Options do 2 | @moduledoc """ 3 | Represents a CQL options request statement 4 | """ 5 | 6 | defstruct [] 7 | 8 | defimpl CQL.Request do 9 | def encode(%CQL.Options{}) do 10 | {:OPTIONS, ""} 11 | end 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /lib/cql/prepare.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Prepare do 2 | @moduledoc """ 3 | Represents a CQL prepare statement 4 | """ 5 | 6 | import CQL.DataTypes.Encoder 7 | 8 | alias CQL.{Request, Prepare} 9 | 10 | defstruct [query: ""] 11 | 12 | defimpl Request do 13 | def encode(%Prepare{query: query}) do 14 | with {:ok, body} <- ok(long_string(query)) do 15 | {:PREPARE, body} 16 | end 17 | end 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /lib/cql/query.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Query do 2 | @moduledoc """ 3 | Represents a CQL query statement 4 | """ 5 | 6 | import CQL.DataTypes.Encoder 7 | 8 | alias CQL.{Request, QueryParams} 9 | 10 | defstruct [ 11 | query: "", 12 | params: %QueryParams{}, 13 | ] 14 | 15 | defimpl Request do 16 | def encode(%CQL.Query{query: query, params: %QueryParams{} = params}) do 17 | with {:ok, encoded_query} <- ok(long_string(query)), 18 | {:ok, encoded_params} <- ok(QueryParams.encode(params)) 19 | do 20 | {:QUERY, encoded_query <> encoded_params} 21 | end 22 | end 23 | 24 | def encode(_), do: CQL.Error.new("invalid query request") 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /lib/cql/query_params.ex: -------------------------------------------------------------------------------- 
1 | defmodule CQL.QueryParams do 2 | @moduledoc """ 3 | Represents a CQL query/execute statements parameters 4 | """ 5 | import CQL.DataTypes.Encoder 6 | 7 | require Bitwise 8 | 9 | defstruct [ 10 | consistency: :one, 11 | values: nil, 12 | skip_metadata: false, 13 | page_size: nil, 14 | paging_state: nil, 15 | serial_consistency: nil, 16 | timestamp: nil, 17 | ] 18 | 19 | @valid_keys [ 20 | :consistency, 21 | :values, 22 | :skip_metadata, 23 | :page_size, 24 | :paging_state, 25 | :serial_consistency, 26 | :timestamp, 27 | ] 28 | 29 | @flags %{ 30 | :values => 0x01, 31 | :skip_metadata => 0x02, 32 | :page_size => 0x04, 33 | :with_paging_state => 0x08, 34 | :with_serial_consistency => 0x10, 35 | :with_default_timestamp => 0x20, 36 | :with_names => 0x40, 37 | } 38 | 39 | def new(options) when is_list(options) do 40 | if Keyword.keyword?(options) do 41 | struct(__MODULE__, Keyword.take(options, @valid_keys)) 42 | else 43 | struct(__MODULE__) 44 | end 45 | end 46 | 47 | def new(options) when is_map(options) do 48 | struct(__MODULE__, Map.take(options, @valid_keys)) 49 | end 50 | 51 | def new(_) do 52 | struct(__MODULE__) 53 | end 54 | 55 | def encode(q = %__MODULE__{values: values}) when is_nil(values) do 56 | encode(q, false, false, nil) 57 | end 58 | 59 | def encode(q = %__MODULE__{values: values}) when is_list(values) or is_map(values) do 60 | if Enum.empty?(values) do 61 | encode(q, false, false, nil) 62 | else 63 | with {:ok, encoded} <- ok(values(values)) do 64 | encode(q, true, is_map(values), encoded) 65 | end 66 | end 67 | end 68 | 69 | def encode(_), do: CQL.Error.new("invalud params") 70 | 71 | defp encode(q, has_values, has_names, values) do 72 | has_timestamp = is_integer(q.timestamp) and q.timestamp > 0 73 | 74 | flags = 75 | [] 76 | |> prepend(:values, has_values) 77 | |> prepend(:skip_metadata, q.skip_metadata) 78 | |> prepend(:page_size, q.page_size) 79 | |> prepend(:with_paging_state, q.paging_state) 80 | |> prepend(:with_serial_consistency, 
q.serial_consistency) 81 | |> prepend(:with_default_timestamp, has_timestamp) 82 | |> prepend(:with_names, has_names) 83 | |> names_to_flag(@flags) 84 | |> byte 85 | 86 | q.consistency 87 | |> consistency 88 | |> List.wrap 89 | |> prepend(flags) 90 | |> prepend(values, has_values) 91 | |> prepend_not_nil(q.page_size, :int) 92 | |> prepend_not_nil(q.paging_state, :bytes) 93 | |> prepend_not_nil(q.serial_consistency, :consistency) 94 | |> prepend(q.timestamp, has_timestamp) 95 | |> Enum.reverse 96 | |> Enum.join 97 | end 98 | end 99 | -------------------------------------------------------------------------------- /lib/cql/ready.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Ready do 2 | @moduledoc false 3 | 4 | defstruct [] 5 | 6 | def decode(_buffer) do 7 | %__MODULE__{} 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /lib/cql/register.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Register do 2 | @moduledoc """ 3 | Represents a CQL Register statement 4 | """ 5 | 6 | import CQL.DataTypes.Encoder 7 | alias CQL.Request 8 | 9 | @types [ 10 | "TOPOLOGY_CHANGE", 11 | "STATUS_CHANGE", 12 | "SCHEMA_CHANGE", 13 | ] 14 | 15 | defstruct [types: @types] 16 | 17 | defimpl Request do 18 | def encode(%CQL.Register{types: types}) do 19 | with {:ok, body} <- ok(string_list(types)) do 20 | {:REGISTER, body} 21 | end 22 | end 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /lib/cql/request.ex: -------------------------------------------------------------------------------- 1 | defprotocol CQL.Request do 2 | @moduledoc false 3 | 4 | @fallback_to_any true 5 | def encode(request) 6 | end 7 | 8 | defimpl CQL.Request, for: Any do 9 | def encode(_), do: :error 10 | end 11 | -------------------------------------------------------------------------------- 
/lib/cql/result.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Result do 2 | @moduledoc false 3 | 4 | import CQL.DataTypes.Decoder 5 | 6 | alias CQL.Result 7 | 8 | def decode(buffer) do 9 | {kind, rest} = int(buffer) 10 | case kind do 11 | 0x01 -> Result.Void.decode(rest) 12 | 0x02 -> Result.Rows.decode(rest) 13 | 0x03 -> Result.SetKeyspace.decode(rest) 14 | 0x04 -> Result.Prepared.decode(rest) 15 | 0x05 -> Result.SchemaChange.decode(rest) 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /lib/cql/result/prepared.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Result.Prepared do 2 | @moduledoc """ 3 | Represents a CQL prepared result 4 | """ 5 | 6 | import CQL.DataTypes.Decoder 7 | 8 | alias CQL.MetaData 9 | 10 | defstruct [ 11 | :id, 12 | :metadata, 13 | :result_metadata, 14 | ] 15 | 16 | def decode(buffer) do 17 | {data, _rest} = unpack buffer, 18 | id: :short_bytes, 19 | metadata: &MetaData.decode(&1, true) 20 | 21 | struct(__MODULE__, data) 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /lib/cql/result/rows.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Result.Rows do 2 | @moduledoc """ 3 | Represents a CQL rows result 4 | """ 5 | 6 | import CQL.DataTypes.Decoder, except: [decode: 2] 7 | 8 | defstruct [ 9 | :columns, 10 | :columns_count, 11 | :column_types, 12 | :rows, 13 | :rows_count, 14 | :paging_state, 15 | ] 16 | 17 | @doc false 18 | def decode_meta(buffer) do 19 | with {:ok, %CQL.Frame{body: body, operation: :RESULT}} <- CQL.Frame.decode(buffer), 20 | {0x02, rest} <- int(body) 21 | do 22 | decode(rest, false) 23 | else 24 | _ -> CQL.Error.new("trying to decode meta from frame which is not a RESULT.ROWS") 25 | end 26 | end 27 | 28 | def decode_rows(%__MODULE__{} = r) do 29 | 
{rows, ""} = ntimes(r.rows_count, row_content(r.column_types, r.columns_count), r.rows) 30 | %{r | rows: rows} 31 | end 32 | 33 | @doc false 34 | def decode(buffer, decode_rows \\ true) do 35 | {meta, buffer} = unpack buffer, 36 | metadata: &CQL.MetaData.decode/1, 37 | rows_count: :int 38 | 39 | columns_count = meta.metadata.columns_count 40 | {columns, column_types} = Enum.unzip(meta.metadata.column_types) 41 | 42 | rows = 43 | if decode_rows do 44 | {rows, ""} = ntimes(meta.rows_count, row_content(column_types, columns_count), buffer) 45 | rows 46 | else 47 | buffer 48 | end 49 | 50 | %__MODULE__{ 51 | columns: columns, 52 | columns_count: columns_count, 53 | column_types: column_types, 54 | rows: rows, 55 | rows_count: meta.rows_count, 56 | paging_state: Map.get(meta.metadata, :paging_state), 57 | } 58 | end 59 | 60 | @doc """ 61 | Joins a list of Rows, as they where result of a single query 62 | """ 63 | def join(rows_list) do 64 | rows_list 65 | |> Enum.reduce(fn row, %{rows_count: n, rows: list} = acc -> 66 | %{acc | rows_count: n + row.rows_count, rows: list ++ row.rows} 67 | end) 68 | |> Map.put(:paging_state, nil) 69 | end 70 | 71 | @doc """ 72 | Converts a Rows struct to a list of keyword lists with column names as keys 73 | """ 74 | def to_keyword(%__MODULE__{columns: columns, rows: rows}) do 75 | Enum.map(rows, &zip_to(columns, &1, [])) 76 | end 77 | 78 | @doc """ 79 | Converts a Rows struct to a list of maps with column names as keys 80 | """ 81 | def to_map(%__MODULE__{columns: columns, rows: rows}) do 82 | Enum.map(rows, &zip_to(columns, &1, %{})) 83 | end 84 | 85 | defp zip_to(keys, values, into) do 86 | keys 87 | |> Enum.zip(values) 88 | |> Enum.into(into) 89 | end 90 | 91 | defp row_content(types, count) do 92 | fn binary -> 93 | {row, rest} = ntimes(count, &bytes/1, binary) 94 | {parse(row, types), rest} 95 | end 96 | end 97 | 98 | defp parse(row_content, types) do 99 | types 100 | |> Enum.zip(row_content) 101 | |> 
Enum.map(&CQL.DataTypes.decode/1) 102 | end 103 | end 104 | -------------------------------------------------------------------------------- /lib/cql/result/schema_change.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Result.SchemaChange do 2 | @moduledoc """ 3 | Represents a CQL schema change result 4 | """ 5 | 6 | import CQL.DataTypes.Decoder 7 | 8 | defstruct [ 9 | :change_type, 10 | :target, 11 | :options, 12 | ] 13 | 14 | def decode(buffer) do 15 | {data, ""} = unpack buffer, 16 | change_type: :string, 17 | target: :string, 18 | keyspace: {:string, :target, &(&1 != "KEYSPACE")}, 19 | name: :string 20 | 21 | options = if data.target != "KEYSPACE" do 22 | {data.keyspace, data.name} 23 | else 24 | data.name 25 | end 26 | 27 | %__MODULE__{ 28 | change_type: data.change_type, 29 | target: data.target, 30 | options: options, 31 | } 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /lib/cql/result/set_keyspace.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Result.SetKeyspace do 2 | @moduledoc """ 3 | Represents a CQL set keyspace result 4 | """ 5 | 6 | import CQL.DataTypes.Decoder 7 | 8 | defstruct [:name] 9 | 10 | def decode(binary) do 11 | {keypace, ""} = string(binary) 12 | %__MODULE__{name: keypace} 13 | end 14 | end 15 | 16 | -------------------------------------------------------------------------------- /lib/cql/result/void.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Result.Void do 2 | @moduledoc false 3 | 4 | defstruct [] 5 | 6 | def decode("") do 7 | %__MODULE__{} 8 | end 9 | end 10 | 11 | -------------------------------------------------------------------------------- /lib/cql/startup.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Startup do 2 | @moduledoc false 3 | 4 | import 
CQL.DataTypes.Encoder 5 | 6 | alias CQL.{Request, Startup} 7 | 8 | defstruct [options: %{"CQL_VERSION" => "3.0.0"}] 9 | 10 | defimpl Request do 11 | def encode(%Startup{options: options}) do 12 | with {:ok, body} <- ok(string_map(options)) do 13 | {:STARTUP, body} 14 | end 15 | end 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/cql/supported.ex: -------------------------------------------------------------------------------- 1 | defmodule CQL.Supported do 2 | @moduledoc """ 3 | Represents a CQL supported response 4 | """ 5 | 6 | import CQL.DataTypes.Decoder 7 | 8 | defstruct [options: %{}] 9 | 10 | def decode(body) do 11 | {options, ""} = string_multimap(body) 12 | 13 | %__MODULE__{options: options} 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Mixfile do 2 | use Mix.Project 3 | 4 | def project, do: [ 5 | app: :cassandra, 6 | version: version(), 7 | name: "Cassandra", 8 | elixir: "~> 1.4", 9 | build_embedded: Mix.env == :prod, 10 | start_permanent: Mix.env == :prod, 11 | compilers: [:elixir_make | Mix.compilers], 12 | test_coverage: [tool: ExCoveralls], 13 | preferred_cli_env: [ 14 | "coveralls": :test, 15 | "coveralls.detail": :test, 16 | "coveralls.post": :test, 17 | "coveralls.html": :test, 18 | ], 19 | source_url: "https://github.com/cafebazaar/elixir-cassandra", 20 | description: "A pure Elixir driver for Apache Cassandra", 21 | package: package(), 22 | docs: docs(), 23 | deps: deps(), 24 | ] 25 | 26 | def application, do: [ 27 | mod: {Cassandra, []}, 28 | applications: [:logger, :db_connection], 29 | ] 30 | 31 | defp deps, do: [ 32 | {:db_connection, "~> 1.1"}, 33 | {:elixir_make, "~> 0.4", runtime: false}, 34 | {:ex_doc, "~> 0.18.0", only: :dev}, 35 | {:excheck, "~> 0.5", only: :test}, 36 | {:excoveralls, "~> 0.6", only: 
:test}, 37 | {:lz4, "~> 0.2"}, 38 | {:poolboy, "~> 1.5"}, 39 | {:triq, "~> 1.3", only: :test}, 40 | {:uuid, "~> 1.1"}, 41 | ] 42 | 43 | 44 | defp version, do: "1.0.2" 45 | 46 | defp docs, do: [ 47 | main: "readme", 48 | extras: ["README.md"], 49 | ] 50 | 51 | defp package, do: [ 52 | licenses: ["Apache 2.0"], 53 | maintainers: ["Ali Rajabi", "Hassan Zamani"], 54 | links: %{ 55 | "Github" => "https://github.com/cafebazaar/elixir-cassandra", 56 | "Docs" => "https://hexdocs.pm/cassandra/#{version()}/", 57 | }, 58 | files: ~w(mix.exs lib native Makefile README.md LICENSE.md), 59 | ] 60 | end 61 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "certifi": {:hex, :certifi, "2.3.1", "d0f424232390bf47d82da8478022301c561cf6445b5b5fb6a84d49a9e76d2639", [:rebar3], [{:parse_trans, "3.2.0", [hex: :parse_trans, repo: "hexpm", optional: false]}], "hexpm"}, 3 | "connection": {:hex, :connection, "1.0.4", "a1cae72211f0eef17705aaededacac3eb30e6625b04a6117c1b2db6ace7d5976", [:mix], [], "hexpm"}, 4 | "db_connection": {:hex, :db_connection, "1.1.3", "89b30ca1ef0a3b469b1c779579590688561d586694a3ce8792985d4d7e575a61", [:mix], [{:connection, "~> 1.0.2", [hex: :connection, repo: "hexpm", optional: false]}, {:poolboy, "~> 1.5", [hex: :poolboy, repo: "hexpm", optional: true]}, {:sbroker, "~> 1.0", [hex: :sbroker, repo: "hexpm", optional: true]}], "hexpm"}, 5 | "earmark": {:hex, :earmark, "1.2.6", "b6da42b3831458d3ecc57314dff3051b080b9b2be88c2e5aa41cd642a5b044ed", [:mix], [], "hexpm"}, 6 | "elixir_make": {:hex, :elixir_make, "0.4.2", "332c649d08c18bc1ecc73b1befc68c647136de4f340b548844efc796405743bf", [:mix], [], "hexpm"}, 7 | "ex_doc": {:hex, :ex_doc, "0.18.4", "4406b8891cecf1352f49975c6d554e62e4341ceb41b9338949077b0d4a97b949", [:mix], [{:earmark, "~> 1.1", [hex: :earmark, repo: "hexpm", optional: false]}], "hexpm"}, 8 | "excheck": {:hex, :excheck, "0.6.0", 
"f8595a8ac2c0abc0d060c1a4fce7d26f41574543366a52d5f3c84de30a69747b", [:mix], [], "hexpm"}, 9 | "excoveralls": {:hex, :excoveralls, "0.10.0", "a4508bdd408829f38e7b2519f234b7fd5c83846099cda348efcb5291b081200c", [:mix], [{:hackney, "~> 1.13", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"}, 10 | "exjsx": {:hex, :exjsx, "4.0.0", "60548841e0212df401e38e63c0078ec57b33e7ea49b032c796ccad8cde794b5c", [:mix], [{:jsx, "~> 2.8.0", [hex: :jsx, optional: false]}]}, 11 | "exprintf": {:hex, :exprintf, "0.2.0", "6c97364c75ddb848d0d6142ea3d882567369fc60f0b88a009f41470cab068a56", [:mix], []}, 12 | "exprof": {:hex, :exprof, "0.2.0", "b03f50d0d33e2f18c8e047d9188ba765dc32daba0b553ed717a98a78561d5eaf", [:mix], [{:exprintf, "~> 0.1", [hex: :exprintf, optional: false]}]}, 13 | "hackney": {:hex, :hackney, "1.13.0", "24edc8cd2b28e1c652593833862435c80661834f6c9344e84b6a2255e7aeef03", [:rebar3], [{:certifi, "2.3.1", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "5.1.2", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "1.0.1", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "1.0.2", [hex: :mimerl, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "1.1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm"}, 14 | "idna": {:hex, :idna, "5.1.2", "e21cb58a09f0228a9e0b95eaa1217f1bcfc31a1aaa6e1fdf2f53a33f7dbd9494", [:rebar3], [{:unicode_util_compat, "0.3.1", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm"}, 15 | "jason": {:hex, :jason, "1.1.1", "d3ccb840dfb06f2f90a6d335b536dd074db748b3e7f5b11ab61d239506585eb2", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"}, 16 | "jsx": {:hex, :jsx, "2.8.2", "7acc7d785b5abe8a6e9adbde926a24e481f29956dd8b4df49e3e4e7bcc92a018", [:mix, :rebar3], []}, 17 | "lz4": {:hex, :lz4, "0.2.4", "b8199e2b8d97a0730df9fb9ef02c26873989cc0be8b3be746cb5cd451986d487", [:rebar3], [], "hexpm"}, 18 | 
"metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm"}, 19 | "mimerl": {:hex, :mimerl, "1.0.2", "993f9b0e084083405ed8252b99460c4f0563e41729ab42d9074fd5e52439be88", [:rebar3], [], "hexpm"}, 20 | "parse_trans": {:hex, :parse_trans, "3.2.0", "2adfa4daf80c14dc36f522cf190eb5c4ee3e28008fc6394397c16f62a26258c2", [:rebar3], [], "hexpm"}, 21 | "poolboy": {:hex, :poolboy, "1.5.1", "6b46163901cfd0a1b43d692657ed9d7e599853b3b21b95ae5ae0a777cf9b6ca8", [:rebar], [], "hexpm"}, 22 | "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.1", "28a4d65b7f59893bc2c7de786dec1e1555bd742d336043fe644ae956c3497fbe", [:make, :rebar], [], "hexpm"}, 23 | "triq": {:hex, :triq, "1.3.0", "d9ed60f3cd2b6bacbb721bc9873e67e07b02e5b97c63d40db35b12670a7f1bf4", [:rebar3], [], "hexpm"}, 24 | "unicode_util_compat": {:hex, :unicode_util_compat, "0.3.1", "a1f612a7b512638634a603c8f401892afbf99b8ce93a45041f8aaca99cadb85e", [:rebar3], [], "hexpm"}, 25 | "uuid": {:hex, :uuid, "1.1.8", "e22fc04499de0de3ed1116b770c7737779f226ceefa0badb3592e64d5cfb4eb9", [:mix], [], "hexpm"}, 26 | } 27 | -------------------------------------------------------------------------------- /native/murmur_nif.c: -------------------------------------------------------------------------------- 1 | #include "erl_nif.h" 2 | 3 | /*----------------------------------------------------------------------------- 4 | * MurmurHash3 was written by Austin Appleby, and is placed in the public 5 | * domain. The author hereby disclaims copyright to this source code. 
6 | */ 7 | 8 | //----------------------------------------------------------------------------- 9 | // Platform-specific functions and macros 10 | 11 | // Microsoft Visual Studio 12 | 13 | #if defined(_MSC_VER) 14 | 15 | typedef unsigned char uint8_t; 16 | typedef unsigned long uint32_t; 17 | typedef unsigned __int64 uint64_t; 18 | 19 | typedef char int8_t; 20 | typedef long int32_t; 21 | typedef __int64 int64_t; 22 | 23 | #define FORCE_INLINE __forceinline 24 | 25 | #include 26 | 27 | #define ROTL32(x,y) _rotl(x,y) 28 | #define ROTL64(x,y) _rotl64(x,y) 29 | 30 | #define BIG_CONSTANT(x) (x) 31 | 32 | // Other compilers 33 | 34 | #else // defined(_MSC_VER) 35 | 36 | #include 37 | 38 | #define FORCE_INLINE inline __attribute__((always_inline)) 39 | 40 | inline uint32_t rotl32 ( int32_t x, int8_t r ) 41 | { 42 | // cast to unsigned for logical right bitshift (to match C* MM3 implementation) 43 | return (x << r) | ((int32_t) (((uint32_t) x) >> (32 - r))); 44 | } 45 | 46 | inline int64_t rotl64 ( int64_t x, int8_t r ) 47 | { 48 | // cast to unsigned for logical right bitshift (to match C* MM3 implementation) 49 | return (x << r) | ((int64_t) (((uint64_t) x) >> (64 - r))); 50 | } 51 | 52 | #define ROTL32(x,y) rotl32(x,y) 53 | #define ROTL64(x,y) rotl64(x,y) 54 | 55 | #define BIG_CONSTANT(x) (x##LL) 56 | 57 | #endif // !defined(_MSC_VER) 58 | 59 | //----------------------------------------------------------------------------- 60 | // Block read - if your platform needs to do endian-swapping or can only 61 | // handle aligned reads, do the conversion here 62 | 63 | FORCE_INLINE int64_t getblock ( const int64_t * p, int i ) 64 | { 65 | return p[i]; 66 | } 67 | 68 | //----------------------------------------------------------------------------- 69 | // Finalization mix - force all bits of a hash block to avalanche 70 | 71 | FORCE_INLINE int64_t fmix ( int64_t k ) 72 | { 73 | // cast to unsigned for logical right bitshift (to match C* MM3 implementation) 74 | k ^= ((uint64_t) 
k) >> 33; 75 | k *= BIG_CONSTANT(0xff51afd7ed558ccd); 76 | k ^= ((uint64_t) k) >> 33; 77 | k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); 78 | k ^= ((uint64_t) k) >> 33; 79 | 80 | return k; 81 | } 82 | 83 | int64_t x64_128 (const void * key, const int len, 84 | const uint32_t seed) 85 | { 86 | const int8_t * data = (const int8_t*)key; 87 | const int nblocks = len / 16; 88 | 89 | int64_t h1 = seed; 90 | int64_t h2 = seed; 91 | 92 | int64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); 93 | int64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); 94 | int64_t k1 = 0; 95 | int64_t k2 = 0; 96 | 97 | const int64_t * blocks = (const int64_t *)(data); 98 | const int8_t * tail = (const int8_t*)(data + nblocks*16); 99 | 100 | // ---------- body ----------- 101 | 102 | int i; 103 | for(i = 0; i < nblocks; i++) 104 | { 105 | int64_t k1 = getblock(blocks,i*2+0); 106 | int64_t k2 = getblock(blocks,i*2+1); 107 | 108 | k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; 109 | 110 | h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729; 111 | 112 | k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; 113 | 114 | h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5; 115 | } 116 | 117 | //---------- tail ---------- 118 | 119 | switch(len & 15) 120 | { 121 | case 15: k2 ^= ((int64_t) (tail[14])) << 48; 122 | case 14: k2 ^= ((int64_t) (tail[13])) << 40; 123 | case 13: k2 ^= ((int64_t) (tail[12])) << 32; 124 | case 12: k2 ^= ((int64_t) (tail[11])) << 24; 125 | case 11: k2 ^= ((int64_t) (tail[10])) << 16; 126 | case 10: k2 ^= ((int64_t) (tail[ 9])) << 8; 127 | case 9: k2 ^= ((int64_t) (tail[ 8])) << 0; 128 | k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; 129 | 130 | case 8: k1 ^= ((int64_t) (tail[ 7])) << 56; 131 | case 7: k1 ^= ((int64_t) (tail[ 6])) << 48; 132 | case 6: k1 ^= ((int64_t) (tail[ 5])) << 40; 133 | case 5: k1 ^= ((int64_t) (tail[ 4])) << 32; 134 | case 4: k1 ^= ((int64_t) (tail[ 3])) << 24; 135 | case 3: k1 ^= ((int64_t) (tail[ 2])) << 16; 136 | case 2: k1 ^= ((int64_t) (tail[ 1])) << 8; 137 | case 1: 
k1 ^= ((int64_t) (tail[ 0])) << 0; 138 | k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; 139 | }; 140 | 141 | // ---------- finalization --------- 142 | 143 | h1 ^= len; h2 ^= len; 144 | 145 | h1 += h2; 146 | h2 += h1; 147 | 148 | h1 = fmix(h1); 149 | h2 = fmix(h2); 150 | 151 | h1 += h2; 152 | h2 += h1; 153 | 154 | return h1; 155 | } 156 | 157 | static ERL_NIF_TERM x64_128_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) 158 | { 159 | int cell; 160 | int64_t ret; 161 | int8_t * key; 162 | uint32_t seed, len; 163 | ERL_NIF_TERM head, tail; 164 | 165 | tail = argv[0]; 166 | 167 | if (argc != 2 || 168 | !enif_get_list_length(env, tail, &len) || 169 | !enif_get_uint(env, argv[1], &seed)) { 170 | return enif_make_badarg(env); 171 | } 172 | key = malloc(len * sizeof(int8_t)); 173 | for (uint32_t i = 0; i < len; i++) { 174 | if (!enif_get_list_cell(env, tail, &head, &tail) || 175 | !enif_get_int(env, head, &cell)) { 176 | return enif_make_badarg(env); 177 | } 178 | key[i] = (int8_t) cell; 179 | } 180 | ret = x64_128((void *) key, len, seed); 181 | return enif_make_int64(env, ret); 182 | } 183 | 184 | static ErlNifFunc nif_funcs[] = { 185 | {"native_x64_128", 2, x64_128_nif, 0} 186 | }; 187 | 188 | int load (ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) { 189 | (void) env; 190 | (void) priv_data; 191 | (void) load_info; 192 | return 0; 193 | } 194 | 195 | int upgrade (ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info) { 196 | (void) env; 197 | (void) priv_data; 198 | (void) old_priv_data; 199 | (void) load_info; 200 | return 0; 201 | } 202 | 203 | void unload (ErlNifEnv* env, void* priv_data) { 204 | (void) env; 205 | (void) priv_data; 206 | } 207 | 208 | ERL_NIF_INIT(Elixir.Cassandra.Murmur3, nif_funcs, load, load, upgrade, unload) 209 | -------------------------------------------------------------------------------- /test/cassandra/cache_test.exs: 
--------------------------------------------------------------------------------
defmodule Cassandra.CacheTest do
  use ExUnit.Case, async: true

  alias Cassandra.Cache

  # One shared cache table for all tests in this module; tests use distinct
  # keys so they do not interfere with each other.
  setup_all do
    {:ok, cache} = Cache.new(__MODULE__)

    {:ok, %{cache: cache}}
  end

  describe "#new" do
    test "without name" do
      assert :error = Cache.new(nil)
    end

    test "with name" do
      assert {:ok, :table_name} = Cache.new(:table_name)
    end
  end

  describe "#fetch" do
    test "fetches given key from cache", %{cache: cache} do
      value = %{this: "is", a: "value"}
      assert ^value = Cache.put(cache, :fetch_test, value)
      assert {:ok, ^value} = Cache.fetch(cache, :fetch_test)
    end

    test "returns :error when key is missing", %{cache: cache} do
      assert :error = Cache.fetch(cache, :missing_key_test)
    end
  end

  describe "#delete" do
    # BUGFIX: test description typo "deleted" -> "deletes"
    test "deletes given key from cache", %{cache: cache} do
      value = [:value, :to, :delete]
      assert ^value = Cache.put(cache, :delete_test, value)
      assert :ok = Cache.delete(cache, :delete_test)
      assert :error = Cache.fetch(cache, :delete_test)
    end
  end

  describe "#put_new_lazy" do
    test "puts result of function in cache if key is missing", %{cache: cache} do
      value = "the value"
      func = fn -> {:ok, value} end
      assert ^value = Cache.put_new_lazy(cache, :put_new_lazy_test_put, func)
      assert {:ok, ^value} = Cache.fetch(cache, :put_new_lazy_test_put)
    end

    test "with error returning func", %{cache: cache} do
      assert {:error, :some_value} = Cache.put_new_lazy(cache, :put_new_lazy_test_error1, fn -> :some_value end)
      assert {:error, :reason} = Cache.put_new_lazy(cache, :put_new_lazy_test_error2, fn -> {:error, :reason} end)
    end

    test "do not call func if key exists", %{cache: cache} do
      # The function raises if invoked, proving the cached value short-circuits it.
      func = fn -> raise "Must not be called" end
      assert 1 = Cache.put(cache, :put_new_lazy_test_call, 1)
      assert 1 = Cache.put_new_lazy(cache, :put_new_lazy_test_call, func)
    end
  end
end
-------------------------------------------------------------------------------- /test/cassandra/cluster_test.exs: --------------------------------------------------------------------------------
defmodule Cassandra.ClusterTest do
  use ExUnit.Case

  alias Cassandra.{Cluster, Host, ConnectionError}

  @moduletag :capture_log

  @host Cassandra.TestHelper.host

  setup_all do
    {:ok, cluster} = Cluster.start_link(contact_points: [@host])
    {:ok, %{cluster: cluster}}
  end

  describe "no available contact point" do
    test "#start_link" do
      # Port 9111 is not a Cassandra port, so the cluster must fail to start.
      assert {:error, %ConnectionError{reason: "not available"}} = Cluster.start(port: 9111)
    end
  end

  test "#host", %{cluster: cluster} do
    assert [host = %Host{ip: ip} | _] = Cluster.hosts(cluster)
    assert ^host = Cluster.host(cluster, ip)
    assert [^host] = Cluster.host(cluster, [ip])
  end

  test "#hosts", %{cluster: cluster} do
    assert Enum.all?(Cluster.hosts(cluster), &match?(%Host{}, &1))
  end

  test "#up_hosts", %{cluster: cluster} do
    assert Enum.all?(Cluster.up_hosts(cluster), &match?(%Host{status: :up}, &1))
  end

  test "#find_replicas", %{cluster: cluster} do
    assert [{_, _, _, _}] = Cluster.find_replicas(cluster, "system", "test")
  end
end
-------------------------------------------------------------------------------- /test/cassandra/murmur3_test.exs: --------------------------------------------------------------------------------
defmodule Cassandra.Murmur3Test do
  use ExUnit.Case, async: true

  test "#x64_128" do
    tests = [
      {"123", -7468325962851647638},
      {String.duplicate("\x00\xff\x10\xfa\x99", 10), 5837342703291459765},
      {String.duplicate("\xfe", 8), -8927430733708461935},
      {String.duplicate("\x10", 8),
1446172840243228796}, 10 | {"9223372036854775807", 7162290910810015547}, 11 | {"\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0E\x0F\x10", -5563837382979743776}, 12 | {"\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0E\x0F\x10\x11", -1513403162740402161}, 13 | {"\x03\x04\x05\x06\a\b\t\n\v\f\r\x0E\x0F\x10\x11\x12", -495360443712684655}, 14 | {"\x04\x05\x06\a\b\t\n\v\f\r\x0E\x0F\x10\x11\x12\x13", 1734091135765407943}, 15 | {"\x05\x06\a\b\t\n\v\f\r\x0E\x0F\x10\x11\x12\x13\x14", -3199412112042527988}, 16 | {"\x06\a\b\t\n\v\f\r\x0E\x0F\x10\x11\x12\x13\x14\x15", -6316563938475080831}, 17 | {"\a\b\t\n\v\f\r\x0E\x0F\x10\x11\x12\x13\x14\x15\x16", 8228893370679682632}, 18 | {"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 5457549051747178710}, 19 | {"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", -2824192546314762522}, 20 | {"\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE", -833317529301936754}, 21 | {"\x00\x01\x02\x03\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 6463632673159404390}, 22 | {"\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE", -1672437813826982685}, 23 | {"\xFE\xFE\xFE\xFE", 4566408979886474012}, 24 | {"\x00\x00\x00\x00", -3485513579396041028}, 25 | {"\x00\x01\x7F\x7F", 6573459401642635627}, 26 | {"\x00\xFF\xFF\xFF", 123573637386978882}, 27 | {"\xFF\x01\x02\x03", -2839127690952877842}, 28 | {"\x00\x01\x02\x03\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", 6463632673159404390}, 29 | {"\xE2\xE7", -8582699461035929883}, 30 | {"\xE2\xE7\xE2\xE7\xE2\xE7\x01", 2222373981930033306}, 31 | ] 32 | for {string, hash} <- tests do 33 | assert hash == Cassandra.Murmur3.x64_128(string) 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /test/cassandra/schema/partitioner/murmur3_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Cluster.Schema.Partitioner.Murmur3Test do 2 | use 
ExUnit.Case, async: true 3 | 4 | alias Cassandra.Cluster.Schema.Partitioner.Murmur3 5 | 6 | test "#create_token" do 7 | tests = [ 8 | {"123", -7468325962851647638}, 9 | {String.duplicate("\x00\xff\x10\xfa\x99", 10), 5837342703291459765}, 10 | {String.duplicate("\xfe", 8), -8927430733708461935}, 11 | {String.duplicate("\x10", 8), 1446172840243228796}, 12 | {"9223372036854775807", 7162290910810015547}, 13 | ] 14 | for {parition_key, token} <- tests do 15 | assert token == Murmur3.create_token(parition_key) 16 | end 17 | end 18 | 19 | test "#parse_token" do 20 | tests = [ 21 | {"-7468325962851647638", -7468325962851647638}, 22 | {"5837342703291459765", 5837342703291459765}, 23 | {"-8927430733708461935", -8927430733708461935}, 24 | {"1446172840243228796", 1446172840243228796}, 25 | {"7162290910810015547", 7162290910810015547}, 26 | ] 27 | for {string, token} <- tests do 28 | assert token == Murmur3.parse_token(string) 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /test/cassandra/session_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.SessionTest do 2 | use Cassandra.SessionCase, 3 | table: "people", 4 | create: """ 5 | id uuid, 6 | name varchar, 7 | age int, 8 | PRIMARY KEY (id) 9 | """ 10 | 11 | @moduletag capture_log: true 12 | 13 | test "execute", %{session: session} do 14 | assert %CQL.Result.Rows{} = Session.execute(session, "SELECT * FROM system_schema.tables") 15 | end 16 | 17 | test "execute on default keyspace", %{session: session} do 18 | assert %CQL.Result.Rows{} = Session.execute(session, "SELECT * FROM people") 19 | end 20 | 21 | test "insert", %{session: session} do 22 | insert = "INSERT INTO #{@table} (id, name, age) VALUES (now(), :name, :age);" 23 | 24 | characters = [ 25 | %{name: "Bilbo", age: 50}, 26 | %{name: "Frodo", age: 33}, 27 | %{name: "Gandolf", age: 2019}, 28 | ] 29 | 30 | assert characters 31 | |> 
Enum.map(&Session.execute(session, insert, values: &1)) 32 | |> Enum.all?(&match?(%CQL.Result.Void{}, &1)) 33 | 34 | assert %CQL.Result.Rows{rows_count: 3, columns: ["name", "age"]} = 35 | rows = Session.execute(session, "SELECT name, age FROM #{@table};") 36 | 37 | for char <- characters do 38 | assert !is_nil(Enum.find(rows.rows, fn [name, age] -> name == char[:name] and age == char[:age] end)) 39 | end 40 | 41 | assert [%CQL.Result.Rows{rows_count: 2}, %CQL.Result.Rows{rows_count: 1}] = 42 | Session.run_stream(session, "SELECT name, age FROM #{@table};", &Enum.to_list/1, page_size: 2) 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /test/cql/frame_test.exs: -------------------------------------------------------------------------------- 1 | defmodule CQL.FrameTest do 2 | use ExUnit.Case, async: true 3 | 4 | test "#body_length" do 5 | assert {:ok, 10} = CQL.Frame.body_length(<<0::40, 10::integer-32>>) 6 | assert %CQL.Error{message: "invalid frame header"} = CQL.Frame.body_length(<<0::40, 10::integer-32, "extra bytes">>) 7 | assert %CQL.Error{message: "invalid frame header"} = CQL.Frame.body_length(<<>>) 8 | end 9 | 10 | test "#decode_header" do 11 | assert %CQL.Error{message: "invalid frame header"} = CQL.Frame.decode_header(<<>>) 12 | end 13 | 14 | test "#decode" do 15 | assert %CQL.Error{message: "invalid frame"} = CQL.Frame.decode(<<>>) 16 | end 17 | 18 | test "#set_stream_id" do 19 | assert %CQL.Error{message: "invalid frame"} = CQL.Frame.set_stream_id(<<>>, 1) 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /test/cql/requests_test.exs: -------------------------------------------------------------------------------- 1 | defmodule CQL.RequestsTest do 2 | use ExUnit.Case, async: true 3 | 4 | test "startup" do 5 | assert {:ok, frame} = CQL.encode(%CQL.Startup{}) 6 | assert {:ok, %CQL.Frame{operation: :STARTUP}} = CQL.Frame.decode(frame) 7 | end 8 | 9 | 
test "options" do 10 | assert {:ok, frame} = CQL.encode(%CQL.Options{}) 11 | assert {:ok, %CQL.Frame{operation: :OPTIONS, body: ""}} = CQL.Frame.decode(frame) 12 | end 13 | 14 | test "register" do 15 | assert {:ok, frame} = CQL.encode(%CQL.Register{}) 16 | assert {:ok, %CQL.Frame{operation: :REGISTER}} = CQL.Frame.decode(frame) 17 | assert {:ok, _} = CQL.encode(%CQL.Register{types: ["STATUS_CHANGE"]}) 18 | assert %CQL.Error{code: :invalid, info: info} = CQL.encode(%CQL.Register{types: "TEST"}) 19 | assert info =~ "Expected a 'string_list'" 20 | end 21 | 22 | test "query" do 23 | assert {:ok, frame} = CQL.encode(%CQL.Query{query: "TEST"}) 24 | assert {:ok, %CQL.Frame{operation: :QUERY}} = CQL.Frame.decode(frame) 25 | assert %CQL.Error{code: :invalid, message: "invalid query request"} = CQL.encode(%CQL.Query{query: "test", params: nil}) 26 | assert %CQL.Error{code: :invalid, info: info} = CQL.encode(%CQL.Query{query: []}) 27 | assert info =~ "Expected a 'long_string'" 28 | end 29 | 30 | test "prepare" do 31 | assert {:ok, frame} = CQL.encode(%CQL.Prepare{query: "TEST"}) 32 | assert {:ok, %CQL.Frame{operation: :PREPARE}} = CQL.Frame.decode(frame) 33 | end 34 | 35 | test "execute" do 36 | assert {:ok, frame} = CQL.encode(%CQL.Execute{prepared: %CQL.Result.Prepared{metadata: %{column_types: []}}, params: %CQL.QueryParams{}}) 37 | assert {:ok, %CQL.Frame{operation: :EXECUTE}} = CQL.Frame.decode(frame) 38 | assert %CQL.Error{code: :invalid, message: "invalid execute request"} = CQL.encode(%CQL.Execute{}) 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /test/data_types_test.exs: -------------------------------------------------------------------------------- 1 | defmodule CQL.DataTypesTest do 2 | use ExUnit.Case 3 | use ExCheck 4 | 5 | import CQL.DataTypes 6 | 7 | property "ascii" do 8 | for_all bin in binary() do 9 | bin == bin |> encode(:ascii) |> drop_size |> decode(:ascii) 10 | end 11 | end 12 | 13 | property 
"bigint" do 14 | for_all n in int() do 15 | n == n |> encode(:bigint) |> has_size(8) |> decode(:bigint) 16 | end 17 | end 18 | 19 | property "blob" do 20 | for_all bin in binary() do 21 | bin == bin |> encode(:blob) |> drop_size |> decode(:blob) 22 | end 23 | end 24 | 25 | test "blob terms" do 26 | terms = [ 27 | nil, 28 | 100, 29 | 100.001, 30 | [1, 2, 3], 31 | %{a: 1, b: 2, c: "last"}, 32 | true, 33 | ] 34 | 35 | for term <- terms do 36 | assert term == term |> encode(:blob) |> drop_size |> decode(:blob) 37 | end 38 | end 39 | 40 | property "boolean" do 41 | for_all b in bool() do 42 | b == b |> encode(:boolean) |> has_size(1) |> decode(:boolean) 43 | end 44 | end 45 | 46 | property "counter" do 47 | for_all n in int() do 48 | n == n |> encode(:counter) |> has_size(8) |> decode(:counter) 49 | end 50 | end 51 | 52 | test "date" do 53 | date = DateTime.utc_now |> DateTime.to_date 54 | assert date == date |> encode(:date) |> drop_size |> decode(:date) 55 | end 56 | 57 | test "time" do 58 | time = DateTime.utc_now |> DateTime.to_time 59 | assert time == time |> encode(:time) |> drop_size |> decode(:time) 60 | 61 | time = ~T[01:20:59.999999] 62 | assert time == time |> encode(:time) |> drop_size |> decode(:time) 63 | 64 | time = ~T[01:20:33.567890] 65 | assert time == time |> encode(:time) |> drop_size |> decode(:time) 66 | end 67 | 68 | test "timestamp with naive time" do 69 | time = ~N[2016-02-03 04:05:06.007] 70 | assert time == time |> encode(:timestamp) |> drop_size |> decode(:timestamp) 71 | end 72 | 73 | test "timestamp with DateTime" do 74 | time = DateTime.utc_now 75 | naive = time |> DateTime.to_naive |> Map.update!(:microsecond, fn {n, 6} -> {div(n, 1000) * 1000, 3} end) 76 | assert naive == time |> encode(:timestamp) |> drop_size |> decode(:timestamp) 77 | end 78 | 79 | test "decimal" do 80 | xs = [ 81 | {111222333444555666777888999000, 30}, 82 | {-100200300400500600700800900, 89}, 83 | {9374756681239761865712657819245, 98}, 84 | ] 85 | for x <- xs do 86 | 
assert x == x |> encode(:decimal) |> drop_size |> decode(:decimal) 87 | end 88 | end 89 | 90 | property "decimal" do 91 | for_all x in {pos_integer(), int()} do 92 | x == x |> encode(:decimal) |> drop_size |> decode(:decimal) 93 | end 94 | end 95 | 96 | test "double" do 97 | xs = [ 98 | 1.2345, 99 | 0.987654321, 100 | -23.591, 101 | ] 102 | for x <- xs do 103 | assert x == x |> encode(:double) |> has_size(8) |> decode(:double) 104 | end 105 | end 106 | 107 | test "float" do 108 | xs = [ 109 | 1.235, 110 | 0.981, 111 | -23.590, 112 | ] 113 | for x <- xs do 114 | assert trunc(x * 1000) == x |> encode(:float) |> has_size(4) |> decode(:float) |> Kernel.*(1000) |> trunc 115 | end 116 | end 117 | 118 | test "inet" do 119 | nets = [ 120 | {127, 0, 0, 1}, 121 | {192, 168, 100, 102}, 122 | ] 123 | for net <- nets do 124 | assert net == net |> encode(:inet) |> has_size(4) |> decode(:inet) 125 | end 126 | end 127 | 128 | test "inet v6" do 129 | nets = [ 130 | {0, 0, 0, 0, 0, 0, 0, 1}, 131 | ] 132 | for net <- nets do 133 | assert net == net |> encode(:inet) |> has_size(16) |> decode(:inet) 134 | end 135 | end 136 | 137 | property "int" do 138 | for_all n in int() do 139 | n == n |> encode(:int) |> has_size(4) |> decode(:int) 140 | end 141 | end 142 | 143 | test "list" do 144 | lists = [ 145 | {:int, [10, 20, 30]}, 146 | {:text, ["name", "example", "sample"]}, 147 | ] 148 | for {type, list} <- lists do 149 | assert list == list |> encode({:list, type}) |> drop_size |> decode({:list, type}) 150 | end 151 | end 152 | 153 | test "map" do 154 | maps = [ 155 | {{:text, :int}, %{"a" => 10, "b" => 20, "c" => 30}}, 156 | {{:text, :text}, %{"aaa" => "name", "bbb" => "example", "ccc" => "sample"}}, 157 | {{:int, :double}, %{1 => 11.1, 10 => 22.2, 100 => 33.3}}, 158 | ] 159 | for {type, map} <- maps do 160 | assert map == map |> encode({:map, type}) |> drop_size |> decode({:map, type}) 161 | end 162 | 163 | map = %{a: 10, b: 20, c: 30} 164 | [{type, expected} | _] = maps 165 | assert 
expected == map |> encode({:map, type}) |> drop_size |> decode({:map, type}) 166 | end 167 | 168 | test "tuple" do 169 | types = [:int, :double, :text, :int, :float] 170 | tuple = {123, 23.983, "Test", 91, 1.0} 171 | assert tuple == tuple |> encode({:tuple, types}) |> drop_size |> decode({:tuple, types}) 172 | end 173 | 174 | test "varint" do 175 | xs = [ 176 | 9988776655443322110987654321, 177 | -19477209892471957969713409154091853, 178 | 89769087908775467436532432, 179 | 1000000000000000000000000000, 180 | ] 181 | for x <- xs do 182 | assert x == x |> encode(:varint) |> drop_size |> decode(:varint) 183 | end 184 | end 185 | 186 | test "uuid" do 187 | uuids = [UUID.uuid1, UUID.uuid4] 188 | for uuid <- uuids do 189 | assert uuid == uuid |> encode(:uuid) |> drop_size |> decode(:uuid) 190 | end 191 | 192 | assert %CQL.Error{code: :invalid, info: info} = encode("bad id", :uuid) 193 | assert info =~ "Expected a 'uuid'" 194 | end 195 | 196 | defp drop_size(<<_::integer-32, rest::bytes>>), do: rest 197 | 198 | defp has_size(buffer, size) do 199 | <> = buffer 200 | if n == size do 201 | value 202 | else 203 | buffer 204 | end 205 | end 206 | end 207 | -------------------------------------------------------------------------------- /test/integration/data_types_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cassandra.Integration.DataTypesTest do 2 | use Cassandra.SessionCase, 3 | table: "data_types_test", 4 | create: """ 5 | f_ascii ascii, 6 | f_bigint bigint, 7 | f_blob blob, 8 | f_boolean boolean, 9 | f_date date, 10 | f_decimal decimal, 11 | f_double double, 12 | f_float float, 13 | f_inet inet, 14 | f_int int, 15 | f_smallint smallint, 16 | f_text text, 17 | f_time time, 18 | f_timestamp timestamp, 19 | f_timeuuid timeuuid, 20 | f_tinyint tinyint, 21 | f_uuid uuid, 22 | f_varchar varchar, 23 | f_varint varint, 24 | f_map1 map, 25 | f_map2 map, 26 | f_list1 list, 27 | f_list2 list, 28 | f_set set, 29 | PRIMARY KEY 
(f_timeuuid, f_timestamp, f_uuid) 30 | """ 31 | 32 | test "DATA TYPES", %{session: session} do 33 | query = """ 34 | INSERT INTO #{@table} ( 35 | f_ascii, 36 | f_bigint, 37 | f_blob, 38 | f_boolean, 39 | f_date, 40 | f_decimal, 41 | f_double, 42 | f_float, 43 | f_inet, 44 | f_int, 45 | f_smallint, 46 | f_text, 47 | f_time, 48 | f_timestamp, 49 | f_timeuuid, 50 | f_tinyint, 51 | f_uuid, 52 | f_varchar, 53 | f_varint, 54 | f_map1, 55 | f_map2, 56 | f_list1, 57 | f_list2, 58 | f_set 59 | ) VALUES ( 60 | :f_ascii, 61 | :f_bigint, 62 | :f_blob, 63 | :f_boolean, 64 | :f_date, 65 | :f_decimal, 66 | :f_double, 67 | :f_float, 68 | :f_inet, 69 | :f_int, 70 | :f_smallint, 71 | :f_text, 72 | :f_time, 73 | :f_timestamp, 74 | :f_timeuuid, 75 | :f_tinyint, 76 | :f_uuid, 77 | :f_varchar, 78 | :f_varint, 79 | :f_map1, 80 | :f_map2, 81 | :f_list1, 82 | :f_list2, 83 | :f_set 84 | ); 85 | """ 86 | data = %{ 87 | f_ascii: "abcdefgh", 88 | f_bigint: 1000000000, 89 | f_blob: <<1,2,3,4,5,6,7,8,9,10,11,12>>, 90 | f_boolean: true, 91 | f_date: ~D[2016-12-18], 92 | f_decimal: {12345, 6}, 93 | f_double: 1.985, 94 | f_float: -1.5, 95 | f_inet: {127, 0, 0, 1}, 96 | f_int: 1000000, 97 | f_smallint: 100, 98 | f_text: "Hello World برای همه", 99 | f_time: ~T[01:20:33.567890], 100 | f_timestamp: ~N[2016-02-03 04:05:06.007], 101 | f_timeuuid: UUID.uuid1(), 102 | f_tinyint: 1, 103 | f_uuid: UUID.uuid4(), 104 | f_varchar: "Some یونی کد string", 105 | f_varint: 1122334455667788990099887766, 106 | f_map1: %{"foo" => "bar", "baz" => "biz"}, 107 | f_map2: %{1 => true, 2 => false}, 108 | f_list1: ["a", "bb", "ccc", "dddd"], 109 | f_list2: [10, 20, 30, 40], 110 | f_set: MapSet.new([10, 20, 10, 20, 30]), 111 | } 112 | 113 | assert %CQL.Result.Void{} = Session.execute(session, query, values: data) 114 | assert %CQL.Result.Rows{} = rows = Session.execute(session, "SELECT * FROM #{@table} LIMIT 1;") 115 | 116 | [result] = CQL.Result.Rows.to_map(rows) 117 | 118 | assert true = 119 | data 120 | |> Enum.map(fn 
{key, value} -> result[Atom.to_string(key)] == value end)
      |> Enum.all?
  end
end
-------------------------------------------------------------------------------- /test/test_helper.exs: --------------------------------------------------------------------------------
ExCheck.start
Logger.configure(level: :info)

# Code.require_file("support/cluster_manager.exs", __DIR__)

# Shared ExUnit case template: each using module gets its own table inside the
# test keyspace, created once in setup_all and truncated before every test.
defmodule Cassandra.SessionCase do
  defmacro __using__(options) do
    quote bind_quoted: [options: options] do
      use ExUnit.Case

      alias Cassandra.{Cluster, Session, Statement}

      @host Cassandra.TestHelper.host
      @keyspace Cassandra.TestHelper.keyspace
      @table "#{@keyspace}.#{options[:table]}"
      @create_table "CREATE TABLE #{@table} (#{options[:create]});"
      @truncate_table "TRUNCATE #{@table};"

      setup_all do
        session = __MODULE__.Session

        options = [
          contact_points: [@host],
          session: session,
          keyspace: @keyspace,
          cache: __MODULE__.Cache,
        ]

        {:ok, cluster} = Cluster.start_link(options)
        {:ok, _} = Session.start_link(cluster, options)

        %CQL.Result.SchemaChange{} = Session.execute(session, @create_table)

        {:ok, %{session: session}}
      end

      setup %{session: session} do
        %CQL.Result.Void{} = Session.execute(session, @truncate_table)
        {:ok, %{session: session}}
      end
    end
  end
end

# Suite-level helpers that create the test keyspace before the run and drop it
# again when the VM exits.
defmodule Cassandra.TestHelper do
  alias Cassandra.Connection

  @keyspace "elixir_cassandra_test"

  def keyspace, do: @keyspace

  # First contact point; overridable via the CASSANDRA_CONTACT_POINTS env var
  # (comma-separated list, only the head is used here).
  def host do
    (System.get_env("CASSANDRA_CONTACT_POINTS") || "127.0.0.1")
    |> String.split(",")
    |> hd
  end

  def drop_keyspace, do: CQL.encode!(%CQL.Query{query:
    "DROP KEYSPACE IF EXISTS #{@keyspace};"
  })

  def create_keyspace, do: CQL.encode!(%CQL.Query{query: ~s(
    CREATE KEYSPACE #{@keyspace}
    WITH replication = {
      'class': 'SimpleStrategy',
      'replication_factor': 1
    };
  )})

  def setup do
    {:ok, %{socket: socket}} = Connection.connect(host: host())
    # best-effort drop: result intentionally ignored, the keyspace may not exist yet
    Connection.query(socket, drop_keyspace())
    %CQL.Result.SchemaChange{} = Connection.query(socket, create_keyspace())
    :gen_tcp.close(socket)
  end

  def teardown do
    {:ok, %{socket: socket}} = Connection.connect(host: host())
    %CQL.Result.SchemaChange{} = Connection.query(socket, drop_keyspace())
    :gen_tcp.close(socket)
  end
end

System.at_exit fn _ ->
  Cassandra.TestHelper.teardown
end

Cassandra.TestHelper.setup

ExUnit.start
--------------------------------------------------------------------------------