├── config ├── docs.exs ├── config.exs ├── test.exs └── dev.exs ├── guides ├── operations │ ├── mutate_rows.md │ ├── mutate_row.md │ └── read_rows.md ├── introduction │ ├── overview.md │ └── installation.md └── mutations │ └── single_row.md ├── lib ├── admin │ └── table_admin │ │ ├── modify_column_families.ex │ │ ├── modification.ex │ │ ├── table.ex │ │ ├── gc_rule.ex │ │ └── table_admin.ex ├── grpc │ ├── admin │ │ ├── common.pb.ex │ │ ├── instance.pb.ex │ │ ├── table.pb.ex │ │ ├── bigtable_table_admin.pb.ex │ │ └── bigtable_instance_admin.pb.ex │ ├── data │ │ ├── status.pb.ex │ │ ├── http.pb.ex │ │ ├── bigtable.pb.ex │ │ └── data.pb.ex │ ├── client_stub.ex │ └── protos │ │ └── bigtable.proto ├── supervisor.ex ├── bigtable.ex ├── utils.ex ├── connection │ └── worker.ex ├── auth.ex ├── data │ ├── mutate_rows.ex │ ├── mutate_row.ex │ ├── sample_row_keys.ex │ ├── read_rows.ex │ ├── read_modify_write_row.ex │ ├── check_and_mutate_row.ex │ ├── mutations.ex │ ├── row_set.ex │ └── chunk_reader.ex ├── request.ex └── connection.ex ├── test ├── test_helper.exs ├── data │ ├── sample_row_keys_test.exs │ ├── mutate_row_test.exs │ ├── mutate_rows_test.exs │ ├── row_set_test.exs │ ├── read_rows_test.exs │ ├── read_modify_write_row_test.exs │ ├── mutations_test.exs │ ├── row_filter_test.exs │ └── check_and_mutate_row_test.exs ├── connection │ └── connection_test.exs ├── admin │ ├── table.exs │ └── gc_rule_test.exs └── google_acceptance │ └── read_rows_acceptance_test.exs ├── .formatter.exs ├── .travis.yml ├── Dockerfile ├── docker-compose.yaml ├── .dockerignore ├── .github └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── .gitignore ├── LICENSE ├── README.md ├── mix.exs ├── README_old.md └── mix.lock /config/docs.exs: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /guides/operations/mutate_rows.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /lib/admin/table_admin/modify_column_families.ex: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | use Mix.Config 2 | 3 | import_config "#{Mix.env()}.exs" 4 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | ExUnit.configure(exclude: [external: true]) 2 | 3 | ExUnit.start() 4 | -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /lib/admin/table_admin/modification.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Admin.Modification do 2 | @moduledoc false 3 | def create(_id) do 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /guides/introduction/overview.md: 
-------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | Elixir client library for Google Bigtable 4 | 5 | ## Warning! 6 | 7 | WORK IN PROGRESS. DOCUMENTATION MAY BE INCORRECT. DO NOT USE IN PRODUCTION. 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: generic 2 | 3 | services: docker 4 | 5 | install: 6 | - docker-compose up --exit-code-from bigtable-test --build 7 | after_success: 8 | - bash <(curl -s https://codecov.io/bash) 9 | -------------------------------------------------------------------------------- /config/test.exs: -------------------------------------------------------------------------------- 1 | use Mix.Config 2 | 3 | config :goth, 4 | disabled: true 5 | 6 | config :bigtable, 7 | project: "dev", 8 | instance: "dev", 9 | table: "test", 10 | endpoint: "localhost:9035", 11 | ssl: false 12 | -------------------------------------------------------------------------------- /lib/grpc/admin/common.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Bigtable.Admin.V2.StorageType do 2 | @moduledoc false 3 | use Protobuf, enum: true, syntax: :proto3 4 | 5 | field :STORAGE_TYPE_UNSPECIFIED, 0 6 | field :SSD, 1 7 | field :HDD, 2 8 | end 9 | -------------------------------------------------------------------------------- /config/dev.exs: -------------------------------------------------------------------------------- 1 | use Mix.Config 2 | 3 | config :bigtable, 4 | project: "dev", 5 | instance: "dev", 6 | table: "dev", 7 | endpoint: "localhost:9035", 8 | ssl: false 9 | 10 | config :mix_test_watch, 11 | tasks: [ 12 | "test" 13 | ], 14 | clear: true 15 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM elixir:alpine 2 | ENV BIGTABLE_EMULATOR_HOST="bigtable-emulator:9035" 3 | ENV MIX_ENV=test 4 | WORKDIR /app 5 | RUN apk update && apk add --no-cache bash && apk add --no-cache curl 6 | RUN mix local.rebar --force && mix local.hex --force 7 | COPY . . 
8 | RUN mix do deps.get, deps.compile, compile 9 | CMD mix coverage 10 | -------------------------------------------------------------------------------- /lib/supervisor.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Supervisor do 2 | @moduledoc false 3 | use Supervisor 4 | 5 | def start_link(_) do 6 | Supervisor.start_link(__MODULE__, :ok, name: __MODULE__) 7 | end 8 | 9 | def init(:ok) do 10 | children = [ 11 | Bigtable.Connection, 12 | {DynamicSupervisor, name: Bigtable.ChunkReader.Supervisor, strategy: :one_for_one} 13 | ] 14 | 15 | Supervisor.init(children, strategy: :one_for_one) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/grpc/data/status.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Rpc.Status do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | code: integer, 7 | message: String.t(), 8 | details: [Google.Protobuf.Any.t()] 9 | } 10 | defstruct [:code, :message, :details] 11 | 12 | field(:code, 1, type: :int32) 13 | field(:message, 2, type: :string) 14 | field(:details, 3, repeated: true, type: Google.Protobuf.Any) 15 | end 16 | -------------------------------------------------------------------------------- /lib/admin/table_admin/table.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Admin.Table do 2 | @moduledoc """ 3 | Provides functionality for building `Google.Bigtable.Admin.V2.Table`. 4 | """ 5 | 6 | alias Google.Bigtable.Admin.V2 7 | 8 | def build(column_families) when is_map(column_families) do 9 | families = 10 | column_families 11 | |> Map.new(fn {name, gc_rule} -> 12 | {name, V2.ColumnFamily.new(gc_rule: gc_rule)} 13 | end) 14 | 15 | V2.Table.new(column_families: families) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | bigtable-emulator: 4 | image: jscott22/bigtable-emulator 5 | command: 6 | [ 7 | "-cf", 8 | "dev.test.cf1,dev.test.cf2,dev.test.otherFamily,dev.dev.cf1,dev.dev.cf2,dev.dev.otherFamily", 9 | ] 10 | ports: 11 | - "9035:9035" 12 | expose: 13 | - "9035" 14 | bigtable-test: 15 | build: 16 | dockerfile: Dockerfile 17 | context: . 18 | volumes: 19 | - ./cover:/app/cover 20 | depends_on: 21 | - bigtable-emulator 22 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | /.elixir_ls/ 4 | .vscode 5 | 6 | # The directory Mix downloads your dependencies sources to. 7 | /deps/ 8 | 9 | # Where 3rd-party dependencies like ExDoc output generated docs. 10 | /doc/ 11 | 12 | # Ignore .fetch files in case you like to edit your project deps locally. 13 | /.fetch 14 | 15 | # If the VM crashes, it generates a dump, let's ignore it too. 16 | erl_crash.dump 17 | 18 | # Also ignore archive artifacts (built via "mix archive.build"). 19 | *.ez 20 | 21 | # Ignore package tarball (built via "mix hex.build"). 
22 | bigtable-*.tar 23 | 24 | /priv/googleapis 25 | 26 | /secret/service.json 27 | .DS_Store 28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /lib/bigtable.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable do 2 | @moduledoc """ 3 | Elixir client library for Google Bigtable 4 | """ 5 | use Application 6 | 7 | @doc false 8 | def start(_type, _args) do 9 | poolboy_config = [ 10 | {:name, {:local, :connection_pool}}, 11 | {:worker_module, Bigtable.Connection.Worker}, 12 | {:size, Application.get_env(:bigtable, :pool_size, 128)}, 13 | {:max_overflow, Application.get_env(:bigtable, :pool_overflow, 0)} 14 | ] 15 | 16 | children = [ 17 | Bigtable.Supervisor, 18 | :poolboy.child_spec(:connection_pool, poolboy_config, []) 19 | ] 20 | 21 | opts = [strategy: :one_for_one, name: Bigtable] 22 | Supervisor.start_link(children, opts) 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /lib/utils.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Utils do 2 | @moduledoc false 3 | 4 | @spec configured_table_name() :: binary() 5 | def configured_table_name do 6 | instance = configured_instance_name() 7 | table = Application.get_env(:bigtable, :table) 8 | 9 | "#{instance}/tables/#{table}" 10 | end 11 | 12 | @spec configured_instance_name() :: binary() 13 | def configured_instance_name do 14 | project = get_project() 15 | instance = get_instance() 16 | "projects/#{project}/instances/#{instance}" 17 | end 18 | 19 | @spec get_project() :: binary() 20 | defp get_project do 21 | Application.get_env(:bigtable, :project) 22 | end 23 | 24 | @spec get_instance() :: binary() 25 | defp get_instance do 26 | Application.get_env(:bigtable, :instance) 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | /.elixir_ls/ 4 | .vscode 5 | # If you run "mix test --cover", coverage assets end up here. 6 | /cover/ 7 | 8 | # The directory Mix downloads your dependencies sources to. 9 | /deps/ 10 | 11 | # Where 3rd-party dependencies like ExDoc output generated docs. 12 | /doc/ 13 | 14 | # Ignore .fetch files in case you like to edit your project deps locally. 15 | /.fetch 16 | 17 | # If the VM crashes, it generates a dump, let's ignore it too. 18 | erl_crash.dump 19 | 20 | # Also ignore archive artifacts (built via "mix archive.build"). 
21 | *.ez 22 | 23 | # Ignore package tarball (built via "mix hex.build"). 24 | bigtable-*.tar 25 | 26 | /priv/googleapis 27 | 28 | /secret/service.json 29 | /lib/typed/test_schemas.ex 30 | .DS_Store 31 | -------------------------------------------------------------------------------- /test/data/sample_row_keys_test.exs: -------------------------------------------------------------------------------- 1 | defmodule SampleRowKeysTest do 2 | @moduledoc false 3 | alias Bigtable.SampleRowKeys 4 | 5 | use ExUnit.Case 6 | 7 | doctest SampleRowKeys 8 | 9 | describe "SampleRowKeys.build()" do 10 | test "should build a SampleRowKeysRequest with configured table" do 11 | assert SampleRowKeys.build() == expected_request() 12 | end 13 | 14 | test "should build a ReadRowsRequest with custom table" do 15 | table_name = "custom-table" 16 | 17 | assert SampleRowKeys.build(table_name) == expected_request(table_name) 18 | end 19 | end 20 | 21 | defp expected_request(table_name \\ Bigtable.Utils.configured_table_name()) do 22 | %Google.Bigtable.V2.SampleRowKeysRequest{ 23 | app_profile_id: "", 24 | table_name: table_name 25 | } 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /guides/operations/mutate_row.md: -------------------------------------------------------------------------------- 1 | ### Mutate Row 2 | 3 | #### SetCell 4 | 5 | ```elixir 6 | alias Bigtable.{Mutations, MutateRow} 7 | 8 | Mutations.build("Ride#123") 9 | |> Mutations.set_cell("ride", "foo", "bar") 10 | |> MutateRow.mutate 11 | ``` 12 | 13 | #### DeleteFromColumn 14 | 15 | ```elixir 16 | alias Bigtable.{Mutations, MutateRow} 17 | 18 | Mutations.build("Ride#123") 19 | |> Mutations.delete_from_column("ride", "foo") 20 | |> MutateRow.mutate 21 | ``` 22 | 23 | #### DeleteFromFamily 24 | 25 | ```elixir 26 | alias Bigtable.{Mutations, MutateRow} 27 | 28 | Mutations.build("Ride#123") 29 | |> Mutations.delete_from_family("ride") 30 | |> MutateRow.mutate 31 | ``` 32 | 33 | #### DeleteFromRow 34 | 35 | ```elixir 36 | alias Bigtable.{Mutations, MutateRow} 37 | 38 | Mutations.build("Ride#123") 39 | |> Mutations.delete_from_row() 40 | |> MutateRow.mutate 41 | ``` 42 | -------------------------------------------------------------------------------- /guides/mutations/single_row.md: -------------------------------------------------------------------------------- 1 | ### Mutations - Single Row 2 | 3 | #### SetCell 4 | 5 | ```elixir 6 | alias Bigtable.{Mutations, MutateRow} 7 | 8 | Mutations.build("Ride#123") 9 | |> Mutations.set_cell("ride", "foo", "bar") 10 | |> MutateRow.mutate 11 | ``` 12 | 13 | #### DeleteFromColumn 14 | 15 | ```elixir 16 | alias Bigtable.{Mutations, MutateRow} 17 | 18 | Mutations.build("Ride#123") 19 | |> Mutations.delete_from_column("ride", "foo") 20 | |> MutateRow.mutate 21 | ``` 22 | 23 | #### DeleteFromFamily 24 | 25 | ```elixir 26 | alias Bigtable.{Mutations, MutateRow} 27 | 28 | Mutations.build("Ride#123") 29 | |> Mutations.delete_from_family("ride") 30 | |> MutateRow.mutate 31 | ``` 32 | 33 | #### DeleteFromRow 34 | 35 | ```elixir 36 | alias Bigtable.{Mutations, MutateRow} 37 | 38 | Mutations.build("Ride#123") 39 | |> Mutations.delete_from_row() 40 | |> MutateRow.mutate 41 | ``` 42 | -------------------------------------------------------------------------------- /test/connection/connection_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ConnectionTest do 2 | alias Bigtable.Connection 3 | use ExUnit.Case 4 | 5 | 
doctest Connection 6 | 7 | describe "Connection.get_connection() " do 8 | test "should return a Channel struct" do 9 | [host, port] = 10 | Connection.get_endpoint() 11 | |> String.split(":") 12 | 13 | expected = %GRPC.Channel{ 14 | adapter: GRPC.Adapter.Gun, 15 | adapter_payload: %{conn_pid: "MockPid"}, 16 | cred: nil, 17 | host: host, 18 | port: String.to_integer(port), 19 | scheme: "http" 20 | } 21 | 22 | connection = Connection.connect() 23 | 24 | result = %{ 25 | connection 26 | | adapter_payload: %{connection.adapter_payload | conn_pid: "MockPid"} 27 | } 28 | 29 | assert result == expected 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /lib/connection/worker.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Connection.Worker do 2 | @moduledoc false 3 | alias Bigtable.Connection 4 | use GenServer 5 | 6 | def start_link(_) do 7 | GenServer.start_link(__MODULE__, nil, []) 8 | end 9 | 10 | def get_connection(pid) do 11 | GenServer.call(pid, :get_connection) 12 | end 13 | 14 | def init(_) do 15 | Process.flag(:trap_exit, true) 16 | {:ok, Connection.connect()} 17 | end 18 | 19 | def handle_call(:get_connection, _from, state) do 20 | {:reply, state, state} 21 | end 22 | 23 | def handle_info({:EXIT, _from, reason}, state) do 24 | disconnect(state) 25 | {:stop, reason, state} 26 | end 27 | 28 | def handle_info(_msg, state) do 29 | {:noreply, state} 30 | end 31 | 32 | def terminate(_reason, state) do 33 | disconnect(state) 34 | state 35 | end 36 | 37 | defp disconnect(connection) do 38 | Connection.disconnect(connection) 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /lib/grpc/client_stub.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Service do 2 | @moduledoc false 3 | use GRPC.Service, name: "google.bigtable.v2.Bigtable" 4 | 5 | alias Google.Bigtable.V2 6 | 7 | rpc(:ReadRows, V2.ReadRowsRequest, stream(V2.ReadRowsResponse)) 8 | rpc(:MutateRow, V2.MutateRowRequest, stream(V2.MutateRowResponse)) 9 | rpc(:MutateRows, V2.MutateRowsRequest, stream(V2.MutateRowsResponse)) 10 | rpc(:CheckAndMutateRow, V2.CheckAndMutateRowRequest, stream(V2.CheckAndMutateRowResponse)) 11 | rpc(:SampleRowKeys, V2.SampleRowKeysRequest, stream(V2.SampleRowKeysResponse)) 12 | rpc(:ReadModifyWriteRow, V2.ReadModifyWriteRowRequest, stream(V2.ReadModifyWriteRowResponse)) 13 | 14 | rpc( 15 | :ListTables, 16 | Google.Bigtable.Admin.V2.ListTablesRequest, 17 | stream(Google.Bigtable.Admin.V2.ListTablesResponse) 18 | ) 19 | end 20 | 21 | defmodule Bigtable.Stub do 22 | @moduledoc false 23 | use GRPC.Stub, service: Bigtable.Service 24 | end 25 | -------------------------------------------------------------------------------- /guides/introduction/installation.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## Mix Dependency 4 | 5 | ```elixir 6 | #mix.exs 7 | def deps do 8 | [ 9 | {:bigtable, "~> 0.7.0"}, 10 | ] 11 | end 12 | ``` 13 | 14 | ## Configuration 15 | 16 | #### Local Development Using Bigtable Emulator 17 | 18 | ```elixir 19 | #dev.exs 20 | config :bigtable, 21 | project: "project", 22 | instance: "instance", 23 | table: "table_name", # Default table name to use in requests 24 | endpoint: "localhost:9035", 25 | ssl: false 26 | 27 | config :goth, 28 | disabled: true 29 | ``` 30 | 31 | #### Production Configuration 32 | 33 
| ```elixir 34 | #prod.exs 35 | config :bigtable, 36 | project: "project_id", 37 | instance: "instance_id", 38 | # Default table name to use in requests 39 | table: "table_name", 40 | # Optional connection pool size. Defaults to 128 41 | pool_size: 128, 42 | # Optional connection pool overflow when size is exceeded 43 | pool_overflow: 128 44 | ``` 45 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /test/data/mutate_row_test.exs: -------------------------------------------------------------------------------- 1 | defmodule MutateRowTest do 2 | @moduledoc false 3 | alias Bigtable.{MutateRow, Mutations} 4 | 5 | use ExUnit.Case 6 | 7 | setup do 8 | [ 9 | entry: Mutations.build("Test#123") 10 | ] 11 | end 12 | 13 | describe "MutateRow.build() " do 14 | test "should build a MutateRowRequest with configured table", context do 15 | result = context.entry |> MutateRow.build() 16 | 17 | assert result == expected_request() 18 | end 19 | 20 | test "should build a MutateRowRequest with custom table", context do 21 | table_name = "custom-table" 22 | 23 | result = 24 | context.entry 25 | |> MutateRow.build(table_name) 26 | 27 | assert result == expected_request(table_name) 28 | end 29 | end 30 | 31 | defp expected_request(table_name \\ Bigtable.Utils.configured_table_name()) do 32 | %Google.Bigtable.V2.MutateRowRequest{ 33 | app_profile_id: "", 34 | mutations: [], 35 | row_key: "Test#123", 36 | table_name: table_name 37 | } 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Bzzt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /lib/admin/table_admin/gc_rule.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Admin.GcRule do 2 | @moduledoc """ 3 | Provides functions for creating garbage collection rules 4 | """ 5 | alias Google.Bigtable.Admin.V2 6 | alias Google.Protobuf.Duration 7 | alias V2.GcRule.{Intersection, Union} 8 | 9 | def max_num_versions(limit) when is_integer(limit) do 10 | V2.GcRule.new(rule: {:max_num_versions, limit}) 11 | end 12 | 13 | def max_age(milliseconds) do 14 | duration = build_duration(milliseconds) 15 | V2.GcRule.new(rule: {:max_age, duration}) 16 | end 17 | 18 | def intersection(rules) when is_list(rules) do 19 | V2.GcRule.new(rule: {:intersection, Intersection.new(rules: rules)}) 20 | end 21 | 22 | def union(rules) when is_list(rules) do 23 | V2.GcRule.new(rule: {:union, Union.new(rules: rules)}) 24 | end 25 | 26 | defp build_duration(milliseconds) do 27 | {seconds, remainder} = 28 | ~w|div rem|a 29 | |> Enum.map(&apply(Kernel, &1, [milliseconds, 1000])) 30 | |> List.to_tuple() 31 | 32 | Duration.new( 33 | seconds: seconds, 34 | nanos: remainder * 1_000_000 35 | ) 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /lib/admin/table_admin/table_admin.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Admin.TableAdmin do 2 | @moduledoc """ 3 | Provides functions to build `Google.Bigtable.Admin.V2.ListTablesRequest` and submit them to Bigtable. 
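A rough sketch of how this module is typically used (the column family name, GC-rule age, and table id below are illustrative placeholders, not values fixed by the API):

    # column family map -> V2.Table -> create table ("my_table" is a placeholder id)
    %{"cf1" => Bigtable.Admin.GcRule.max_age(30_000)}
    |> Bigtable.Admin.Table.build()
    |> Bigtable.Admin.TableAdmin.create_table("my_table")

    # lists tables under the configured instance
    {:ok, _response} = Bigtable.Admin.TableAdmin.list_tables()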
4 | """ 5 | alias Bigtable.{Request, Utils} 6 | alias Google.Bigtable.Admin.V2 7 | alias V2.BigtableTableAdmin.Stub 8 | 9 | def list_tables(opts \\ []) do 10 | opts 11 | |> Keyword.put_new(:parent, Utils.configured_instance_name()) 12 | |> V2.ListTablesRequest.new() 13 | |> Request.process_request(&Stub.list_tables/3) 14 | end 15 | 16 | def create_table(table, table_id, opts \\ []) do 17 | V2.CreateTableRequest.new( 18 | parent: Keyword.get(opts, :parent, Utils.configured_instance_name()), 19 | table_id: table_id, 20 | table: table, 21 | initial_splits: Keyword.get(opts, :initial_splits, []) 22 | ) 23 | |> Request.process_request(&Stub.create_table/3) 24 | end 25 | 26 | def delete_table(name) do 27 | V2.DeleteTableRequest.new(name: name) 28 | |> Request.process_request(&Stub.delete_table/3) 29 | end 30 | 31 | def get_table(name) do 32 | V2.GetTableRequest.new(name: name) 33 | |> Request.process_request(&Stub.get_table/3) 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /lib/auth.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Auth do 2 | @moduledoc false 3 | 4 | @scopes [ 5 | "https://www.googleapis.com/auth/bigtable.data", 6 | "https://www.googleapis.com/auth/bigtable.data.readonly", 7 | "https://www.googleapis.com/auth/cloud-bigtable.data", 8 | "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", 9 | "https://www.googleapis.com/auth/cloud-platform", 10 | "https://www.googleapis.com/auth/cloud-platform.read-only", 11 | "https://www.googleapis.com/auth/bigtable.admin", 12 | "https://www.googleapis.com/auth/bigtable.admin.cluster", 13 | "https://www.googleapis.com/auth/bigtable.admin.instance", 14 | "https://www.googleapis.com/auth/bigtable.admin.table", 15 | "https://www.googleapis.com/auth/cloud-bigtable.admin", 16 | "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", 17 | "https://www.googleapis.com/auth/cloud-bigtable.admin.table" 18 | ] 19 | 20 | @doc """ 21 | Returns the current `Goth.Token` that will be used to authorize Bigtable requests 22 | """ 23 | @spec get_token() :: Goth.Token.t() 24 | def get_token do 25 | case Application.get_env(:goth, :disabled, false) do 26 | true -> 27 | %{token: ""} 28 | 29 | false -> 30 | {:ok, token} = 31 | @scopes 32 | |> Enum.join(" ") 33 | |> Goth.Token.for_scope() 34 | 35 | token 36 | end 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /lib/data/mutate_rows.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.MutateRows do 2 | @moduledoc """ 3 | Provides functionality for building and submitting a `Google.Bigtable.V2.MutateRowsRequest`. 4 | """ 5 | alias Bigtable.{Request, Utils} 6 | alias Google.Bigtable.V2 7 | alias V2.Bigtable.Stub 8 | 9 | @type response :: {:ok, V2.MutateRowsResponse.t()} | {:error, any()} 10 | 11 | @doc """ 12 | Builds a `Google.Bigtable.V2.MutateRowsRequest` given a `Google.Bigtable.V2.MutateRowsRequest.Entry` and optional table name. 13 | """ 14 | @spec build(list(V2.MutateRowsRequest.Entry.t()), binary()) :: V2.MutateRowsRequest.t() 15 | def build(entries, table_name \\ Utils.configured_table_name()) 16 | when is_binary(table_name) and is_list(entries) do 17 | V2.MutateRowsRequest.new( 18 | table_name: table_name, 19 | entries: entries 20 | ) 21 | end 22 | 23 | @doc """ 24 | Submits a `Google.Bigtable.V2.MutateRowsRequest` to Bigtable. 
25 | 26 | Can be called with either a list of `Google.Bigtable.V2.MutateRowsRequest.Entry` or a `Google.Bigtable.V2.MutateRowsRequest`. 27 | 28 | Returns a `Google.Bigtable.V2.MutateRowsResponse` 29 | """ 30 | @spec mutate(V2.MutateRowsRequest.t()) :: response() 31 | def mutate(%V2.MutateRowsRequest{} = request) do 32 | request 33 | |> Request.process_request(&Stub.mutate_rows/3, stream: true) 34 | end 35 | 36 | @spec mutate([V2.MutateRowsRequest.Entry.t()]) :: response() 37 | def mutate(entries) when is_list(entries) do 38 | entries 39 | |> build() 40 | |> mutate 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /lib/data/mutate_row.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.MutateRow do 2 | @moduledoc """ 3 | Provides functionality for building and submitting a `Google.Bigtable.V2.MutateRowRequest`. 4 | """ 5 | alias Bigtable.{Request, Utils} 6 | alias Google.Bigtable.V2 7 | alias V2.Bigtable.Stub 8 | alias V2.MutateRowsRequest.Entry 9 | 10 | @type response :: {:ok, V2.MutateRowResponse.t()} | {:error, any()} 11 | 12 | @doc """ 13 | Builds a `Google.Bigtable.V2.MutateRowRequest` given a `Google.Bigtable.V2.MutateRowsRequest.Entry` and optional table name. 14 | """ 15 | @spec build(V2.MutateRowsRequest.Entry.t(), binary()) :: V2.MutateRowRequest.t() 16 | def build(%Entry{} = row_mutations, table_name \\ Utils.configured_table_name()) 17 | when is_binary(table_name) do 18 | V2.MutateRowRequest.new( 19 | table_name: table_name, 20 | row_key: row_mutations.row_key, 21 | mutations: row_mutations.mutations 22 | ) 23 | end 24 | 25 | @doc """ 26 | Submits a `Google.Bigtable.V2.MutateRowRequest` given either a `Google.Bigtable.V2.MutateRowsRequest.Entry` or a `Google.Bigtable.V2.MutateRowRequest`. 27 | 28 | Returns a `Google.Bigtable.V2.MutateRowResponse`. 29 | """ 30 | @spec mutate(V2.MutateRowRequest.t()) :: response() 31 | def mutate(%V2.MutateRowRequest{} = request) do 32 | request 33 | |> Request.process_request(&Stub.mutate_row/3, single: true) 34 | end 35 | 36 | @spec mutate(V2.MutateRowsRequest.Entry.t()) :: response() 37 | def mutate(%Entry{} = entry) do 38 | entry 39 | |> build() 40 | |> mutate() 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /lib/data/sample_row_keys.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.SampleRowKeys do 2 | @moduledoc """ 3 | Provides functionality for building and submitting `Google.Bigtable.V2.SampleRowKeysRequest`. 4 | """ 5 | alias Bigtable.Request 6 | alias Google.Bigtable.V2 7 | alias V2.Bigtable.Stub 8 | 9 | @doc """ 10 | Builds a `Google.Bigtable.V2.SampleRowKeysRequest` given a row_key and optional table name. 11 | 12 | Defaults to configured table name. 
13 | 14 | ## Examples 15 | 16 | ### Default Table 17 | iex> Bigtable.SampleRowKeys.build() 18 | %Google.Bigtable.V2.SampleRowKeysRequest{ 19 | app_profile_id: "", 20 | table_name: "projects/dev/instances/dev/tables/test", 21 | } 22 | 23 | ### Custom Table 24 | iex> table_name = "projects/project-id/instances/instance-id/tables/table-name" 25 | iex> Bigtable.SampleRowKeys.build(table_name) 26 | %Google.Bigtable.V2.SampleRowKeysRequest{ 27 | app_profile_id: "", 28 | table_name: "projects/project-id/instances/instance-id/tables/table-name", 29 | } 30 | """ 31 | @spec build(binary()) :: V2.SampleRowKeysRequest.t() 32 | def build(table_name \\ Bigtable.Utils.configured_table_name()) 33 | when is_binary(table_name) do 34 | V2.SampleRowKeysRequest.new(table_name: table_name, app_profile_id: "") 35 | end 36 | 37 | @doc """ 38 | Submits a `Google.Bigtable.V2.SampleRowKeysRequest` to Bigtable. 39 | """ 40 | @spec read(V2.SampleRowKeysRequest.t()) :: {:ok, V2.SampleRowKeysResponse} | {:error, any()} 41 | def read(%V2.SampleRowKeysRequest{} = request \\ build()) do 42 | request 43 | |> Request.process_request(&Stub.sample_row_keys/3, stream: true) 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /test/data/mutate_rows_test.exs: -------------------------------------------------------------------------------- 1 | defmodule MutateRowsTest do 2 | @moduledoc false 3 | 4 | alias Bigtable.{MutateRows, Mutations} 5 | use ExUnit.Case 6 | 7 | setup do 8 | [ 9 | entries: [Mutations.build("Test#123"), Mutations.build("Test#124")] 10 | ] 11 | end 12 | 13 | describe "MutateRow.build() " do 14 | test "should build a MutateRowsRequest with configured table", context do 15 | expected = %Google.Bigtable.V2.MutateRowsRequest{ 16 | app_profile_id: "", 17 | entries: [ 18 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 19 | mutations: [], 20 | row_key: "Test#123" 21 | }, 22 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 23 | mutations: [], 24 | row_key: "Test#124" 25 | } 26 | ], 27 | table_name: Bigtable.Utils.configured_table_name() 28 | } 29 | 30 | result = context.entries |> MutateRows.build() 31 | 32 | assert result == expected 33 | end 34 | 35 | test "should build a MutateRowsRequest with custom table", context do 36 | table_name = "custom-table" 37 | 38 | expected = %Google.Bigtable.V2.MutateRowsRequest{ 39 | app_profile_id: "", 40 | entries: [ 41 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 42 | mutations: [], 43 | row_key: "Test#123" 44 | }, 45 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 46 | mutations: [], 47 | row_key: "Test#124" 48 | } 49 | ], 50 | table_name: table_name 51 | } 52 | 53 | result = 54 | context.entries 55 | |> MutateRows.build(table_name) 56 | 57 | assert result == expected 58 | end 59 | end 60 | end 61 | -------------------------------------------------------------------------------- /lib/grpc/data/http.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Api.Http do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | rules: [Google.Api.HttpRule.t()], 7 | fully_decode_reserved_expansion: boolean 8 | } 9 | defstruct [:rules, :fully_decode_reserved_expansion] 10 | 11 | field(:rules, 1, repeated: true, type: Google.Api.HttpRule) 12 | field(:fully_decode_reserved_expansion, 2, type: :bool) 13 | end 14 | 15 | defmodule Google.Api.HttpRule do 16 | @moduledoc false 17 | use Protobuf, syntax: :proto3 18 | 19 | @type t :: %__MODULE__{ 20 | pattern: 
{atom, any}, 21 | selector: String.t(), 22 | body: String.t(), 23 | response_body: String.t(), 24 | additional_bindings: [Google.Api.HttpRule.t()] 25 | } 26 | defstruct [:pattern, :selector, :body, :response_body, :additional_bindings] 27 | 28 | oneof(:pattern, 0) 29 | field(:selector, 1, type: :string) 30 | field(:get, 2, type: :string, oneof: 0) 31 | field(:put, 3, type: :string, oneof: 0) 32 | field(:post, 4, type: :string, oneof: 0) 33 | field(:delete, 5, type: :string, oneof: 0) 34 | field(:patch, 6, type: :string, oneof: 0) 35 | field(:custom, 8, type: Google.Api.CustomHttpPattern, oneof: 0) 36 | field(:body, 7, type: :string) 37 | field(:response_body, 12, type: :string) 38 | field(:additional_bindings, 11, repeated: true, type: Google.Api.HttpRule) 39 | end 40 | 41 | defmodule Google.Api.CustomHttpPattern do 42 | @moduledoc false 43 | use Protobuf, syntax: :proto3 44 | 45 | @type t :: %__MODULE__{ 46 | kind: String.t(), 47 | path: String.t() 48 | } 49 | defstruct [:kind, :path] 50 | 51 | field(:kind, 1, type: :string) 52 | field(:path, 2, type: :string) 53 | end 54 | -------------------------------------------------------------------------------- /lib/request.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Request do 2 | @moduledoc false 3 | alias Bigtable.{Auth, Connection} 4 | alias Connection.Worker 5 | 6 | @spec process_request(any(), function(), list()) :: {:ok, any()} | {:error, any()} 7 | def process_request(request, request_fn, opts \\ []) do 8 | response = 9 | :poolboy.transaction( 10 | :connection_pool, 11 | fn pid -> 12 | token = Auth.get_token() 13 | 14 | pid 15 | |> Worker.get_connection() 16 | |> request_fn.(request, get_metadata(token)) 17 | end, 18 | 10_000 19 | ) 20 | 21 | handle_response(response, opts) 22 | end 23 | 24 | @spec handle_response(any(), list()) :: {:ok, any()} | {:error, any()} 25 | defp handle_response({:ok, response, _headers}, opts) do 26 | if Keyword.get(opts, :stream, false) do 27 | processed = 28 | response 29 | |> process_stream() 30 | 31 | {:ok, processed} 32 | else 33 | {:ok, response} 34 | end 35 | end 36 | 37 | defp handle_response(error, _opts) do 38 | case error do 39 | {:error, _msg} -> 40 | error 41 | 42 | msg -> 43 | {:error, msg} 44 | end 45 | end 46 | 47 | @spec process_stream(Enumerable.t()) :: [{:ok | :error, any}] 48 | defp process_stream(stream) do 49 | stream 50 | |> Stream.take_while(&remaining_resp?/1) 51 | |> Enum.to_list() 52 | end 53 | 54 | @spec remaining_resp?({:ok | :error | :trailers, any()}) :: boolean() 55 | defp remaining_resp?({status, _}), do: status != :trailers 56 | 57 | @spec get_metadata(map()) :: Keyword.t() 58 | defp get_metadata(%{token: token}) do 59 | metadata = %{authorization: "Bearer #{token}"} 60 | [metadata: metadata, content_type: "application/grpc", return_headers: true] 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /lib/connection.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Connection do 2 | @moduledoc false 3 | 4 | use GenServer 5 | @default_endpoint "bigtable.googleapis.com:443" 6 | 7 | ## Client API 8 | def start_link(_opts) do 9 | GenServer.start_link(__MODULE__, :ok, name: __MODULE__) 10 | end 11 | 12 | @doc """ 13 | Connects to Bigtable and returns a `GRPC.Channel`. 
14 | """ 15 | @spec connect() :: GRPC.Channel.t() 16 | def connect do 17 | GenServer.call(__MODULE__, :connect) 18 | end 19 | 20 | @doc """ 21 | Disconnects from the provided `GRPC.Channel`. 22 | """ 23 | @spec disconnect(GRPC.Channel.t()) :: :ok 24 | def disconnect(channel) do 25 | GenServer.cast(__MODULE__, {:disconnect, channel}) 26 | end 27 | 28 | # Server Callbacks 29 | @spec init(:ok) :: {:ok, map()} 30 | def init(:ok) do 31 | {:ok, %{endpoint: get_endpoint(), opts: build_opts()}} 32 | end 33 | 34 | def handle_call(:connect, _from, %{endpoint: endpoint, opts: opts} = state) do 35 | {:ok, channel} = 36 | GRPC.Stub.connect( 37 | endpoint, 38 | opts 39 | ) 40 | 41 | {:reply, channel, state} 42 | end 43 | 44 | def handle_cast({:disconnect, channel}, state) do 45 | GRPC.Stub.disconnect(channel) 46 | {:noreply, state} 47 | end 48 | 49 | def handle_info(_msg, state) do 50 | {:noreply, state} 51 | end 52 | 53 | @spec build_opts() :: list() 54 | defp build_opts do 55 | if Application.get_env(:bigtable, :ssl, true) do 56 | [ 57 | cred: %GRPC.Credential{ 58 | ssl: [] 59 | } 60 | ] 61 | else 62 | [] 63 | end 64 | end 65 | 66 | @spec get_endpoint() :: binary() 67 | def get_endpoint do 68 | emulator = System.get_env("BIGTABLE_EMULATOR_HOST") 69 | endpoint = Application.get_env(:bigtable, :endpoint, @default_endpoint) 70 | 71 | if emulator != nil do 72 | emulator 73 | else 74 | endpoint 75 | end 76 | end 77 | end 78 | -------------------------------------------------------------------------------- /test/admin/table.exs: -------------------------------------------------------------------------------- 1 | defmodule TableAdminTest do 2 | alias Bigtable.Admin.{GcRule, Table, TableAdmin} 3 | alias Google.Bigtable.Admin.V2 4 | 5 | use ExUnit.Case 6 | 7 | doctest TableAdmin 8 | 9 | describe("Bigtable.Admin.TableAdmin.list_tables/2") do 10 | test("should list existing tables") do 11 | {:ok, response} = TableAdmin.list_tables() 12 | 13 | expected = [ 14 | %V2.Table{ 15 | cluster_states: %{}, 16 | column_families: %{}, 17 | granularity: 0, 18 | name: "projects/dev/instances/dev/tables/dev" 19 | }, 20 | %V2.Table{ 21 | cluster_states: %{}, 22 | column_families: %{}, 23 | granularity: 0, 24 | name: "projects/dev/instances/dev/tables/test" 25 | } 26 | ] 27 | 28 | sorted = Enum.sort(response.tables, fn t1, t2 -> t1.name < t2.name end) 29 | 30 | assert sorted == expected 31 | end 32 | end 33 | 34 | describe("Bigtagble.Admin.TableAdmin.create_table") do 35 | setup do 36 | table_name = "projects/dev/instances/dev/tables/created" 37 | 38 | on_exit(fn -> 39 | {:ok, _} = TableAdmin.delete_table(table_name) 40 | end) 41 | 42 | [table_name: table_name] 43 | end 44 | 45 | test "should create a table", context do 46 | {:ok, initial} = TableAdmin.list_tables() 47 | 48 | refute matching_table?( 49 | initial.tables, 50 | context.table_name 51 | ) 52 | 53 | cf = %{ 54 | "cf1" => GcRule.max_age(30_000) 55 | } 56 | 57 | cf 58 | |> Table.build() 59 | |> TableAdmin.create_table("created") 60 | 61 | {:ok, after_insert} = TableAdmin.list_tables() 62 | 63 | assert matching_table?(after_insert.tables, context.table_name) 64 | end 65 | end 66 | 67 | defp matching_table?(tables, table_name), 68 | do: Enum.any?(tables, &(Map.get(&1, :name) == table_name)) 69 | end 70 | -------------------------------------------------------------------------------- /test/data/row_set_test.exs: -------------------------------------------------------------------------------- 1 | defmodule RowSetTest do 2 | alias Bigtable.RowSet 3 | 4 | use ExUnit.Case 5 | 
6 | doctest RowSet 7 | 8 | setup do 9 | [ 10 | request: %Google.Bigtable.V2.ReadRowsRequest{ 11 | app_profile_id: "", 12 | filter: %Google.Bigtable.V2.RowFilter{ 13 | filter: 14 | {:chain, 15 | %Google.Bigtable.V2.RowFilter.Chain{ 16 | filters: [] 17 | }} 18 | }, 19 | rows: nil, 20 | rows_limit: 0, 21 | table_name: Bigtable.Utils.configured_table_name() 22 | }, 23 | row_key: "Test#123", 24 | row_keys: ["Test#123", "Test#124"] 25 | ] 26 | end 27 | 28 | describe "RowSet.row_keys()" do 29 | test "should apply a single row key in a V2.RowSet to a V2.ReadRowsRequest", 30 | context do 31 | expected = expected_row_keys(context.row_key) 32 | 33 | result = RowSet.row_keys(context.request, context.row_key) 34 | 35 | assert result.rows == expected 36 | end 37 | 38 | test "should apply multiple row keys in a V2.RowSet to a V2.ReadRowsRequest", 39 | context do 40 | expected = expected_row_keys(context.row_keys) 41 | 42 | result = RowSet.row_keys(context.request, context.row_keys) 43 | 44 | assert result.rows == expected 45 | end 46 | 47 | test "should apply a row key to the default V2.ReadRowsRequest" do 48 | end 49 | end 50 | 51 | defp expected_row_keys(keys) when is_list(keys) do 52 | %Google.Bigtable.V2.RowSet{row_keys: keys, row_ranges: []} 53 | end 54 | 55 | defp expected_row_keys(key) do 56 | expected_row_keys([key]) 57 | end 58 | 59 | # defp expected_request(filter) do 60 | # %Google.Bigtable.V2.ReadRowsRequest{ 61 | # app_profile_id: "", 62 | # filter: filter, 63 | # rows: nil, 64 | # rows_limit: 0, 65 | # table_name: Bigtable.Utils.configured_table_name() 66 | # } 67 | # end 68 | end 69 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Bigtable 2 | 3 | Elixir client library for Google Bigtable. 4 | 5 | [![Hex.pm](https://img.shields.io/hexpm/v/bigtable.svg)](https://hex.pm/packages/bigtable) 6 | [![Build Status](https://travis-ci.org/bzzt/bigtable.svg?branch=master)](https://travis-ci.org/bzzt/bigtable) 7 | [![codecov](https://codecov.io/gh/bzzt/bigtable/branch/master/graph/badge.svg)](https://codecov.io/gh/bzzt/bigtable) 8 | [![codebeat badge](https://codebeat.co/badges/6203650d-db88-4c48-9173-948cc3404145)](https://codebeat.co/projects/github-com-bzzt-bigtable-master) 9 | [![Built with Spacemacs](https://cdn.rawgit.com/syl20bnr/spacemacs/442d025779da2f62fc86c2082703697714db6514/assets/spacemacs-badge.svg)](http://spacemacs.org) 10 | 11 | ## Documentation 12 | 13 | Documentation available at https://hexdocs.pm/bigtable/ 14 | 15 | ## Installation 16 | 17 | The package can be installed as: 18 | 19 | ```elixir 20 | def deps do 21 | [{:bigtable, "~> 0.7.0"}] 22 | end 23 | ``` 24 | 25 | ## Warning! 26 | 27 | **WORK IN PROGRESS. DOCUMENTATION MAY BE INCORRECT. 
DO NOT USE IN PRODUCTION.** 28 | 29 | ## Feature List 30 | 31 | 32 | ### Data API 33 | 34 | #### Operations: 35 | 36 | - [x] Check And Mutate Row 37 | - [x] Mutate Row 38 | - [x] Mutate Rows 39 | - [x] Read Modify Write Row 40 | - [x] Read Rows 41 | - [x] Sample Row Keys 42 | 43 | #### Mutations: 44 | 45 | - [x] Delete From Column 46 | - [x] Delete From Family 47 | - [x] Delete From Row 48 | - [x] Set Cell 49 | 50 | #### Row Sets: 51 | 52 | - [x] Row Keys 53 | - [x] Row Ranges 54 | 55 | #### Row Filters: 56 | 57 | - [x] Block All 58 | - [x] Cells Per Column Limit 59 | - [x] Cells Per Row Limit 60 | - [x] Cells Per Row Offset 61 | - [x] Chain 62 | - [x] Column Qualifier Regex 63 | - [x] Column Range 64 | - [x] Family Name Regex 65 | - [x] Pass All 66 | - [x] Row Key Regex 67 | - [x] Strip Value Transformer 68 | - [x] Timestamp Range 69 | - [x] Value Regex 70 | - [ ] Apply Label Transformer 71 | - [ ] Condition 72 | - [ ] Interleave 73 | - [ ] Row Sample 74 | - [ ] Value Range 75 | 76 | ### Admin API 77 | 78 | #### Table Admin 79 | - [x] Create Table 80 | - [x] Delete Table 81 | - [x] Get Table 82 | - [x] List Tables 83 | - [ ] Check Consistency 84 | - [ ] Drop Row Range 85 | - [ ] Generate Consistency Token 86 | - [ ] Modify Column Families 87 | -------------------------------------------------------------------------------- /guides/operations/read_rows.md: -------------------------------------------------------------------------------- 1 | # Read Rows 2 | 3 | ## All Rows 4 | 5 | ### Default Table 6 | 7 | ```elixir 8 | alias Bigtable.ReadRows 9 | 10 | ReadRows.read() 11 | ``` 12 | 13 | ### Custom Table 14 | 15 | ```elixir 16 | alias Bigtable.ReadRows 17 | 18 | ReadRows.read("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 19 | ``` 20 | 21 | ## Single Row Key 22 | 23 | ### Default Table 24 | 25 | ```elixir 26 | alias Bigtable.{ReadRows, RowSet} 27 | 28 | RowSet.row_keys("Ride#123") 29 | |> ReadRows.read() 30 | ``` 31 | 32 | ### Custom Table 33 | 34 | ```elixir 35 | alias Bigtable.{ReadRows, RowSet} 36 | 37 | ReadRows.build("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 38 | |> RowSet.row_keys("Ride#123") 39 | |> ReadRows.read() 40 | ``` 41 | 42 | ## Multiple Row Keys 43 | 44 | ### Default Table 45 | 46 | ```elixir 47 | alias Bigtable.{ReadRows, RowSet} 48 | 49 | RowSet.row_keys(["Ride#123", "Ride#124"]) 50 | |> ReadRows.read() 51 | ``` 52 | 53 | ### Custom Table 54 | 55 | ```elixir 56 | alias Bigtable.{ReadRows, RowSet} 57 | 58 | ReadRows.build("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 59 | |> RowSet.row_keys(["Ride#123", "Ride#124"]) 60 | |> ReadRows.read() 61 | ``` 62 | 63 | ## Single Row Range 64 | 65 | ### Default Table (inclusive range) 66 | 67 | ```elixir 68 | alias Bigtable.{ReadRows, RowSet} 69 | 70 | RowSet.row_range("Ride#121", "Ride#124") 71 | |> ReadRows.read() 72 | ``` 73 | 74 | ### Default Table (exclusive range) 75 | 76 | ```elixir 77 | alias Bigtable.{ReadRows, RowSet} 78 | 79 | RowSet.row_range("Ride#121", "Ride#124", false) 80 | |> ReadRows.read() 81 | ``` 82 | 83 | ## Multiple Row Ranges 84 | 85 | ### Default Table (inclusive ranges) 86 | 87 | ```elixir 88 | alias Bigtable.{ReadRows, RowSet} 89 | 90 | ranges = [ 91 | {"Ride#121", "Ride#124"}, 92 | {"Ride#128", "Ride#131"} 93 | ] 94 | 95 | RowSet.row_ranges(ranges) 96 | |> ReadRows.read() 97 | ``` 98 | 99 | ### Default Table (exclusive ranges) 100 | 101 | ```elixir 102 | alias Bigtable.{ReadRows, RowSet} 103 | 104 | ranges = [ 105 | {"Ride#121", 
"Ride#124"}, 106 | {"Ride#128", "Ride#131"} 107 | ] 108 | 109 | RowSet.row_ranges(ranges, false) 110 | |> ReadRows.read() 111 | ``` 112 | 113 | ### Custom Table 114 | 115 | ```elixir 116 | alias Bigtable.{ReadRows, RowSet} 117 | 118 | ReadRows.build("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 119 | |> RowSet.row_range("Ride#121", "Ride#124") 120 | |> ReadRows.read() 121 | ``` 122 | 123 | ## Filtering Results 124 | 125 | ```elixir 126 | alias Bigtable.{ReadRows, RowSet} 127 | alias ReadRows.Filter 128 | 129 | RowSet.row_keys("Ride#123") 130 | |> Filter.cells_per_column(5) 131 | |> ReadRows.read() 132 | ``` 133 | -------------------------------------------------------------------------------- /lib/data/read_rows.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.ReadRows do 2 | @moduledoc """ 3 | Provides functionality for to building and submitting a `Google.Bigtable.V2.ReadRowsRequest`. 4 | """ 5 | alias Bigtable.ChunkReader 6 | alias Bigtable.{Request, Utils} 7 | alias Google.Bigtable.V2 8 | alias V2.Bigtable.Stub 9 | 10 | @type response :: {:ok, ChunkReader.chunk_reader_result()} | {:error, any()} 11 | 12 | @doc """ 13 | Builds a `Google.Bigtable.V2.ReadRowsRequest` given an optional table name. 14 | 15 | Defaults to the configured table name if none is provided. 16 | 17 | ## Examples 18 | iex> table_name = "projects/project-id/instances/instance-id/tables/table-name" 19 | iex> Bigtable.ReadRows.build(table_name) 20 | %Google.Bigtable.V2.ReadRowsRequest{ 21 | app_profile_id: "", 22 | filter: nil, 23 | rows: nil, 24 | rows_limit: 0, 25 | table_name: "projects/project-id/instances/instance-id/tables/table-name" 26 | } 27 | """ 28 | @spec build(binary()) :: V2.ReadRowsRequest.t() 29 | def build(table_name \\ Utils.configured_table_name()) when is_binary(table_name) do 30 | V2.ReadRowsRequest.new(table_name: table_name, app_profile_id: "") 31 | end 32 | 33 | @doc """ 34 | Submits a `Google.Bigtable.V2.ReadRowsRequest` to Bigtable. 35 | 36 | Can be called with either a `Google.Bigtable.V2.ReadRowsRequest` or an optional table name. 
37 | """ 38 | @spec read(V2.ReadRowsRequest.t() | binary()) :: response() 39 | def read(table_name \\ Utils.configured_table_name()) 40 | 41 | def read(%V2.ReadRowsRequest{} = request) do 42 | request 43 | |> Request.process_request(&Stub.read_rows/3, stream: true) 44 | |> handle_response() 45 | end 46 | 47 | def read(table_name) when is_binary(table_name) do 48 | table_name 49 | |> build() 50 | |> read() 51 | end 52 | 53 | defp handle_response({:error, _} = response), do: response 54 | 55 | defp handle_response({:ok, response}) do 56 | response 57 | |> Enum.filter(&contains_chunks?/1) 58 | |> Enum.flat_map(fn {:ok, resp} -> resp.chunks end) 59 | |> process_chunks() 60 | end 61 | 62 | defp process_chunks(chunks) do 63 | {:ok, cr} = ChunkReader.open() 64 | 65 | chunks 66 | |> process_chunks(nil, cr) 67 | end 68 | 69 | defp process_chunks([], _result, chunk_reader) do 70 | ChunkReader.close(chunk_reader) 71 | end 72 | 73 | defp process_chunks(_chunks, {:error, _}, chunk_reader) do 74 | ChunkReader.close(chunk_reader) 75 | end 76 | 77 | defp process_chunks([h | t], _result, chunk_reader) do 78 | result = 79 | chunk_reader 80 | |> ChunkReader.process(h) 81 | 82 | process_chunks(t, result, chunk_reader) 83 | end 84 | 85 | defp contains_chunks?({:ok, response}), do: !Enum.empty?(response.chunks) 86 | end 87 | -------------------------------------------------------------------------------- /lib/data/read_modify_write_row.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.ReadModifyWriteRow do 2 | @moduledoc """ 3 | Provides functionality for building and submitting a `Google.Bigtable.V2.ReadModifyWriteRowRequest`. 4 | """ 5 | alias Bigtable.Request 6 | alias Google.Bigtable.V2 7 | alias V2.Bigtable.Stub 8 | 9 | alias Google.Bigtable.V2.{ 10 | ReadModifyWriteRowRequest, 11 | ReadModifyWriteRowResponse, 12 | ReadModifyWriteRule 13 | } 14 | 15 | @type response :: {:ok, ReadModifyWriteRowResponse.t()} | {:error, binary()} 16 | 17 | @doc """ 18 | Builds a `Google.Bigtable.V2.ReadModifyWriteRowRequest` given a row key and optional table name. 
19 | """ 20 | @spec build(binary(), binary()) :: ReadModifyWriteRowRequest.t() 21 | def build(table_name \\ Bigtable.Utils.configured_table_name(), row_key) 22 | when is_binary(table_name) and is_binary(row_key) do 23 | ReadModifyWriteRowRequest.new(table_name: table_name, app_profile_id: "", row_key: row_key) 24 | end 25 | 26 | @spec append_value( 27 | Google.Bigtable.V2.ReadModifyWriteRowRequest.t(), 28 | binary(), 29 | binary(), 30 | binary() 31 | ) :: Google.Bigtable.V2.ReadModifyWriteRowRequest.t() 32 | def append_value(%ReadModifyWriteRowRequest{} = request, family_name, column_qualifier, value) 33 | when is_binary(family_name) and is_binary(column_qualifier) and is_binary(value) do 34 | ReadModifyWriteRule.new( 35 | family_name: family_name, 36 | column_qualifier: column_qualifier, 37 | rule: {:append_value, value} 38 | ) 39 | |> add_rule(request) 40 | end 41 | 42 | @spec increment_amount( 43 | Google.Bigtable.V2.ReadModifyWriteRowRequest.t(), 44 | binary(), 45 | binary(), 46 | integer() 47 | ) :: Google.Bigtable.V2.ReadModifyWriteRowRequest.t() 48 | def increment_amount( 49 | %ReadModifyWriteRowRequest{} = request, 50 | family_name, 51 | column_qualifier, 52 | amount 53 | ) 54 | when is_binary(family_name) and is_binary(column_qualifier) and is_integer(amount) do 55 | ReadModifyWriteRule.new( 56 | family_name: family_name, 57 | column_qualifier: column_qualifier, 58 | rule: {:increment_amount, amount} 59 | ) 60 | |> add_rule(request) 61 | end 62 | 63 | @spec mutate(ReadModifyWriteRowRequest.t()) :: response() 64 | def mutate(%ReadModifyWriteRowRequest{} = request) do 65 | request 66 | |> Request.process_request(&Stub.read_modify_write_row/3, single: true) 67 | end 68 | 69 | @spec add_rule(ReadModifyWriteRule.t(), ReadModifyWriteRowRequest.t()) :: 70 | ReadModifyWriteRowRequest.t() 71 | defp add_rule(rule, %ReadModifyWriteRowRequest{} = request) do 72 | %{ 73 | request 74 | | rules: Enum.reverse([rule | request.rules]) 75 | } 76 | end 77 | end 78 | -------------------------------------------------------------------------------- /test/google_acceptance/read_rows_acceptance_test.exs: -------------------------------------------------------------------------------- 1 | defmodule TestResult do 2 | alias Bigtable.ChunkReader.ReadCell 3 | alias Google.Bigtable.V2.ReadRowsResponse.CellChunk 4 | 5 | def from_chunk(row_key, %ReadCell{} = ri) do 6 | %{ 7 | rk: row_key, 8 | fm: ri.family_name, 9 | qual: ri.qualifier, 10 | ts: ri.timestamp, 11 | value: ri.value, 12 | error: false, 13 | label: ri.label 14 | } 15 | end 16 | end 17 | 18 | defmodule GoogleAcceptanceTest do 19 | alias Bigtable.ChunkReader 20 | 21 | defmacro __using__(json: json) do 22 | json 23 | |> File.read!() 24 | |> Poison.decode!(keys: :atoms) 25 | |> Map.get(:tests) 26 | |> Enum.take(60) 27 | |> Enum.map(fn t -> 28 | quote do 29 | test(unquote(t.name)) do 30 | %{chunks: chunks, results: expected} = unquote(Macro.escape(t)) 31 | 32 | result = process_chunks(chunks) 33 | {processed_status, processed_result} = result.processed 34 | 35 | cond do 36 | expected == nil -> 37 | assert processed_result == %{} 38 | 39 | results_error?(expected) -> 40 | assert result.close_error == true or processed_status == :error 41 | 42 | true -> 43 | converted = 44 | processed_result 45 | |> convert_result() 46 | 47 | assert converted == expected 48 | end 49 | end 50 | end 51 | end) 52 | end 53 | end 54 | 55 | defmodule ReadRowsAcceptanceTest do 56 | alias Bigtable.ChunkReader 57 | alias Google.Bigtable.V2.ReadRowsResponse.CellChunk 58 | 59 | use 
ExUnit.Case 60 | use GoogleAcceptanceTest, json: "test/google_acceptance/read-rows-acceptance.json" 61 | 62 | defp process_chunks(chunks) do 63 | {:ok, cr} = ChunkReader.open() 64 | 65 | processed = 66 | Enum.reduce(chunks, :ok, fn cc, accum -> 67 | case accum do 68 | {:error, _} -> 69 | accum 70 | 71 | _ -> 72 | chunk = build_chunk(cc) 73 | 74 | ChunkReader.process(cr, chunk) 75 | end 76 | end) 77 | 78 | {close_status, _} = ChunkReader.close(cr) 79 | %{close_error: close_status != :ok, processed: processed} 80 | end 81 | 82 | defp build_chunk(cc) do 83 | cc 84 | |> Map.put(:row_status, chunk_status(cc)) 85 | |> Map.drop([:commit_row, :reset_row]) 86 | |> Map.to_list() 87 | |> CellChunk.new() 88 | end 89 | 90 | defp chunk_status(chunk) do 91 | cond do 92 | Map.get(chunk, :commit_row, false) -> 93 | {:commit_row, true} 94 | 95 | Map.get(chunk, :reset_row, false) -> 96 | {:reset_row, true} 97 | 98 | true -> 99 | nil 100 | end 101 | end 102 | 103 | defp results_error?(results), do: Enum.any?(results, &Map.get(&1, :error, false)) 104 | 105 | defp convert_result(result) do 106 | result 107 | |> Enum.flat_map(fn {row_key, read_items} -> 108 | read_items 109 | |> Enum.map(&TestResult.from_chunk(row_key, &1)) 110 | |> Enum.reverse() 111 | end) 112 | end 113 | end 114 | -------------------------------------------------------------------------------- /lib/data/check_and_mutate_row.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.CheckAndMutateRow do 2 | @moduledoc """ 3 | Provides functionality for building and submitting a `Google.Bigtable.V2.CheckAndMutateRowRequest`. 4 | """ 5 | alias Bigtable.{Request, Utils} 6 | alias Google.Bigtable.V2 7 | alias V2.Bigtable.Stub 8 | 9 | @type entries() :: V2.MutateRowsRequest.Entry | [V2.MutateRowsRequest.Entry] 10 | 11 | @doc """ 12 | Builds a `Google.Bigtable.V2.CheckAndMutateRowRequest` given a row key and optional custom table name. 13 | 14 | Defaults to the configured table name if none is provided. 
15 | 16 | ## Examples 17 | 18 | ### Default Table 19 | iex> Bigtable.CheckAndMutateRow.build("Test#123") 20 | %Google.Bigtable.V2.CheckAndMutateRowRequest{ 21 | app_profile_id: "", 22 | false_mutations: [], 23 | predicate_filter: nil, 24 | row_key: "Test#123", 25 | table_name: "projects/dev/instances/dev/tables/test", 26 | true_mutations: [] 27 | } 28 | 29 | ### Custom Table 30 | iex> table_name = "projects/project-id/instances/instance-id/tables/table-name" 31 | iex> Bigtable.CheckAndMutateRow.build(table_name, "Test#123") 32 | %Google.Bigtable.V2.CheckAndMutateRowRequest{ 33 | app_profile_id: "", 34 | false_mutations: [], 35 | predicate_filter: nil, 36 | row_key: "Test#123", 37 | table_name: "projects/project-id/instances/instance-id/tables/table-name", 38 | true_mutations: [] 39 | } 40 | """ 41 | @spec build(binary(), binary()) :: V2.CheckAndMutateRowRequest.t() 42 | def build(table_name \\ Utils.configured_table_name(), row_key) 43 | when is_binary(table_name) and is_binary(row_key) do 44 | V2.CheckAndMutateRowRequest.new(table_name: table_name, app_profile_id: "", row_key: row_key) 45 | end 46 | 47 | @spec predicate(V2.CheckAndMutateRowRequest.t(), V2.RowFilter.t()) :: 48 | V2.CheckAndMutateRowRequest.t() 49 | def predicate(%V2.CheckAndMutateRowRequest{} = request, %V2.RowFilter{} = filter) do 50 | %{request | predicate_filter: filter} 51 | end 52 | 53 | @spec if_true(V2.CheckAndMutateRowRequest.t(), entries) :: V2.CheckAndMutateRowRequest.t() 54 | def if_true(%V2.CheckAndMutateRowRequest{} = request, mutations) do 55 | %{request | true_mutations: extract_mutations(mutations)} 56 | end 57 | 58 | @spec if_false(V2.CheckAndMutateRowRequest.t(), entries()) :: V2.CheckAndMutateRowRequest.t() 59 | def if_false(%V2.CheckAndMutateRowRequest{} = request, mutations) do 60 | %{request | false_mutations: extract_mutations(mutations)} 61 | end 62 | 63 | @doc """ 64 | Submits a `Google.Bigtable.V2.CheckAndMutateRowRequest` to Bigtable. 
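## Examples

A minimal usage sketch (not a doctest), assuming `filter` is a previously built `Google.Bigtable.V2.RowFilter` matching the cells to check and that the target table has a `"cf1"` column family:

    mutation =
      Bigtable.Mutations.build("Test#123")
      |> Bigtable.Mutations.set_cell("cf1", "matched", "true")

    "Test#123"
    |> Bigtable.CheckAndMutateRow.build()
    |> Bigtable.CheckAndMutateRow.predicate(filter)
    |> Bigtable.CheckAndMutateRow.if_true(mutation)
    |> Bigtable.CheckAndMutateRow.mutate()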
65 | """ 66 | @spec mutate(V2.CheckAndMutateRowRequest.t()) :: 67 | {:ok, [V2.CheckAndMutateRowResponse]} | {:error, binary()} 68 | def mutate(%V2.CheckAndMutateRowRequest{} = request) do 69 | request 70 | |> Request.process_request(&Stub.check_and_mutate_row/3, single: true) 71 | end 72 | 73 | @spec extract_mutations(entries()) :: [V2.Mutation.t()] 74 | defp extract_mutations(entries) do 75 | entries 76 | |> List.wrap() 77 | |> Enum.flat_map(&Map.get(&1, :mutations)) 78 | end 79 | end 80 | -------------------------------------------------------------------------------- /test/data/read_rows_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ReadRowsTest do 2 | alias Bigtable.{ChunkReader, MutateRow, MutateRows, Mutations, ReadRows} 3 | use ExUnit.Case 4 | 5 | doctest ReadRows 6 | 7 | describe "ReadRows.build()" do 8 | test "should build a ReadRowsRequest with configured table" do 9 | assert ReadRows.build() == expected_request() 10 | end 11 | 12 | test "should build a ReadRowsRequest with custom table" do 13 | table_name = "custom-table" 14 | 15 | assert ReadRows.build(table_name) == expected_request(table_name) 16 | end 17 | end 18 | 19 | describe "ReadRows.read()" do 20 | setup do 21 | assert ReadRows.read() == {:ok, %{}} 22 | 23 | row_keys = ["Test#123", "Test#234"] 24 | 25 | on_exit(fn -> 26 | mutations = 27 | Enum.map(row_keys, fn key -> 28 | entry = Mutations.build(key) 29 | 30 | entry 31 | |> Mutations.delete_from_row() 32 | end) 33 | 34 | mutations 35 | |> MutateRows.mutate() 36 | end) 37 | 38 | [ 39 | row_keys: row_keys, 40 | column_family: "cf1", 41 | column_qualifier: "column", 42 | value: "value" 43 | ] 44 | end 45 | 46 | test "should read from an empty table" do 47 | assert ReadRows.read() == {:ok, %{}} 48 | end 49 | 50 | @tag :wip 51 | test "should read from a table with a single record", context do 52 | [key | _] = context.row_keys 53 | 54 | entry = Mutations.build(key) 55 | 56 | entry 57 | |> Mutations.set_cell(context.column_family, context.column_qualifier, context.value, 0) 58 | |> MutateRow.mutate() 59 | 60 | expected = {:ok, expected_response([key], context)} 61 | 62 | assert ReadRows.read() == expected 63 | end 64 | 65 | test "should read from a table with multiple records", context do 66 | entries = 67 | Enum.map(context.row_keys, fn key -> 68 | entry = Mutations.build(key) 69 | 70 | entry 71 | |> Mutations.set_cell(context.column_family, context.column_qualifier, context.value, 0) 72 | end) 73 | 74 | entries 75 | |> MutateRows.mutate() 76 | 77 | expected = {:ok, expected_response(["Test#123", "Test#234"], context)} 78 | 79 | assert ReadRows.read() == expected 80 | end 81 | end 82 | 83 | defp expected_response(row_keys, context) do 84 | for row_key <- row_keys, into: %{} do 85 | {row_key, 86 | [ 87 | %ChunkReader.ReadCell{ 88 | family_name: %Google.Protobuf.StringValue{value: context.column_family}, 89 | label: "", 90 | qualifier: %Google.Protobuf.BytesValue{value: context.column_qualifier}, 91 | row_key: row_key, 92 | timestamp: 0, 93 | value: context.value 94 | } 95 | ]} 96 | end 97 | end 98 | 99 | defp expected_request(table_name \\ Bigtable.Utils.configured_table_name()) do 100 | %Google.Bigtable.V2.ReadRowsRequest{ 101 | app_profile_id: "", 102 | filter: nil, 103 | rows: nil, 104 | rows_limit: 0, 105 | table_name: table_name 106 | } 107 | end 108 | end 109 | -------------------------------------------------------------------------------- /test/admin/gc_rule_test.exs: 
-------------------------------------------------------------------------------- 1 | defmodule GcRuleTest do 2 | alias Bigtable.Admin.{GcRule, Table, TableAdmin} 3 | alias Google.Bigtable.Admin.V2 4 | alias Google.Protobuf.Duration 5 | use ExUnit.Case 6 | 7 | setup do 8 | table_name = "projects/dev/instances/dev/tables/gc_rule" 9 | 10 | on_exit(fn -> 11 | {:ok, _} = TableAdmin.delete_table(table_name) 12 | end) 13 | 14 | [table_name: table_name] 15 | end 16 | 17 | describe("Bigtagble.Admin.GcRule.max_age/1") do 18 | test "should create a table with a max age gc rule", context do 19 | cf = %{ 20 | "cf1" => GcRule.max_age(2_592_000_500) 21 | } 22 | 23 | cf 24 | |> Table.build() 25 | |> TableAdmin.create_table("gc_rule") 26 | 27 | expected = %{ 28 | "cf1" => %V2.ColumnFamily{ 29 | gc_rule: %V2.GcRule{ 30 | rule: {:max_age, %Duration{nanos: 500_000_000, seconds: 2_592_000}} 31 | } 32 | } 33 | } 34 | 35 | {:ok, response} = TableAdmin.get_table(context.table_name) 36 | 37 | assert response.column_families == expected 38 | end 39 | end 40 | 41 | describe("Bigtable.Admin.GcRule.max_num_versions/1") do 42 | test "should create a table with a max version gc rule", context do 43 | cf = %{ 44 | "cf1" => GcRule.max_num_versions(1) 45 | } 46 | 47 | cf 48 | |> Table.build() 49 | |> TableAdmin.create_table("gc_rule") 50 | 51 | expected = %{ 52 | "cf1" => %V2.ColumnFamily{ 53 | gc_rule: %V2.GcRule{ 54 | rule: {:max_num_versions, 1} 55 | } 56 | } 57 | } 58 | 59 | {:ok, response} = TableAdmin.get_table(context.table_name) 60 | assert response.column_families == expected 61 | end 62 | end 63 | 64 | describe("Bigtable.Admin.GcRule.union/1") do 65 | test "should create a table with a union gc rule", context do 66 | rules = [ 67 | GcRule.max_num_versions(1), 68 | GcRule.max_age(3000) 69 | ] 70 | 71 | cf = %{ 72 | "cf1" => GcRule.union(rules) 73 | } 74 | 75 | cf 76 | |> Table.build() 77 | |> TableAdmin.create_table("gc_rule") 78 | 79 | expected = %{ 80 | "cf1" => %V2.ColumnFamily{ 81 | gc_rule: %V2.GcRule{ 82 | rule: 83 | {:union, 84 | %V2.GcRule.Union{ 85 | rules: rules 86 | }} 87 | } 88 | } 89 | } 90 | 91 | {:ok, response} = TableAdmin.get_table(context.table_name) 92 | assert response.column_families == expected 93 | end 94 | end 95 | 96 | describe("Bigtable.Admin.GcRule.intersection/1") do 97 | test "should create a table with an intersection gc rule", context do 98 | rules = [ 99 | GcRule.max_num_versions(1), 100 | GcRule.max_age(3000) 101 | ] 102 | 103 | cf = %{ 104 | "cf1" => GcRule.intersection(rules) 105 | } 106 | 107 | cf 108 | |> Table.build() 109 | |> TableAdmin.create_table("gc_rule") 110 | 111 | expected = %{ 112 | "cf1" => %V2.ColumnFamily{ 113 | gc_rule: %V2.GcRule{ 114 | rule: 115 | {:intersection, 116 | %V2.GcRule.Intersection{ 117 | rules: rules 118 | }} 119 | } 120 | } 121 | } 122 | 123 | {:ok, response} = TableAdmin.get_table(context.table_name) 124 | assert response.column_families == expected 125 | end 126 | end 127 | end 128 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.MixProject do 2 | use Mix.Project 3 | 4 | alias Bigtable.{Admin, Connection} 5 | 6 | @version "0.7.0" 7 | 8 | def project do 9 | [ 10 | app: :bigtable, 11 | version: @version, 12 | package: package(), 13 | elixir: "~> 1.7", 14 | elixirc_paths: elixirc_paths(Mix.env()), 15 | start_permanent: Mix.env() == :prod, 16 | name: "Bigtable", 17 | homepage_url: 
"https://github.com/bzzt/bigtable", 18 | source_url: "https://github.com/bzzt/bigtable", 19 | deps: deps(), 20 | docs: docs(), 21 | aliases: aliases(), 22 | test_coverage: [tool: ExCoveralls], 23 | preferred_cli_env: [ 24 | "coveralls.json": :test, 25 | "coveralls.html": :test, 26 | coverage: :test 27 | ] 28 | ] 29 | end 30 | 31 | defp package() do 32 | [ 33 | description: "Elixir client library for Google Bigtable.", 34 | maintainers: ["Jason Scott", "Philip Prophet", "Daniel Fredriksson"], 35 | licenses: ["MIT"], 36 | links: %{github: "https://github.com/bzzt/bigtable"} 37 | ] 38 | end 39 | 40 | # Run "mix help compile.app" to learn about applications. 41 | def application do 42 | [ 43 | mod: {Bigtable, []}, 44 | extra_applications: [:logger, :grpc, :poolboy] 45 | ] 46 | end 47 | 48 | defp elixirc_paths(:test), do: ["lib", "test/support"] 49 | defp elixirc_paths(_), do: ["lib"] 50 | 51 | defp docs do 52 | [ 53 | source_ref: "v#{@version}", 54 | extra_section: "GUIDES", 55 | main: "overview", 56 | formatters: ["html", "epub"], 57 | groups_for_modules: groups_for_modules(), 58 | extras: extras(), 59 | groups_for_extras: groups_for_extras(), 60 | nest_modules_by_prefix: [ 61 | Bigtable.ChunkReader 62 | ] 63 | ] 64 | end 65 | 66 | defp extras do 67 | [ 68 | "guides/introduction/overview.md", 69 | "guides/introduction/installation.md" 70 | ] 71 | end 72 | 73 | defp groups_for_extras do 74 | [ 75 | Introduction: ~r/guides\/introduction\/.?/ 76 | ] 77 | end 78 | 79 | defp groups_for_modules do 80 | [ 81 | Admin: [ 82 | Admin.GcRule, 83 | Admin.Modification, 84 | Admin.Table, 85 | Admin.TableAdmin 86 | ], 87 | Connection: [ 88 | Connection 89 | ], 90 | Data: [ 91 | Bigtable.CheckAndMutateRow, 92 | Bigtable.ChunkReader, 93 | Bigtable.ChunkReader.ReadCell, 94 | Bigtable.MutateRow, 95 | Bigtable.MutateRows, 96 | Bigtable.Mutations, 97 | Bigtable.ReadModifyWriteRow, 98 | Bigtable.ReadRows, 99 | Bigtable.RowFilter, 100 | Bigtable.RowSet, 101 | Bigtable.SampleRowKeys 102 | ] 103 | ] 104 | end 105 | 106 | defp aliases do 107 | [ 108 | coverage: [ 109 | "coveralls.json" 110 | ] 111 | ] 112 | end 113 | 114 | # Run "mix help deps" to learn about dependencies. 
115 | defp deps do 116 | [ 117 | {:google_protos, "~> 0.1"}, 118 | {:goth, "~> 0.11.0"}, 119 | {:grpc, "~> 0.3.1"}, 120 | {:lens, "~> 0.8.0"}, 121 | {:poison, "~> 3.1"}, 122 | {:poolboy, "~> 1.5"}, 123 | {:protobuf, "~> 0.5.3"}, 124 | # Dev Deps 125 | {:credo, "~> 1.0.0", only: [:dev, :test, :ci], runtime: false}, 126 | {:dialyxir, "~> 1.0.0-rc.6", only: [:dev], runtime: false}, 127 | {:excoveralls, "~> 0.10", only: [:dev, :test, :ci]}, 128 | {:ex_doc, "~> 0.19", only: :dev, runtime: false}, 129 | {:mix_test_watch, "~> 0.8", only: :dev, runtime: false} 130 | ] 131 | end 132 | end 133 | -------------------------------------------------------------------------------- /test/data/read_modify_write_row_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ReadModifyWriteRowTest do 2 | @moduledoc false 3 | alias Bigtable.{MutateRow, Mutations, ReadModifyWriteRow, ReadRows, RowFilter} 4 | use ExUnit.Case 5 | 6 | doctest ReadModifyWriteRow 7 | 8 | setup do 9 | assert ReadRows.read() == {:ok, %{}} 10 | 11 | row_key = "Test#123" 12 | 13 | on_exit(fn -> 14 | mutation = row_key |> Mutations.build() |> Mutations.delete_from_row() 15 | 16 | mutation |> MutateRow.mutate() 17 | end) 18 | 19 | [ 20 | family: "cf1", 21 | row_key: row_key 22 | ] 23 | end 24 | 25 | describe "ReadModifyWriteRow.mutate/2" do 26 | test "should increment an existing numerical value", context do 27 | qual = "num" 28 | val = <<1::integer-signed-64>> 29 | 30 | {:ok, _result} = 31 | context.row_key 32 | |> Mutations.build() 33 | |> Mutations.set_cell(context.family, qual, val, 0) 34 | |> MutateRow.mutate() 35 | 36 | {:ok, _result} = 37 | context.row_key 38 | |> ReadModifyWriteRow.build() 39 | |> ReadModifyWriteRow.increment_amount(context.family, qual, 1) 40 | |> ReadModifyWriteRow.mutate() 41 | 42 | expected = <<0, 0, 0, 0, 0, 0, 0, 2>> 43 | 44 | {:ok, result} = 45 | ReadRows.build() 46 | |> RowFilter.cells_per_column(1) 47 | |> ReadRows.read() 48 | 49 | new_value = result |> Map.values() |> List.flatten() |> List.first() |> Map.get(:value) 50 | 51 | assert new_value == expected 52 | end 53 | 54 | test "should increment a non existing column", context do 55 | qual = "num" 56 | 57 | {:ok, _result} = 58 | context.row_key 59 | |> ReadModifyWriteRow.build() 60 | |> ReadModifyWriteRow.increment_amount(context.family, qual, 3) 61 | |> ReadModifyWriteRow.mutate() 62 | 63 | expected = <<0, 0, 0, 0, 0, 0, 0, 3>> 64 | 65 | {:ok, result} = 66 | ReadRows.build() 67 | |> RowFilter.cells_per_column(1) 68 | |> ReadRows.read() 69 | 70 | new_value = result |> Map.values() |> List.flatten() |> List.first() |> Map.get(:value) 71 | 72 | assert new_value == expected 73 | end 74 | 75 | test "should append a string to an existing value", context do 76 | qual = "string" 77 | val = "hello" 78 | 79 | {:ok, _result} = 80 | context.row_key 81 | |> Mutations.build() 82 | |> Mutations.set_cell(context.family, qual, val, 0) 83 | |> MutateRow.mutate() 84 | 85 | {:ok, _result} = 86 | context.row_key 87 | |> ReadModifyWriteRow.build() 88 | |> ReadModifyWriteRow.append_value(context.family, qual, "world") 89 | |> ReadModifyWriteRow.mutate() 90 | 91 | expected = "helloworld" 92 | 93 | {:ok, result} = 94 | ReadRows.build() 95 | |> RowFilter.cells_per_column(1) 96 | |> ReadRows.read() 97 | 98 | new_value = result |> Map.values() |> List.flatten() |> List.first() |> Map.get(:value) 99 | 100 | assert new_value == expected 101 | end 102 | 103 | test "should append a string to a non existing column", context do 104 | qual = 
"string" 105 | 106 | {:ok, _result} = 107 | context.row_key 108 | |> ReadModifyWriteRow.build() 109 | |> ReadModifyWriteRow.append_value(context.family, qual, "world") 110 | |> ReadModifyWriteRow.mutate() 111 | 112 | expected = "world" 113 | 114 | {:ok, result} = 115 | ReadRows.build() 116 | |> RowFilter.cells_per_column(1) 117 | |> ReadRows.read() 118 | 119 | new_value = result |> Map.values() |> List.flatten() |> List.first() |> Map.get(:value) 120 | 121 | assert new_value == expected 122 | end 123 | end 124 | end 125 | -------------------------------------------------------------------------------- /test/data/mutations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule MutationsTest do 2 | alias Bigtable.Mutations 3 | alias Google.Bigtable.V2.MutateRowsRequest.Entry 4 | 5 | use ExUnit.Case 6 | 7 | doctest Mutations 8 | 9 | setup do 10 | [ 11 | entry: Mutations.build("Test#123"), 12 | row_key: "Test#123", 13 | family_name: "testFamily", 14 | column_qualifier: "testColumn", 15 | value: "test" 16 | ] 17 | end 18 | 19 | describe "Mutations.build " do 20 | test "should build a MutateRowsRequest Entry struct", context do 21 | expected = %Entry{ 22 | mutations: [], 23 | row_key: context.row_key 24 | } 25 | 26 | assert context.entry == expected 27 | end 28 | end 29 | 30 | describe "Mutations.set_cell" do 31 | test "should return a SetCell struct", context do 32 | family_name = context.family_name 33 | column_qualifier = context.column_qualifier 34 | value = context.value 35 | 36 | expected = %Entry{ 37 | mutations: [ 38 | %Google.Bigtable.V2.Mutation{ 39 | mutation: 40 | {:set_cell, 41 | %Google.Bigtable.V2.Mutation.SetCell{ 42 | column_qualifier: column_qualifier, 43 | family_name: family_name, 44 | timestamp_micros: -1, 45 | value: value 46 | }} 47 | } 48 | ], 49 | row_key: context.row_key 50 | } 51 | 52 | result = context.entry |> Mutations.set_cell(family_name, column_qualifier, value) 53 | 54 | assert result == expected 55 | end 56 | end 57 | 58 | describe "Mutations.delete_from_column" do 59 | test "should return a DeleteFromColumn struct", context do 60 | family_name = context.family_name 61 | column_qualifier = context.column_qualifier 62 | 63 | expected = %Google.Bigtable.V2.MutateRowsRequest.Entry{ 64 | mutations: [ 65 | %Google.Bigtable.V2.Mutation{ 66 | mutation: 67 | {:delete_from_column, 68 | %Google.Bigtable.V2.Mutation.DeleteFromColumn{ 69 | family_name: family_name, 70 | column_qualifier: column_qualifier, 71 | time_range: %Google.Bigtable.V2.TimestampRange{ 72 | end_timestamp_micros: 0, 73 | start_timestamp_micros: 0 74 | } 75 | }} 76 | } 77 | ], 78 | row_key: context.row_key 79 | } 80 | 81 | result = context.entry |> Mutations.delete_from_column(family_name, column_qualifier) 82 | 83 | assert result == expected 84 | end 85 | end 86 | 87 | describe "Mutations.delete_from_family" do 88 | test "should return a DeleteFromFamily struct", context do 89 | family_name = context.family_name 90 | 91 | expected = %Google.Bigtable.V2.MutateRowsRequest.Entry{ 92 | mutations: [ 93 | %Google.Bigtable.V2.Mutation{ 94 | mutation: 95 | {:delete_from_family, 96 | %Google.Bigtable.V2.Mutation.DeleteFromFamily{family_name: "testFamily"}} 97 | } 98 | ], 99 | row_key: "Test#123" 100 | } 101 | 102 | result = context.entry |> Mutations.delete_from_family(family_name) 103 | 104 | assert result == expected 105 | end 106 | end 107 | 108 | describe "Mutations.delete_from_row" do 109 | test "should return a DeleteFromRow struct", context do 110 | 
expected = %Google.Bigtable.V2.MutateRowsRequest.Entry{ 111 | mutations: [ 112 | %Google.Bigtable.V2.Mutation{ 113 | mutation: {:delete_from_row, %Google.Bigtable.V2.Mutation.DeleteFromRow{}} 114 | } 115 | ], 116 | row_key: "Test#123" 117 | } 118 | 119 | result = context.entry |> Mutations.delete_from_row() 120 | 121 | assert result == expected 122 | end 123 | end 124 | end 125 | -------------------------------------------------------------------------------- /lib/grpc/admin/instance.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Bigtable.Admin.V2.Instance do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | name: String.t(), 7 | display_name: String.t(), 8 | state: integer, 9 | type: integer, 10 | labels: %{String.t() => String.t()} 11 | } 12 | defstruct [:name, :display_name, :state, :type, :labels] 13 | 14 | field :name, 1, type: :string 15 | field :display_name, 2, type: :string 16 | field :state, 3, type: Google.Bigtable.Admin.V2.Instance.State, enum: true 17 | field :type, 4, type: Google.Bigtable.Admin.V2.Instance.Type, enum: true 18 | field :labels, 5, repeated: true, type: Google.Bigtable.Admin.V2.Instance.LabelsEntry, map: true 19 | end 20 | 21 | defmodule Google.Bigtable.Admin.V2.Instance.LabelsEntry do 22 | @moduledoc false 23 | use Protobuf, map: true, syntax: :proto3 24 | 25 | @type t :: %__MODULE__{ 26 | key: String.t(), 27 | value: String.t() 28 | } 29 | defstruct [:key, :value] 30 | 31 | field :key, 1, type: :string 32 | field :value, 2, type: :string 33 | end 34 | 35 | defmodule Google.Bigtable.Admin.V2.Instance.State do 36 | @moduledoc false 37 | use Protobuf, enum: true, syntax: :proto3 38 | 39 | field :STATE_NOT_KNOWN, 0 40 | field :READY, 1 41 | field :CREATING, 2 42 | end 43 | 44 | defmodule Google.Bigtable.Admin.V2.Instance.Type do 45 | @moduledoc false 46 | use Protobuf, enum: true, syntax: :proto3 47 | 48 | field :TYPE_UNSPECIFIED, 0 49 | field :PRODUCTION, 1 50 | field :DEVELOPMENT, 2 51 | end 52 | 53 | defmodule Google.Bigtable.Admin.V2.Cluster do 54 | @moduledoc false 55 | use Protobuf, syntax: :proto3 56 | 57 | @type t :: %__MODULE__{ 58 | name: String.t(), 59 | location: String.t(), 60 | state: integer, 61 | serve_nodes: integer, 62 | default_storage_type: integer 63 | } 64 | defstruct [:name, :location, :state, :serve_nodes, :default_storage_type] 65 | 66 | field :name, 1, type: :string 67 | field :location, 2, type: :string 68 | field :state, 3, type: Google.Bigtable.Admin.V2.Cluster.State, enum: true 69 | field :serve_nodes, 4, type: :int32 70 | field :default_storage_type, 5, type: Google.Bigtable.Admin.V2.StorageType, enum: true 71 | end 72 | 73 | defmodule Google.Bigtable.Admin.V2.Cluster.State do 74 | @moduledoc false 75 | use Protobuf, enum: true, syntax: :proto3 76 | 77 | field :STATE_NOT_KNOWN, 0 78 | field :READY, 1 79 | field :CREATING, 2 80 | field :RESIZING, 3 81 | field :DISABLED, 4 82 | end 83 | 84 | defmodule Google.Bigtable.Admin.V2.AppProfile do 85 | @moduledoc false 86 | use Protobuf, syntax: :proto3 87 | 88 | @type t :: %__MODULE__{ 89 | routing_policy: {atom, any}, 90 | name: String.t(), 91 | etag: String.t(), 92 | description: String.t() 93 | } 94 | defstruct [:routing_policy, :name, :etag, :description] 95 | 96 | oneof :routing_policy, 0 97 | field :name, 1, type: :string 98 | field :etag, 2, type: :string 99 | field :description, 3, type: :string 100 | 101 | field :multi_cluster_routing_use_any, 5, 102 | type: 
Google.Bigtable.Admin.V2.AppProfile.MultiClusterRoutingUseAny, 103 | oneof: 0 104 | 105 | field :single_cluster_routing, 6, 106 | type: Google.Bigtable.Admin.V2.AppProfile.SingleClusterRouting, 107 | oneof: 0 108 | end 109 | 110 | defmodule Google.Bigtable.Admin.V2.AppProfile.MultiClusterRoutingUseAny do 111 | @moduledoc false 112 | use Protobuf, syntax: :proto3 113 | 114 | defstruct [] 115 | end 116 | 117 | defmodule Google.Bigtable.Admin.V2.AppProfile.SingleClusterRouting do 118 | @moduledoc false 119 | use Protobuf, syntax: :proto3 120 | 121 | @type t :: %__MODULE__{ 122 | cluster_id: String.t(), 123 | allow_transactional_writes: boolean 124 | } 125 | defstruct [:cluster_id, :allow_transactional_writes] 126 | 127 | field :cluster_id, 1, type: :string 128 | field :allow_transactional_writes, 2, type: :bool 129 | end 130 | -------------------------------------------------------------------------------- /README_old.md: -------------------------------------------------------------------------------- 1 | # Table of Contents 2 | 3 | - [Using the BT Emulator](#using-the-bt-emulator) 4 | - [Installing the emulator](#installing-the-emulator) 5 | - [Starting the emulator](#starting-the-emulator) 6 | - [Bigtable Operations](#bigtable-operations) 7 | - [Read Rows](#read-rows) 8 | - [All Rows](#all-rows) 9 | - [Default Table](#default-table) 10 | - [Custom Table](#custom-table) 11 | - [Single Row Key](#single-row-key) 12 | - [Default Table](#default-table-1) 13 | - [Custom Table](#custom-table-1) 14 | - [Multiple Row Keys](#multiple-row-keys) 15 | - [Default Table](#default-table-2) 16 | - [Custom Table](#custom-table-2) 17 | - [Single Row Range](#single-row-range) 18 | - [Default Table (inclusive range)](#default-table-inclusive-range) 19 | - [Default Table (exclusive range)](#default-table-exclusive-range) 20 | - [Multiple Row Ranges](#multiple-row-ranges) 21 | - [Default Table (inclusive ranges)](#default-table-inclusive-ranges) 22 | - [Default Table (exclusive ranges)](#default-table-exclusive-ranges) 23 | - [Custom Table](#custom-table-3) 24 | - [Filtering Results](#filtering-results) 25 | - [Mutations](#mutations) 26 | - [Single Row](#single-row) 27 | - [SetCell](#setcell) 28 | - [DeleteFromColumn](#deletefromcolumn) 29 | - [DeleteFromFamily](#deletefromfamily) 30 | - [DeleteFromRow](#deletefromrow) 31 | 32 | # Using the BT Emulator 33 | 34 | Google's [bigtable emulator](https://cloud.google.com/bigtable/docs/emulator) can be used for easy local development and testing 35 | 36 | ## Installing the emulator 37 | 38 | ```bash 39 | gcloud components update 40 | gcloud components install beta cbt 41 | ``` 42 | 43 | ## Starting the emulator 44 | 45 | ```bash 46 | gcloud beta emulators bigtable start & $(gcloud beta emulators bigtable env-init) 47 | cbt createtable ride && cbt createfamily ride ride 48 | ``` 49 | 50 | # Bigtable Operations 51 | 52 | ## Read Rows 53 | 54 | ### All Rows 55 | 56 | #### Default Table 57 | 58 | ```elixir 59 | alias Bigtable.ReadRows 60 | 61 | ReadRows.read() 62 | ``` 63 | 64 | #### Custom Table 65 | 66 | ```elixir 67 | alias Bigtable.ReadRows 68 | 69 | ReadRows.read("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 70 | ``` 71 | 72 | ### Single Row Key 73 | 74 | #### Default Table 75 | 76 | ```elixir 77 | alias Bigtable.{ReadRows, RowSet} 78 | 79 | RowSet.row_keys("Ride#123") 80 | |> ReadRows.read() 81 | ``` 82 | 83 | #### Custom Table 84 | 85 | ```elixir 86 | alias Bigtable.{ReadRows, RowSet} 87 | 88 | 
ReadRows.build("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 89 | |> RowSet.row_keys("Ride#123") 90 | |> ReadRows.read() 91 | ``` 92 | 93 | ### Multiple Row Keys 94 | 95 | #### Default Table 96 | 97 | ```elixir 98 | alias Bigtable.{ReadRows, RowSet} 99 | 100 | RowSet.row_keys(["Ride#123", "Ride#124"]) 101 | |> ReadRows.read() 102 | ``` 103 | 104 | #### Custom Table 105 | 106 | ```elixir 107 | alias Bigtable.{ReadRows, RowSet} 108 | 109 | ReadRows.build("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 110 | |> RowSet.row_keys(["Ride#123", "Ride#124"]) 111 | |> ReadRows.read() 112 | ``` 113 | 114 | ### Single Row Range 115 | 116 | #### Default Table (inclusive range) 117 | 118 | ```elixir 119 | alias Bigtable.{ReadRows, RowSet} 120 | 121 | RowSet.row_range("Ride#121", "Ride#124") 122 | |> ReadRows.read() 123 | ``` 124 | 125 | #### Default Table (exclusive range) 126 | 127 | ```elixir 128 | alias Bigtable.{ReadRows, RowSet} 129 | 130 | RowSet.row_range("Ride#121", "Ride#124", false) 131 | |> ReadRows.read() 132 | ``` 133 | 134 | ### Multiple Row Ranges 135 | 136 | #### Default Table (inclusive ranges) 137 | 138 | ```elixir 139 | alias Bigtable.{ReadRows, RowSet} 140 | 141 | ranges = [ 142 | {"Ride#121", "Ride#124"}, 143 | {"Ride#128", "Ride#131"} 144 | ] 145 | 146 | RowSet.row_ranges(ranges) 147 | |> ReadRows.read() 148 | ``` 149 | 150 | #### Default Table (exclusive ranges) 151 | 152 | ```elixir 153 | alias Bigtable.{ReadRows, RowSet} 154 | 155 | ranges = [ 156 | {"Ride#121", "Ride#124"}, 157 | {"Ride#128", "Ride#131"} 158 | ] 159 | 160 | RowSet.row_ranges(ranges, false) 161 | |> ReadRows.read() 162 | ``` 163 | 164 | #### Custom Table 165 | 166 | ```elixir 167 | alias Bigtable.{ReadRows, RowSet} 168 | 169 | ReadRows.build("projects/[project_id]/instances/[instance_id]/tables/[table_name]") 170 | |> RowSet.row_range("Ride#121", "Ride#124") 171 | |> ReadRows.read() 172 | ``` 173 | 174 | ### Filtering Results 175 | 176 | ```elixir 177 | alias Bigtable.{ReadRows, RowSet} 178 | alias ReadRows.Filter 179 | 180 | RowSet.row_keys("Ride#123") 181 | |> Filter.cells_per_column(5) 182 | |> ReadRows.read() 183 | ``` 184 | 185 | ## Mutations 186 | 187 | ### Single Row 188 | 189 | #### SetCell 190 | 191 | ```elixir 192 | alias Bigtable.{Mutations, MutateRow} 193 | 194 | Mutations.build("Ride#123") 195 | |> Mutations.set_cell("ride", "foo", "bar") 196 | |> MutateRow.mutate 197 | ``` 198 | 199 | #### DeleteFromColumn 200 | 201 | ```elixir 202 | alias Bigtable.{Mutations, MutateRow} 203 | 204 | Mutations.build("Ride#123") 205 | |> Mutations.delete_from_column("ride", "foo") 206 | |> MutateRow.mutate 207 | ``` 208 | 209 | #### DeleteFromFamily 210 | 211 | ```elixir 212 | alias Bigtable.{Mutations, MutateRow} 213 | 214 | Mutations.build("Ride#123") 215 | |> Mutations.delete_from_family("ride") 216 | |> MutateRow.mutate 217 | ``` 218 | 219 | #### DeleteFromRow 220 | 221 | ```elixir 222 | alias Bigtable.{Mutations, MutateRow} 223 | 224 | Mutations.build("Ride#123") 225 | |> Mutations.delete_from_row() 226 | |> MutateRow.mutate 227 | ``` 228 | -------------------------------------------------------------------------------- /lib/grpc/admin/table.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Bigtable.Admin.V2.Table do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | name: String.t(), 7 | cluster_states: %{String.t() => Google.Bigtable.Admin.V2.Table.ClusterState.t()}, 8 
| column_families: %{String.t() => Google.Bigtable.Admin.V2.ColumnFamily.t()}, 9 | granularity: integer 10 | } 11 | defstruct [:name, :cluster_states, :column_families, :granularity] 12 | 13 | field :name, 1, type: :string 14 | 15 | field :cluster_states, 2, 16 | repeated: true, 17 | type: Google.Bigtable.Admin.V2.Table.ClusterStatesEntry, 18 | map: true 19 | 20 | field :column_families, 3, 21 | repeated: true, 22 | type: Google.Bigtable.Admin.V2.Table.ColumnFamiliesEntry, 23 | map: true 24 | 25 | field :granularity, 4, type: Google.Bigtable.Admin.V2.Table.TimestampGranularity, enum: true 26 | end 27 | 28 | defmodule Google.Bigtable.Admin.V2.Table.ClusterState do 29 | @moduledoc false 30 | use Protobuf, syntax: :proto3 31 | 32 | @type t :: %__MODULE__{ 33 | replication_state: integer 34 | } 35 | defstruct [:replication_state] 36 | 37 | field :replication_state, 1, 38 | type: Google.Bigtable.Admin.V2.Table.ClusterState.ReplicationState, 39 | enum: true 40 | end 41 | 42 | defmodule Google.Bigtable.Admin.V2.Table.ClusterState.ReplicationState do 43 | @moduledoc false 44 | use Protobuf, enum: true, syntax: :proto3 45 | 46 | field :STATE_NOT_KNOWN, 0 47 | field :INITIALIZING, 1 48 | field :PLANNED_MAINTENANCE, 2 49 | field :UNPLANNED_MAINTENANCE, 3 50 | field :READY, 4 51 | end 52 | 53 | defmodule Google.Bigtable.Admin.V2.Table.ClusterStatesEntry do 54 | @moduledoc false 55 | use Protobuf, map: true, syntax: :proto3 56 | 57 | @type t :: %__MODULE__{ 58 | key: String.t(), 59 | value: Google.Bigtable.Admin.V2.Table.ClusterState.t() 60 | } 61 | defstruct [:key, :value] 62 | 63 | field :key, 1, type: :string 64 | field :value, 2, type: Google.Bigtable.Admin.V2.Table.ClusterState 65 | end 66 | 67 | defmodule Google.Bigtable.Admin.V2.Table.ColumnFamiliesEntry do 68 | @moduledoc false 69 | use Protobuf, map: true, syntax: :proto3 70 | 71 | @type t :: %__MODULE__{ 72 | key: String.t(), 73 | value: Google.Bigtable.Admin.V2.ColumnFamily.t() 74 | } 75 | defstruct [:key, :value] 76 | 77 | field :key, 1, type: :string 78 | field :value, 2, type: Google.Bigtable.Admin.V2.ColumnFamily 79 | end 80 | 81 | defmodule Google.Bigtable.Admin.V2.Table.TimestampGranularity do 82 | @moduledoc false 83 | use Protobuf, enum: true, syntax: :proto3 84 | 85 | field :TIMESTAMP_GRANULARITY_UNSPECIFIED, 0 86 | field :MILLIS, 1 87 | end 88 | 89 | defmodule Google.Bigtable.Admin.V2.Table.View do 90 | @moduledoc false 91 | use Protobuf, enum: true, syntax: :proto3 92 | 93 | field :VIEW_UNSPECIFIED, 0 94 | field :NAME_ONLY, 1 95 | field :SCHEMA_VIEW, 2 96 | field :REPLICATION_VIEW, 3 97 | field :FULL, 4 98 | end 99 | 100 | defmodule Google.Bigtable.Admin.V2.ColumnFamily do 101 | @moduledoc false 102 | use Protobuf, syntax: :proto3 103 | 104 | @type t :: %__MODULE__{ 105 | gc_rule: Google.Bigtable.Admin.V2.GcRule.t() 106 | } 107 | defstruct [:gc_rule] 108 | 109 | field :gc_rule, 1, type: Google.Bigtable.Admin.V2.GcRule 110 | end 111 | 112 | defmodule Google.Bigtable.Admin.V2.GcRule do 113 | @moduledoc false 114 | use Protobuf, syntax: :proto3 115 | 116 | @type t :: %__MODULE__{ 117 | rule: {atom, any} 118 | } 119 | defstruct [:rule] 120 | 121 | oneof :rule, 0 122 | field :max_num_versions, 1, type: :int32, oneof: 0 123 | field :max_age, 2, type: Google.Protobuf.Duration, oneof: 0 124 | field :intersection, 3, type: Google.Bigtable.Admin.V2.GcRule.Intersection, oneof: 0 125 | field :union, 4, type: Google.Bigtable.Admin.V2.GcRule.Union, oneof: 0 126 | end 127 | 128 | defmodule Google.Bigtable.Admin.V2.GcRule.Intersection do 129 | 
@moduledoc false 130 | use Protobuf, syntax: :proto3 131 | 132 | @type t :: %__MODULE__{ 133 | rules: [Google.Bigtable.Admin.V2.GcRule.t()] 134 | } 135 | defstruct [:rules] 136 | 137 | field :rules, 1, repeated: true, type: Google.Bigtable.Admin.V2.GcRule 138 | end 139 | 140 | defmodule Google.Bigtable.Admin.V2.GcRule.Union do 141 | @moduledoc false 142 | use Protobuf, syntax: :proto3 143 | 144 | @type t :: %__MODULE__{ 145 | rules: [Google.Bigtable.Admin.V2.GcRule.t()] 146 | } 147 | defstruct [:rules] 148 | 149 | field :rules, 1, repeated: true, type: Google.Bigtable.Admin.V2.GcRule 150 | end 151 | 152 | defmodule Google.Bigtable.Admin.V2.Snapshot do 153 | @moduledoc false 154 | use Protobuf, syntax: :proto3 155 | 156 | @type t :: %__MODULE__{ 157 | name: String.t(), 158 | source_table: Google.Bigtable.Admin.V2.Table.t(), 159 | data_size_bytes: integer, 160 | create_time: Google.Protobuf.Timestamp.t(), 161 | delete_time: Google.Protobuf.Timestamp.t(), 162 | state: integer, 163 | description: String.t() 164 | } 165 | defstruct [ 166 | :name, 167 | :source_table, 168 | :data_size_bytes, 169 | :create_time, 170 | :delete_time, 171 | :state, 172 | :description 173 | ] 174 | 175 | field :name, 1, type: :string 176 | field :source_table, 2, type: Google.Bigtable.Admin.V2.Table 177 | field :data_size_bytes, 3, type: :int64 178 | field :create_time, 4, type: Google.Protobuf.Timestamp 179 | field :delete_time, 5, type: Google.Protobuf.Timestamp 180 | field :state, 6, type: Google.Bigtable.Admin.V2.Snapshot.State, enum: true 181 | field :description, 7, type: :string 182 | end 183 | 184 | defmodule Google.Bigtable.Admin.V2.Snapshot.State do 185 | @moduledoc false 186 | use Protobuf, enum: true, syntax: :proto3 187 | 188 | field :STATE_NOT_KNOWN, 0 189 | field :READY, 1 190 | field :CREATING, 2 191 | end 192 | -------------------------------------------------------------------------------- /lib/data/mutations.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.Mutations do 2 | @moduledoc """ 3 | Provides functions to build Bigtable mutations that are used when forming 4 | row mutation requests. 5 | """ 6 | alias Google.Bigtable.V2.{MutateRowsRequest, Mutation, TimestampRange} 7 | alias MutateRowsRequest.Entry 8 | alias Mutation.{DeleteFromColumn, DeleteFromFamily, DeleteFromRow, SetCell} 9 | 10 | @doc """ 11 | Builds a `Google.Bigtable.V2.MutateRowsRequest.Entry` for use with `Google.Bigtable.V2.MutateRowRequest` and `Google.Bigtable.V2.MutateRowsRequest`. 12 | 13 | ## Examples 14 | 15 | iex> Bigtable.Mutations.build("Row#123") 16 | %Google.Bigtable.V2.MutateRowsRequest.Entry{mutations: [], row_key: "Row#123"} 17 | """ 18 | @spec build(binary()) :: Entry.t() 19 | def build(row_key) when is_binary(row_key) do 20 | Entry.new(row_key: row_key) 21 | end 22 | 23 | @doc """ 24 | Creates a `Google.Bigtable.V2.Mutation.SetCell` given a `Google.Bigtable.V2.Mutation`, family name, column qualifier, and timestamp micros. 25 | 26 | The provided timestamp corresponds to the timestamp of the cell into which new data should be written. 27 | Use -1 for current Bigtable server time. Otherwise, the client should set this value itself, noting that the default value is a timestamp of zero if the field is left unspecified. 28 | Values must match the granularity of the table (e.g. 
micros, millis) 29 | 30 | ## Examples 31 | 32 | iex> Mutations.build("Row#123") |> Mutations.set_cell("family", "column", "value") 33 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 34 | mutations: [ 35 | %Google.Bigtable.V2.Mutation{ 36 | mutation: {:set_cell, 37 | %Google.Bigtable.V2.Mutation.SetCell{ 38 | column_qualifier: "column", 39 | family_name: "family", 40 | timestamp_micros: -1, 41 | value: "value" 42 | }} 43 | } 44 | ], 45 | row_key: "Row#123" 46 | } 47 | """ 48 | @spec set_cell(Entry.t(), binary(), binary(), binary(), integer()) :: Entry.t() 49 | def set_cell(%Entry{} = mutation, family, column, value, timestamp \\ -1) 50 | when is_binary(family) and is_binary(column) and is_integer(timestamp) do 51 | set_mutation = 52 | SetCell.new( 53 | family_name: family, 54 | column_qualifier: column, 55 | value: value, 56 | timestamp_micros: timestamp 57 | ) 58 | 59 | add_mutation(mutation, :set_cell, set_mutation) 60 | end 61 | 62 | @doc """ 63 | Creates a `Google.Bigtable.V2.Mutation.DeleteFromColumn` given a `Google.Bigtable.V2.Mutation`, family name, column qualifier, and time range. 64 | 65 | Time range is a keyword list that should contain optional start_timestamp_micros and end_timestamp_micros. 66 | If not provided, start is treated as 0 and end is treated as infinity 67 | 68 | ## Examples 69 | 70 | iex> Mutations.build("Row#123") |> Mutations.delete_from_column("family", "column") 71 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 72 | mutations: [ 73 | %Google.Bigtable.V2.Mutation{ 74 | mutation: {:delete_from_column, 75 | %Google.Bigtable.V2.Mutation.DeleteFromColumn{ 76 | column_qualifier: "column", 77 | family_name: "family", 78 | time_range: %Google.Bigtable.V2.TimestampRange{ 79 | end_timestamp_micros: 0, 80 | start_timestamp_micros: 0 81 | } 82 | }} 83 | } 84 | ], 85 | row_key: "Row#123" 86 | } 87 | """ 88 | @spec delete_from_column(Entry.t(), binary(), binary(), Keyword.t()) :: Entry.t() 89 | def delete_from_column(%Entry{} = mutation_struct, family, column, time_range \\ []) 90 | when is_binary(family) and is_binary(column) do 91 | time_range = create_time_range(time_range) 92 | 93 | mutation = 94 | DeleteFromColumn.new( 95 | family_name: family, 96 | column_qualifier: column, 97 | time_range: time_range 98 | ) 99 | 100 | add_mutation(mutation_struct, :delete_from_column, mutation) 101 | end 102 | 103 | @doc """ 104 | Creates a `Google.Bigtable.V2.Mutation.DeleteFromFamily` given a `Google.Bigtable.V2.Mutation` and family name. 105 | 106 | ## Examples 107 | 108 | iex> Mutations.build("Row#123") |> Mutations.delete_from_family("family") 109 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 110 | mutations: [ 111 | %Google.Bigtable.V2.Mutation{ 112 | mutation: {:delete_from_family, 113 | %Google.Bigtable.V2.Mutation.DeleteFromFamily{family_name: "family"}} 114 | } 115 | ], 116 | row_key: "Row#123" 117 | } 118 | """ 119 | @spec delete_from_family(Entry.t(), binary()) :: Entry.t() 120 | def delete_from_family(%Entry{} = mutation_struct, family) when is_binary(family) do 121 | mutation = DeleteFromFamily.new(family_name: family) 122 | 123 | add_mutation(mutation_struct, :delete_from_family, mutation) 124 | end 125 | 126 | @doc """ 127 | Creates a `Google.Bigtable.V2.Mutation.DeleteFromRow` given a `Google.Bigtable.V2.Mutation`. 
128 | 129 | ## Examples 130 | 131 | iex> Mutations.build("Row#123") |> Mutations.delete_from_row() 132 | %Google.Bigtable.V2.MutateRowsRequest.Entry{ 133 | mutations: [ 134 | %Google.Bigtable.V2.Mutation{ 135 | mutation: {:delete_from_row, %Google.Bigtable.V2.Mutation.DeleteFromRow{}} 136 | } 137 | ], 138 | row_key: "Row#123" 139 | } 140 | """ 141 | 142 | @spec delete_from_row(Entry.t()) :: Entry.t() 143 | def delete_from_row(%Entry{} = mutation_struct) do 144 | mutation = DeleteFromRow.new() 145 | 146 | add_mutation(mutation_struct, :delete_from_row, mutation) 147 | end 148 | 149 | # Adds an additional V2.Mutation to the given mutation struct 150 | @spec add_mutation(Entry.t(), atom(), Mutation.t()) :: Entry.t() 151 | defp add_mutation(%Entry{} = mutation_struct, type, mutation) do 152 | %{ 153 | mutation_struct 154 | | mutations: mutation_struct.mutations ++ [Mutation.new(mutation: {type, mutation})] 155 | } 156 | end 157 | 158 | # Creates a time range that can be used for column deletes 159 | @spec create_time_range(Keyword.t()) :: TimestampRange.t() 160 | defp create_time_range(time_range) do 161 | start_timestamp_micros = Keyword.get(time_range, :start) 162 | end_timestamp_micros = Keyword.get(time_range, :end) 163 | 164 | time_range = TimestampRange.new() 165 | 166 | time_range = 167 | case start_timestamp_micros do 168 | nil -> time_range 169 | micros -> %{time_range | start_timestamp_micros: micros} 170 | end 171 | 172 | time_range = 173 | case end_timestamp_micros do 174 | nil -> time_range 175 | micros -> %{time_range | end_timestamp_micros: micros} 176 | end 177 | 178 | time_range 179 | end 180 | end 181 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "base64url": {:hex, :base64url, "0.0.1", "36a90125f5948e3afd7be97662a1504b934dd5dac78451ca6e9abf85a10286be", [:rebar], [], "hexpm"}, 3 | "bunt": {:hex, :bunt, "0.2.0", "951c6e801e8b1d2cbe58ebbd3e616a869061ddadcc4863d0a2182541acae9a38", [:mix], [], "hexpm"}, 4 | "certifi": {:hex, :certifi, "2.4.2", "75424ff0f3baaccfd34b1214184b6ef616d89e420b258bb0a5ea7d7bc628f7f0", [:rebar3], [{:parse_trans, "~>3.3", [hex: :parse_trans, repo: "hexpm", optional: false]}], "hexpm"}, 5 | "cowboy": {:hex, :cowboy, "2.5.0", "4ef3ae066ee10fe01ea3272edc8f024347a0d3eb95f6fbb9aed556dacbfc1337", [:rebar3], [{:cowlib, "~> 2.6.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "~> 1.6.2", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm"}, 6 | "cowlib": {:hex, :cowlib, "2.6.0", "8aa629f81a0fc189f261dc98a42243fa842625feea3c7ec56c48f4ccdb55490f", [:rebar3], [], "hexpm"}, 7 | "credo": {:hex, :credo, "1.0.0", "aaa40fdd0543a0cf8080e8c5949d8c25f0a24e4fc8c1d83d06c388f5e5e0ea42", [:mix], [{:bunt, "~> 0.2.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"}, 8 | "dialyxir": {:hex, :dialyxir, "1.0.0-rc.6", "78e97d9c0ff1b5521dd68041193891aebebce52fc3b93463c0a6806874557d7d", [:mix], [{:erlex, "~> 0.2.1", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm"}, 9 | "diver": {:hex, :diver, "0.2.0", "8a2a85e97c7b1a989db501fe2ce2a5a0e82109b0d8b02e04c0c1a8a10b53795e", [:mix], [], "hexpm"}, 10 | "earmark": {:hex, :earmark, "1.3.1", "73812f447f7a42358d3ba79283cfa3075a7580a3a2ed457616d6517ac3738cb9", [:mix], [], "hexpm"}, 11 | "elixir_make": {:hex, :elixir_make, "0.4.2", "332c649d08c18bc1ecc73b1befc68c647136de4f340b548844efc796405743bf", [:mix], [], 
"hexpm"}, 12 | "erlex": {:hex, :erlex, "0.2.1", "cee02918660807cbba9a7229cae9b42d1c6143b768c781fa6cee1eaf03ad860b", [:mix], [], "hexpm"}, 13 | "erlport": {:hex, :erlport, "0.10.0", "2436ec2f4ed62538c6e9c52f523f9315b6002ee7e298d9bd10b35abc3f6b32e7", [:rebar3], [], "hexpm"}, 14 | "ex_doc": {:hex, :ex_doc, "0.19.2", "6f4081ccd9ed081b6dc0bd5af97a41e87f5554de469e7d76025fba535180565f", [:mix], [{:earmark, "~> 1.2", [hex: :earmark, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.10", [hex: :makeup_elixir, repo: "hexpm", optional: false]}], "hexpm"}, 15 | "excoveralls": {:hex, :excoveralls, "0.10.4", "b86230f0978bbc630c139af5066af7cd74fd16536f71bc047d1037091f9f63a9", [:mix], [{:hackney, "~> 1.13", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"}, 16 | "file_system": {:hex, :file_system, "0.2.6", "fd4dc3af89b9ab1dc8ccbcc214a0e60c41f34be251d9307920748a14bf41f1d3", [:mix], [], "hexpm"}, 17 | "google_protos": {:hex, :google_protos, "0.1.0", "c6b9e12092d17571b093d4156d004494ca143b65dbbcbfc3ffff463ea03467c0", [:mix], [{:protobuf, "~> 0.5", [hex: :protobuf, repo: "hexpm", optional: false]}], "hexpm"}, 18 | "goth": {:hex, :goth, "0.11.1", "edabbb776de8a9973cfea235d4e4c16481a28bfc4ff648472020a5a5af1044d0", [:mix], [{:httpoison, "~> 0.11 or ~> 1.0", [hex: :httpoison, repo: "hexpm", optional: false]}, {:json_web_token, "~> 0.2.10", [hex: :json_web_token, repo: "hexpm", optional: false]}, {:poison, "~> 2.1 or ~> 3.0", [hex: :poison, repo: "hexpm", optional: false]}], "hexpm"}, 19 | "grpc": {:hex, :grpc, "0.3.1", "bba240631f1a262db865d9bc620b3e3abc0acfab27a922ad47727d057a734ab3", [:mix], [{:cowboy, "~> 2.5", [hex: :cowboy, repo: "hexpm", optional: false]}, {:gun, "~> 1.2", [hex: :gun, repo: "hexpm", optional: false]}, {:protobuf, "~> 0.5", [hex: :protobuf, repo: "hexpm", optional: false]}], "hexpm"}, 20 | "gun": {:hex, :gun, "1.3.0", "18e5d269649c987af95aec309f68a27ffc3930531dd227a6eaa0884d6684286e", [:rebar3], [{:cowlib, "~> 2.6.0", [hex: :cowlib, repo: "hexpm", optional: false]}], "hexpm"}, 21 | "hackney": {:hex, :hackney, "1.15.0", "287a5d2304d516f63e56c469511c42b016423bcb167e61b611f6bad47e3ca60e", [:rebar3], [{:certifi, "2.4.2", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "6.0.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "1.0.1", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "1.0.2", [hex: :mimerl, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "1.1.4", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm"}, 22 | "httpoison": {:hex, :httpoison, "1.5.0", "71ae9f304bdf7f00e9cd1823f275c955bdfc68282bc5eb5c85c3a9ade865d68e", [:mix], [{:hackney, "~> 1.8", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm"}, 23 | "idna": {:hex, :idna, "6.0.0", "689c46cbcdf3524c44d5f3dde8001f364cd7608a99556d8fbd8239a5798d4c10", [:rebar3], [{:unicode_util_compat, "0.4.1", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm"}, 24 | "inch_ex": {:hex, :inch_ex, "2.0.0", "24268a9284a1751f2ceda569cd978e1fa394c977c45c331bb52a405de544f4de", [:mix], [{:bunt, "~> 0.2", [hex: :bunt, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm"}, 25 | "jason": {:hex, :jason, "1.1.2", "b03dedea67a99223a2eaf9f1264ce37154564de899fd3d8b9a21b1a6fd64afe7", [:mix], [{:decimal, "~> 1.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm"}, 26 | "joken": {:hex, :joken, "2.0.0", 
"ff10fca10ec539d7a73874da303f4a7a975fea53fcd59b1b89dda2a71ecb4c6b", [:mix], [{:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: true]}, {:jose, "~> 1.8", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm"}, 27 | "jose": {:hex, :jose, "1.9.0", "4167c5f6d06ffaebffd15cdb8da61a108445ef5e85ab8f5a7ad926fdf3ada154", [:mix, :rebar3], [{:base64url, "~> 0.0.1", [hex: :base64url, repo: "hexpm", optional: false]}], "hexpm"}, 28 | "json_web_token": {:hex, :json_web_token, "0.2.10", "61041d56369422c5e3a770cf7d7bf27224b3c4c12d3a7d79b43a002df766db22", [:mix], [{:poison, "~> 3.1", [hex: :poison, repo: "hexpm", optional: false]}], "hexpm"}, 29 | "lens": {:hex, :lens, "0.8.0", "73ebc4459281e01deac5936002b55349996922e9030e4ac11b1ff61f93ae7a02", [:mix], [], "hexpm"}, 30 | "makeup": {:hex, :makeup, "0.8.0", "9cf32aea71c7fe0a4b2e9246c2c4978f9070257e5c9ce6d4a28ec450a839b55f", [:mix], [{:nimble_parsec, "~> 0.5.0", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm"}, 31 | "makeup_elixir": {:hex, :makeup_elixir, "0.13.0", "be7a477997dcac2e48a9d695ec730b2d22418292675c75aa2d34ba0909dcdeda", [:mix], [{:makeup, "~> 0.8", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm"}, 32 | "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm"}, 33 | "mimerl": {:hex, :mimerl, "1.0.2", "993f9b0e084083405ed8252b99460c4f0563e41729ab42d9074fd5e52439be88", [:rebar3], [], "hexpm"}, 34 | "mix_test_watch": {:hex, :mix_test_watch, "0.9.0", "c72132a6071261893518fa08e121e911c9358713f62794a90c95db59042af375", [:mix], [{:file_system, "~> 0.2.1 or ~> 0.3", [hex: :file_system, repo: "hexpm", optional: false]}], "hexpm"}, 35 | "nimble_parsec": {:hex, :nimble_parsec, "0.5.0", "90e2eca3d0266e5c53f8fbe0079694740b9c91b6747f2b7e3c5d21966bba8300", [:mix], [], "hexpm"}, 36 | "parse_trans": {:hex, :parse_trans, "3.3.0", "09765507a3c7590a784615cfd421d101aec25098d50b89d7aa1d66646bc571c1", [:rebar3], [], "hexpm"}, 37 | "poison": {:hex, :poison, "3.1.0", "d9eb636610e096f86f25d9a46f35a9facac35609a7591b3be3326e99a0484665", [:mix], [], "hexpm"}, 38 | "poolboy": {:hex, :poolboy, "1.5.2", "392b007a1693a64540cead79830443abf5762f5d30cf50bc95cb2c1aaafa006b", [:rebar3], [], "hexpm"}, 39 | "pre_commit": {:hex, :pre_commit, "0.3.4", "e2850f80be8090d50ad8019ef2426039307ff5dfbe70c736ad0d4d401facf304", [:mix], [], "hexpm"}, 40 | "protobuf": {:hex, :protobuf, "0.5.4", "2e1b8eec211aff034ad8a14e3674220b0158bfb9a3c7128ac9d2a1ed1b3724d3", [:mix], [], "hexpm"}, 41 | "ranch": {:hex, :ranch, "1.6.2", "6db93c78f411ee033dbb18ba8234c5574883acb9a75af0fb90a9b82ea46afa00", [:rebar3], [], "hexpm"}, 42 | "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.4", "f0eafff810d2041e93f915ef59899c923f4568f4585904d010387ed74988e77b", [:make, :mix, :rebar3], [], "hexpm"}, 43 | "unicode_util_compat": {:hex, :unicode_util_compat, "0.4.1", "d869e4c68901dd9531385bb0c8c40444ebf624e60b6962d95952775cac5e90cd", [:rebar3], [], "hexpm"}, 44 | } 45 | -------------------------------------------------------------------------------- /lib/data/row_set.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.RowSet do 2 | @moduledoc """ 3 | Provides functions to build a `Google.Bigtable.V2.RowSet` and apply it to a `Google.Bigtable.V2.ReadRowsRequest` 4 | """ 5 | alias Bigtable.ReadRows 6 | alias Google.Bigtable.V2 7 | 8 | @doc """ 9 | Adds a single or list of row keys to a `Google.Bigtable.V2.ReadRowsRequest` 10 | 11 | Returns `Google.Bigtable.V2.ReadRowsRequest` 
12 | 13 | ## Examples 14 | #### Single Key 15 | 16 | iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_keys("Row#123") 17 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 18 | %Google.Bigtable.V2.RowSet{row_keys: ["Row#123"], row_ranges: []} 19 | 20 | #### Multiple Keys 21 | iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_keys(["Row#123", "Row#124"]) 22 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 23 | %Google.Bigtable.V2.RowSet{row_keys: ["Row#123", "Row#124"], row_ranges: []} 24 | """ 25 | @spec row_keys(V2.ReadRowsRequest.t(), [binary()]) :: V2.ReadRowsRequest.t() 26 | def row_keys(%V2.ReadRowsRequest{} = request, keys) when is_list(keys) do 27 | prev_row_ranges = get_row_ranges(request) 28 | 29 | %{request | rows: V2.RowSet.new(row_keys: keys, row_ranges: prev_row_ranges)} 30 | end 31 | 32 | @spec row_keys(V2.ReadRowsRequest.t(), binary()) :: V2.ReadRowsRequest.t() 33 | def row_keys(%V2.ReadRowsRequest{} = request, key) when is_binary(key) do 34 | row_keys(request, [key]) 35 | end 36 | 37 | @doc """ 38 | Adds a single or list of row keys to the default `Google.Bigtable.V2.ReadRowsRequest` 39 | 40 | Returns `Google.Bigtable.V2.ReadRowsRequest` 41 | 42 | ## Examples 43 | #### Single Key 44 | 45 | iex> request = Bigtable.RowSet.row_keys("Row#123") 46 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 47 | %Google.Bigtable.V2.RowSet{row_keys: ["Row#123"], row_ranges: []} 48 | 49 | #### Multiple Keys 50 | iex> request = Bigtable.RowSet.row_keys(["Row#123", "Row#124"]) 51 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 52 | %Google.Bigtable.V2.RowSet{row_keys: ["Row#123", "Row#124"], row_ranges: []} 53 | """ 54 | @spec row_keys([binary()]) :: V2.ReadRowsRequest.t() 55 | def row_keys(keys) when is_list(keys) do 56 | ReadRows.build() |> row_keys(keys) 57 | end 58 | 59 | @spec row_keys(binary()) :: V2.ReadRowsRequest.t() 60 | def row_keys(key) when is_binary(key) do 61 | ReadRows.build() |> row_keys(key) 62 | end 63 | 64 | @doc """ 65 | Adds a single or list of row ranges to a `Google.Bigtable.V2.ReadRowsRequest` with an optional boolean flag to specify the inclusivity of the range start and end. 66 | 67 | Row ranges should be provided in the format {start, end} or {start, end, inclusive}. 
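When the inclusive flag is omitted, the range is treated as inclusive of both the start and end keys.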
68 | 69 | Returns `Google.Bigtable.V2.ReadRowsRequest` 70 | 71 | ## Examples 72 | #### Single Range 73 | 74 | iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_ranges({"start", "end"}) 75 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 76 | %Google.Bigtable.V2.RowSet{ 77 | row_keys: [], 78 | row_ranges: [ 79 | %Google.Bigtable.V2.RowRange{ 80 | end_key: {:end_key_closed, "end"}, 81 | start_key: {:start_key_closed, "start"} 82 | } 83 | ] 84 | } 85 | 86 | #### Multiple Ranges 87 | 88 | iex> ranges = [{"start1", "end1"}, {"start2", "end2", false}] 89 | iex> request = Bigtable.ReadRows.build("table") |> Bigtable.RowSet.row_ranges(ranges) 90 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 91 | %Google.Bigtable.V2.RowSet{ 92 | row_keys: [], 93 | row_ranges: [ 94 | %Google.Bigtable.V2.RowRange{ 95 | end_key: {:end_key_closed, "end1"}, 96 | start_key: {:start_key_closed, "start1"} 97 | }, 98 | %Google.Bigtable.V2.RowRange{ 99 | end_key: {:end_key_open, "end2"}, 100 | start_key: {:start_key_open, "start2"} 101 | } 102 | ] 103 | } 104 | """ 105 | 106 | @spec row_ranges( 107 | V2.ReadRowsRequest.t(), 108 | [{binary(), binary(), binary()}] 109 | | [{binary(), binary()}] 110 | | {binary(), binary(), binary()} 111 | | {binary(), binary()} 112 | ) :: V2.ReadRowsRequest.t() 113 | def row_ranges(%V2.ReadRowsRequest{} = request, ranges) do 114 | ranges = List.flatten([ranges]) 115 | 116 | ranges 117 | |> Enum.map(&translate_range/1) 118 | |> apply_ranges(request) 119 | end 120 | 121 | @doc """ 122 | Adds a single or list of row ranges to the default `Google.Bigtable.V2.ReadRowsRequest` with an optional boolean flag to specify the inclusivity of the range start and end. 123 | 124 | Row ranges should be provided in the format {start, end} or {start, end, inclusive}. 
125 | 126 | Returns `Google.Bigtable.V2.ReadRowsRequest` 127 | 128 | ## Examples 129 | #### Single Range 130 | 131 | iex> request = Bigtable.RowSet.row_ranges({"start", "end"}) 132 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 133 | %Google.Bigtable.V2.RowSet{ 134 | row_keys: [], 135 | row_ranges: [ 136 | %Google.Bigtable.V2.RowRange{ 137 | end_key: {:end_key_closed, "end"}, 138 | start_key: {:start_key_closed, "start"} 139 | } 140 | ] 141 | } 142 | 143 | #### Multiple Ranges 144 | 145 | iex> ranges = [{"start1", "end1"}, {"start2", "end2", false}] 146 | iex> request = Bigtable.RowSet.row_ranges(ranges) 147 | iex> with %Google.Bigtable.V2.ReadRowsRequest{} <- request, do: request.rows 148 | %Google.Bigtable.V2.RowSet{ 149 | row_keys: [], 150 | row_ranges: [ 151 | %Google.Bigtable.V2.RowRange{ 152 | end_key: {:end_key_closed, "end1"}, 153 | start_key: {:start_key_closed, "start1"} 154 | }, 155 | %Google.Bigtable.V2.RowRange{ 156 | end_key: {:end_key_open, "end2"}, 157 | start_key: {:start_key_open, "start2"} 158 | } 159 | ] 160 | } 161 | """ 162 | 163 | @spec row_ranges( 164 | [{binary(), binary(), binary()}] 165 | | [{binary(), binary()}] 166 | | [{binary(), binary(), binary()}] 167 | | {binary(), binary(), binary()} 168 | ) :: V2.ReadRowsRequest.t() 169 | def row_ranges(ranges) do 170 | ReadRows.build() 171 | |> row_ranges(ranges) 172 | end 173 | 174 | # Fetches the previous row ranges from a ReadRowsRequest object 175 | defp get_row_ranges(%V2.ReadRowsRequest{} = request) do 176 | case request.rows do 177 | %V2.RowSet{} = row_set -> 178 | row_set.row_ranges 179 | 180 | _ -> 181 | [] 182 | end 183 | end 184 | 185 | # Fetches the previous row keys from a ReadRowsRequest object 186 | defp get_row_keys(%V2.ReadRowsRequest{} = request) do 187 | case request.rows do 188 | %V2.RowSet{} = row_set -> 189 | row_set.row_keys 190 | 191 | _ -> 192 | [] 193 | end 194 | end 195 | 196 | # Returns an inclusive or exclusive range depending on the boolean flag 197 | 198 | defp translate_range({start_key, end_key, inclusive}) do 199 | case inclusive do 200 | true -> inclusive_range(start_key, end_key) 201 | false -> exclusive_range(start_key, end_key) 202 | end 203 | end 204 | 205 | defp translate_range({start_key, end_key}) do 206 | inclusive_range(start_key, end_key) 207 | end 208 | 209 | defp exclusive_range(start_key, end_key) do 210 | V2.RowRange.new( 211 | start_key: {:start_key_open, start_key}, 212 | end_key: {:end_key_open, end_key} 213 | ) 214 | end 215 | 216 | defp inclusive_range(start_key, end_key) do 217 | V2.RowRange.new( 218 | start_key: {:start_key_closed, start_key}, 219 | end_key: {:end_key_closed, end_key} 220 | ) 221 | end 222 | 223 | # Applies row ranges to a ReadRows request 224 | defp apply_ranges(ranges, %V2.ReadRowsRequest{} = request) do 225 | prev_row_keys = get_row_keys(request) 226 | 227 | %{request | rows: V2.RowSet.new(row_keys: prev_row_keys, row_ranges: ranges)} 228 | end 229 | end 230 | -------------------------------------------------------------------------------- /lib/grpc/data/bigtable.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Bigtable.V2.ReadRowsRequest do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | table_name: String.t(), 7 | app_profile_id: String.t(), 8 | rows: Google.Bigtable.V2.RowSet.t(), 9 | filter: Google.Bigtable.V2.RowFilter.t(), 10 | rows_limit: integer 11 | } 12 | defstruct [:table_name, :app_profile_id, :rows, 
:filter, :rows_limit] 13 | 14 | field(:table_name, 1, type: :string) 15 | field(:app_profile_id, 5, type: :string) 16 | field(:rows, 2, type: Google.Bigtable.V2.RowSet) 17 | field(:filter, 3, type: Google.Bigtable.V2.RowFilter) 18 | field(:rows_limit, 4, type: :int64) 19 | end 20 | 21 | defmodule Google.Bigtable.V2.ReadRowsResponse do 22 | @moduledoc false 23 | use Protobuf, syntax: :proto3 24 | 25 | @type t :: %__MODULE__{ 26 | chunks: [Google.Bigtable.V2.ReadRowsResponse.CellChunk.t()], 27 | last_scanned_row_key: String.t() 28 | } 29 | defstruct [:chunks, :last_scanned_row_key] 30 | 31 | field(:chunks, 1, repeated: true, type: Google.Bigtable.V2.ReadRowsResponse.CellChunk) 32 | field(:last_scanned_row_key, 2, type: :bytes) 33 | end 34 | 35 | defmodule Google.Bigtable.V2.ReadRowsResponse.CellChunk do 36 | @moduledoc false 37 | use Protobuf, syntax: :proto3 38 | 39 | @type t :: %__MODULE__{ 40 | row_status: {atom, any}, 41 | row_key: String.t(), 42 | family_name: Google.Protobuf.StringValue.t(), 43 | qualifier: Google.Protobuf.BytesValue.t(), 44 | timestamp_micros: integer, 45 | labels: [String.t()], 46 | value: String.t(), 47 | value_size: integer 48 | } 49 | defstruct [ 50 | :row_status, 51 | :row_key, 52 | :family_name, 53 | :qualifier, 54 | :timestamp_micros, 55 | :labels, 56 | :value, 57 | :value_size 58 | ] 59 | 60 | oneof(:row_status, 0) 61 | field(:row_key, 1, type: :bytes) 62 | field(:family_name, 2, type: Google.Protobuf.StringValue) 63 | field(:qualifier, 3, type: Google.Protobuf.BytesValue) 64 | field(:timestamp_micros, 4, type: :int64) 65 | field(:labels, 5, repeated: true, type: :string) 66 | field(:value, 6, type: :bytes) 67 | field(:value_size, 7, type: :int32) 68 | field(:reset_row, 8, type: :bool, oneof: 0) 69 | field(:commit_row, 9, type: :bool, oneof: 0) 70 | end 71 | 72 | defmodule Google.Bigtable.V2.SampleRowKeysRequest do 73 | @moduledoc false 74 | use Protobuf, syntax: :proto3 75 | 76 | @type t :: %__MODULE__{ 77 | table_name: String.t(), 78 | app_profile_id: String.t() 79 | } 80 | defstruct [:table_name, :app_profile_id] 81 | 82 | field(:table_name, 1, type: :string) 83 | field(:app_profile_id, 2, type: :string) 84 | end 85 | 86 | defmodule Google.Bigtable.V2.SampleRowKeysResponse do 87 | @moduledoc false 88 | use Protobuf, syntax: :proto3 89 | 90 | @type t :: %__MODULE__{ 91 | row_key: String.t(), 92 | offset_bytes: integer 93 | } 94 | defstruct [:row_key, :offset_bytes] 95 | 96 | field(:row_key, 1, type: :bytes) 97 | field(:offset_bytes, 2, type: :int64) 98 | end 99 | 100 | defmodule Google.Bigtable.V2.MutateRowRequest do 101 | @moduledoc false 102 | use Protobuf, syntax: :proto3 103 | 104 | @type t :: %__MODULE__{ 105 | table_name: String.t(), 106 | app_profile_id: String.t(), 107 | row_key: String.t(), 108 | mutations: [Google.Bigtable.V2.Mutation.t()] 109 | } 110 | defstruct [:table_name, :app_profile_id, :row_key, :mutations] 111 | 112 | field(:table_name, 1, type: :string) 113 | field(:app_profile_id, 4, type: :string) 114 | field(:row_key, 2, type: :bytes) 115 | field(:mutations, 3, repeated: true, type: Google.Bigtable.V2.Mutation) 116 | end 117 | 118 | defmodule Google.Bigtable.V2.MutateRowResponse do 119 | @moduledoc false 120 | use Protobuf, syntax: :proto3 121 | 122 | defstruct [] 123 | end 124 | 125 | defmodule Google.Bigtable.V2.MutateRowsRequest do 126 | @moduledoc false 127 | use Protobuf, syntax: :proto3 128 | 129 | @type t :: %__MODULE__{ 130 | table_name: String.t(), 131 | app_profile_id: String.t(), 132 | entries: 
[Google.Bigtable.V2.MutateRowsRequest.Entry.t()] 133 | } 134 | defstruct [:table_name, :app_profile_id, :entries] 135 | 136 | field(:table_name, 1, type: :string) 137 | field(:app_profile_id, 3, type: :string) 138 | field(:entries, 2, repeated: true, type: Google.Bigtable.V2.MutateRowsRequest.Entry) 139 | end 140 | 141 | defmodule Google.Bigtable.V2.MutateRowsRequest.Entry do 142 | @moduledoc false 143 | use Protobuf, syntax: :proto3 144 | 145 | @type t :: %__MODULE__{ 146 | row_key: String.t(), 147 | mutations: [Google.Bigtable.V2.Mutation.t()] 148 | } 149 | defstruct [:row_key, :mutations] 150 | 151 | field(:row_key, 1, type: :bytes) 152 | field(:mutations, 2, repeated: true, type: Google.Bigtable.V2.Mutation) 153 | end 154 | 155 | defmodule Google.Bigtable.V2.MutateRowsResponse do 156 | @moduledoc false 157 | use Protobuf, syntax: :proto3 158 | 159 | @type t :: %__MODULE__{ 160 | entries: [Google.Bigtable.V2.MutateRowsResponse.Entry.t()] 161 | } 162 | defstruct [:entries] 163 | 164 | field(:entries, 1, repeated: true, type: Google.Bigtable.V2.MutateRowsResponse.Entry) 165 | end 166 | 167 | defmodule Google.Bigtable.V2.MutateRowsResponse.Entry do 168 | @moduledoc false 169 | use Protobuf, syntax: :proto3 170 | 171 | @type t :: %__MODULE__{ 172 | index: integer, 173 | status: Google.Rpc.Status.t() 174 | } 175 | defstruct [:index, :status] 176 | 177 | field(:index, 1, type: :int64) 178 | field(:status, 2, type: Google.Rpc.Status) 179 | end 180 | 181 | defmodule Google.Bigtable.V2.CheckAndMutateRowRequest do 182 | @moduledoc false 183 | use Protobuf, syntax: :proto3 184 | 185 | @type t :: %__MODULE__{ 186 | table_name: String.t(), 187 | app_profile_id: String.t(), 188 | row_key: String.t(), 189 | predicate_filter: Google.Bigtable.V2.RowFilter.t(), 190 | true_mutations: [Google.Bigtable.V2.Mutation.t()], 191 | false_mutations: [Google.Bigtable.V2.Mutation.t()] 192 | } 193 | defstruct [ 194 | :table_name, 195 | :app_profile_id, 196 | :row_key, 197 | :predicate_filter, 198 | :true_mutations, 199 | :false_mutations 200 | ] 201 | 202 | field(:table_name, 1, type: :string) 203 | field(:app_profile_id, 7, type: :string) 204 | field(:row_key, 2, type: :bytes) 205 | field(:predicate_filter, 6, type: Google.Bigtable.V2.RowFilter) 206 | field(:true_mutations, 4, repeated: true, type: Google.Bigtable.V2.Mutation) 207 | field(:false_mutations, 5, repeated: true, type: Google.Bigtable.V2.Mutation) 208 | end 209 | 210 | defmodule Google.Bigtable.V2.CheckAndMutateRowResponse do 211 | @moduledoc false 212 | use Protobuf, syntax: :proto3 213 | 214 | @type t :: %__MODULE__{ 215 | predicate_matched: boolean 216 | } 217 | defstruct [:predicate_matched] 218 | 219 | field(:predicate_matched, 1, type: :bool) 220 | end 221 | 222 | defmodule Google.Bigtable.V2.ReadModifyWriteRowRequest do 223 | @moduledoc false 224 | use Protobuf, syntax: :proto3 225 | 226 | @type t :: %__MODULE__{ 227 | table_name: String.t(), 228 | app_profile_id: String.t(), 229 | row_key: String.t(), 230 | rules: [Google.Bigtable.V2.ReadModifyWriteRule.t()] 231 | } 232 | defstruct [:table_name, :app_profile_id, :row_key, :rules] 233 | 234 | field(:table_name, 1, type: :string) 235 | field(:app_profile_id, 4, type: :string) 236 | field(:row_key, 2, type: :bytes) 237 | field(:rules, 3, repeated: true, type: Google.Bigtable.V2.ReadModifyWriteRule) 238 | end 239 | 240 | defmodule Google.Bigtable.V2.ReadModifyWriteRowResponse do 241 | @moduledoc false 242 | use Protobuf, syntax: :proto3 243 | 244 | @type t :: %__MODULE__{ 245 | row: 
Google.Bigtable.V2.Row.t() 246 | } 247 | defstruct [:row] 248 | 249 | field(:row, 1, type: Google.Bigtable.V2.Row) 250 | end 251 | 252 | defmodule Google.Bigtable.V2.Bigtable.Service do 253 | @moduledoc false 254 | use GRPC.Service, name: "google.bigtable.v2.Bigtable" 255 | 256 | rpc(:ReadRows, Google.Bigtable.V2.ReadRowsRequest, stream(Google.Bigtable.V2.ReadRowsResponse)) 257 | 258 | rpc( 259 | :SampleRowKeys, 260 | Google.Bigtable.V2.SampleRowKeysRequest, 261 | stream(Google.Bigtable.V2.SampleRowKeysResponse) 262 | ) 263 | 264 | rpc(:MutateRow, Google.Bigtable.V2.MutateRowRequest, Google.Bigtable.V2.MutateRowResponse) 265 | 266 | rpc( 267 | :MutateRows, 268 | Google.Bigtable.V2.MutateRowsRequest, 269 | stream(Google.Bigtable.V2.MutateRowsResponse) 270 | ) 271 | 272 | rpc( 273 | :CheckAndMutateRow, 274 | Google.Bigtable.V2.CheckAndMutateRowRequest, 275 | Google.Bigtable.V2.CheckAndMutateRowResponse 276 | ) 277 | 278 | rpc( 279 | :ReadModifyWriteRow, 280 | Google.Bigtable.V2.ReadModifyWriteRowRequest, 281 | Google.Bigtable.V2.ReadModifyWriteRowResponse 282 | ) 283 | end 284 | 285 | defmodule Google.Bigtable.V2.Bigtable.Stub do 286 | @moduledoc false 287 | use GRPC.Stub, service: Google.Bigtable.V2.Bigtable.Service 288 | end 289 | -------------------------------------------------------------------------------- /lib/data/chunk_reader.ex: -------------------------------------------------------------------------------- 1 | defmodule Bigtable.ChunkReader do 2 | @moduledoc """ 3 | Reads chunks from `Google.Bigtable.V2.ReadRowsResponse` and parses them into complete cells grouped by rowkey. 4 | """ 5 | 6 | use Agent, restart: :temporary 7 | 8 | defmodule ReadCell do 9 | @moduledoc """ 10 | A finished cell produced by `Bigtable.ChunkReader`. 11 | """ 12 | @type t :: %__MODULE__{ 13 | label: binary(), 14 | row_key: binary(), 15 | family_name: Google.Protobuf.StringValue.t(), 16 | qualifier: Google.Protobuf.BytesValue.t(), 17 | timestamp: non_neg_integer, 18 | value: binary() 19 | } 20 | 21 | defstruct [ 22 | :label, 23 | :row_key, 24 | :family_name, 25 | :qualifier, 26 | :timestamp, 27 | :value 28 | ] 29 | end 30 | 31 | defmodule ReaderState do 32 | @moduledoc false 33 | defstruct [ 34 | :cur_key, 35 | :cur_label, 36 | :cur_fam, 37 | :cur_qual, 38 | :cur_val, 39 | :last_key, 40 | cur_row: %{}, 41 | cur_ts: 0, 42 | state: :new_row 43 | ] 44 | end 45 | 46 | @typedoc """ 47 | A map containging lists of `Bigtable.ChunkReader.ReadCell` keyed by row key. 48 | """ 49 | @type chunk_reader_result :: %{optional(binary()) => [ReadCell.t()]} 50 | 51 | def start_link(_) do 52 | GenServer.start_link(__MODULE__, %ReaderState{}, []) 53 | end 54 | 55 | @doc """ 56 | Opens a `Bigtable.ChunkReader`. 57 | """ 58 | @spec open() :: :ignore | {:error, any()} | {:ok, pid()} | {:ok, pid(), any()} 59 | def open do 60 | DynamicSupervisor.start_child(__MODULE__.Supervisor, __MODULE__) 61 | end 62 | 63 | @doc """ 64 | Closes a `Bigtable.ChunkReader` when provided its pid and returns the chunk_reader_result. 65 | """ 66 | @spec close(pid()) :: {:ok, chunk_reader_result} | {:error, binary()} 67 | def close(pid) do 68 | result = GenServer.call(pid, :close) 69 | DynamicSupervisor.terminate_child(__MODULE__.Supervisor, pid) 70 | result 71 | end 72 | 73 | @doc """ 74 | Processes a `Google.Bigtable.V2.ReadRowsResponse.CellChunk` given a `Bigtable.ChunkReader` pid. 
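
  ## Example

  A minimal sketch of the full open/process/close flow. The `response` variable
  below stands in for a `Google.Bigtable.V2.ReadRowsResponse` obtained elsewhere;
  it is not provided by this module and is shown only to illustrate how the three
  functions fit together.

      {:ok, pid} = Bigtable.ChunkReader.open()

      # Feed each CellChunk from the streamed response into the reader.
      # Each call returns the cells accumulated so far, keyed by row key.
      Enum.each(response.chunks, fn chunk ->
        {:ok, _cells_so_far} = Bigtable.ChunkReader.process(pid, chunk)
      end)

      # Closing returns the finished result and stops the reader process.
      {:ok, cells_by_row_key} = Bigtable.ChunkReader.close(pid)
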
75 | """ 76 | @spec process(pid(), Google.Bigtable.V2.ReadRowsResponse.CellChunk.t()) :: 77 | {:ok, chunk_reader_result} | {:error, binary()} 78 | def process(pid, cc) do 79 | GenServer.call(pid, {:process, cc}) 80 | end 81 | 82 | @doc false 83 | def init(state) do 84 | {:ok, state} 85 | end 86 | 87 | @doc false 88 | def handle_call(:close, _from, cr) do 89 | if cr.state == :new_row do 90 | {:reply, {:ok, cr.cur_row}, cr} 91 | else 92 | {:reply, {:error, "invalid state for end of stream #{cr.state}"}, cr} 93 | end 94 | end 95 | 96 | @doc false 97 | def handle_call({:process, cc}, _from, cr) do 98 | case handle_state(cr.state, cr, cc) do 99 | {:error, _msg} = result -> 100 | {:reply, result, cr} 101 | 102 | next_state -> 103 | {:reply, {:ok, next_state.cur_row}, next_state} 104 | end 105 | end 106 | 107 | defp handle_state(:new_row, cr, cc) do 108 | with :ok <- validate_new_row(cr, cc) do 109 | to_merge = %{ 110 | cur_key: cc.row_key, 111 | cur_fam: cc.family_name, 112 | cur_qual: cc.qualifier, 113 | cur_ts: cc.timestamp_micros 114 | } 115 | 116 | cr 117 | |> Map.merge(to_merge) 118 | |> handle_cell_value(cc) 119 | else 120 | e -> 121 | e 122 | end 123 | end 124 | 125 | defp handle_state(:cell_in_progress, cr, cc) do 126 | with :ok <- validate_cell_in_progress(cr, cc) do 127 | if reset_row?(cc) do 128 | reset_to_new_row(cr) 129 | else 130 | cr 131 | |> handle_cell_value(cc) 132 | end 133 | else 134 | e -> 135 | e 136 | end 137 | end 138 | 139 | defp handle_state(:row_in_progress, cr, cc) do 140 | with :ok <- validate_row_in_progress(cr, cc) do 141 | if reset_row?(cc) do 142 | reset_to_new_row(cr) 143 | else 144 | cr 145 | |> update_if_contains(cc, :family_name, :cur_fam) 146 | |> update_if_contains(cc, :qualifier, :cur_qual) 147 | |> update_if_contains(cc, :timestamp_micros, :cur_ts) 148 | |> handle_cell_value(cc) 149 | end 150 | else 151 | e -> 152 | e 153 | end 154 | end 155 | 156 | defp update_if_contains(cr, cc, cc_key, cr_key) do 157 | value = Map.get(cc, cc_key) 158 | 159 | if value != nil do 160 | Map.put(cr, cr_key, value) 161 | else 162 | cr 163 | end 164 | end 165 | 166 | defp validate_new_row(cr, cc) do 167 | cond do 168 | reset_row?(cc) -> 169 | {:error, "reset_row not allowed between rows"} 170 | 171 | !row_key?(cc) or !family?(cc) or !qualifier?(cc) -> 172 | {:error, "missing key field for new row #{inspect(cc)}"} 173 | 174 | cr.last_key != "" and cr.last_key >= cc.row_key -> 175 | {:error, "out of order row key: #{cr.last_key}, #{cc.row_key}"} 176 | 177 | true -> 178 | :ok 179 | end 180 | end 181 | 182 | defp validate_row_in_progress(cr, cc) do 183 | status = validate_row_status(cc) 184 | 185 | cond do 186 | status != :ok -> 187 | status 188 | 189 | row_key?(cc) and cc.row_key != cr.cur_key -> 190 | {:error, "received new row key #{cc.row_key} during existing row #{cr.cur_key}"} 191 | 192 | family?(cc) and !qualifier?(cc) -> 193 | {:error, "family name #{cc.family_name} specified without a qualifier"} 194 | 195 | true -> 196 | :ok 197 | end 198 | end 199 | 200 | defp validate_cell_in_progress(cr, cc) do 201 | status = validate_row_status(cc) 202 | 203 | cond do 204 | status != :ok -> 205 | status 206 | 207 | cr.cur_val == nil -> 208 | {:error, "no cached cell while CELL_IN_PROGRESS #{cc}"} 209 | 210 | !reset_row?(cc) and any_key_present?(cc) -> 211 | {:error, "cell key components found while CELL_IN_PROGRESS #{cc}"} 212 | 213 | true -> 214 | :ok 215 | end 216 | end 217 | 218 | defp validate_row_status(cc) do 219 | cond do 220 | reset_row?(cc) and (any_key_present?(cc) or 
value?(cc) or value_size?(cc) or labels?(cc)) -> 221 | {:error, "reset must not be specified with other fields #{inspect(cc)}"} 222 | 223 | commit_row?(cc) and value_size?(cc) -> 224 | {:error, "commit row found in between chunks in a cell"} 225 | 226 | true -> 227 | :ok 228 | end 229 | end 230 | 231 | defp handle_cell_value(cr, %{value_size: value_size} = cc) when value_size > 0 do 232 | next_value = 233 | if cr.cur_val == nil do 234 | <<>> <> cc.value 235 | else 236 | cr.cur_val <> cc.value 237 | end 238 | 239 | next_label = 240 | if has_property?(cr, :cur_label) do 241 | cr.cur_label 242 | else 243 | Map.get(cc, :labels, "") 244 | end 245 | 246 | cr 247 | |> Map.put(:cur_val, next_value) 248 | |> Map.put(:cur_label, next_label) 249 | |> Map.put(:state, :cell_in_progress) 250 | end 251 | 252 | defp handle_cell_value(cr, cc) do 253 | next_value = 254 | if cr.cur_val == nil do 255 | cc.value 256 | else 257 | cr.cur_val <> cc.value 258 | end 259 | 260 | next_label = 261 | if has_property?(cr, :cur_label) do 262 | cr.cur_label 263 | else 264 | Map.get(cc, :labels, "") 265 | end 266 | 267 | cr 268 | |> Map.put(:cur_val, next_value) 269 | |> Map.put(:cur_label, next_label) 270 | |> finish_cell(cc) 271 | end 272 | 273 | defp finish_cell(cr, cc) do 274 | label = 275 | case cr.cur_label do 276 | label when is_list(label) -> 277 | Enum.join(label, " ") 278 | 279 | label -> 280 | label 281 | end 282 | 283 | ri = %ReadCell{ 284 | label: label, 285 | qualifier: cr.cur_qual, 286 | row_key: cr.cur_key, 287 | family_name: cr.cur_fam, 288 | timestamp: cr.cur_ts, 289 | value: cr.cur_val 290 | } 291 | 292 | next_row = 293 | Map.update(cr.cur_row, cr.cur_key, [ri], fn prev -> 294 | [ri | prev] 295 | end) 296 | 297 | to_merge = 298 | if commit_row?(cc) do 299 | %{ 300 | last_key: cr.cur_key, 301 | state: :new_row 302 | } 303 | else 304 | %{ 305 | state: :row_in_progress 306 | } 307 | end 308 | 309 | next_state = 310 | Map.merge(to_merge, %{ 311 | cur_row: next_row, 312 | cur_label: nil, 313 | cur_val: nil 314 | }) 315 | 316 | Map.merge(cr, next_state) 317 | end 318 | 319 | defp reset_to_new_row(cr) do 320 | Map.merge(cr, %{ 321 | cur_key: nil, 322 | cur_fam: nil, 323 | cur_qual: nil, 324 | cur_val: nil, 325 | cur_row: %{}, 326 | cur_ts: 0, 327 | state: :new_row 328 | }) 329 | end 330 | 331 | defp any_key_present?(cc) do 332 | row_key?(cc) or family?(cc) or qualifier?(cc) or cc.timestamp_micros != 0 333 | end 334 | 335 | defp value?(cc), do: has_property?(cc, :value) 336 | defp value_size?(cc), do: cc.value_size > 0 337 | 338 | defp labels?(cc) do 339 | value = Map.get(cc, :labels) 340 | value != [] and value != nil and value != "" 341 | end 342 | 343 | defp row_key?(cc), do: has_property?(cc, :row_key) 344 | defp family?(cc), do: has_property?(cc, :family_name) 345 | defp qualifier?(cc), do: has_property?(cc, :qualifier) 346 | 347 | defp has_property?(cc, key) do 348 | val = Map.get(cc, key) 349 | val != nil and val != "" 350 | end 351 | 352 | defp reset_row?(cc), do: row_status(cc) == :reset_row 353 | defp commit_row?(cc), do: row_status(cc) == :commit_row 354 | 355 | defp row_status(cc) do 356 | case cc.row_status do 357 | {status, true} -> 358 | status 359 | 360 | _ -> 361 | nil 362 | end 363 | end 364 | end 365 | -------------------------------------------------------------------------------- /test/data/row_filter_test.exs: -------------------------------------------------------------------------------- 1 | defmodule RowFilterTest do 2 | @moduledoc false 3 | alias Bigtable.RowFilter 4 | 5 | use 
ExUnit.Case 6 | 7 | doctest RowFilter 8 | 9 | setup do 10 | [ 11 | request: %Google.Bigtable.V2.ReadRowsRequest{ 12 | app_profile_id: "", 13 | filter: %Google.Bigtable.V2.RowFilter{ 14 | filter: 15 | {:chain, 16 | %Google.Bigtable.V2.RowFilter.Chain{ 17 | filters: [] 18 | }} 19 | }, 20 | rows: nil, 21 | rows_limit: 0, 22 | table_name: Bigtable.Utils.configured_table_name() 23 | } 24 | ] 25 | end 26 | 27 | describe "RowFilter.chain" do 28 | test "should apply a V2.RowFilter.Chain to a V2.ReadRowsRequest given a list of V2.RowFilter", 29 | context do 30 | filters = [ 31 | %Google.Bigtable.V2.RowFilter{ 32 | filter: {:cells_per_column_limit_filter, 1} 33 | }, 34 | %Google.Bigtable.V2.RowFilter{ 35 | filter: {:cells_per_column_limit_filter, 2} 36 | } 37 | ] 38 | 39 | expected = filters |> expected_chain() |> expected_request() 40 | 41 | assert RowFilter.chain(context.request, filters) == expected 42 | end 43 | end 44 | 45 | describe "RowFilter.cells_per_column" do 46 | setup do 47 | limit = 1 48 | 49 | [ 50 | limit: limit, 51 | filter: %Google.Bigtable.V2.RowFilter{ 52 | filter: {:cells_per_column_limit_filter, 1} 53 | } 54 | ] 55 | end 56 | 57 | test "should apply a cells_per_column_limit V2.RowFilter to a V2.ReadRowsRequest", context do 58 | expected = expected_request(context.filter) 59 | 60 | assert RowFilter.cells_per_column(context.request, context.limit) == expected 61 | end 62 | 63 | test "should return a cells_per_column_limit V2.RowFilter given an integer", context do 64 | assert RowFilter.cells_per_column(context.limit) == context.filter 65 | end 66 | end 67 | 68 | describe "RowFilter.row_key_regex" do 69 | setup do 70 | regex = "^Test#\w+" 71 | 72 | [ 73 | regex: regex, 74 | filter: %Google.Bigtable.V2.RowFilter{ 75 | filter: {:row_key_regex_filter, regex} 76 | } 77 | ] 78 | end 79 | 80 | test "should apply a row_key_regex V2.RowFilter to a V2.ReadRowsRequest", context do 81 | expected = expected_request(context.filter) 82 | 83 | assert RowFilter.row_key_regex(context.request, context.regex) == expected 84 | end 85 | 86 | test "should return a row_key_regex V2.RowFilter given a column limit", context do 87 | assert RowFilter.row_key_regex(context.regex) == context.filter 88 | end 89 | end 90 | 91 | describe "RowFilter.value_regex" do 92 | setup do 93 | regex = "^test$" 94 | 95 | [ 96 | regex: regex, 97 | filter: %Google.Bigtable.V2.RowFilter{ 98 | filter: {:value_regex_filter, regex} 99 | } 100 | ] 101 | end 102 | 103 | test "should apply a value_regex V2.RowFilter to a V2.ReadRowsRequest", context do 104 | expected = expected_request(context.filter) 105 | 106 | assert RowFilter.value_regex(context.request, context.regex) == expected 107 | end 108 | 109 | test "should return a value_regex V2.RowFilter given a regex", context do 110 | assert RowFilter.value_regex(context.regex) == context.filter 111 | end 112 | end 113 | 114 | describe "RowFilter.family_name_regex" do 115 | setup do 116 | regex = "^familyTest$" 117 | 118 | [ 119 | regex: regex, 120 | filter: %Google.Bigtable.V2.RowFilter{ 121 | filter: {:family_name_regex_filter, regex} 122 | } 123 | ] 124 | end 125 | 126 | test "should apply a family_name_regex V2.RowFilter to a V2.ReadRowsRequest", context do 127 | expected = expected_request(context.filter) 128 | 129 | assert RowFilter.family_name_regex(context.request, context.regex) == expected 130 | end 131 | 132 | test "should return a family_name_regex V2.RowFilter given a regex", context do 133 | assert RowFilter.family_name_regex(context.regex) == context.filter 134 | end 
135 | end 136 | 137 | describe "RowFilter.column_qualifier_regex" do 138 | setup do 139 | regex = "^columnTest$" 140 | 141 | [ 142 | regex: regex, 143 | filter: %Google.Bigtable.V2.RowFilter{ 144 | filter: {:column_qualifier_regex_filter, regex} 145 | } 146 | ] 147 | end 148 | 149 | test "should apply a column_qualifier_regex V2.RowFilter to a V2.ReadRowsRequest", context do 150 | expected = expected_request(context.filter) 151 | 152 | assert RowFilter.column_qualifier_regex(context.request, context.regex) == expected 153 | end 154 | 155 | test "should return a column_qualifier_regex V2.RowFilter given a regex", context do 156 | assert RowFilter.column_qualifier_regex(context.regex) == context.filter 157 | end 158 | end 159 | 160 | describe "RowFilter.column_range" do 161 | setup do 162 | family_name = "cf1" 163 | start_qualifier = "column2" 164 | end_qualifier = "column4" 165 | 166 | [ 167 | family_name: family_name, 168 | inclusive_range: {start_qualifier, end_qualifier}, 169 | inclusive_range_flagged: {start_qualifier, end_qualifier, true}, 170 | exclusive_range: {start_qualifier, end_qualifier, false}, 171 | inclusive_filter: %Google.Bigtable.V2.RowFilter{ 172 | filter: 173 | {:column_range_filter, 174 | %Google.Bigtable.V2.ColumnRange{ 175 | family_name: family_name, 176 | start_qualifier: {:start_qualifier_closed, start_qualifier}, 177 | end_qualifier: {:end_qualifier_closed, end_qualifier} 178 | }} 179 | }, 180 | exclusive_filter: %Google.Bigtable.V2.RowFilter{ 181 | filter: 182 | {:column_range_filter, 183 | %Google.Bigtable.V2.ColumnRange{ 184 | family_name: family_name, 185 | start_qualifier: {:start_qualifier_open, start_qualifier}, 186 | end_qualifier: {:end_qualifier_open, end_qualifier} 187 | }} 188 | } 189 | ] 190 | end 191 | 192 | test "should apply an inclusive column_range V2.RowFilter to a V2.ReadRowsRequest", context do 193 | expected = expected_request(context.inclusive_filter) 194 | 195 | family_name = context.family_name 196 | request = context.request 197 | 198 | with_flag = RowFilter.column_range(request, family_name, context.inclusive_range_flagged) 199 | 200 | without_flag = RowFilter.column_range(request, family_name, context.inclusive_range) 201 | 202 | assert with_flag == expected 203 | assert without_flag == expected 204 | end 205 | 206 | test "should apply an exclusive column_range V2.RowFilter to a V2.ReadRowsRequest", context do 207 | expected = expected_request(context.exclusive_filter) 208 | 209 | result = 210 | RowFilter.column_range(context.request, context.family_name, context.exclusive_range) 211 | 212 | assert result == expected 213 | end 214 | 215 | test "should return an inclusive column_range V2.RowFilter given a range", 216 | context do 217 | expected = context.inclusive_filter 218 | 219 | family_name = context.family_name 220 | 221 | with_flag = RowFilter.column_range(family_name, context.inclusive_range_flagged) 222 | without_flag = RowFilter.column_range(family_name, context.inclusive_range) 223 | 224 | assert with_flag == expected 225 | assert without_flag == expected 226 | end 227 | 228 | test "should return an exclusive column_range V2.RowFilter given a range", context do 229 | expected = context.exclusive_filter 230 | 231 | result = RowFilter.column_range(context.family_name, context.exclusive_range) 232 | 233 | assert result == expected 234 | end 235 | end 236 | 237 | describe "RowFilter.timestamp_range" do 238 | test "should apply a timerange filter V2.RowFilter to a V2.ReadRowsRequest", context do 239 | start_timestamp = 1000 240 | 
end_timestamp = 2000 241 | range = [start_timestamp: start_timestamp, end_timestamp: end_timestamp] 242 | 243 | filter = expected_timestamp_filter(start_timestamp, end_timestamp) 244 | 245 | expected = expected_request(filter) 246 | 247 | result = RowFilter.timestamp_range(context.request, range) 248 | 249 | assert result == expected 250 | end 251 | 252 | test "should return default timestamp range when no timestamps provided" do 253 | expected = expected_timestamp_filter(0, 0) 254 | 255 | assert RowFilter.timestamp_range([]) == expected 256 | end 257 | 258 | test "should return timestamp range with start and end provided" do 259 | start_timestamp = 1000 260 | end_timestamp = 2000 261 | 262 | expected = expected_timestamp_filter(start_timestamp, end_timestamp) 263 | 264 | range = [start_timestamp: start_timestamp, end_timestamp: end_timestamp] 265 | 266 | result = RowFilter.timestamp_range(range) 267 | 268 | assert result == expected 269 | end 270 | end 271 | 272 | defp expected_timestamp_filter(start_timestamp, end_timestamp) do 273 | %Google.Bigtable.V2.RowFilter{ 274 | filter: 275 | {:timestamp_range_filter, 276 | %Google.Bigtable.V2.TimestampRange{ 277 | start_timestamp_micros: start_timestamp, 278 | end_timestamp_micros: end_timestamp 279 | }} 280 | } 281 | end 282 | 283 | defp expected_chain(filters) when is_list(filters) do 284 | %Google.Bigtable.V2.RowFilter{ 285 | filter: 286 | {:chain, 287 | %Google.Bigtable.V2.RowFilter.Chain{ 288 | filters: filters 289 | }} 290 | } 291 | end 292 | 293 | defp expected_request(filter) do 294 | %Google.Bigtable.V2.ReadRowsRequest{ 295 | app_profile_id: "", 296 | filter: filter, 297 | rows: nil, 298 | rows_limit: 0, 299 | table_name: Bigtable.Utils.configured_table_name() 300 | } 301 | end 302 | end 303 | -------------------------------------------------------------------------------- /lib/grpc/data/data.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Bigtable.V2.Row do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | key: String.t(), 7 | families: [Google.Bigtable.V2.Family.t()] 8 | } 9 | defstruct [:key, :families] 10 | 11 | field(:key, 1, type: :bytes) 12 | field(:families, 2, repeated: true, type: Google.Bigtable.V2.Family) 13 | end 14 | 15 | defmodule Google.Bigtable.V2.Family do 16 | @moduledoc false 17 | use Protobuf, syntax: :proto3 18 | 19 | @type t :: %__MODULE__{ 20 | name: String.t(), 21 | columns: [Google.Bigtable.V2.Column.t()] 22 | } 23 | defstruct [:name, :columns] 24 | 25 | field(:name, 1, type: :string) 26 | field(:columns, 2, repeated: true, type: Google.Bigtable.V2.Column) 27 | end 28 | 29 | defmodule Google.Bigtable.V2.Column do 30 | @moduledoc false 31 | use Protobuf, syntax: :proto3 32 | 33 | @type t :: %__MODULE__{ 34 | qualifier: String.t(), 35 | cells: [Google.Bigtable.V2.Cell.t()] 36 | } 37 | defstruct [:qualifier, :cells] 38 | 39 | field(:qualifier, 1, type: :bytes) 40 | field(:cells, 2, repeated: true, type: Google.Bigtable.V2.Cell) 41 | end 42 | 43 | defmodule Google.Bigtable.V2.Cell do 44 | @moduledoc false 45 | use Protobuf, syntax: :proto3 46 | 47 | @type t :: %__MODULE__{ 48 | timestamp_micros: integer, 49 | value: String.t(), 50 | labels: [String.t()] 51 | } 52 | defstruct [:timestamp_micros, :value, :labels] 53 | 54 | field(:timestamp_micros, 1, type: :int64) 55 | field(:value, 2, type: :bytes) 56 | field(:labels, 3, repeated: true, type: :string) 57 | end 58 | 59 | defmodule Google.Bigtable.V2.RowRange do 60 | 
@moduledoc false 61 | use Protobuf, syntax: :proto3 62 | 63 | @type t :: %__MODULE__{ 64 | start_key: {atom, any}, 65 | end_key: {atom, any} 66 | } 67 | defstruct [:start_key, :end_key] 68 | 69 | oneof(:start_key, 0) 70 | oneof(:end_key, 1) 71 | field(:start_key_closed, 1, type: :bytes, oneof: 0) 72 | field(:start_key_open, 2, type: :bytes, oneof: 0) 73 | field(:end_key_open, 3, type: :bytes, oneof: 1) 74 | field(:end_key_closed, 4, type: :bytes, oneof: 1) 75 | end 76 | 77 | defmodule Google.Bigtable.V2.RowSet do 78 | @moduledoc false 79 | use Protobuf, syntax: :proto3 80 | 81 | @type t :: %__MODULE__{ 82 | row_keys: [String.t()], 83 | row_ranges: [Google.Bigtable.V2.RowRange.t()] 84 | } 85 | defstruct [:row_keys, :row_ranges] 86 | 87 | field(:row_keys, 1, repeated: true, type: :bytes) 88 | field(:row_ranges, 2, repeated: true, type: Google.Bigtable.V2.RowRange) 89 | end 90 | 91 | defmodule Google.Bigtable.V2.ColumnRange do 92 | @moduledoc false 93 | use Protobuf, syntax: :proto3 94 | 95 | @type t :: %__MODULE__{ 96 | start_qualifier: {atom, any}, 97 | end_qualifier: {atom, any}, 98 | family_name: String.t() 99 | } 100 | defstruct [:start_qualifier, :end_qualifier, :family_name] 101 | 102 | oneof(:start_qualifier, 0) 103 | oneof(:end_qualifier, 1) 104 | field(:family_name, 1, type: :string) 105 | field(:start_qualifier_closed, 2, type: :bytes, oneof: 0) 106 | field(:start_qualifier_open, 3, type: :bytes, oneof: 0) 107 | field(:end_qualifier_closed, 4, type: :bytes, oneof: 1) 108 | field(:end_qualifier_open, 5, type: :bytes, oneof: 1) 109 | end 110 | 111 | defmodule Google.Bigtable.V2.TimestampRange do 112 | @moduledoc false 113 | use Protobuf, syntax: :proto3 114 | 115 | @type t :: %__MODULE__{ 116 | start_timestamp_micros: integer, 117 | end_timestamp_micros: integer 118 | } 119 | defstruct [:start_timestamp_micros, :end_timestamp_micros] 120 | 121 | field(:start_timestamp_micros, 1, type: :int64) 122 | field(:end_timestamp_micros, 2, type: :int64) 123 | end 124 | 125 | defmodule Google.Bigtable.V2.ValueRange do 126 | @moduledoc false 127 | use Protobuf, syntax: :proto3 128 | 129 | @type t :: %__MODULE__{ 130 | start_value: {atom, any}, 131 | end_value: {atom, any} 132 | } 133 | defstruct [:start_value, :end_value] 134 | 135 | oneof(:start_value, 0) 136 | oneof(:end_value, 1) 137 | field(:start_value_closed, 1, type: :bytes, oneof: 0) 138 | field(:start_value_open, 2, type: :bytes, oneof: 0) 139 | field(:end_value_closed, 3, type: :bytes, oneof: 1) 140 | field(:end_value_open, 4, type: :bytes, oneof: 1) 141 | end 142 | 143 | defmodule Google.Bigtable.V2.RowFilter do 144 | @moduledoc false 145 | use Protobuf, syntax: :proto3 146 | 147 | @type t :: %__MODULE__{ 148 | filter: {atom, any} 149 | } 150 | defstruct [:filter] 151 | 152 | oneof(:filter, 0) 153 | field(:chain, 1, type: Google.Bigtable.V2.RowFilter.Chain, oneof: 0) 154 | field(:interleave, 2, type: Google.Bigtable.V2.RowFilter.Interleave, oneof: 0) 155 | field(:condition, 3, type: Google.Bigtable.V2.RowFilter.Condition, oneof: 0) 156 | field(:sink, 16, type: :bool, oneof: 0) 157 | field(:pass_all_filter, 17, type: :bool, oneof: 0) 158 | field(:block_all_filter, 18, type: :bool, oneof: 0) 159 | field(:row_key_regex_filter, 4, type: :bytes, oneof: 0) 160 | field(:row_sample_filter, 14, type: :double, oneof: 0) 161 | field(:family_name_regex_filter, 5, type: :string, oneof: 0) 162 | field(:column_qualifier_regex_filter, 6, type: :bytes, oneof: 0) 163 | field(:column_range_filter, 7, type: Google.Bigtable.V2.ColumnRange, oneof: 0) 164 | 
field(:timestamp_range_filter, 8, type: Google.Bigtable.V2.TimestampRange, oneof: 0) 165 | field(:value_regex_filter, 9, type: :bytes, oneof: 0) 166 | field(:value_range_filter, 15, type: Google.Bigtable.V2.ValueRange, oneof: 0) 167 | field(:cells_per_row_offset_filter, 10, type: :int32, oneof: 0) 168 | field(:cells_per_row_limit_filter, 11, type: :int32, oneof: 0) 169 | field(:cells_per_column_limit_filter, 12, type: :int32, oneof: 0) 170 | field(:strip_value_transformer, 13, type: :bool, oneof: 0) 171 | field(:apply_label_transformer, 19, type: :string, oneof: 0) 172 | end 173 | 174 | defmodule Google.Bigtable.V2.RowFilter.Chain do 175 | @moduledoc false 176 | use Protobuf, syntax: :proto3 177 | 178 | @type t :: %__MODULE__{ 179 | filters: [Google.Bigtable.V2.RowFilter.t()] 180 | } 181 | defstruct [:filters] 182 | 183 | field(:filters, 1, repeated: true, type: Google.Bigtable.V2.RowFilter) 184 | end 185 | 186 | defmodule Google.Bigtable.V2.RowFilter.Interleave do 187 | @moduledoc false 188 | use Protobuf, syntax: :proto3 189 | 190 | @type t :: %__MODULE__{ 191 | filters: [Google.Bigtable.V2.RowFilter.t()] 192 | } 193 | defstruct [:filters] 194 | 195 | field(:filters, 1, repeated: true, type: Google.Bigtable.V2.RowFilter) 196 | end 197 | 198 | defmodule Google.Bigtable.V2.RowFilter.Condition do 199 | @moduledoc false 200 | use Protobuf, syntax: :proto3 201 | 202 | @type t :: %__MODULE__{ 203 | predicate_filter: Google.Bigtable.V2.RowFilter.t(), 204 | true_filter: Google.Bigtable.V2.RowFilter.t(), 205 | false_filter: Google.Bigtable.V2.RowFilter.t() 206 | } 207 | defstruct [:predicate_filter, :true_filter, :false_filter] 208 | 209 | field(:predicate_filter, 1, type: Google.Bigtable.V2.RowFilter) 210 | field(:true_filter, 2, type: Google.Bigtable.V2.RowFilter) 211 | field(:false_filter, 3, type: Google.Bigtable.V2.RowFilter) 212 | end 213 | 214 | defmodule Google.Bigtable.V2.Mutation do 215 | @moduledoc false 216 | use Protobuf, syntax: :proto3 217 | 218 | @type t :: %__MODULE__{ 219 | mutation: {atom, any} 220 | } 221 | defstruct [:mutation] 222 | 223 | oneof(:mutation, 0) 224 | field(:set_cell, 1, type: Google.Bigtable.V2.Mutation.SetCell, oneof: 0) 225 | field(:delete_from_column, 2, type: Google.Bigtable.V2.Mutation.DeleteFromColumn, oneof: 0) 226 | field(:delete_from_family, 3, type: Google.Bigtable.V2.Mutation.DeleteFromFamily, oneof: 0) 227 | field(:delete_from_row, 4, type: Google.Bigtable.V2.Mutation.DeleteFromRow, oneof: 0) 228 | end 229 | 230 | defmodule Google.Bigtable.V2.Mutation.SetCell do 231 | @moduledoc false 232 | use Protobuf, syntax: :proto3 233 | 234 | @type t :: %__MODULE__{ 235 | family_name: String.t(), 236 | column_qualifier: String.t(), 237 | timestamp_micros: integer, 238 | value: String.t() 239 | } 240 | defstruct [:family_name, :column_qualifier, :timestamp_micros, :value] 241 | 242 | field(:family_name, 1, type: :string) 243 | field(:column_qualifier, 2, type: :bytes) 244 | field(:timestamp_micros, 3, type: :int64) 245 | field(:value, 4, type: :bytes) 246 | end 247 | 248 | defmodule Google.Bigtable.V2.Mutation.DeleteFromColumn do 249 | @moduledoc false 250 | use Protobuf, syntax: :proto3 251 | 252 | @type t :: %__MODULE__{ 253 | family_name: String.t(), 254 | column_qualifier: String.t(), 255 | time_range: Google.Bigtable.V2.TimestampRange.t() 256 | } 257 | defstruct [:family_name, :column_qualifier, :time_range] 258 | 259 | field(:family_name, 1, type: :string) 260 | field(:column_qualifier, 2, type: :bytes) 261 | field(:time_range, 3, type: 
Google.Bigtable.V2.TimestampRange) 262 | end 263 | 264 | defmodule Google.Bigtable.V2.Mutation.DeleteFromFamily do 265 | @moduledoc false 266 | use Protobuf, syntax: :proto3 267 | 268 | @type t :: %__MODULE__{ 269 | family_name: String.t() 270 | } 271 | defstruct [:family_name] 272 | 273 | field(:family_name, 1, type: :string) 274 | end 275 | 276 | defmodule Google.Bigtable.V2.Mutation.DeleteFromRow do 277 | @moduledoc false 278 | use Protobuf, syntax: :proto3 279 | 280 | defstruct [] 281 | end 282 | 283 | defmodule Google.Bigtable.V2.ReadModifyWriteRule do 284 | @moduledoc false 285 | use Protobuf, syntax: :proto3 286 | 287 | @type t :: %__MODULE__{ 288 | rule: {atom, any}, 289 | family_name: String.t(), 290 | column_qualifier: String.t() 291 | } 292 | defstruct [:rule, :family_name, :column_qualifier] 293 | 294 | oneof(:rule, 0) 295 | field(:family_name, 1, type: :string) 296 | field(:column_qualifier, 2, type: :bytes) 297 | field(:append_value, 3, type: :bytes, oneof: 0) 298 | field(:increment_amount, 4, type: :int64, oneof: 0) 299 | end 300 | -------------------------------------------------------------------------------- /lib/grpc/admin/bigtable_table_admin.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Bigtable.Admin.V2.CreateTableRequest do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | parent: String.t(), 7 | table_id: String.t(), 8 | table: Google.Bigtable.Admin.V2.Table.t(), 9 | initial_splits: [Google.Bigtable.Admin.V2.CreateTableRequest.Split.t()] 10 | } 11 | defstruct [:parent, :table_id, :table, :initial_splits] 12 | 13 | field :parent, 1, type: :string 14 | field :table_id, 2, type: :string 15 | field :table, 3, type: Google.Bigtable.Admin.V2.Table 16 | 17 | field :initial_splits, 4, 18 | repeated: true, 19 | type: Google.Bigtable.Admin.V2.CreateTableRequest.Split 20 | end 21 | 22 | defmodule Google.Bigtable.Admin.V2.CreateTableRequest.Split do 23 | @moduledoc false 24 | use Protobuf, syntax: :proto3 25 | 26 | @type t :: %__MODULE__{ 27 | key: String.t() 28 | } 29 | defstruct [:key] 30 | 31 | field :key, 1, type: :bytes 32 | end 33 | 34 | defmodule Google.Bigtable.Admin.V2.CreateTableFromSnapshotRequest do 35 | @moduledoc false 36 | use Protobuf, syntax: :proto3 37 | 38 | @type t :: %__MODULE__{ 39 | parent: String.t(), 40 | table_id: String.t(), 41 | source_snapshot: String.t() 42 | } 43 | defstruct [:parent, :table_id, :source_snapshot] 44 | 45 | field :parent, 1, type: :string 46 | field :table_id, 2, type: :string 47 | field :source_snapshot, 3, type: :string 48 | end 49 | 50 | defmodule Google.Bigtable.Admin.V2.DropRowRangeRequest do 51 | @moduledoc false 52 | use Protobuf, syntax: :proto3 53 | 54 | @type t :: %__MODULE__{ 55 | target: {atom, any}, 56 | name: String.t() 57 | } 58 | defstruct [:target, :name] 59 | 60 | oneof :target, 0 61 | field :name, 1, type: :string 62 | field :row_key_prefix, 2, type: :bytes, oneof: 0 63 | field :delete_all_data_from_table, 3, type: :bool, oneof: 0 64 | end 65 | 66 | defmodule Google.Bigtable.Admin.V2.ListTablesRequest do 67 | @moduledoc false 68 | use Protobuf, syntax: :proto3 69 | 70 | @type t :: %__MODULE__{ 71 | parent: String.t(), 72 | view: integer, 73 | page_size: integer, 74 | page_token: String.t() 75 | } 76 | defstruct [:parent, :view, :page_size, :page_token] 77 | 78 | field :parent, 1, type: :string 79 | field :view, 2, type: Google.Bigtable.Admin.V2.Table.View, enum: true 80 | field :page_size, 4, type: :int32 81 | 
field :page_token, 3, type: :string 82 | end 83 | 84 | defmodule Google.Bigtable.Admin.V2.ListTablesResponse do 85 | @moduledoc false 86 | use Protobuf, syntax: :proto3 87 | 88 | @type t :: %__MODULE__{ 89 | tables: [Google.Bigtable.Admin.V2.Table.t()], 90 | next_page_token: String.t() 91 | } 92 | defstruct [:tables, :next_page_token] 93 | 94 | field :tables, 1, repeated: true, type: Google.Bigtable.Admin.V2.Table 95 | field :next_page_token, 2, type: :string 96 | end 97 | 98 | defmodule Google.Bigtable.Admin.V2.GetTableRequest do 99 | @moduledoc false 100 | use Protobuf, syntax: :proto3 101 | 102 | @type t :: %__MODULE__{ 103 | name: String.t(), 104 | view: integer 105 | } 106 | defstruct [:name, :view] 107 | 108 | field :name, 1, type: :string 109 | field :view, 2, type: Google.Bigtable.Admin.V2.Table.View, enum: true 110 | end 111 | 112 | defmodule Google.Bigtable.Admin.V2.DeleteTableRequest do 113 | @moduledoc false 114 | use Protobuf, syntax: :proto3 115 | 116 | @type t :: %__MODULE__{ 117 | name: String.t() 118 | } 119 | defstruct [:name] 120 | 121 | field :name, 1, type: :string 122 | end 123 | 124 | defmodule Google.Bigtable.Admin.V2.ModifyColumnFamiliesRequest do 125 | @moduledoc false 126 | use Protobuf, syntax: :proto3 127 | 128 | @type t :: %__MODULE__{ 129 | name: String.t(), 130 | modifications: [Google.Bigtable.Admin.V2.ModifyColumnFamiliesRequest.Modification.t()] 131 | } 132 | defstruct [:name, :modifications] 133 | 134 | field :name, 1, type: :string 135 | 136 | field :modifications, 2, 137 | repeated: true, 138 | type: Google.Bigtable.Admin.V2.ModifyColumnFamiliesRequest.Modification 139 | end 140 | 141 | defmodule Google.Bigtable.Admin.V2.ModifyColumnFamiliesRequest.Modification do 142 | @moduledoc false 143 | use Protobuf, syntax: :proto3 144 | 145 | @type t :: %__MODULE__{ 146 | mod: {atom, any}, 147 | id: String.t() 148 | } 149 | defstruct [:mod, :id] 150 | 151 | oneof :mod, 0 152 | field :id, 1, type: :string 153 | field :create, 2, type: Google.Bigtable.Admin.V2.ColumnFamily, oneof: 0 154 | field :update, 3, type: Google.Bigtable.Admin.V2.ColumnFamily, oneof: 0 155 | field :drop, 4, type: :bool, oneof: 0 156 | end 157 | 158 | defmodule Google.Bigtable.Admin.V2.GenerateConsistencyTokenRequest do 159 | @moduledoc false 160 | use Protobuf, syntax: :proto3 161 | 162 | @type t :: %__MODULE__{ 163 | name: String.t() 164 | } 165 | defstruct [:name] 166 | 167 | field :name, 1, type: :string 168 | end 169 | 170 | defmodule Google.Bigtable.Admin.V2.GenerateConsistencyTokenResponse do 171 | @moduledoc false 172 | use Protobuf, syntax: :proto3 173 | 174 | @type t :: %__MODULE__{ 175 | consistency_token: String.t() 176 | } 177 | defstruct [:consistency_token] 178 | 179 | field :consistency_token, 1, type: :string 180 | end 181 | 182 | defmodule Google.Bigtable.Admin.V2.CheckConsistencyRequest do 183 | @moduledoc false 184 | use Protobuf, syntax: :proto3 185 | 186 | @type t :: %__MODULE__{ 187 | name: String.t(), 188 | consistency_token: String.t() 189 | } 190 | defstruct [:name, :consistency_token] 191 | 192 | field :name, 1, type: :string 193 | field :consistency_token, 2, type: :string 194 | end 195 | 196 | defmodule Google.Bigtable.Admin.V2.CheckConsistencyResponse do 197 | @moduledoc false 198 | use Protobuf, syntax: :proto3 199 | 200 | @type t :: %__MODULE__{ 201 | consistent: boolean 202 | } 203 | defstruct [:consistent] 204 | 205 | field :consistent, 1, type: :bool 206 | end 207 | 208 | defmodule Google.Bigtable.Admin.V2.SnapshotTableRequest do 209 | @moduledoc false 210 
| use Protobuf, syntax: :proto3 211 | 212 | @type t :: %__MODULE__{ 213 | name: String.t(), 214 | cluster: String.t(), 215 | snapshot_id: String.t(), 216 | ttl: Google.Protobuf.Duration.t(), 217 | description: String.t() 218 | } 219 | defstruct [:name, :cluster, :snapshot_id, :ttl, :description] 220 | 221 | field :name, 1, type: :string 222 | field :cluster, 2, type: :string 223 | field :snapshot_id, 3, type: :string 224 | field :ttl, 4, type: Google.Protobuf.Duration 225 | field :description, 5, type: :string 226 | end 227 | 228 | defmodule Google.Bigtable.Admin.V2.GetSnapshotRequest do 229 | @moduledoc false 230 | use Protobuf, syntax: :proto3 231 | 232 | @type t :: %__MODULE__{ 233 | name: String.t() 234 | } 235 | defstruct [:name] 236 | 237 | field :name, 1, type: :string 238 | end 239 | 240 | defmodule Google.Bigtable.Admin.V2.ListSnapshotsRequest do 241 | @moduledoc false 242 | use Protobuf, syntax: :proto3 243 | 244 | @type t :: %__MODULE__{ 245 | parent: String.t(), 246 | page_size: integer, 247 | page_token: String.t() 248 | } 249 | defstruct [:parent, :page_size, :page_token] 250 | 251 | field :parent, 1, type: :string 252 | field :page_size, 2, type: :int32 253 | field :page_token, 3, type: :string 254 | end 255 | 256 | defmodule Google.Bigtable.Admin.V2.ListSnapshotsResponse do 257 | @moduledoc false 258 | use Protobuf, syntax: :proto3 259 | 260 | @type t :: %__MODULE__{ 261 | snapshots: [Google.Bigtable.Admin.V2.Snapshot.t()], 262 | next_page_token: String.t() 263 | } 264 | defstruct [:snapshots, :next_page_token] 265 | 266 | field :snapshots, 1, repeated: true, type: Google.Bigtable.Admin.V2.Snapshot 267 | field :next_page_token, 2, type: :string 268 | end 269 | 270 | defmodule Google.Bigtable.Admin.V2.DeleteSnapshotRequest do 271 | @moduledoc false 272 | use Protobuf, syntax: :proto3 273 | 274 | @type t :: %__MODULE__{ 275 | name: String.t() 276 | } 277 | defstruct [:name] 278 | 279 | field :name, 1, type: :string 280 | end 281 | 282 | defmodule Google.Bigtable.Admin.V2.SnapshotTableMetadata do 283 | @moduledoc false 284 | use Protobuf, syntax: :proto3 285 | 286 | @type t :: %__MODULE__{ 287 | original_request: Google.Bigtable.Admin.V2.SnapshotTableRequest.t(), 288 | request_time: Google.Protobuf.Timestamp.t(), 289 | finish_time: Google.Protobuf.Timestamp.t() 290 | } 291 | defstruct [:original_request, :request_time, :finish_time] 292 | 293 | field :original_request, 1, type: Google.Bigtable.Admin.V2.SnapshotTableRequest 294 | field :request_time, 2, type: Google.Protobuf.Timestamp 295 | field :finish_time, 3, type: Google.Protobuf.Timestamp 296 | end 297 | 298 | defmodule Google.Bigtable.Admin.V2.CreateTableFromSnapshotMetadata do 299 | @moduledoc false 300 | use Protobuf, syntax: :proto3 301 | 302 | @type t :: %__MODULE__{ 303 | original_request: Google.Bigtable.Admin.V2.CreateTableFromSnapshotRequest.t(), 304 | request_time: Google.Protobuf.Timestamp.t(), 305 | finish_time: Google.Protobuf.Timestamp.t() 306 | } 307 | defstruct [:original_request, :request_time, :finish_time] 308 | 309 | field :original_request, 1, type: Google.Bigtable.Admin.V2.CreateTableFromSnapshotRequest 310 | field :request_time, 2, type: Google.Protobuf.Timestamp 311 | field :finish_time, 3, type: Google.Protobuf.Timestamp 312 | end 313 | 314 | defmodule Google.Bigtable.Admin.V2.BigtableTableAdmin.Service do 315 | @moduledoc false 316 | use GRPC.Service, name: "google.bigtable.admin.v2.BigtableTableAdmin" 317 | 318 | rpc :CreateTable, Google.Bigtable.Admin.V2.CreateTableRequest, 
Google.Bigtable.Admin.V2.Table 319 | 320 | rpc :CreateTableFromSnapshot, 321 | Google.Bigtable.Admin.V2.CreateTableFromSnapshotRequest, 322 | Google.Longrunning.Operation 323 | 324 | rpc :ListTables, 325 | Google.Bigtable.Admin.V2.ListTablesRequest, 326 | Google.Bigtable.Admin.V2.ListTablesResponse 327 | 328 | rpc :GetTable, Google.Bigtable.Admin.V2.GetTableRequest, Google.Bigtable.Admin.V2.Table 329 | rpc :DeleteTable, Google.Bigtable.Admin.V2.DeleteTableRequest, Google.Protobuf.Empty 330 | 331 | rpc :ModifyColumnFamilies, 332 | Google.Bigtable.Admin.V2.ModifyColumnFamiliesRequest, 333 | Google.Bigtable.Admin.V2.Table 334 | 335 | rpc :DropRowRange, Google.Bigtable.Admin.V2.DropRowRangeRequest, Google.Protobuf.Empty 336 | 337 | rpc :GenerateConsistencyToken, 338 | Google.Bigtable.Admin.V2.GenerateConsistencyTokenRequest, 339 | Google.Bigtable.Admin.V2.GenerateConsistencyTokenResponse 340 | 341 | rpc :CheckConsistency, 342 | Google.Bigtable.Admin.V2.CheckConsistencyRequest, 343 | Google.Bigtable.Admin.V2.CheckConsistencyResponse 344 | 345 | rpc :SnapshotTable, Google.Bigtable.Admin.V2.SnapshotTableRequest, Google.Longrunning.Operation 346 | rpc :GetSnapshot, Google.Bigtable.Admin.V2.GetSnapshotRequest, Google.Bigtable.Admin.V2.Snapshot 347 | 348 | rpc :ListSnapshots, 349 | Google.Bigtable.Admin.V2.ListSnapshotsRequest, 350 | Google.Bigtable.Admin.V2.ListSnapshotsResponse 351 | 352 | rpc :DeleteSnapshot, Google.Bigtable.Admin.V2.DeleteSnapshotRequest, Google.Protobuf.Empty 353 | end 354 | 355 | defmodule Google.Bigtable.Admin.V2.BigtableTableAdmin.Stub do 356 | @moduledoc false 357 | use GRPC.Stub, service: Google.Bigtable.Admin.V2.BigtableTableAdmin.Service 358 | end 359 | -------------------------------------------------------------------------------- /lib/grpc/admin/bigtable_instance_admin.pb.ex: -------------------------------------------------------------------------------- 1 | defmodule Google.Bigtable.Admin.V2.CreateInstanceRequest do 2 | @moduledoc false 3 | use Protobuf, syntax: :proto3 4 | 5 | @type t :: %__MODULE__{ 6 | parent: String.t(), 7 | instance_id: String.t(), 8 | instance: Google.Bigtable.Admin.V2.Instance.t(), 9 | clusters: %{String.t() => Google.Bigtable.Admin.V2.Cluster.t()} 10 | } 11 | defstruct [:parent, :instance_id, :instance, :clusters] 12 | 13 | field :parent, 1, type: :string 14 | field :instance_id, 2, type: :string 15 | field :instance, 3, type: Google.Bigtable.Admin.V2.Instance 16 | 17 | field :clusters, 4, 18 | repeated: true, 19 | type: Google.Bigtable.Admin.V2.CreateInstanceRequest.ClustersEntry, 20 | map: true 21 | end 22 | 23 | defmodule Google.Bigtable.Admin.V2.CreateInstanceRequest.ClustersEntry do 24 | @moduledoc false 25 | use Protobuf, map: true, syntax: :proto3 26 | 27 | @type t :: %__MODULE__{ 28 | key: String.t(), 29 | value: Google.Bigtable.Admin.V2.Cluster.t() 30 | } 31 | defstruct [:key, :value] 32 | 33 | field :key, 1, type: :string 34 | field :value, 2, type: Google.Bigtable.Admin.V2.Cluster 35 | end 36 | 37 | defmodule Google.Bigtable.Admin.V2.GetInstanceRequest do 38 | @moduledoc false 39 | use Protobuf, syntax: :proto3 40 | 41 | @type t :: %__MODULE__{ 42 | name: String.t() 43 | } 44 | defstruct [:name] 45 | 46 | field :name, 1, type: :string 47 | end 48 | 49 | defmodule Google.Bigtable.Admin.V2.ListInstancesRequest do 50 | @moduledoc false 51 | use Protobuf, syntax: :proto3 52 | 53 | @type t :: %__MODULE__{ 54 | parent: String.t(), 55 | page_token: String.t() 56 | } 57 | defstruct [:parent, :page_token] 58 | 59 | field :parent, 1, type: 
:string 60 | field :page_token, 2, type: :string 61 | end 62 | 63 | defmodule Google.Bigtable.Admin.V2.ListInstancesResponse do 64 | @moduledoc false 65 | use Protobuf, syntax: :proto3 66 | 67 | @type t :: %__MODULE__{ 68 | instances: [Google.Bigtable.Admin.V2.Instance.t()], 69 | failed_locations: [String.t()], 70 | next_page_token: String.t() 71 | } 72 | defstruct [:instances, :failed_locations, :next_page_token] 73 | 74 | field :instances, 1, repeated: true, type: Google.Bigtable.Admin.V2.Instance 75 | field :failed_locations, 2, repeated: true, type: :string 76 | field :next_page_token, 3, type: :string 77 | end 78 | 79 | defmodule Google.Bigtable.Admin.V2.PartialUpdateInstanceRequest do 80 | @moduledoc false 81 | use Protobuf, syntax: :proto3 82 | 83 | @type t :: %__MODULE__{ 84 | instance: Google.Bigtable.Admin.V2.Instance.t(), 85 | update_mask: Google.Protobuf.FieldMask.t() 86 | } 87 | defstruct [:instance, :update_mask] 88 | 89 | field :instance, 1, type: Google.Bigtable.Admin.V2.Instance 90 | field :update_mask, 2, type: Google.Protobuf.FieldMask 91 | end 92 | 93 | defmodule Google.Bigtable.Admin.V2.DeleteInstanceRequest do 94 | @moduledoc false 95 | use Protobuf, syntax: :proto3 96 | 97 | @type t :: %__MODULE__{ 98 | name: String.t() 99 | } 100 | defstruct [:name] 101 | 102 | field :name, 1, type: :string 103 | end 104 | 105 | defmodule Google.Bigtable.Admin.V2.CreateClusterRequest do 106 | @moduledoc false 107 | use Protobuf, syntax: :proto3 108 | 109 | @type t :: %__MODULE__{ 110 | parent: String.t(), 111 | cluster_id: String.t(), 112 | cluster: Google.Bigtable.Admin.V2.Cluster.t() 113 | } 114 | defstruct [:parent, :cluster_id, :cluster] 115 | 116 | field :parent, 1, type: :string 117 | field :cluster_id, 2, type: :string 118 | field :cluster, 3, type: Google.Bigtable.Admin.V2.Cluster 119 | end 120 | 121 | defmodule Google.Bigtable.Admin.V2.GetClusterRequest do 122 | @moduledoc false 123 | use Protobuf, syntax: :proto3 124 | 125 | @type t :: %__MODULE__{ 126 | name: String.t() 127 | } 128 | defstruct [:name] 129 | 130 | field :name, 1, type: :string 131 | end 132 | 133 | defmodule Google.Bigtable.Admin.V2.ListClustersRequest do 134 | @moduledoc false 135 | use Protobuf, syntax: :proto3 136 | 137 | @type t :: %__MODULE__{ 138 | parent: String.t(), 139 | page_token: String.t() 140 | } 141 | defstruct [:parent, :page_token] 142 | 143 | field :parent, 1, type: :string 144 | field :page_token, 2, type: :string 145 | end 146 | 147 | defmodule Google.Bigtable.Admin.V2.ListClustersResponse do 148 | @moduledoc false 149 | use Protobuf, syntax: :proto3 150 | 151 | @type t :: %__MODULE__{ 152 | clusters: [Google.Bigtable.Admin.V2.Cluster.t()], 153 | failed_locations: [String.t()], 154 | next_page_token: String.t() 155 | } 156 | defstruct [:clusters, :failed_locations, :next_page_token] 157 | 158 | field :clusters, 1, repeated: true, type: Google.Bigtable.Admin.V2.Cluster 159 | field :failed_locations, 2, repeated: true, type: :string 160 | field :next_page_token, 3, type: :string 161 | end 162 | 163 | defmodule Google.Bigtable.Admin.V2.DeleteClusterRequest do 164 | @moduledoc false 165 | use Protobuf, syntax: :proto3 166 | 167 | @type t :: %__MODULE__{ 168 | name: String.t() 169 | } 170 | defstruct [:name] 171 | 172 | field :name, 1, type: :string 173 | end 174 | 175 | defmodule Google.Bigtable.Admin.V2.CreateInstanceMetadata do 176 | @moduledoc false 177 | use Protobuf, syntax: :proto3 178 | 179 | @type t :: %__MODULE__{ 180 | original_request: 
Google.Bigtable.Admin.V2.CreateInstanceRequest.t(), 181 | request_time: Google.Protobuf.Timestamp.t(), 182 | finish_time: Google.Protobuf.Timestamp.t() 183 | } 184 | defstruct [:original_request, :request_time, :finish_time] 185 | 186 | field :original_request, 1, type: Google.Bigtable.Admin.V2.CreateInstanceRequest 187 | field :request_time, 2, type: Google.Protobuf.Timestamp 188 | field :finish_time, 3, type: Google.Protobuf.Timestamp 189 | end 190 | 191 | defmodule Google.Bigtable.Admin.V2.UpdateInstanceMetadata do 192 | @moduledoc false 193 | use Protobuf, syntax: :proto3 194 | 195 | @type t :: %__MODULE__{ 196 | original_request: Google.Bigtable.Admin.V2.PartialUpdateInstanceRequest.t(), 197 | request_time: Google.Protobuf.Timestamp.t(), 198 | finish_time: Google.Protobuf.Timestamp.t() 199 | } 200 | defstruct [:original_request, :request_time, :finish_time] 201 | 202 | field :original_request, 1, type: Google.Bigtable.Admin.V2.PartialUpdateInstanceRequest 203 | field :request_time, 2, type: Google.Protobuf.Timestamp 204 | field :finish_time, 3, type: Google.Protobuf.Timestamp 205 | end 206 | 207 | defmodule Google.Bigtable.Admin.V2.CreateClusterMetadata do 208 | @moduledoc false 209 | use Protobuf, syntax: :proto3 210 | 211 | @type t :: %__MODULE__{ 212 | original_request: Google.Bigtable.Admin.V2.CreateClusterRequest.t(), 213 | request_time: Google.Protobuf.Timestamp.t(), 214 | finish_time: Google.Protobuf.Timestamp.t() 215 | } 216 | defstruct [:original_request, :request_time, :finish_time] 217 | 218 | field :original_request, 1, type: Google.Bigtable.Admin.V2.CreateClusterRequest 219 | field :request_time, 2, type: Google.Protobuf.Timestamp 220 | field :finish_time, 3, type: Google.Protobuf.Timestamp 221 | end 222 | 223 | defmodule Google.Bigtable.Admin.V2.UpdateClusterMetadata do 224 | @moduledoc false 225 | use Protobuf, syntax: :proto3 226 | 227 | @type t :: %__MODULE__{ 228 | original_request: Google.Bigtable.Admin.V2.Cluster.t(), 229 | request_time: Google.Protobuf.Timestamp.t(), 230 | finish_time: Google.Protobuf.Timestamp.t() 231 | } 232 | defstruct [:original_request, :request_time, :finish_time] 233 | 234 | field :original_request, 1, type: Google.Bigtable.Admin.V2.Cluster 235 | field :request_time, 2, type: Google.Protobuf.Timestamp 236 | field :finish_time, 3, type: Google.Protobuf.Timestamp 237 | end 238 | 239 | defmodule Google.Bigtable.Admin.V2.CreateAppProfileRequest do 240 | @moduledoc false 241 | use Protobuf, syntax: :proto3 242 | 243 | @type t :: %__MODULE__{ 244 | parent: String.t(), 245 | app_profile_id: String.t(), 246 | app_profile: Google.Bigtable.Admin.V2.AppProfile.t(), 247 | ignore_warnings: boolean 248 | } 249 | defstruct [:parent, :app_profile_id, :app_profile, :ignore_warnings] 250 | 251 | field :parent, 1, type: :string 252 | field :app_profile_id, 2, type: :string 253 | field :app_profile, 3, type: Google.Bigtable.Admin.V2.AppProfile 254 | field :ignore_warnings, 4, type: :bool 255 | end 256 | 257 | defmodule Google.Bigtable.Admin.V2.GetAppProfileRequest do 258 | @moduledoc false 259 | use Protobuf, syntax: :proto3 260 | 261 | @type t :: %__MODULE__{ 262 | name: String.t() 263 | } 264 | defstruct [:name] 265 | 266 | field :name, 1, type: :string 267 | end 268 | 269 | defmodule Google.Bigtable.Admin.V2.ListAppProfilesRequest do 270 | @moduledoc false 271 | use Protobuf, syntax: :proto3 272 | 273 | @type t :: %__MODULE__{ 274 | parent: String.t(), 275 | page_size: integer, 276 | page_token: String.t() 277 | } 278 | defstruct [:parent, :page_size, 
:page_token] 279 | 280 | field :parent, 1, type: :string 281 | field :page_size, 3, type: :int32 282 | field :page_token, 2, type: :string 283 | end 284 | 285 | defmodule Google.Bigtable.Admin.V2.ListAppProfilesResponse do 286 | @moduledoc false 287 | use Protobuf, syntax: :proto3 288 | 289 | @type t :: %__MODULE__{ 290 | app_profiles: [Google.Bigtable.Admin.V2.AppProfile.t()], 291 | next_page_token: String.t(), 292 | failed_locations: [String.t()] 293 | } 294 | defstruct [:app_profiles, :next_page_token, :failed_locations] 295 | 296 | field :app_profiles, 1, repeated: true, type: Google.Bigtable.Admin.V2.AppProfile 297 | field :next_page_token, 2, type: :string 298 | field :failed_locations, 3, repeated: true, type: :string 299 | end 300 | 301 | defmodule Google.Bigtable.Admin.V2.UpdateAppProfileRequest do 302 | @moduledoc false 303 | use Protobuf, syntax: :proto3 304 | 305 | @type t :: %__MODULE__{ 306 | app_profile: Google.Bigtable.Admin.V2.AppProfile.t(), 307 | update_mask: Google.Protobuf.FieldMask.t(), 308 | ignore_warnings: boolean 309 | } 310 | defstruct [:app_profile, :update_mask, :ignore_warnings] 311 | 312 | field :app_profile, 1, type: Google.Bigtable.Admin.V2.AppProfile 313 | field :update_mask, 2, type: Google.Protobuf.FieldMask 314 | field :ignore_warnings, 3, type: :bool 315 | end 316 | 317 | defmodule Google.Bigtable.Admin.V2.DeleteAppProfileRequest do 318 | @moduledoc false 319 | use Protobuf, syntax: :proto3 320 | 321 | @type t :: %__MODULE__{ 322 | name: String.t(), 323 | ignore_warnings: boolean 324 | } 325 | defstruct [:name, :ignore_warnings] 326 | 327 | field :name, 1, type: :string 328 | field :ignore_warnings, 2, type: :bool 329 | end 330 | 331 | defmodule Google.Bigtable.Admin.V2.UpdateAppProfileMetadata do 332 | @moduledoc false 333 | use Protobuf, syntax: :proto3 334 | 335 | defstruct [] 336 | end 337 | 338 | defmodule Google.Bigtable.Admin.V2.BigtableInstanceAdmin.Service do 339 | @moduledoc false 340 | use GRPC.Service, name: "google.bigtable.admin.v2.BigtableInstanceAdmin" 341 | 342 | rpc :CreateInstance, 343 | Google.Bigtable.Admin.V2.CreateInstanceRequest, 344 | Google.Longrunning.Operation 345 | 346 | rpc :GetInstance, Google.Bigtable.Admin.V2.GetInstanceRequest, Google.Bigtable.Admin.V2.Instance 347 | 348 | rpc :ListInstances, 349 | Google.Bigtable.Admin.V2.ListInstancesRequest, 350 | Google.Bigtable.Admin.V2.ListInstancesResponse 351 | 352 | rpc :UpdateInstance, Google.Bigtable.Admin.V2.Instance, Google.Bigtable.Admin.V2.Instance 353 | 354 | rpc :PartialUpdateInstance, 355 | Google.Bigtable.Admin.V2.PartialUpdateInstanceRequest, 356 | Google.Longrunning.Operation 357 | 358 | rpc :DeleteInstance, Google.Bigtable.Admin.V2.DeleteInstanceRequest, Google.Protobuf.Empty 359 | rpc :CreateCluster, Google.Bigtable.Admin.V2.CreateClusterRequest, Google.Longrunning.Operation 360 | rpc :GetCluster, Google.Bigtable.Admin.V2.GetClusterRequest, Google.Bigtable.Admin.V2.Cluster 361 | 362 | rpc :ListClusters, 363 | Google.Bigtable.Admin.V2.ListClustersRequest, 364 | Google.Bigtable.Admin.V2.ListClustersResponse 365 | 366 | rpc :UpdateCluster, Google.Bigtable.Admin.V2.Cluster, Google.Longrunning.Operation 367 | rpc :DeleteCluster, Google.Bigtable.Admin.V2.DeleteClusterRequest, Google.Protobuf.Empty 368 | 369 | rpc :CreateAppProfile, 370 | Google.Bigtable.Admin.V2.CreateAppProfileRequest, 371 | Google.Bigtable.Admin.V2.AppProfile 372 | 373 | rpc :GetAppProfile, 374 | Google.Bigtable.Admin.V2.GetAppProfileRequest, 375 | Google.Bigtable.Admin.V2.AppProfile 376 | 377 | rpc 
:ListAppProfiles, 378 | Google.Bigtable.Admin.V2.ListAppProfilesRequest, 379 | Google.Bigtable.Admin.V2.ListAppProfilesResponse 380 | 381 | rpc :UpdateAppProfile, 382 | Google.Bigtable.Admin.V2.UpdateAppProfileRequest, 383 | Google.Longrunning.Operation 384 | 385 | rpc :DeleteAppProfile, Google.Bigtable.Admin.V2.DeleteAppProfileRequest, Google.Protobuf.Empty 386 | rpc :GetIamPolicy, Google.Iam.V1.GetIamPolicyRequest, Google.Iam.V1.Policy 387 | rpc :SetIamPolicy, Google.Iam.V1.SetIamPolicyRequest, Google.Iam.V1.Policy 388 | 389 | rpc :TestIamPermissions, 390 | Google.Iam.V1.TestIamPermissionsRequest, 391 | Google.Iam.V1.TestIamPermissionsResponse 392 | end 393 | 394 | defmodule Google.Bigtable.Admin.V2.BigtableInstanceAdmin.Stub do 395 | @moduledoc false 396 | use GRPC.Stub, service: Google.Bigtable.Admin.V2.BigtableInstanceAdmin.Service 397 | end 398 | -------------------------------------------------------------------------------- /test/data/check_and_mutate_row_test.exs: -------------------------------------------------------------------------------- 1 | defmodule CheckAndMutateRowTest do 2 | @moduledoc false 3 | alias Bigtable.{CheckAndMutateRow, ChunkReader, MutateRow, Mutations, ReadRows, RowFilter} 4 | alias ChunkReader.ReadCell 5 | 6 | use ExUnit.Case 7 | 8 | doctest CheckAndMutateRow 9 | 10 | setup do 11 | assert ReadRows.read() == {:ok, %{}} 12 | 13 | row_key = "Test#123" 14 | qualifier = "column" 15 | 16 | {:ok, _} = 17 | row_key 18 | |> Mutations.build() 19 | |> Mutations.set_cell("cf1", qualifier, "value", 0) 20 | |> MutateRow.build() 21 | |> MutateRow.mutate() 22 | 23 | on_exit(fn -> 24 | mutation = row_key |> Mutations.build() |> Mutations.delete_from_row() 25 | 26 | mutation |> MutateRow.mutate() 27 | end) 28 | 29 | [ 30 | qualifier: qualifier, 31 | row_key: row_key 32 | ] 33 | end 34 | 35 | describe "CheckAndMutateRow.mutate/2" do 36 | test "should apply a single true mutation when no predicate set and row exists", context do 37 | mutation = 38 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "truthy", "true", 0) 39 | 40 | {:ok, _result} = 41 | context.row_key 42 | |> CheckAndMutateRow.build() 43 | |> CheckAndMutateRow.if_true(mutation) 44 | |> CheckAndMutateRow.mutate() 45 | 46 | expected = 47 | {:ok, 48 | %{ 49 | context.row_key => [ 50 | %ReadCell{ 51 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 52 | label: "", 53 | qualifier: %Google.Protobuf.BytesValue{value: "truthy"}, 54 | row_key: context.row_key, 55 | timestamp: 0, 56 | value: "true" 57 | }, 58 | %ReadCell{ 59 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 60 | label: "", 61 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 62 | row_key: context.row_key, 63 | timestamp: 0, 64 | value: "value" 65 | } 66 | ] 67 | }} 68 | 69 | assert ReadRows.read() == expected 70 | end 71 | 72 | test "should apply a multiple true mutation when no predicate set and row exists", context do 73 | mutation1 = 74 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "truthy", "true", 0) 75 | 76 | mutation2 = 77 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "alsoTruthy", "true", 0) 78 | 79 | {:ok, _result} = 80 | context.row_key 81 | |> CheckAndMutateRow.build() 82 | |> CheckAndMutateRow.if_true([mutation1, mutation2]) 83 | |> CheckAndMutateRow.mutate() 84 | 85 | expected = 86 | {:ok, 87 | %{ 88 | context.row_key => [ 89 | %ReadCell{ 90 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 91 | label: "", 92 | qualifier: 
%Google.Protobuf.BytesValue{value: "truthy"}, 93 | row_key: context.row_key, 94 | timestamp: 0, 95 | value: "true" 96 | }, 97 | %ReadCell{ 98 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 99 | label: "", 100 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 101 | row_key: context.row_key, 102 | timestamp: 0, 103 | value: "value" 104 | }, 105 | %ReadCell{ 106 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 107 | label: "", 108 | qualifier: %Google.Protobuf.BytesValue{value: "alsoTruthy"}, 109 | row_key: context.row_key, 110 | timestamp: 0, 111 | value: "true" 112 | } 113 | ] 114 | }} 115 | 116 | assert ReadRows.read() == expected 117 | end 118 | 119 | test "should not apply a true mutation when no predicate set and row does not exist", 120 | context do 121 | mutation = 122 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "truthy", "true", 0) 123 | 124 | {:ok, _result} = 125 | CheckAndMutateRow.build("Doesnt#Exist") 126 | |> CheckAndMutateRow.if_true(mutation) 127 | |> CheckAndMutateRow.mutate() 128 | 129 | expected = 130 | {:ok, 131 | %{ 132 | context.row_key => [ 133 | %ReadCell{ 134 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 135 | label: "", 136 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 137 | row_key: context.row_key, 138 | timestamp: 0, 139 | value: "value" 140 | } 141 | ] 142 | }} 143 | 144 | assert ReadRows.read() == expected 145 | end 146 | 147 | test "should apply a single true mutation when predicate true", context do 148 | filter = RowFilter.column_qualifier_regex(context.qualifier) 149 | 150 | mutation = 151 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "truthy", "true", 0) 152 | 153 | {:ok, _result} = 154 | context.row_key 155 | |> CheckAndMutateRow.build() 156 | |> CheckAndMutateRow.predicate(filter) 157 | |> CheckAndMutateRow.if_true(mutation) 158 | |> CheckAndMutateRow.mutate() 159 | 160 | expected = 161 | {:ok, 162 | %{ 163 | context.row_key => [ 164 | %ReadCell{ 165 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 166 | label: "", 167 | qualifier: %Google.Protobuf.BytesValue{value: "truthy"}, 168 | row_key: context.row_key, 169 | timestamp: 0, 170 | value: "true" 171 | }, 172 | %ReadCell{ 173 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 174 | label: "", 175 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 176 | row_key: context.row_key, 177 | timestamp: 0, 178 | value: "value" 179 | } 180 | ] 181 | }} 182 | 183 | assert ReadRows.read() == expected 184 | end 185 | 186 | test "should apply a multiple true mutation when predicate true", context do 187 | filter = RowFilter.column_qualifier_regex(context.qualifier) 188 | 189 | mutation1 = 190 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "truthy", "true", 0) 191 | 192 | mutation2 = 193 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "alsoTruthy", "true", 0) 194 | 195 | {:ok, _result} = 196 | context.row_key 197 | |> CheckAndMutateRow.build() 198 | |> CheckAndMutateRow.predicate(filter) 199 | |> CheckAndMutateRow.if_true([mutation1, mutation2]) 200 | |> CheckAndMutateRow.mutate() 201 | 202 | expected = 203 | {:ok, 204 | %{ 205 | context.row_key => [ 206 | %ReadCell{ 207 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 208 | label: "", 209 | qualifier: %Google.Protobuf.BytesValue{value: "truthy"}, 210 | row_key: context.row_key, 211 | timestamp: 0, 212 | value: "true" 213 | }, 214 | %ReadCell{ 215 | family_name: 
%Google.Protobuf.StringValue{value: "cf1"}, 216 | label: "", 217 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 218 | row_key: context.row_key, 219 | timestamp: 0, 220 | value: "value" 221 | }, 222 | %ReadCell{ 223 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 224 | label: "", 225 | qualifier: %Google.Protobuf.BytesValue{value: "alsoTruthy"}, 226 | row_key: context.row_key, 227 | timestamp: 0, 228 | value: "true" 229 | } 230 | ] 231 | }} 232 | 233 | assert ReadRows.read() == expected 234 | end 235 | 236 | test "should not apply a true mutation when predicate is false", context do 237 | filter = RowFilter.column_qualifier_regex("doesntexist") 238 | 239 | mutation = 240 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "truthy", "true", 0) 241 | 242 | {:ok, _result} = 243 | context.row_key 244 | |> CheckAndMutateRow.build() 245 | |> CheckAndMutateRow.predicate(filter) 246 | |> CheckAndMutateRow.if_true(mutation) 247 | |> CheckAndMutateRow.mutate() 248 | 249 | expected = 250 | {:ok, 251 | %{ 252 | context.row_key => [ 253 | %ReadCell{ 254 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 255 | label: "", 256 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 257 | row_key: context.row_key, 258 | timestamp: 0, 259 | value: "value" 260 | } 261 | ] 262 | }} 263 | 264 | assert ReadRows.read() == expected 265 | end 266 | 267 | test "should apply a single false mutation when predicate false", context do 268 | filter = RowFilter.column_qualifier_regex("doesntexist") 269 | 270 | mutation = 271 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "false", "false", 0) 272 | 273 | {:ok, _result} = 274 | context.row_key 275 | |> CheckAndMutateRow.build() 276 | |> CheckAndMutateRow.predicate(filter) 277 | |> CheckAndMutateRow.if_false(mutation) 278 | |> CheckAndMutateRow.mutate() 279 | 280 | expected = 281 | {:ok, 282 | %{ 283 | context.row_key => [ 284 | %ReadCell{ 285 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 286 | label: "", 287 | qualifier: %Google.Protobuf.BytesValue{value: "false"}, 288 | row_key: context.row_key, 289 | timestamp: 0, 290 | value: "false" 291 | }, 292 | %ReadCell{ 293 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 294 | label: "", 295 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 296 | row_key: context.row_key, 297 | timestamp: 0, 298 | value: "value" 299 | } 300 | ] 301 | }} 302 | 303 | assert ReadRows.read() == expected 304 | end 305 | 306 | test "should apply multiple false mutations when predicate false", context do 307 | filter = RowFilter.column_qualifier_regex("doesntexist") 308 | 309 | mutation1 = 310 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "false", "false", 0) 311 | 312 | mutation2 = 313 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "false2", "false2", 0) 314 | 315 | {:ok, _result} = 316 | context.row_key 317 | |> CheckAndMutateRow.build() 318 | |> CheckAndMutateRow.predicate(filter) 319 | |> CheckAndMutateRow.if_false([mutation1, mutation2]) 320 | |> CheckAndMutateRow.mutate() 321 | 322 | expected = 323 | {:ok, 324 | %{ 325 | context.row_key => [ 326 | %ReadCell{ 327 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 328 | label: "", 329 | qualifier: %Google.Protobuf.BytesValue{value: "false2"}, 330 | row_key: context.row_key, 331 | timestamp: 0, 332 | value: "false2" 333 | }, 334 | %ReadCell{ 335 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 336 | label: "", 337 
| qualifier: %Google.Protobuf.BytesValue{value: "false"}, 338 | row_key: context.row_key, 339 | timestamp: 0, 340 | value: "false" 341 | }, 342 | %ReadCell{ 343 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 344 | label: "", 345 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 346 | row_key: context.row_key, 347 | timestamp: 0, 348 | value: "value" 349 | } 350 | ] 351 | }} 352 | 353 | assert ReadRows.read() == expected 354 | end 355 | 356 | test "should not apply a false mutation when predicate is true", context do 357 | filter = RowFilter.column_qualifier_regex(context.qualifier) 358 | 359 | mutation = 360 | context.row_key |> Mutations.build() |> Mutations.set_cell("cf1", "false", "false", 0) 361 | 362 | {:ok, _result} = 363 | context.row_key 364 | |> CheckAndMutateRow.build() 365 | |> CheckAndMutateRow.predicate(filter) 366 | |> CheckAndMutateRow.if_false(mutation) 367 | |> CheckAndMutateRow.mutate() 368 | 369 | expected = 370 | {:ok, 371 | %{ 372 | context.row_key => [ 373 | %ReadCell{ 374 | family_name: %Google.Protobuf.StringValue{value: "cf1"}, 375 | label: "", 376 | qualifier: %Google.Protobuf.BytesValue{value: context.qualifier}, 377 | row_key: context.row_key, 378 | timestamp: 0, 379 | value: "value" 380 | } 381 | ] 382 | }} 383 | 384 | assert ReadRows.read() == expected 385 | end 386 | end 387 | end 388 | -------------------------------------------------------------------------------- /lib/grpc/protos/bigtable.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Google Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | syntax = "proto3"; 16 | 17 | package google.bigtable.v2; 18 | 19 | import "google/api/annotations.proto"; 20 | import "google/bigtable/v2/data.proto"; 21 | import "google/protobuf/wrappers.proto"; 22 | import "google/rpc/status.proto"; 23 | 24 | option csharp_namespace = "Google.Cloud.Bigtable.V2"; 25 | option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable"; 26 | option java_multiple_files = true; 27 | option java_outer_classname = "BigtableProto"; 28 | option java_package = "com.google.bigtable.v2"; 29 | option php_namespace = "Google\\Cloud\\Bigtable\\V2"; 30 | 31 | 32 | // Service for reading from and writing to existing Bigtable tables. 33 | service Bigtable { 34 | // Streams back the contents of all requested rows in key order, optionally 35 | // applying the same Reader filter to each. Depending on their size, 36 | // rows and cells may be broken up across multiple responses, but 37 | // atomicity of each row will still be preserved. See the 38 | // ReadRowsResponse documentation for details. 39 | rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { 40 | option (google.api.http) = { 41 | post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" 42 | body: "*" 43 | }; 44 | } 45 | 46 | // Returns a sample of row keys in the table. 
The returned row keys will 47 | // delimit contiguous sections of the table of approximately equal size, 48 | // which can be used to break up the data for distributed tasks like 49 | // mapreduces. 50 | rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { 51 | option (google.api.http) = { 52 | get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" 53 | }; 54 | } 55 | 56 | // Mutates a row atomically. Cells already present in the row are left 57 | // unchanged unless explicitly changed by `mutation`. 58 | rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { 59 | option (google.api.http) = { 60 | post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" 61 | body: "*" 62 | }; 63 | } 64 | 65 | // Mutates multiple rows in a batch. Each individual row is mutated 66 | // atomically as in MutateRow, but the entire batch is not executed 67 | // atomically. 68 | rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { 69 | option (google.api.http) = { 70 | post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" 71 | body: "*" 72 | }; 73 | } 74 | 75 | // Mutates a row atomically based on the output of a predicate Reader filter. 76 | rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { 77 | option (google.api.http) = { 78 | post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" 79 | body: "*" 80 | }; 81 | } 82 | 83 | // Modifies a row atomically on the server. The method reads the latest 84 | // existing timestamp and value from the specified columns and writes a new 85 | // entry based on pre-defined read/modify/write rules. The new value for the 86 | // timestamp is the greater of the existing timestamp or the current server 87 | // time. The method returns the new contents of all modified cells. 88 | rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { 89 | option (google.api.http) = { 90 | post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" 91 | body: "*" 92 | }; 93 | } 94 | } 95 | 96 | // Request message for Bigtable.ReadRows. 97 | message ReadRowsRequest { 98 | // The unique name of the table from which to read. 99 | // Values are of the form 100 | // `projects//instances//tables/`. 101 | string table_name = 1; 102 | 103 | // This value specifies routing for replication. If not specified, the 104 | // "default" application profile will be used. 105 | string app_profile_id = 5; 106 | 107 | // The row keys and/or ranges to read. If not specified, reads from all rows. 108 | RowSet rows = 2; 109 | 110 | // The filter to apply to the contents of the specified row(s). If unset, 111 | // reads the entirety of each row. 112 | RowFilter filter = 3; 113 | 114 | // The read will terminate after committing to N rows' worth of results. The 115 | // default (zero) is to return all results. 116 | int64 rows_limit = 4; 117 | } 118 | 119 | // Response message for Bigtable.ReadRows. 120 | message ReadRowsResponse { 121 | // Specifies a piece of a row's contents returned as part of the read 122 | // response stream. 123 | message CellChunk { 124 | // The row key for this chunk of data. If the row key is empty, 125 | // this CellChunk is a continuation of the same row as the previous 126 | // CellChunk in the response stream, even if that CellChunk was in a 127 | // previous ReadRowsResponse message. 128 | bytes row_key = 1; 129 | 130 | // The column family name for this chunk of data. 
If this message 131 | // is not present this CellChunk is a continuation of the same column 132 | // family as the previous CellChunk. The empty string can occur as a 133 | // column family name in a response so clients must check 134 | // explicitly for the presence of this message, not just for 135 | // `family_name.value` being non-empty. 136 | google.protobuf.StringValue family_name = 2; 137 | 138 | // The column qualifier for this chunk of data. If this message 139 | // is not present, this CellChunk is a continuation of the same column 140 | // as the previous CellChunk. Column qualifiers may be empty so 141 | // clients must check for the presence of this message, not just 142 | // for `qualifier.value` being non-empty. 143 | google.protobuf.BytesValue qualifier = 3; 144 | 145 | // The cell's stored timestamp, which also uniquely identifies it 146 | // within its column. Values are always expressed in 147 | // microseconds, but individual tables may set a coarser 148 | // granularity to further restrict the allowed values. For 149 | // example, a table which specifies millisecond granularity will 150 | // only allow values of `timestamp_micros` which are multiples of 151 | // 1000. Timestamps are only set in the first CellChunk per cell 152 | // (for cells split into multiple chunks). 153 | int64 timestamp_micros = 4; 154 | 155 | // Labels applied to the cell by a 156 | // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set 157 | // on the first CellChunk per cell. 158 | repeated string labels = 5; 159 | 160 | // The value stored in the cell. Cell values can be split across 161 | // multiple CellChunks. In that case only the value field will be 162 | // set in CellChunks after the first: the timestamp and labels 163 | // will only be present in the first CellChunk, even if the first 164 | // CellChunk came in a previous ReadRowsResponse. 165 | bytes value = 6; 166 | 167 | // If this CellChunk is part of a chunked cell value and this is 168 | // not the final chunk of that cell, value_size will be set to the 169 | // total length of the cell value. The client can use this size 170 | // to pre-allocate memory to hold the full cell value. 171 | int32 value_size = 7; 172 | 173 | oneof row_status { 174 | // Indicates that the client should drop all previous chunks for 175 | // `row_key`, as it will be re-read from the beginning. 176 | bool reset_row = 8; 177 | 178 | // Indicates that the client can safely process all previous chunks for 179 | // `row_key`, as its data has been fully read. 180 | bool commit_row = 9; 181 | } 182 | } 183 | 184 | repeated CellChunk chunks = 1; 185 | 186 | // Optionally the server might return the row key of the last row it 187 | // has scanned. The client can use this to construct a more 188 | // efficient retry request if needed: any row keys or portions of 189 | // ranges less than this row key can be dropped from the request. 190 | // This is primarily useful for cases where the server has read a 191 | // lot of data that was filtered out since the last committed row 192 | // key, allowing the client to skip that work on a retry. 193 | bytes last_scanned_row_key = 2; 194 | } 195 | 196 | // Request message for Bigtable.SampleRowKeys. 197 | message SampleRowKeysRequest { 198 | // The unique name of the table from which to sample row keys. 199 | // Values are of the form 200 | // `projects//instances//tables/
`. 201 | string table_name = 1; 202 | 203 | // This value specifies routing for replication. If not specified, the 204 | // "default" application profile will be used. 205 | string app_profile_id = 2; 206 | } 207 | 208 | // Response message for Bigtable.SampleRowKeys. 209 | message SampleRowKeysResponse { 210 | // Sorted streamed sequence of sample row keys in the table. The table might 211 | // have contents before the first row key in the list and after the last one, 212 | // but a key containing the empty string indicates "end of table" and will be 213 | // the last response given, if present. 214 | // Note that row keys in this list may not have ever been written to or read 215 | // from, and users should therefore not make any assumptions about the row key 216 | // structure that are specific to their use case. 217 | bytes row_key = 1; 218 | 219 | // Approximate total storage space used by all rows in the table which precede 220 | // `row_key`. Buffering the contents of all rows between two subsequent 221 | // samples would require space roughly equal to the difference in their 222 | // `offset_bytes` fields. 223 | int64 offset_bytes = 2; 224 | } 225 | 226 | // Request message for Bigtable.MutateRow. 227 | message MutateRowRequest { 228 | // The unique name of the table to which the mutation should be applied. 229 | // Values are of the form 230 | // `projects//instances//tables/
`. 231 | string table_name = 1; 232 | 233 | // This value specifies routing for replication. If not specified, the 234 | // "default" application profile will be used. 235 | string app_profile_id = 4; 236 | 237 | // The key of the row to which the mutation should be applied. 238 | bytes row_key = 2; 239 | 240 | // Changes to be atomically applied to the specified row. Entries are applied 241 | // in order, meaning that earlier mutations can be masked by later ones. 242 | // Must contain at least one entry and at most 100000. 243 | repeated Mutation mutations = 3; 244 | } 245 | 246 | // Response message for Bigtable.MutateRow. 247 | message MutateRowResponse { 248 | 249 | } 250 | 251 | // Request message for BigtableService.MutateRows. 252 | message MutateRowsRequest { 253 | message Entry { 254 | // The key of the row to which the `mutations` should be applied. 255 | bytes row_key = 1; 256 | 257 | // Changes to be atomically applied to the specified row. Mutations are 258 | // applied in order, meaning that earlier mutations can be masked by 259 | // later ones. 260 | // You must specify at least one mutation. 261 | repeated Mutation mutations = 2; 262 | } 263 | 264 | // The unique name of the table to which the mutations should be applied. 265 | string table_name = 1; 266 | 267 | // This value specifies routing for replication. If not specified, the 268 | // "default" application profile will be used. 269 | string app_profile_id = 3; 270 | 271 | // The row keys and corresponding mutations to be applied in bulk. 272 | // Each entry is applied as an atomic mutation, but the entries may be 273 | // applied in arbitrary order (even between entries for the same row). 274 | // At least one entry must be specified, and in total the entries can 275 | // contain at most 100000 mutations. 276 | repeated Entry entries = 2; 277 | } 278 | 279 | // Response message for BigtableService.MutateRows. 280 | message MutateRowsResponse { 281 | message Entry { 282 | // The index into the original request's `entries` list of the Entry 283 | // for which a result is being reported. 284 | int64 index = 1; 285 | 286 | // The result of the request Entry identified by `index`. 287 | // Depending on how requests are batched during execution, it is possible 288 | // for one Entry to fail due to an error with another Entry. In the event 289 | // that this occurs, the same error will be reported for both entries. 290 | google.rpc.Status status = 2; 291 | } 292 | 293 | // One or more results for Entries from the batch request. 294 | repeated Entry entries = 1; 295 | } 296 | 297 | // Request message for Bigtable.CheckAndMutateRow. 298 | message CheckAndMutateRowRequest { 299 | // The unique name of the table to which the conditional mutation should be 300 | // applied. 301 | // Values are of the form 302 | // `projects//instances//tables/
`. 303 | string table_name = 1; 304 | 305 | // This value specifies routing for replication. If not specified, the 306 | // "default" application profile will be used. 307 | string app_profile_id = 7; 308 | 309 | // The key of the row to which the conditional mutation should be applied. 310 | bytes row_key = 2; 311 | 312 | // The filter to be applied to the contents of the specified row. Depending 313 | // on whether or not any results are yielded, either `true_mutations` or 314 | // `false_mutations` will be executed. If unset, checks that the row contains 315 | // any values at all. 316 | RowFilter predicate_filter = 6; 317 | 318 | // Changes to be atomically applied to the specified row if `predicate_filter` 319 | // yields at least one cell when applied to `row_key`. Entries are applied in 320 | // order, meaning that earlier mutations can be masked by later ones. 321 | // Must contain at least one entry if `false_mutations` is empty, and at most 322 | // 100000. 323 | repeated Mutation true_mutations = 4; 324 | 325 | // Changes to be atomically applied to the specified row if `predicate_filter` 326 | // does not yield any cells when applied to `row_key`. Entries are applied in 327 | // order, meaning that earlier mutations can be masked by later ones. 328 | // Must contain at least one entry if `true_mutations` is empty, and at most 329 | // 100000. 330 | repeated Mutation false_mutations = 5; 331 | } 332 | 333 | // Response message for Bigtable.CheckAndMutateRow. 334 | message CheckAndMutateRowResponse { 335 | // Whether or not the request's `predicate_filter` yielded any results for 336 | // the specified row. 337 | bool predicate_matched = 1; 338 | } 339 | 340 | // Request message for Bigtable.ReadModifyWriteRow. 341 | message ReadModifyWriteRowRequest { 342 | // The unique name of the table to which the read/modify/write rules should be 343 | // applied. 344 | // Values are of the form 345 | // `projects//instances//tables/
`. 346 | string table_name = 1; 347 | 348 | // This value specifies routing for replication. If not specified, the 349 | // "default" application profile will be used. 350 | string app_profile_id = 4; 351 | 352 | // The key of the row to which the read/modify/write rules should be applied. 353 | bytes row_key = 2; 354 | 355 | // Rules specifying how the specified row's contents are to be transformed 356 | // into writes. Entries are applied in order, meaning that earlier rules will 357 | // affect the results of later ones. 358 | repeated ReadModifyWriteRule rules = 3; 359 | } 360 | 361 | // Response message for Bigtable.ReadModifyWriteRow. 362 | message ReadModifyWriteRowResponse { 363 | // A Row containing the new contents of all cells modified by the request. 364 | Row row = 1; 365 | } 366 | --------------------------------------------------------------------------------
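
The ReadRows and MutateRow RPCs defined in bigtable.proto above are exposed through the library's builder pipelines. Below is a minimal sketch assembled from the same `Bigtable.Mutations`, `Bigtable.MutateRow`, and `Bigtable.ReadRows` calls exercised in check_and_mutate_row_test.exs; the row key, column family, and qualifier names are illustrative only, and the snippet assumes a Bigtable instance or emulator reachable under the configured project, instance, and table.

```elixir
alias Bigtable.{MutateRow, Mutations, ReadRows}

# Build a SetCell mutation for an illustrative row key and submit it
# through the MutateRow RPC (same pipeline the test setup uses).
{:ok, _response} =
  "Example#1"
  |> Mutations.build()
  |> Mutations.set_cell("cf1", "column", "value", 0)
  |> MutateRow.build()
  |> MutateRow.mutate()

# Stream the table back. ReadRows.read/0 returns {:ok, rows}, where rows maps
# each row key to its list of ReadCell structs, as seen in the tests above.
{:ok, rows} = ReadRows.read()
```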
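
CheckAndMutateRow, as described in the request message above, applies `true_mutations` when the predicate filter yields at least one cell for the row and `false_mutations` otherwise. Here is a sketch of that conditional path using the builder functions from the test module above. The predicate, qualifier, and row key are illustrative; the tests exercise `if_true/2` and `if_false/2` in separate requests, so setting both branches on one request is shown here only because the proto permits it.

```elixir
alias Bigtable.{CheckAndMutateRow, Mutations, RowFilter}

row_key = "Example#1"

# Predicate: does the row contain a cell whose qualifier matches "column"?
predicate = RowFilter.column_qualifier_regex("column")

# Mutations for each branch of the conditional write.
if_true = row_key |> Mutations.build() |> Mutations.set_cell("cf1", "matched", "true", 0)
if_false = row_key |> Mutations.build() |> Mutations.set_cell("cf1", "matched", "false", 0)

# Apply if_true when the predicate matches the row, if_false otherwise.
{:ok, _response} =
  row_key
  |> CheckAndMutateRow.build()
  |> CheckAndMutateRow.predicate(predicate)
  |> CheckAndMutateRow.if_true(if_true)
  |> CheckAndMutateRow.if_false(if_false)
  |> CheckAndMutateRow.mutate()
```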