├── .credo.exs
├── .formatter.exs
├── .github
└── workflows
│ ├── mongodb_driver.yml
│ └── release-please.yml
├── .gitignore
├── .tool-versions
├── CHANGELOG.md
├── LICENSE
├── README.md
├── config
├── config.exs
├── dev.exs
├── prod.exs
└── test.exs
├── coveralls.json
├── examples
├── aws_x509.ex
├── change_stream
│ ├── .formatter.exs
│ ├── README.md
│ ├── config
│ │ └── config.exs
│ ├── lib
│ │ ├── change_stream.ex
│ │ └── change_stream
│ │ │ └── application.ex
│ ├── mix.exs
│ ├── mix.lock
│ └── test
│ │ ├── change_stream_test.exs
│ │ └── test_helper.exs
├── crud_example
│ ├── .formatter.exs
│ ├── README.md
│ ├── config
│ │ └── config.exs
│ ├── docker-compose.yaml
│ ├── lib
│ │ ├── crud_example.ex
│ │ └── crud_example
│ │ │ └── application.ex
│ ├── mix.exs
│ ├── mix.lock
│ └── test
│ │ ├── crud_example_test.exs
│ │ └── test_helper.exs
└── reader.ex
├── insights
├── .formatter.exs
├── .gitignore
├── README.md
├── assets
│ ├── css
│ │ ├── app.css
│ │ ├── bootstrap.min.css
│ │ └── phoenix.css
│ ├── js
│ │ └── app.js
│ └── vendor
│ │ └── topbar.js
├── config
│ ├── config.exs
│ ├── dev.exs
│ ├── prod.exs
│ ├── runtime.exs
│ └── test.exs
├── lib
│ ├── insights.ex
│ ├── insights
│ │ ├── application.ex
│ │ ├── event_handler.ex
│ │ ├── mailer.ex
│ │ ├── mongodb_plugin.ex
│ │ ├── prom_ex.ex
│ │ └── test.ex
│ ├── insights_web.ex
│ └── insights_web
│ │ ├── controllers
│ │ └── page_controller.ex
│ │ ├── endpoint.ex
│ │ ├── gettext.ex
│ │ ├── live
│ │ └── topology_live.ex
│ │ ├── router.ex
│ │ ├── telemetry.ex
│ │ ├── templates
│ │ ├── layout
│ │ │ ├── app.html.heex
│ │ │ ├── live.html.heex
│ │ │ └── root.html.heex
│ │ ├── page
│ │ │ └── index.html.heex
│ │ └── topology
│ │ │ ├── details.html.heex
│ │ │ ├── event.html.heex
│ │ │ ├── events.html.heex
│ │ │ ├── index.html.heex
│ │ │ ├── monitor.html.heex
│ │ │ └── server.html.heex
│ │ └── views
│ │ ├── error_helpers.ex
│ │ ├── error_view.ex
│ │ ├── layout_view.ex
│ │ ├── page_view.ex
│ │ └── topology_view.ex
├── mix.exs
├── mix.lock
├── priv
│ ├── gettext
│ │ ├── en
│ │ │ └── LC_MESSAGES
│ │ │ │ └── errors.po
│ │ └── errors.pot
│ ├── grafana_dashboards
│ │ └── mongodb_driver.json.eex
│ ├── repo
│ │ ├── migrations
│ │ │ └── .formatter.exs
│ │ └── seeds.exs
│ └── static
│ │ ├── favicon.ico
│ │ ├── images
│ │ └── phoenix.png
│ │ └── robots.txt
└── test
│ ├── insights_web
│ ├── controllers
│ │ └── page_controller_test.exs
│ └── views
│ │ ├── error_view_test.exs
│ │ ├── layout_view_test.exs
│ │ └── page_view_test.exs
│ ├── support
│ ├── channel_case.ex
│ ├── conn_case.ex
│ └── data_case.ex
│ └── test_helper.exs
├── lib
├── bson.ex
├── bson
│ ├── decimal128.ex
│ ├── decoder.ex
│ ├── encoder.ex
│ ├── types.ex
│ └── utils.ex
├── mongo.ex
├── mongo
│ ├── app.ex
│ ├── auth.ex
│ ├── auth
│ │ ├── cr.ex
│ │ ├── plain.ex
│ │ ├── scram.ex
│ │ └── x509.ex
│ ├── binary_utils.ex
│ ├── bulk_ops.ex
│ ├── bulk_write.ex
│ ├── change_stream.ex
│ ├── collection.ex
│ ├── compressor.ex
│ ├── encoder.ex
│ ├── error.ex
│ ├── event_handler.ex
│ ├── events.ex
│ ├── grid_fs
│ │ ├── bucket.ex
│ │ ├── download.ex
│ │ ├── upload.ex
│ │ └── upload_stream.ex
│ ├── id_server.ex
│ ├── keywords.ex
│ ├── messages.ex
│ ├── migration.ex
│ ├── mongo_db_connection.ex
│ ├── monitor.ex
│ ├── ordered_bulk.ex
│ ├── password_safe.ex
│ ├── pbkdf2.ex
│ ├── pbkdf2_cache.ex
│ ├── query.ex
│ ├── read_preference.ex
│ ├── repo.ex
│ ├── results.ex
│ ├── server_description.ex
│ ├── session.ex
│ ├── stable_version.ex
│ ├── stream.ex
│ ├── streaming_hello_monitor.ex
│ ├── topology.ex
│ ├── topology_description.ex
│ ├── unordered_bulk.ex
│ ├── url_parser.ex
│ ├── version.ex
│ └── write_concern.ex
├── mongo_db_connection
│ └── utils.ex
├── session
│ ├── server_session.ex
│ └── session_pool.ex
├── tasks
│ ├── gen
│ │ └── migration.ex
│ └── mongo.ex
└── utils.ex
├── mix.exs
├── mix.lock
└── test
├── bson
├── decimal128_test.exs
├── decoder_test.exs
├── encoder_test.exs
├── types_test.exs
└── uuid_test.exs
├── bson_test.exs
├── collections
└── simple_test.exs
├── data
├── test.jpg
└── test.txt
├── mongo
├── batch_size_text.exs
├── bulk_writes_test.exs
├── change_stream_test.exs
├── collection_test.exs
├── connection_test.exs
├── cursor_test.exs
├── encoder_test.exs
├── errors_test.exs
├── find_one_test.exs
├── grid_fs
│ ├── bucket_test.exs
│ ├── download_test.exs
│ └── upload_test.exs
├── migration_test.exs
├── not_writable_primary_test.exs
├── password_safe_test.exs
├── read_preferences_test.exs
├── repo_test.exs
├── retryable_reads_test.exs
├── retryable_writes_test.exs
├── session_test.exs
├── topology_description_test.exs
├── topology_test.exs
├── transaction_retries_test.exs
├── update_hint_test.exs
└── url_parser_test.exs
├── mongo_test.exs
├── specification_tests
└── crud_test.exs
├── support
├── collection_case.ex
├── crud_tests
│ └── read
│ │ ├── aggregate-collation.json
│ │ ├── aggregate-out.json
│ │ ├── aggregate.json
│ │ ├── count-collation.json
│ │ ├── count.json
│ │ ├── distinct-collation.json
│ │ ├── distinct.json
│ │ ├── find-collation.json
│ │ └── find.json
├── event_catcher.ex
├── specification_case.ex
├── specifications
│ ├── crud.ex
│ └── crud
│ │ └── helpers.ex
├── test_connection.ex
└── topology_test_data.ex
└── test_helper.exs
/.formatter.exs:
--------------------------------------------------------------------------------
1 | # Used by "mix format"
2 | locals_without_parens = [
3 | # MongoDB
4 | after_load: :*,
5 | before_dump: :*,
6 | attribute: :*,
7 | collection: :*,
8 | embeds_one: :*,
9 | embeds_many: :*,
10 | # Test
11 | ## Assertions
12 | assert_receive_event: :*,
13 | refute_receive_event: :*
14 | ]
15 |
16 | [
17 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"],
18 | line_length: 250,
19 | locals_without_parens: locals_without_parens,
20 | export: [locals_without_parens: locals_without_parens]
21 | ]
22 |
--------------------------------------------------------------------------------
/.github/workflows/mongodb_driver.yml:
--------------------------------------------------------------------------------
1 | name: Elixir CI
2 | on:
3 |   push:
4 |     branches:
5 |       - master
6 |   pull_request:
7 |     branches:
8 |       - master
9 |
10 | jobs:
11 |   test:
12 |     name: Compile and Test
13 |     timeout-minutes: 5
14 |
15 |     strategy:
16 |       matrix:
17 |         mongodb-version: ["5.0", "6.0", "7.0", "8.0"]
18 |
19 |     runs-on: ubuntu-latest
20 |     steps:
21 |       # v2 runs on a deprecated Node.js runtime; v4 is the supported release.
22 |       - uses: actions/checkout@v4
23 |
24 |       - name: Start MongoDB
25 |         uses: supercharge/mongodb-github-action@1.11.0
26 |         with:
27 |           mongodb-version: ${{ matrix.mongodb-version }}
28 |           mongodb-replica-set: rs_1
29 |
30 |       - name: Read .tool-versions
31 |         uses: marocchino/tool-versions-action@v1
32 |         id: versions
33 |
34 |       - name: Set up Elixir
35 |         uses: erlef/setup-beam@v1
36 |         with:
37 |           elixir-version: ${{steps.versions.outputs.elixir}}
38 |           otp-version: ${{ steps.versions.outputs.erlang}}
39 |
40 |       - name: Restore dependencies cache
41 |         uses: actions/cache@v4
42 |         with:
43 |           path: |
44 |             deps
45 |             _build
46 |             priv/plts
47 |           key: ${{ runner.os }}-mix-v1-${{ hashFiles('**/mix.lock') }}
48 |           restore-keys: ${{ runner.os }}-mix-v1-
49 |
50 |       - name: Install dependencies
51 |         run: mix deps.get
52 |
53 |       - name: Run tests
54 |         run: mix test --exclude ssl --exclude socket --exclude rs_required
55 |
56 |       - name: Run Credo
57 |         run: mix credo
58 |
59 |       - name: Check Formatting
60 |         run: mix format --check-formatted
61 |         if: always()
62 |
--------------------------------------------------------------------------------
/.github/workflows/release-please.yml:
--------------------------------------------------------------------------------
1 | on:
2 |   push:
3 |     branches:
4 |       - master
5 |   pull_request:
6 |     branches:
7 |       - master
8 |
9 | name: release-please
10 |
11 | env:
12 |   HEX_API_KEY: ${{ secrets.HEX_TOKEN }}
13 |
14 | jobs:
15 |   release-please:
16 |     runs-on: ubuntu-latest
17 |     steps:
18 |       # NOTE(review): this action has moved to the googleapis org
19 |       # (googleapis/release-please-action, currently v4). Confirm the v4
20 |       # config-file format before upgrading — it differs from v3's inputs.
21 |       - uses: google-github-actions/release-please-action@v3
22 |         id: release
23 |         with:
24 |           release-type: elixir
25 |           package-name: mongodb_driver
26 |       - uses: erlef/setup-beam@v1
27 |         if: ${{ steps.release.outputs.release_created }}
28 |         with:
29 |           otp-version: 26.0.2
30 |           elixir-version: 1.15.4
31 |       # Publish to Hex only when release-please actually created a release.
32 |       - run: mix do deps.get, deps.compile
33 |         if: ${{ steps.release.outputs.release_created }}
34 |       - run: mix hex.build
35 |         if: ${{ steps.release.outputs.release_created }}
36 |       - run: mix hex.publish --yes --replace
37 |         if: ${{ steps.release.outputs.release_created }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # The directory Mix will write compiled artifacts to.
2 | **/_build/
3 |
4 | # If you run "mix test --cover", coverage assets end up here.
5 | /cover/
6 |
7 | # The directory Mix downloads your dependencies sources to.
8 | **/deps/
9 |
10 | # Where third-party dependencies like ExDoc output generated docs.
11 | /doc/
12 |
13 | # Ignore .fetch files in case you like to edit your project deps locally.
14 | /.fetch
15 |
16 | # If the VM crashes, it generates a dump, let's ignore it too.
17 | erl_crash.dump
18 |
19 | # Also ignore archive artifacts (built via "mix archive.build").
20 | *.ez
21 |
22 | # Ignore package tarball (built via "mix hex.build").
23 | mongodb_driver-*.tar
24 |
25 | # Temporary files, for example, from tests.
26 | /tmp/
27 |
28 | # Misc.
29 | .idea/
30 | *.iml
31 | *.code-workspace
32 | data/
33 | plt_core_path/*.plt
34 | **/stream_data/
--------------------------------------------------------------------------------
/.tool-versions:
--------------------------------------------------------------------------------
1 | elixir 1.18.2-otp-27
2 | erlang 27.2.2
3 | python 3.10.2
--------------------------------------------------------------------------------
/config/config.exs:
--------------------------------------------------------------------------------
1 | # This file is responsible for configuring your application
2 | # and its dependencies with the aid of the Config module.
3 | import Config
4 |
5 | # This configuration is loaded before any dependency and is restricted
6 | # to this project. If another project depends on this project, this
7 | # file won't be loaded nor affect the parent project. For this reason,
8 | # if you want to provide default values for your application for third-
9 | # party users, it should be done in your mix.exs file.
10 |
11 | config :logger, :console,
12 | level: :info,
13 | truncate: 1024,
14 | format: "$time [$level] $message ($metadata)\n\n",
15 | metadata: [:module, :function, :line]
16 |
17 | config :mongodb_driver,
18 | log: true,
19 | migration: [
20 | path: "mongo/migrations",
21 | otp_app: :mongodb_driver,
22 | topology: :mongo,
23 | collection: "migrations"
24 | ]
25 |
26 | import_config "#{Mix.env()}.exs"
27 |
--------------------------------------------------------------------------------
/config/dev.exs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zookzook/elixir-mongodb-driver/83b19556c0f0b7ddd20a14a114bf0e01b86f9945/config/dev.exs
--------------------------------------------------------------------------------
/config/prod.exs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zookzook/elixir-mongodb-driver/83b19556c0f0b7ddd20a14a114bf0e01b86f9945/config/prod.exs
--------------------------------------------------------------------------------
/config/test.exs:
--------------------------------------------------------------------------------
1 | import Config
2 |
3 | config :mongodb_driver, Mongo.RepoTest.MyRepo,
4 | url: "mongodb://127.0.0.1:27017/mongodb_test",
5 | show_sensitive_data_on_connection_error: true
6 |
7 | config :mongodb_driver,
8 | log: false,
9 | migration: [
10 | path: "mongo/migrations",
11 | otp_app: :mongodb_driver,
12 | topology: :mongo,
13 | collection: "migrations"
14 | ]
15 |
--------------------------------------------------------------------------------
/coveralls.json:
--------------------------------------------------------------------------------
1 | {
2 | "skip_files": [
3 | "lib/bson/utils.ex",
4 | "lib/mongo/binary_utils.ex",
5 | "test/"
6 | ]
7 | }
--------------------------------------------------------------------------------
/examples/aws_x509.ex:
--------------------------------------------------------------------------------
1 | defmodule AWSX509.Example do
2 |   @moduledoc """
3 |   Example: connecting to MongoDB with X.509 client-certificate
4 |   authentication (e.g. an AWS-hosted deployment).
5 |
6 |   Hostname, the certificate subject DN in `username` and the certificate
7 |   paths are placeholders — replace them with your own values.
8 |   """
9 |
10 |   # Returns the connection pid; the `{:ok, conn}` match raises a
11 |   # MatchError if the connection cannot be established.
12 |   def connect do
13 |     cert_dir = "/home/username/certs/"
14 |     {:ok, conn} = Mongo.start_link(
15 |       database: "database",
16 |       hostname: "mongodb.company.com",
17 |       username: "CN=username,OU=unit,O=company,L=Location,ST=State,C=US",
18 |       password: "foo", # needs a dummy string. but would be nice if it could ignore this for X509
19 |       ssl: true,
20 |       auth_mechanism: :x509,
21 |       ssl_opts: [
22 |         ciphers: ['AES256-GCM-SHA384'], # needed to connect to AWS
23 |         cacertfile: Path.join([cert_dir, "rootca.pem"]),
24 |         certfile: Path.join([cert_dir, "mycert.pem"])
25 |       ]
26 |     )
27 |     conn
28 |   end
29 | end
--------------------------------------------------------------------------------
/examples/change_stream/.formatter.exs:
--------------------------------------------------------------------------------
1 | # Used by "mix format"
2 | [
3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"]
4 | ]
5 |
--------------------------------------------------------------------------------
/examples/change_stream/README.md:
--------------------------------------------------------------------------------
1 | # Change streams
2 |
3 | This project shows a change stream example. You need to set up a replica set, because change streams are only available for replica sets and sharded clusters. For more information see
4 |
5 | * https://www.mongodb.com/blog/post/an-introduction-to-change-streams
6 | * https://docs.mongodb.com/manual/changeStreams/
7 |
8 | If you are creating a new replica set, you need to create the database `db-1` first, before starting the example. Otherwise you will get some errors, because the database which we will observe does not exist.
9 |
10 | The `ChangeStream` module uses a GenServer for observing changes. It spawns a process to consume the documents returned by the change stream cursor:
11 |
12 | pid = spawn(fn -> Enum.each(get_cursor(state), fn doc -> new_doc(doc) end) end)
13 |
14 | While running this process you will receive some messages:
15 |
16 | * token: you get a token after a while. You can use this token to reconnect to the change stream without getting old change documents again.
17 | * documents: If data changes, you get a document which describes these changes
18 |
19 | Let's start the program with `iex -S mix`:
20 |
21 | iex(2)>
22 | 16:10:05.018 [info] Connecting change stream
23 |
24 | 16:10:05.022 [info] Receiving new token nil
25 |
26 | iex(3)> Mongo.insert_one(:mongo, "http_errors", %{url: "https://elixir-lang.org"})
27 | {:ok,
28 | %Mongo.InsertOneResult{
29 | acknowledged: true,
30 | inserted_id: #BSON.ObjectId<5d595c42306a5f0d87ab24e7>
31 | }}
32 | iex(4)>
33 | 16:10:10.509 [info] Receiving new token %{"_data" => #BSON.Binary<825d595c420000000146645f696400645d595c42306a5f0d87ab24e7005a1004fefbdf8754024c339cd73f510a91db2b04>}
34 |
35 | 16:10:10.509 [info] Receiving new document %{"coll" => "http_errors", "db" => "db-1"}
36 |
37 | 16:10:10.509 [info] Got http error for url https://elixir-lang.org
38 |
39 |
--------------------------------------------------------------------------------
/examples/change_stream/config/config.exs:
--------------------------------------------------------------------------------
1 | # This file is responsible for configuring your application
2 | # and its dependencies with the aid of the Config module.
3 | #
4 | # `use Mix.Config` is deprecated since Elixir 1.9; `import Config` is the
5 | # supported replacement and keeps this file usable in releases.
6 | import Config
7 |
8 | # This configuration is loaded before any dependency and is restricted
9 | # to this project. If another project depends on this project, this
10 | # file won't be loaded nor affect the parent project. For this reason,
11 | # if you want to provide default values for your application for
12 | # third-party users, it should be done in your "mix.exs" file.
13 |
14 | # You can configure your application as:
15 | #
16 | # config :change_stream, key: :value
17 | #
18 | # and access this configuration in your application as:
19 | #
20 | # Application.get_env(:change_stream, :key)
21 | #
22 | # You can also configure a third-party app:
23 | #
24 | # config :logger, level: :info
25 | #
26 |
27 | # It is also possible to import configuration files, relative to this
28 | # directory. For example, you can emulate configuration per environment
29 | # by uncommenting the line below and defining dev.exs, test.exs and such.
30 | # Configuration from the imported file will override the ones defined
31 | # here (which is why it is important to import them last).
32 | #
33 | # import_config "#{Mix.env()}.exs"
34 |
--------------------------------------------------------------------------------
/examples/change_stream/lib/change_stream.ex:
--------------------------------------------------------------------------------
1 | defmodule ChangeStream do
2 |   @moduledoc """
3 |   GenServer that observes a MongoDB change stream on the `http_errors`
4 |   collection and logs the received change documents.
5 |
6 |   The cursor is consumed in a separate, monitored process; when that
7 |   process terminates (timeout, topology change, ...) the stream is
8 |   reconnected, resuming from the last seen resume token.
9 |   """
10 |
11 |   use GenServer
12 |
13 |   require Logger
14 |
15 |   @collection "http_errors"
16 |   @me __MODULE__
17 |
18 |   def start_link() do
19 |     GenServer.start_link(__MODULE__, nil, name: @me)
20 |   end
21 |
22 |   # Called from the cursor process whenever a new resume token arrives.
23 |   def new_token(token) do
24 |     GenServer.cast(@me, {:token, token})
25 |   end
26 |
27 |   # Called from the cursor process for every change document.
28 |   def new_doc(doc) do
29 |     GenServer.cast(@me, {:doc, doc})
30 |   end
31 |
32 |   def init(_) do
33 |     state = %{last_resume_token: nil}
34 |     # Delay the first connect so the Mongo topology can come up first.
35 |     Process.send_after(self(), :connect, 3000)
36 |     {:ok, state}
37 |   end
38 |
39 |   def handle_info({:DOWN, _, :process, _pid, reason}, state) do
40 |     Logger.info("#Cursor process is down: #{inspect(reason)}")
41 |     Process.send_after(self(), :connect, 3000)
42 |     {:noreply, state}
43 |   end
44 |
45 |   def handle_info(:connect, state) do
46 |     Logger.info("Connecting change stream")
47 |     # Spawn a new process that consumes the change stream cursor
48 |     pid = spawn(fn -> Enum.each(get_cursor(state), fn doc -> new_doc(doc) end) end)
49 |
50 |     # Monitor the process so we reconnect when it goes down
51 |     Process.monitor(pid)
52 |
53 |     {:noreply, state}
54 |   end
55 |
56 |   def handle_cast({:doc, doc}, state) do
57 |     Logger.info("Receiving new document #{inspect(doc["ns"])}")
58 |     process_doc(doc)
59 |     {:noreply, state}
60 |   end
61 |
62 |   def handle_cast({:token, token}, state) do
63 |     Logger.info("Receiving new token #{inspect(token)}")
64 |     {:noreply, %{state | last_resume_token: token}}
65 |   end
66 |
67 |   defp process_doc(%{"fullDocument" => %{"url" => url}, "ns" => %{"coll" => "http_errors", "db" => "db-1"}}) do
68 |     Logger.info("Got http error for url #{url}")
69 |   end
70 |
71 |   # Catch-all clause: change events that do not match the shape above
72 |   # (deletes carry no "fullDocument", other databases, etc.) previously
73 |   # crashed this GenServer with a FunctionClauseError.
74 |   defp process_doc(_doc), do: :ok
75 |
76 |   defp get_cursor(%{last_resume_token: nil}) do
77 |     Mongo.watch_collection(:mongo, @collection, [], fn token -> new_token(token) end, max_time: 2_000)
78 |   end
79 |
80 |   defp get_cursor(%{last_resume_token: token}) do
81 |     Mongo.watch_collection(:mongo, @collection, [], fn token -> new_token(token) end, max_time: 2_000, resume_after: token)
82 |   end
83 | end
--------------------------------------------------------------------------------
/examples/change_stream/lib/change_stream/application.ex:
--------------------------------------------------------------------------------
1 | defmodule ChangeStream.Application do
2 |   @moduledoc false
3 |
4 |   use Application
5 |
6 |   def start(_type, _args) do
7 |     # Supervisor.Spec and worker/2 are deprecated; child-spec tuples and
8 |     # explicit child-spec maps are the supported form.
9 |     children = [
10 |       # this should be a replica set!
11 |       # NOTE(review): port 27027 looks unusual — confirm it matches your setup.
12 |       {Mongo, [name: :mongo, url: "mongodb://localhost:27027/db-1", pool_size: 3]},
13 |       # ChangeStream.start_link/0 takes no arguments, so an explicit child
14 |       # spec is used instead of the {module, arg} tuple form.
15 |       %{id: ChangeStream, start: {ChangeStream, :start_link, []}}
16 |     ]
17 |
18 |     opts = [strategy: :one_for_one, name: ChangeStream.Supervisor]
19 |     Supervisor.start_link(children, opts)
20 |   end
21 | end
20 |
--------------------------------------------------------------------------------
/examples/change_stream/mix.exs:
--------------------------------------------------------------------------------
1 | defmodule ChangeStream.MixProject do
2 | use Mix.Project
3 |
4 | def project do
5 | [
6 | app: :change_stream,
7 | version: "0.1.0",
8 | elixir: "~> 1.8",
9 | start_permanent: Mix.env() == :prod,
10 | deps: deps()
11 | ]
12 | end
13 |
14 | # Run "mix help compile.app" to learn about applications.
15 | def application do
16 | [
17 | extra_applications: [:logger],
18 | mod: {ChangeStream.Application, []}
19 | ]
20 | end
21 |
22 | # Run "mix help deps" to learn about dependencies.
23 | defp deps do
24 | [
25 | {:mongodb_driver, "~> 0.5"}
26 | ]
27 | end
28 | end
29 |
--------------------------------------------------------------------------------
/examples/change_stream/mix.lock:
--------------------------------------------------------------------------------
1 | %{
2 | "connection": {:hex, :connection, "1.0.4", "a1cae72211f0eef17705aaededacac3eb30e6625b04a6117c1b2db6ace7d5976", [:mix], [], "hexpm"},
3 | "db_connection": {:hex, :db_connection, "2.0.6", "bde2f85d047969c5b5800cb8f4b3ed6316c8cb11487afedac4aa5f93fd39abfa", [:mix], [{:connection, "~> 1.0.2", [hex: :connection, repo: "hexpm", optional: false]}], "hexpm"},
4 | "decimal": {:hex, :decimal, "1.8.0", "ca462e0d885f09a1c5a342dbd7c1dcf27ea63548c65a65e67334f4b61803822e", [:mix], [], "hexpm"},
5 | "mongodb_driver": {:hex, :mongodb_driver, "0.5.7", "f29cab9a011f685210c472888ee3fde3573459323f839b67c4d942e067d9eda1", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.0.6", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm"},
6 | }
7 |
--------------------------------------------------------------------------------
/examples/change_stream/test/change_stream_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ChangeStreamTest do
2 |   use ExUnit.Case
3 |   doctest ChangeStream
4 |
5 |   # NOTE(review): ChangeStream does not define hello/0 — this generated
6 |   # boilerplate test will fail; replace it with a real test or remove it.
7 |   test "greets the world" do
8 |     assert ChangeStream.hello() == :world
9 |   end
10 | end
--------------------------------------------------------------------------------
/examples/change_stream/test/test_helper.exs:
--------------------------------------------------------------------------------
1 | ExUnit.start()
2 |
--------------------------------------------------------------------------------
/examples/crud_example/.formatter.exs:
--------------------------------------------------------------------------------
1 | # Used by "mix format"
2 | [
3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"]
4 | ]
5 |
--------------------------------------------------------------------------------
/examples/crud_example/README.md:
--------------------------------------------------------------------------------
1 | # CrudExample
2 |
3 | This project shows basic CRUD examples. You can use your own MongoDB instance or just use Docker to start a MongoDB container by calling:
4 |
5 | docker-compose up -d mongodb
6 |
7 | After that go to the project folder and start
8 |
9 | #> mix deps.get
10 | #> iex -S mix
11 |
12 | iex(1)> CrudExample.example_1()
13 | iex(1)> CrudExample.example_2()
14 |
15 | ## `example_1`
16 |
17 | In this function we are using one connection to the database without using the application supervisor.
18 |
19 | ## `example_2`
20 |
21 | The same operations as `example_1`, but now using connection pooling and the application supervisor.
22 |
23 |
--------------------------------------------------------------------------------
/examples/crud_example/config/config.exs:
--------------------------------------------------------------------------------
1 | # This file is responsible for configuring your application
2 | # and its dependencies with the aid of the Config module.
3 | #
4 | # `use Mix.Config` is deprecated since Elixir 1.9; `import Config` is the
5 | # supported replacement and keeps this file usable in releases.
6 | import Config
7 |
8 | # This configuration is loaded before any dependency and is restricted
9 | # to this project. If another project depends on this project, this
10 | # file won't be loaded nor affect the parent project. For this reason,
11 | # if you want to provide default values for your application for
12 | # third-party users, it should be done in your "mix.exs" file.
13 |
14 | # You can configure your application as:
15 | #
16 | # config :crud_example, key: :value
17 | #
18 | # and access this configuration in your application as:
19 | #
20 | # Application.get_env(:crud_example, :key)
21 | #
22 | # You can also configure a third-party app:
23 | #
24 | # config :logger, level: :info
25 | #
26 |
27 | # It is also possible to import configuration files, relative to this
28 | # directory. For example, you can emulate configuration per environment
29 | # by uncommenting the line below and defining dev.exs, test.exs and such.
30 | # Configuration from the imported file will override the ones defined
31 | # here (which is why it is important to import them last).
32 | #
33 | # import_config "#{Mix.env()}.exs"
34 |
--------------------------------------------------------------------------------
/examples/crud_example/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |
4 | mongodb:
5 | image: mongo:4
6 | ports:
7 | - "27017:27017"
8 | volumes:
9 | - mongodb-volume:/data/db
10 | networks:
11 | - crud
12 | logging:
13 | options:
14 | max-size: "10k"
15 | max-file: "1"
16 |
17 | volumes:
18 | mongodb-volume:
19 |
20 | networks:
21 | crud:
--------------------------------------------------------------------------------
/examples/crud_example/lib/crud_example.ex:
--------------------------------------------------------------------------------
1 | defmodule CrudExample do
2 |   @moduledoc """
3 |   Basic CRUD examples for the MongoDB driver.
4 |
5 |   * `example_1/0` opens its own connection with `Mongo.start_link/1`.
6 |   * `example_2/0` uses the pooled `:mongo` connection started by
7 |     `CrudExample.Application`.
8 |   """
9 |
10 |   # Builds the sample document shared by both examples.
11 |   def create_vcard() do
12 |     %{
13 |       firstname: "Alexander",
14 |       lastname: "Abendroth",
15 |       contact: %{
16 |         email: "alexander.abendroth@campany.de",
17 |         telephone: "+49 111938947373",
18 |         mobile: "+49 222192938383",
19 |         fax: "+49 3332929292"
20 |       },
21 |       address: %{
22 |         street: "Fasanenweg 5",
23 |         postal_code: "12345",
24 |         city: "Berlin",
25 |         country: "de"
26 |       }
27 |     }
28 |   end
29 |
30 |   # Insert, find, update and delete a document over a dedicated connection.
31 |   def example_1() do
32 |     {:ok, top} = Mongo.start_link(url: "mongodb://localhost:27017/db-1")
33 |
34 |     result = Mongo.insert_one(top, "people", create_vcard())
35 |     IO.puts("#{inspect(result)}\n")
36 |
37 |     result = Mongo.find_one(top, "people", %{})
38 |     IO.puts("#{inspect(result)}\n")
39 |
40 |     result = Mongo.update_one(top, "people", %{lastname: "Abendroth"}, ["$set": ["address.postal_code": "20000"]])
41 |     IO.puts("#{inspect(result)}\n")
42 |
43 |     result = Mongo.find_one(top, "people", %{"contact.email": "alexander.abendroth@campany.de"})
44 |     IO.puts("#{inspect(result)}\n")
45 |
46 |     result = Mongo.delete_one(top, "people", %{lastname: "Abendroth"})
47 |     IO.puts("#{inspect(result)}\n")
48 |   end
49 |
50 |   # Same operations as example_1/0, but using the pooled :mongo connection.
51 |   def example_2() do
52 |     {:ok, %Mongo.InsertOneResult{acknowledged: true, inserted_id: id}} = Mongo.insert_one(:mongo, "people", create_vcard())
53 |
54 |     IO.puts("ID is #{inspect(id)}\n")
55 |
56 |     result = Mongo.find_one(:mongo, "people", %{_id: id})
57 |     IO.puts("#{inspect(result)}\n")
58 |
59 |     result = Mongo.update_one(:mongo, "people", %{_id: id}, ["$set": ["address.postal_code": "20000"]])
60 |     IO.puts("#{inspect(result)}\n")
61 |
62 |     result = Mongo.find_one(:mongo, "people", %{_id: id})
63 |     IO.puts("#{inspect(result)}\n")
64 |
65 |     result = Mongo.delete_one(:mongo, "people", %{_id: id})
66 |     IO.puts("#{inspect(result)}\n")
67 |   end
68 | end
65 |
--------------------------------------------------------------------------------
/examples/crud_example/lib/crud_example/application.ex:
--------------------------------------------------------------------------------
1 | defmodule CrudExample.Application do
2 |   @moduledoc false
3 |
4 |   use Application
5 |
6 |   def start(_type, _args) do
7 |     # Supervisor.Spec.worker/2 is deprecated; the {module, arg} child-spec
8 |     # tuple is the supported form and starts the same Mongo pool.
9 |     children = [
10 |       {Mongo, [name: :mongo, database: "db-1", pool_size: 3]}
11 |     ]
12 |
13 |     opts = [strategy: :one_for_one, name: CrudExample.Supervisor]
14 |     Supervisor.start_link(children, opts)
15 |   end
16 | end
17 |
--------------------------------------------------------------------------------
/examples/crud_example/mix.exs:
--------------------------------------------------------------------------------
1 | defmodule CrudExample.MixProject do
2 | use Mix.Project
3 |
4 | def project do
5 | [
6 | app: :crud_example,
7 | version: "0.1.0",
8 | elixir: "~> 1.8",
9 | start_permanent: Mix.env() == :prod,
10 | deps: deps()
11 | ]
12 | end
13 |
14 | # Run "mix help compile.app" to learn about applications.
15 | def application do
16 | [
17 | extra_applications: [:logger],
18 | mod: {CrudExample.Application, []}
19 | ]
20 | end
21 |
22 | # Run "mix help deps" to learn about dependencies.
23 | defp deps do
24 | [
25 | {:mongodb_driver, "~> 0.5"}
26 | ]
27 | end
28 | end
29 |
--------------------------------------------------------------------------------
/examples/crud_example/mix.lock:
--------------------------------------------------------------------------------
1 | %{
2 | "connection": {:hex, :connection, "1.0.4", "a1cae72211f0eef17705aaededacac3eb30e6625b04a6117c1b2db6ace7d5976", [:mix], [], "hexpm"},
3 | "db_connection": {:hex, :db_connection, "2.0.6", "bde2f85d047969c5b5800cb8f4b3ed6316c8cb11487afedac4aa5f93fd39abfa", [:mix], [{:connection, "~> 1.0.2", [hex: :connection, repo: "hexpm", optional: false]}], "hexpm"},
4 | "decimal": {:hex, :decimal, "1.8.0", "ca462e0d885f09a1c5a342dbd7c1dcf27ea63548c65a65e67334f4b61803822e", [:mix], [], "hexpm"},
5 | "mongodb_driver": {:hex, :mongodb_driver, "0.5.7", "f29cab9a011f685210c472888ee3fde3573459323f839b67c4d942e067d9eda1", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.0.6", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm"},
6 | }
7 |
--------------------------------------------------------------------------------
/examples/crud_example/test/crud_example_test.exs:
--------------------------------------------------------------------------------
1 | defmodule CrudExampleTest do
2 |   use ExUnit.Case
3 |   doctest CrudExample
4 |
5 |   # NOTE(review): CrudExample does not define hello/0 — this generated
6 |   # boilerplate test will fail; replace it with a real test or remove it.
7 |   test "greets the world" do
8 |     assert CrudExample.hello() == :world
9 |   end
10 | end
--------------------------------------------------------------------------------
/examples/crud_example/test/test_helper.exs:
--------------------------------------------------------------------------------
1 | ExUnit.start()
2 |
--------------------------------------------------------------------------------
/examples/reader.ex:
--------------------------------------------------------------------------------
1 | defmodule Reader do
2 |
3 |   require Logger
4 |
5 |   ##
6 |   # see https://github.com/zookzook/elixir-mongodb-driver/issues/63 for more information
7 |   #
8 |   # 1. start a replica set and call the Reader.test()
9 |   # 2. go to the primary db and call db.adminCommand({replSetStepDown: 30})
10 |   # 3. check the log to see the error message only one time
11 |   ##
12 |   # Starts an endless reader task that repeatedly queries the "data" collection.
13 |   def start_link(conn) do
14 |     Logger.info("starting reader")
15 |
16 |     Task.start_link(fn -> read(conn, false) end)
17 |   end
18 |
19 |   # Reads all documents in a tight, endless loop (deliberate: this is a
20 |   # failover demo). `error` carries whether the previous iteration failed.
21 |   defp read(conn, error) do
22 |
23 |     if error do
24 |       Logger.info("Called with error")
25 |     end
26 |
27 |     # Gets an enumerable cursor for the results
28 |     cursor = Mongo.find(conn, "data", %{})
29 |
30 |     # Mongo.find/3 returns {:error, reason} instead of raising here, so the
31 |     # loop can keep retrying while the primary steps down.
32 |     error = case cursor do
33 |       {:error, error} ->
34 |         Logger.info("Error: #{inspect error}")
35 |         true
36 |
37 |       _ ->
38 |         cursor
39 |         |> Enum.to_list()
40 |         |> Enum.count()
41 |         false
42 |     end
43 |
44 |     read(conn, error)
45 |   end
46 |
47 |   # Seeds 10_000 documents and starts the reader loop against a replica set.
48 |   def test() do
49 |     {:ok, conn} = Mongo.start_link(url: "mongodb://localhost:27017,localhost:27018,localhost:27019/load?replicaSet=rs_1")
50 |
51 |     Enum.map(1..10_000, fn counter -> Mongo.insert_one(conn, "data", %{counter: counter}) end)
52 |     Reader.start_link(conn)
53 |   end
54 | end
--------------------------------------------------------------------------------
/insights/.formatter.exs:
--------------------------------------------------------------------------------
# Formatter configuration for the Insights application, used by `mix format`.
[
  import_deps: [:ecto, :phoenix],
  inputs: ["*.{ex,exs}", "priv/*/seeds.exs", "{config,lib,test}/**/*.{ex,exs}"],
  subdirectories: ["priv/*/migrations"]
]
6 |
--------------------------------------------------------------------------------
/insights/.gitignore:
--------------------------------------------------------------------------------
1 | # The directory Mix will write compiled artifacts to.
2 | /_build/
3 |
4 | # If you run "mix test --cover", coverage assets end up here.
5 | /cover/
6 |
7 | # The directory Mix downloads your dependencies sources to.
8 | /deps/
9 |
10 | # Where 3rd-party dependencies like ExDoc output generated docs.
11 | /doc/
12 |
13 | # Ignore .fetch files in case you like to edit your project deps locally.
14 | /.fetch
15 |
16 | # If the VM crashes, it generates a dump, let's ignore it too.
17 | erl_crash.dump
18 |
19 | # Also ignore archive artifacts (built via "mix archive.build").
20 | *.ez
21 |
22 | # Ignore package tarball (built via "mix hex.build").
23 | insights-*.tar
24 |
25 | # Ignore assets that are produced by build tools.
26 | /priv/static/assets/
27 |
28 | # Ignore digested assets cache.
29 | /priv/static/cache_manifest.json
30 |
31 | # In case you use Node.js/npm, you want to ignore these.
32 | npm-debug.log
33 | /assets/node_modules/
34 |
35 |
--------------------------------------------------------------------------------
/insights/README.md:
--------------------------------------------------------------------------------
1 | # Insights
2 |
3 | To start your Phoenix server:
4 |
5 | * Install dependencies with `mix deps.get`
6 | * Create and migrate your database with `mix ecto.setup`
7 | * Start Phoenix endpoint with `mix phx.server` or inside IEx with `iex -S mix phx.server`
8 |
9 | Now you can visit [`localhost:4000`](http://localhost:4000) from your browser.
10 |
11 | Ready to run in production? Please [check our deployment guides](https://hexdocs.pm/phoenix/deployment.html).
12 |
13 | ## Learn more
14 |
15 | * Official website: https://www.phoenixframework.org/
16 | * Guides: https://hexdocs.pm/phoenix/overview.html
17 | * Docs: https://hexdocs.pm/phoenix
18 | * Forum: https://elixirforum.com/c/phoenix-forum
19 | * Source: https://github.com/phoenixframework/phoenix
20 |
--------------------------------------------------------------------------------
/insights/assets/css/app.css:
--------------------------------------------------------------------------------
/* This file is for your main application CSS */
@import "./bootstrap.min.css";

/* Striped list groups: alternating row colours plus hover states for links.
   (An identical duplicated copy of this rule group was removed.) */
.list-group.stripes .list-group-item {
  color: #212529;
}
.list-group.stripes .list-group-item:nth-child(even) {
  background-color: #f7fbfe;
}
.list-group.stripes .list-group-item:nth-child(odd) {
  background-color: #edf5fe;
}
.list-group.stripes a.list-group-item {
  text-decoration: none;
}
.list-group.stripes a.list-group-item:nth-child(even):hover {
  background-color: #d7ebfa;
}
.list-group.stripes a.list-group-item:nth-child(odd):hover {
  background-color: #cbe2fc;
}

a.list-group-item.selected {
  font-weight: bold;
}

.separator {
  height: 5px;
  background-color: rgba(0, 0, 0, 0.125) !important;
  border-bottom: 0;
  padding: 0;
}

/* Alerts and form errors used by phx.new */
.alert {
  padding: 15px;
  margin-bottom: 20px;
  border: 1px solid transparent;
  border-radius: 4px;
}
.alert-info {
  color: #31708f;
  background-color: #d9edf7;
  border-color: #bce8f1;
}
.alert-warning {
  color: #8a6d3b;
  background-color: #fcf8e3;
  border-color: #faebcc;
}
.alert-danger {
  color: #a94442;
  background-color: #f2dede;
  border-color: #ebccd1;
}
.alert p {
  margin-bottom: 0;
}
.alert:empty {
  display: none;
}
.invalid-feedback {
  color: #a94442;
  display: block;
  margin: -1rem 0 2rem;
}

/* LiveView specific classes for your customization */
.phx-no-feedback.invalid-feedback,
.phx-no-feedback .invalid-feedback {
  display: none;
}

.phx-click-loading {
  opacity: 0.5;
  transition: opacity 1s ease-out;
}

.phx-loading{
  cursor: wait;
}

.phx-modal {
  opacity: 1!important;
  position: fixed;
  z-index: 1;
  left: 0;
  top: 0;
  width: 100%;
  height: 100%;
  overflow: auto;
  background-color: rgba(0,0,0,0.4);
}

.phx-modal-content {
  background-color: #fefefe;
  margin: 15vh auto;
  padding: 20px;
  border: 1px solid #888;
  width: 80%;
}

.phx-modal-close {
  color: #aaa;
  float: right;
  font-size: 28px;
  font-weight: bold;
}

.phx-modal-close:hover,
.phx-modal-close:focus {
  color: black;
  text-decoration: none;
  cursor: pointer;
}

.fade-in-scale {
  animation: 0.2s ease-in 0s normal forwards 1 fade-in-scale-keys;
}

.fade-out-scale {
  animation: 0.2s ease-out 0s normal forwards 1 fade-out-scale-keys;
}

.fade-in {
  animation: 0.2s ease-out 0s normal forwards 1 fade-in-keys;
}
.fade-out {
  animation: 0.2s ease-out 0s normal forwards 1 fade-out-keys;
}

@keyframes fade-in-scale-keys{
  0% { scale: 0.95; opacity: 0; }
  100% { scale: 1.0; opacity: 1; }
}

@keyframes fade-out-scale-keys{
  0% { scale: 1.0; opacity: 1; }
  100% { scale: 0.95; opacity: 0; }
}

@keyframes fade-in-keys{
  0% { opacity: 0; }
  100% { opacity: 1; }
}

@keyframes fade-out-keys{
  0% { opacity: 1; }
  100% { opacity: 0; }
}
170 |
--------------------------------------------------------------------------------
/insights/assets/js/app.js:
--------------------------------------------------------------------------------
// We import the CSS which is extracted to its own file by esbuild.
// Remove this line if you add your own CSS build pipeline (e.g postcss).
import "../css/app.css"

// If you want to use Phoenix channels, run `mix help phx.gen.channel`
// to get started and then uncomment the line below.
// import "./user_socket.js"

// You can include dependencies in two ways.
//
// The simplest option is to put them in assets/vendor and
// import them using relative paths:
//
//     import "../vendor/some-package.js"
//
// Alternatively, you can `npm install some-package --prefix assets` and import
// them using a path starting with the package name:
//
//     import "some-package"
//

// Include phoenix_html to handle method=PUT/DELETE in forms and buttons.
import "phoenix_html"
// Establish Phoenix Socket and LiveView configuration.
import {Socket} from "phoenix"
import {LiveSocket} from "phoenix_live_view"
import topbar from "../vendor/topbar"

// The CSRF token is rendered into a meta tag by the root layout and passed
// along with every LiveView connection.
let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content")
let liveSocket = new LiveSocket("/live", Socket, {params: {_csrf_token: csrfToken}})

// Show progress bar on live navigation and form submits.
// The listeners take no arguments (the event object was previously bound
// as an unused `info` parameter).
topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"})
window.addEventListener("phx:page-loading-start", () => topbar.show())
window.addEventListener("phx:page-loading-stop", () => topbar.hide())

// connect if there are any LiveViews on the page
liveSocket.connect()

// expose liveSocket on window for web console debug logs and latency simulation:
// >> liveSocket.enableDebug()
// >> liveSocket.enableLatencySim(1000)  // enabled for duration of browser session
// >> liveSocket.disableLatencySim()
window.liveSocket = liveSocket
46 |
--------------------------------------------------------------------------------
/insights/config/config.exs:
--------------------------------------------------------------------------------
# This file is responsible for configuring your application
# and its dependencies with the aid of the Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.

# General application configuration
import Config

# Enable logging in the MongoDB driver.
config :mongodb_driver,
  log: true

# Configures PromEx. Dashboards are uploaded to the local Grafana instance
# on startup; no standalone metrics server is started (metrics are exposed
# through the Phoenix endpoint instead).
config :insights, Insights.PromEx,
  disabled: false,
  manual_metrics_start_delay: :no_delay,
  drop_metrics_groups: [],
  grafana: [
    host: "http://localhost:3000",
    username: "admin", # Or authenticate via Basic Auth
    password: "admin",
    upload_dashboards_on_start: true # This is an optional setting and will default to `true`
  ],
  metrics_server: :disabled

# Configures the endpoint
config :insights, InsightsWeb.Endpoint,
  url: [host: "localhost"],
  render_errors: [view: InsightsWeb.ErrorView, accepts: ~w(html json), layout: false],
  pubsub_server: Insights.PubSub,
  live_view: [signing_salt: "x7TkPhml"]

# Configures the mailer
#
# By default it uses the "Local" adapter which stores the emails
# locally. You can see the emails in your browser, at "/dev/mailbox".
#
# For production it's recommended to configure a different adapter
# at the `config/runtime.exs`.
config :insights, Insights.Mailer, adapter: Swoosh.Adapters.Local

# Swoosh API client is needed for adapters other than SMTP.
config :swoosh, :api_client, false

# Configure esbuild (the version is required)
config :esbuild,
  version: "0.14.0",
  default: [
    args:
      ~w(js/app.js --bundle --target=es2017 --outdir=../priv/static/assets --external:/fonts/* --external:/images/*),
    cd: Path.expand("../assets", __DIR__),
    env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)}
  ]

# Configures Elixir's Logger
config :logger, :console,
  format: "$time $metadata[$level] $message\n",
  metadata: [:request_id]

# Use Jason for JSON parsing in Phoenix
config :phoenix, :json_library, Jason

# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{config_env()}.exs"
65 |
--------------------------------------------------------------------------------
/insights/config/dev.exs:
--------------------------------------------------------------------------------
import Config

# For development, we disable any cache and enable
# debugging and code reloading.
#
# The watchers configuration can be used to run external
# watchers to your application. For example, we use it
# with esbuild to bundle .js and .css sources.
config :insights, InsightsWeb.Endpoint,
  # Binding to `{0, 0, 0, 0}` makes the endpoint reachable from other
  # machines. Change to `ip: {127, 0, 0, 1}` to restrict access to the
  # local machine only.
  http: [ip: {0, 0, 0, 0}, port: 4000],
  check_origin: false,
  code_reloader: true,
  debug_errors: true,
  secret_key_base: "SjGtitt2BMvX5g2lqkF00pNAxXRI5u0v/pF0CR2TF1QZTjyB2Pb1llPe+P9DItK/",
  watchers: [
    # Start the esbuild watcher by calling Esbuild.install_and_run(:default, args)
    esbuild: {Esbuild, :install_and_run, [:default, ~w(--sourcemap=inline --watch)]}
  ]

# ## SSL Support
#
# In order to use HTTPS in development, a self-signed
# certificate can be generated by running the following
# Mix task:
#
#     mix phx.gen.cert
#
# Note that this task requires Erlang/OTP 20 or later.
# Run `mix help phx.gen.cert` for more information.
#
# The `http:` config above can be replaced with:
#
#     https: [
#       port: 4001,
#       cipher_suite: :strong,
#       keyfile: "priv/cert/selfsigned_key.pem",
#       certfile: "priv/cert/selfsigned.pem"
#     ],
#
# If desired, both `http:` and `https:` keys can be
# configured to run both http and https servers on
# different ports.

# Watch static and templates for browser reloading.
config :insights, InsightsWeb.Endpoint,
  live_reload: [
    patterns: [
      ~r"priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$",
      ~r"priv/gettext/.*(po)$",
      ~r"lib/insights_web/(live|views)/.*(ex)$",
      ~r"lib/insights_web/templates/.*(eex)$"
    ]
  ]

# Include module/function/line metadata in development logs
# (this replaces the stock phx.new format, which omits metadata).
config :logger, :console,
  format: "$time [$level] $message ($metadata)\n",
  metadata: [:module, :function, :line]

# Set a higher stacktrace during development. Avoid configuring such
# in production as building large stacktraces may be expensive.
config :phoenix, :stacktrace_depth, 20

# Initialize plugs at runtime for faster development compilation
config :phoenix, :plug_init_mode, :runtime
68 |
--------------------------------------------------------------------------------
/insights/config/prod.exs:
--------------------------------------------------------------------------------
import Config

# For production, don't forget to configure the url host
# to something meaningful, Phoenix uses this information
# when generating URLs.
#
# Note we also include the path to a cache manifest
# containing the digested version of static files. This
# manifest is generated by the `mix phx.digest` task,
# which you should run after static files are built and
# before starting your production server.
#
# The remaining production settings (secret key base, host, port) are
# loaded at runtime from config/runtime.exs.
config :insights, InsightsWeb.Endpoint, cache_static_manifest: "priv/static/cache_manifest.json"

# Do not print debug messages in production
config :logger, level: :info

# ## SSL Support
#
# To get SSL working, you will need to add the `https` key
# to the previous section and set your `:url` port to 443:
#
#     config :insights, InsightsWeb.Endpoint,
#       ...,
#       url: [host: "example.com", port: 443],
#       https: [
#         ...,
#         port: 443,
#         cipher_suite: :strong,
#         keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"),
#         certfile: System.get_env("SOME_APP_SSL_CERT_PATH")
#       ]
#
# The `cipher_suite` is set to `:strong` to support only the
# latest and more secure SSL ciphers. This means old browsers
# and clients may not be supported. You can set it to
# `:compatible` for wider support.
#
# `:keyfile` and `:certfile` expect an absolute path to the key
# and cert in disk or a relative path inside priv, for example
# "priv/ssl/server.key". For all supported SSL configuration
# options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1
#
# We also recommend setting `force_ssl` in your endpoint, ensuring
# no data is ever sent via http, always redirecting to https:
#
#     config :insights, InsightsWeb.Endpoint,
#       force_ssl: [hsts: true]
#
# Check `Plug.SSL` for all available options in `force_ssl`.
50 |
--------------------------------------------------------------------------------
/insights/config/runtime.exs:
--------------------------------------------------------------------------------
import Config

# config/runtime.exs is executed for all environments, including
# during releases. It is executed after compilation and before the
# system starts, so it is typically used to load production configuration
# and secrets from environment variables or elsewhere. Do not define
# any compile-time configuration in here, as it won't be applied.
# The block below contains prod specific runtime configuration.

# Start the phoenix server if environment is set and running in a release
if System.get_env("PHX_SERVER") && System.get_env("RELEASE_NAME") do
  config :insights, InsightsWeb.Endpoint, server: true
end

if config_env() == :prod do
  # The secret key base is used to sign/encrypt cookies and other secrets.
  # A default value is used in config/dev.exs and config/test.exs but you
  # want to use a different value for prod and you most likely don't want
  # to check this value into version control, so we use an environment
  # variable instead.
  secret_key_base =
    System.get_env("SECRET_KEY_BASE") ||
      raise """
      environment variable SECRET_KEY_BASE is missing.
      You can generate one by calling: mix phx.gen.secret
      """

  # Public host name and HTTP port; both can be overridden via environment.
  host = System.get_env("PHX_HOST") || "example.com"
  port = String.to_integer(System.get_env("PORT") || "4000")

  config :insights, InsightsWeb.Endpoint,
    url: [host: host, port: 443],
    http: [
      # Enable IPv6 and bind on all interfaces.
      # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access.
      # See the documentation on https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html
      # for details about using IPv6 vs IPv4 and loopback vs public addresses.
      ip: {0, 0, 0, 0, 0, 0, 0, 0},
      port: port
    ],
    secret_key_base: secret_key_base

  # ## Using releases
  #
  # If you are doing OTP releases, you need to instruct Phoenix
  # to start each relevant endpoint:
  #
  #     config :insights, InsightsWeb.Endpoint, server: true
  #
  # Then you can assemble a release by calling `mix release`.
  # See `mix help release` for more information.

  # ## Configuring the mailer
  #
  # In production you need to configure the mailer to use a different adapter.
  # Also, you may need to configure the Swoosh API client of your choice if you
  # are not using SMTP. Here is an example of the configuration:
  #
  #     config :insights, Insights.Mailer,
  #       adapter: Swoosh.Adapters.Mailgun,
  #       api_key: System.get_env("MAILGUN_API_KEY"),
  #       domain: System.get_env("MAILGUN_DOMAIN")
  #
  # For this example you need include a HTTP client required by Swoosh API client.
  # Swoosh supports Hackney and Finch out of the box:
  #
  #     config :swoosh, :api_client, Swoosh.ApiClient.Hackney
  #
  # See https://hexdocs.pm/swoosh/Swoosh.html#module-installation for details.
end
72 |
--------------------------------------------------------------------------------
/insights/config/test.exs:
--------------------------------------------------------------------------------
import Config

# We don't run a server during test. If one is required,
# you can enable the server option below.
config :insights, InsightsWeb.Endpoint,
  http: [ip: {127, 0, 0, 1}, port: 4002],
  secret_key_base: "HeabAcw7tPo0UbMqdHT5xd7t0hUzsWaTBVUaDApYnhNIhV5uPcUVAMjQqj6OVhh/",
  server: false

# In test we don't send emails.
config :insights, Insights.Mailer, adapter: Swoosh.Adapters.Test

# Print only warnings and errors during test
# NOTE(review): `:warn` is deprecated in favour of `:warning` on Elixir >= 1.15;
# keep `:warn` only if the project targets an older Elixir version.
config :logger, level: :warn

# Initialize plugs at runtime for faster test compilation
config :phoenix, :plug_init_mode, :runtime
18 |
--------------------------------------------------------------------------------
/insights/lib/insights.ex:
--------------------------------------------------------------------------------
defmodule Insights do
  @moduledoc """
  Top-level namespace for the Insights application.

  Holds the contexts that implement the domain and business logic. Contexts
  are also responsible for managing their data, regardless of whether it
  comes from a database, an external API or elsewhere.
  """
end
10 |
--------------------------------------------------------------------------------
/insights/lib/insights/application.ex:
--------------------------------------------------------------------------------
defmodule Insights.Application do
  # See https://hexdocs.pm/elixir/Application.html
  # for more information on OTP Applications
  @moduledoc false

  use Application

  @impl true
  def start(_type, _args) do
    children = [
      # PromEx comes first so no telemetry events are missed at startup
      Insights.PromEx,
      # MongoDB connection registered under the name `:mongo`
      {Mongo, [name: :mongo, url: "mongodb://localhost:27017/insights", timeout: 60_000, pool_size: 1, idle_interval: 10_000]},
      # Start the Telemetry supervisor
      InsightsWeb.Telemetry,
      # Start the PubSub system
      {Phoenix.PubSub, name: Insights.PubSub},
      # Re-broadcasts registry events over Phoenix PubSub
      {Insights.EventHandler, []},
      # Start the Endpoint (http/https)
      InsightsWeb.Endpoint
      # Start a worker by calling: Insights.Worker.start_link(arg)
      # {Insights.Worker, arg}
    ]

    # See https://hexdocs.pm/elixir/Supervisor.html
    # for other strategies and supported options
    opts = [strategy: :one_for_one, name: Insights.Supervisor]
    Supervisor.start_link(children, opts)
  end

  # Tell Phoenix to update the endpoint configuration
  # whenever the application is updated.
  @impl true
  def config_change(changed, _new, removed) do
    InsightsWeb.Endpoint.config_change(changed, removed)
    :ok
  end
end
38 |
--------------------------------------------------------------------------------
/insights/lib/insights/event_handler.ex:
--------------------------------------------------------------------------------
defmodule Insights.EventHandler do
  @moduledoc """
  Registers under the `:events_registry` registry and re-broadcasts the
  `{:broadcast, topic, event}` messages it receives over Phoenix PubSub,
  using the topics `"topology"` and `"commands"`.
  """

  require Logger

  use GenServer

  @me __MODULE__

  @doc """
  Starts the GenServer.
  """
  def start_link(_args) do
    GenServer.start_link(@me, :no_args, name: @me)
  end

  @impl true
  def init(:no_args) do
    info("Starting insights event handler")

    # After registering, this process receives the registry's dispatched
    # messages, which are handled in handle_info/2 below.
    Registry.register(:events_registry, :commands, [])
    Registry.register(:events_registry, :topology, [])

    {:ok, %{}}
  end

  # Fixed: the handle_info/2 clauses were missing `@impl true`.
  @impl true
  def handle_info({:broadcast, :topology, event}, state) do
    Phoenix.PubSub.local_broadcast(Insights.PubSub, "topology", event)
    {:noreply, state}
  end

  def handle_info({:broadcast, :commands, event}, state) do
    Phoenix.PubSub.local_broadcast(Insights.PubSub, "commands", event)
    {:noreply, state}
  end

  # Catch-all: ignore any other message instead of crashing.
  def handle_info(_message, state) do
    ## info("Receiving message: #{inspect message}")
    {:noreply, state}
  end

  # Logs in a bright magenta colour so driver events stand out in the console.
  defp info(message) do
    Logger.info(IO.ANSI.format([:light_magenta, :bright, message]))
  end
end
--------------------------------------------------------------------------------
/insights/lib/insights/mailer.ex:
--------------------------------------------------------------------------------
defmodule Insights.Mailer do
  @moduledoc """
  Swoosh mailer for the `:insights` application; the adapter is configured
  per environment under `config :insights, Insights.Mailer`.
  """
  use Swoosh.Mailer, otp_app: :insights
end
4 |
--------------------------------------------------------------------------------
/insights/lib/insights/mongodb_plugin.ex:
--------------------------------------------------------------------------------
# NOTE(review): this module lives in the `PromEx.Plugins` namespace, which is
# owned by the PromEx library ("namespace trespassing"). Consider renaming it
# to e.g. `Insights.PromEx.MongoDBPlugin` — callers in `Insights.PromEx` would
# need updating, so the name is kept here.
defmodule PromEx.Plugins.MongoDB do
  @moduledoc """
  PromEx plugin building a distribution metric from the
  `[:mongodb_driver, :execution]` telemetry event.
  """

  use PromEx.Plugin

  @impl true
  # Fixed: `opts` was unused and is now marked with an underscore.
  def event_metrics(_opts) do
    Event.build(
      :mongodb_cmd_duration,
      [
        # Capture command execution duration information
        distribution(
          [:mongodb_driver, :execution, :duration, :milliseconds],
          event_name: [:mongodb_driver, :execution],
          measurement: :duration,
          description: "The execution time for a command",
          reporter_options: [
            buckets: [10, 100, 500, 1_000, 5_000, 10_000, 30_000]
          ],
          # tag_values: [:todo],
          tags: [:collection, :command],
          unit: {:microsecond, :millisecond}
        )
      ]
    )
  end
end
26 |
--------------------------------------------------------------------------------
/insights/lib/insights/prom_ex.ex:
--------------------------------------------------------------------------------
defmodule Insights.PromEx do
  @moduledoc """
  PromEx configuration for the Insights application.

  The generated setup steps are already completed in this project:

    * configured via `config :insights, Insights.PromEx` in `config/config.exs`
      (see `PromEx.Config` for the available options);
    * started first in the supervision tree (`Insights.Application`) so that
      no telemetry events are missed;
    * metrics exposed through `plug PromEx.Plug` in `InsightsWeb.Endpoint`.

  Update `plugins/0` and `dashboards/0` below to reflect the application's
  dependencies and the dashboards uploaded to Grafana.
  """

  use PromEx, otp_app: :insights

  alias PromEx.Plugins

  @impl true
  def plugins do
    [
      # PromEx built in plugins
      Plugins.Application,
      Plugins.Beam,
      {Plugins.Phoenix, router: InsightsWeb.Router, endpoint: InsightsWeb.Endpoint},
      # Plugins.Ecto,
      # Plugins.Oban,
      # Plugins.PhoenixLiveView,
      # Plugins.Absinthe,
      # Plugins.Broadway,

      # Add your own PromEx metrics plugins
      # Insights.Users.PromExPlugin
      {PromEx.Plugins.MongoDB, otp_app: :insights}
    ]
  end

  @impl true
  def dashboard_assigns do
    [
      datasource_id: "Prometheus",
      default_selected_interval: "30s"
    ]
  end

  @impl true
  def dashboards do
    [
      # PromEx built in Grafana dashboards
      {:prom_ex, "application.json"},
      {:prom_ex, "beam.json"},
      {:prom_ex, "phoenix.json"},
      # {:prom_ex, "ecto.json"},
      # {:prom_ex, "oban.json"},
      # {:prom_ex, "phoenix_live_view.json"},
      # {:prom_ex, "absinthe.json"},
      # {:prom_ex, "broadway.json"},

      # Add your dashboard definitions here with the format: {:otp_app, "path_in_priv"}
      # NOTE(review): the leading "/" in the path below differs from the
      # documented "path_in_priv" format above — verify it resolves correctly.
      {:insights, "/grafana_dashboards/mongodb_driver.json"}
    ]
  end
end
104 |
--------------------------------------------------------------------------------
/insights/lib/insights/test.ex:
--------------------------------------------------------------------------------
defmodule Insights.Test do
  @moduledoc """
  Manual test helper: arms the server-side `failCommand` fail point for a
  set of error codes and logs the error returned by a subsequent `find`.
  """

  require Logger

  # MongoDB server error codes (names mirror the server's error code names).
  @host_unreachable 6
  @host_not_found 7
  @stale_shard_version 63
  @network_timeout 89
  @shutdown_in_progress 91
  @failed_to_satisfy_read_preference 133
  @stale_epoch 150
  @primary_stepped_down 189
  @retry_change_stream 234
  @exceeded_time_limit 262
  @socket_exception 9001
  @not_master 10107
  @interrupted_at_shutdown 11600
  @interrupted_due_to_repl_state_change 11602
  @not_master_no_slaveok 13435
  @not_master_or_secondary 13436
  # @stale_config 13388

  # Full list of codes under test. Fixes the previous `@resumxable` typo
  # (an unused, misspelled attribute) and removes the duplicated
  # `@interrupted_at_shutdown` entry.
  @all_resumable [@host_unreachable, @host_not_found, @network_timeout, @shutdown_in_progress,
                  @primary_stepped_down, @exceeded_time_limit, @socket_exception, @not_master,
                  @interrupted_at_shutdown, @interrupted_due_to_repl_state_change,
                  @not_master_no_slaveok, @not_master_or_secondary, @stale_shard_version,
                  @stale_epoch, @retry_change_stream, @failed_to_satisfy_read_preference]
                  # @stale_config

  # Narrowed-down list currently exercised by `test/0`.
  @resumable [@primary_stepped_down]

  @doc """
  Returns the full list of error codes available for fail-point testing.
  """
  def resumable_codes(), do: @all_resumable

  @doc """
  For each code in the active list: arm the `failCommand` fail point for the
  next `find` command and log the error produced by `Mongo.find_one/3`.

  Requires a running connection registered as `:mongo`.
  """
  def test() do
    Enum.map(@resumable, fn code ->
      fail_cmd = [
        configureFailPoint: "failCommand",
        mode: %{times: 1},
        data: [errorCode: code, failCommands: ["find"]]
      ]

      {:ok, _} = Mongo.admin_command(:mongo, fail_cmd)
      {:error, msg} = Mongo.find_one(:mongo, "test", %{})
      Logger.info("Error: #{inspect(msg)}")
    end)
  end
end
45 |
46 |
--------------------------------------------------------------------------------
/insights/lib/insights_web.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb do
  @moduledoc """
  The entrypoint for defining your web interface, such
  as controllers, views, channels and so on.

  This can be used in your application as:

      use InsightsWeb, :controller
      use InsightsWeb, :view

  The definitions below will be executed for every view,
  controller, etc, so keep them short and clean, focused
  on imports, uses and aliases.

  Do NOT define functions inside the quoted expressions
  below. Instead, define any helper function in modules
  and import those modules here.
  """

  # Quoted expression injected via `use InsightsWeb, :controller`.
  def controller do
    quote do
      use Phoenix.Controller, namespace: InsightsWeb

      import Plug.Conn
      import InsightsWeb.Gettext
      alias InsightsWeb.Router.Helpers, as: Routes
    end
  end

  # Quoted expression injected via `use InsightsWeb, :view`.
  def view do
    quote do
      use Phoenix.View,
        root: "lib/insights_web/templates",
        namespace: InsightsWeb

      # Import convenience functions from controllers
      import Phoenix.Controller,
        only: [get_flash: 1, get_flash: 2, view_module: 1, view_template: 1]

      # Include shared imports and aliases for views
      unquote(view_helpers())
    end
  end

  # Quoted expression injected via `use InsightsWeb, :live_view`.
  def live_view do
    quote do
      use Phoenix.LiveView,
        layout: {InsightsWeb.LayoutView, "live.html"}

      unquote(view_helpers())
    end
  end

  # Quoted expression injected via `use InsightsWeb, :live_component`.
  def live_component do
    quote do
      use Phoenix.LiveComponent

      unquote(view_helpers())
    end
  end

  # Quoted expression injected via `use InsightsWeb, :component`.
  def component do
    quote do
      use Phoenix.Component

      unquote(view_helpers())
    end
  end

  # Quoted expression injected via `use InsightsWeb, :router`.
  def router do
    quote do
      use Phoenix.Router

      import Plug.Conn
      import Phoenix.Controller
      import Phoenix.LiveView.Router
    end
  end

  # Quoted expression injected via `use InsightsWeb, :channel`.
  def channel do
    quote do
      use Phoenix.Channel
      import InsightsWeb.Gettext
    end
  end

  # Imports and aliases shared by all view-like modules above.
  defp view_helpers do
    quote do
      # Use all HTML functionality (forms, tags, etc)
      use Phoenix.HTML

      # Import LiveView and .heex helpers (live_render, live_patch, <.form>, etc)
      import Phoenix.LiveView.Helpers

      # Import basic rendering functionality (render, render_layout, etc)
      import Phoenix.View

      import InsightsWeb.ErrorHelpers
      import InsightsWeb.Gettext
      alias InsightsWeb.Router.Helpers, as: Routes
    end
  end

  @doc """
  When used, dispatch to the appropriate controller/view/etc.
  """
  defmacro __using__(which) when is_atom(which) do
    apply(__MODULE__, which, [])
  end
end
111 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/controllers/page_controller.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.PageController do
  @moduledoc false

  use InsightsWeb, :controller

  # Renders the static templates/page/index.html.heex template.
  def index(conn, _params) do
    render(conn, "index.html")
  end
end
8 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/endpoint.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.Endpoint do
  use Phoenix.Endpoint, otp_app: :insights

  # The session will be stored in the cookie and signed,
  # this means its contents can be read but not tampered with.
  # Set :encryption_salt if you would also like to encrypt it.
  @session_options [
    store: :cookie,
    key: "_insights_key",
    signing_salt: "spJ/zHt6"
  ]

  # LiveView websocket; the signed session options are forwarded so
  # LiveViews can read the session on mount.
  socket "/live", Phoenix.LiveView.Socket, websocket: [connect_info: [session: @session_options]]

  # Serves Prometheus metrics for Insights.PromEx (the exact route is
  # determined by PromEx.Plug's configuration).
  plug PromEx.Plug, prom_ex_module: Insights.PromEx

  # Serve at "/" the static files from "priv/static" directory.
  #
  # You should set gzip to true if you are running phx.digest
  # when deploying your static files in production.
  plug Plug.Static,
    at: "/",
    from: :insights,
    gzip: false,
    only: ~w(assets fonts images favicon.ico robots.txt)

  # Code reloading can be explicitly enabled under the
  # :code_reloader configuration of your endpoint.
  if code_reloading? do
    socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket
    plug Phoenix.LiveReloader
    plug Phoenix.CodeReloader
  end

  plug Phoenix.LiveDashboard.RequestLogger,
    param_key: "request_logger",
    cookie_key: "request_logger"

  plug Plug.RequestId
  plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint]

  plug Plug.Parsers,
    parsers: [:urlencoded, :multipart, :json],
    pass: ["*/*"],
    json_decoder: Phoenix.json_library()

  plug Plug.MethodOverride
  plug Plug.Head
  # The session plug must run before the router so the :browser pipeline's
  # `fetch_session` finds the cookie-backed session configured above.
  plug Plug.Session, @session_options
  plug InsightsWeb.Router
end
52 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/gettext.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.Gettext do
  @moduledoc """
  A module providing Internationalization with a gettext-based API.

  By using [Gettext](https://hexdocs.pm/gettext),
  your module gains a set of macros for translations, for example:

      import InsightsWeb.Gettext

      # Simple translation
      gettext("Here is the string to translate")

      # Plural translation
      ngettext("Here is the string to translate",
               "Here are the strings to translate",
               3)

      # Domain-based translation
      dgettext("errors", "Here is the error message to translate")

  See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage.
  """
  # Translation catalogs live under priv/gettext (e.g. en/LC_MESSAGES/errors.po).
  use Gettext, otp_app: :insights
end
25 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/live/topology_live.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.TopologyLive do
  @moduledoc """
  LiveView that renders the MongoDB driver topology (servers and their
  monitors) together with a rolling list of the most recent driver events.

  Events arrive as plain messages via `Phoenix.PubSub` on the "topology"
  and "commands" topics; each one is time-stamped, given a random id and
  prepended to a bounded event list.
  """
  use InsightsWeb, :live_view

  # FIX: dropped the unused `alias Mongo.StreamingHelloMonitor`
  # (it produced an "unused alias" compiler warning).
  alias Mongo.Monitor
  alias Mongo.Topology

  require Logger

  # Keep only the most recent events in the assigns.
  @max_events 10

  @impl true
  def mount(_params, _session, socket) do
    # Subscribe only once the websocket is connected; the initial static
    # render does not need live updates.
    if connected?(socket) do
      Phoenix.PubSub.subscribe(Insights.PubSub, "topology")
      Phoenix.PubSub.subscribe(Insights.PubSub, "commands")
    end

    {:ok, reset_defaults(socket)}
  end

  @impl true
  def render(assigns) do
    Phoenix.View.render(InsightsWeb.TopologyView, "index.html", assigns)
  end

  # Server-description changes additionally refresh the cached topology state.
  @impl true
  def handle_info(%Mongo.Events.ServerDescriptionChangedEvent{} = event, %{assigns: %{events: events}} = socket) do
    socket =
      socket
      |> set_topology(Topology.get_state(:mongo))
      |> assign(events: push_event(events, event))

    {:noreply, socket}
  end

  # Every other PubSub message is recorded as a plain event entry.
  def handle_info(event, %{assigns: %{events: events}} = socket) do
    {:noreply, assign(socket, events: push_event(events, event))}
  end

  @impl true
  def handle_event("show-events", _params, socket) do
    {:noreply, assign(socket, tab: "events")}
  end

  def handle_event("show-details", _params, socket) do
    {:noreply, assign(socket, tab: "details")}
  end

  def handle_event("select-event", %{"id" => event_id}, %{assigns: %{events: events}} = socket) do
    {:noreply, assign(socket, event: Enum.find(events, fn %{id: id} -> event_id == id end))}
  end

  # Stamps the event with a timestamp and a random id, prepends it and
  # keeps at most @max_events entries.
  defp push_event(events, event) do
    event =
      event
      |> Map.put(:time_stamp, DateTime.utc_now())
      |> Map.put(:id, random_string(10))

    Enum.take([event | events], @max_events)
  end

  defp reset_defaults(socket) do
    socket
    |> set_topology(Topology.get_state(:mongo))
    |> assign(events: [])
    |> assign(tab: "details")
    |> assign(event: nil)
  end

  def set_topology(socket, %{topology: %{servers: servers} = topology, monitors: monitors}) do
    # Resolve each monitor pid to its current state, keyed by address,
    # so the details template can look it up with Map.get/2.
    monitors =
      monitors
      |> Enum.map(fn {address, pid} -> {address, Monitor.get_state(pid)} end)
      |> Enum.into(%{})

    socket
    |> assign(topology: topology)
    |> assign(servers: Map.values(servers))
    |> assign(monitors: monitors)
  end

  def set_topology(socket, _other) do
    # FIX: monitors is now an empty map (was `[]`) so the assign shape
    # matches the success clause above — the template calls
    # Map.get(@monitors, ...), which raises BadMapError on a list.
    socket
    |> assign(topology: nil)
    |> assign(servers: [])
    |> assign(monitors: %{})
  end

  # Returns a URL-safe random string of `length` characters.
  def random_string(length) do
    :crypto.strong_rand_bytes(length)
    |> Base.url_encode64()
    |> binary_part(0, length)
  end
end
93 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/router.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.Router do
  use InsightsWeb, :router

  # Standard browser stack: HTML only, session + live flash, root layout,
  # CSRF protection and secure headers.
  pipeline :browser do
    plug :accepts, ["html"]
    plug :fetch_session
    plug :fetch_live_flash
    plug :put_root_layout, {InsightsWeb.LayoutView, :root}
    plug :protect_from_forgery
    plug :put_secure_browser_headers
  end

  pipeline :api do
    plug :accepts, ["json"]
  end

  scope "/", InsightsWeb do
    pipe_through :browser
    # The landing page is the topology LiveView, not a controller action.
    live "/", TopologyLive, :index
  end

  # Other scopes may use custom stacks.
  # scope "/api", InsightsWeb do
  #   pipe_through :api
  # end

  # Enables LiveDashboard only for development
  #
  # If you want to use the LiveDashboard in production, you should put
  # it behind authentication and allow only admins to access it.
  # If your application does not have an admins-only section yet,
  # you can use Plug.BasicAuth to set up some basic authentication
  # as long as you are also using SSL (which you should anyway).
  if Mix.env() in [:dev, :test] do
    import Phoenix.LiveDashboard.Router

    scope "/" do
      pipe_through :browser

      live_dashboard "/dashboard", metrics: InsightsWeb.Telemetry
    end
  end

  # Enables the Swoosh mailbox preview in development.
  #
  # Note that preview only shows emails that were sent by the same
  # node running the Phoenix server.
  if Mix.env() == :dev do
    scope "/dev" do
      pipe_through :browser

      forward "/mailbox", Plug.Swoosh.MailboxPreview
    end
  end
end
56 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/telemetry.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.Telemetry do
  @moduledoc """
  Telemetry supervisor: starts the periodic poller and declares the
  metrics consumed by reporters (e.g. the LiveDashboard metrics page).
  """
  use Supervisor
  import Telemetry.Metrics

  def start_link(arg) do
    Supervisor.start_link(__MODULE__, arg, name: __MODULE__)
  end

  @impl true
  def init(_arg) do
    children = [
      # Telemetry poller will execute the given period measurements
      # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics
      {:telemetry_poller, measurements: periodic_measurements(), period: 10_000}
      # Add reporters as children of your supervision tree.
      # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()}
    ]

    Supervisor.init(children, strategy: :one_for_one)
  end

  def metrics do
    [
      # Phoenix Metrics
      summary("phoenix.endpoint.stop.duration",
        unit: {:native, :millisecond}
      ),
      summary("phoenix.router_dispatch.stop.duration",
        tags: [:route],
        unit: {:native, :millisecond}
      ),

      # Database Metrics
      # NOTE(review): these "insights.repo.*" metrics are Phoenix generator
      # defaults; this project declares no Ecto repo (no :ecto_sql dep in
      # mix.exs), so they will likely never receive data — confirm before
      # relying on them.
      summary("insights.repo.query.total_time",
        unit: {:native, :millisecond},
        description: "The sum of the other measurements"
      ),
      summary("insights.repo.query.decode_time",
        unit: {:native, :millisecond},
        description: "The time spent decoding the data received from the database"
      ),
      summary("insights.repo.query.query_time",
        unit: {:native, :millisecond},
        description: "The time spent executing the query"
      ),
      summary("insights.repo.query.queue_time",
        unit: {:native, :millisecond},
        description: "The time spent waiting for a database connection"
      ),
      summary("insights.repo.query.idle_time",
        unit: {:native, :millisecond},
        description:
          "The time the connection spent waiting before being checked out for the query"
      ),

      # VM Metrics
      summary("vm.memory.total", unit: {:byte, :kilobyte}),
      summary("vm.total_run_queue_lengths.total"),
      summary("vm.total_run_queue_lengths.cpu"),
      summary("vm.total_run_queue_lengths.io")
    ]
  end

  defp periodic_measurements do
    [
      # A module, function and arguments to be invoked periodically.
      # This function must call :telemetry.execute/3 and a metric must be added above.
      # {InsightsWeb, :count_users, []}
    ]
  end
end
72 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/layout/app.html.heex:
--------------------------------------------------------------------------------
1 |
2 | <%= get_flash(@conn, :info) %>
3 | <%= get_flash(@conn, :error) %>
4 | <%= @inner_content %>
5 |
6 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/layout/live.html.heex:
--------------------------------------------------------------------------------
1 |
2 | <%= live_flash(@flash, :info) %>
3 |
4 | <%= live_flash(@flash, :error) %>
5 |
6 | <%= @inner_content %>
7 |
8 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/layout/root.html.heex:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | <%= csrf_meta_tag() %>
8 | <%= live_title_tag assigns[:page_title] || "Insights" %>
9 |
10 |
11 |
12 |
13 |
28 | <%= @inner_content %>
29 |
30 |
31 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/page/index.html.heex:
--------------------------------------------------------------------------------
1 |
2 |
Topology
3 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/topology/details.html.heex:
--------------------------------------------------------------------------------
1 |
2 |
3 | <%= if @topology do %>
4 |
5 | - <%= @topology.type%>
6 | - <%= @topology.heartbeat_frequency_ms%>
7 | - <%= @topology.local_threshold_ms%>
8 | - <%= @topology.compatible%>
9 |
10 | <% else %>
11 |
12 |
No topology entry found!
13 |
14 | <% end %>
15 |
16 |
17 |
18 |
19 |
Servers
20 | <%= for server <- @servers do %>
21 |
22 |
23 | <%= render InsightsWeb.TopologyView, "server.html", server: server %>
24 |
25 |
26 | <%= render InsightsWeb.TopologyView, "monitor.html", monitor: Map.get(@monitors, server.address) %>
27 |
28 |
29 | <% end %>
30 |
31 |
32 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/topology/event.html.heex:
--------------------------------------------------------------------------------
1 | <%= event_name(@event)%>
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/topology/events.html.heex:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | <%= for event <- @events do %>
5 | <%= if @event do %>
6 | <%= render InsightsWeb.TopologyView, "event.html", event: event, select_event_id: @event.id %>
7 | <% else %>
8 | <%= render InsightsWeb.TopologyView, "event.html", event: event, select_event_id: nil %>
9 | <% end %>
10 | <% end %>
11 |
12 |
13 |
14 |
15 | <%= if @event do %>
16 | <%= event_name(@event)%>
17 |
18 |
19 |
20 | - <%= @event.time_stamp%>
21 |
22 |
23 |
24 | <% end %>
25 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/topology/index.html.heex:
--------------------------------------------------------------------------------
1 |
2 |
Topology
3 |
4 |
12 |
13 |
14 | <%= render InsightsWeb.TopologyView, "details.html", assigns %>
15 |
16 |
17 | <%= render InsightsWeb.TopologyView, "events.html", assigns %>
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/topology/monitor.html.heex:
--------------------------------------------------------------------------------
1 | Monitor process <%= inspect @monitor.pid %>
2 |
3 | - <%= @monitor.address%>
4 | - <%= @monitor.mode%>
5 | - <%= @monitor.heartbeat_frequency_ms%>
6 | - <%= @monitor.round_trip_time%>
7 | - <%= inspect @monitor.connection_pid%>
8 | - <%= inspect @monitor.streaming_pid%>
9 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/templates/topology/server.html.heex:
--------------------------------------------------------------------------------
1 | <%= @server.address%>
2 |
3 | - <%= @server.address%>
4 | - <%= @server.type%>
5 | - <%= @server.round_trip_time%>
6 | - <%= @server.last_update_time%>
7 |
8 | - <%= @server.min_wire_version%>
9 | - <%= @server.max_wire_version%>
10 | - <%= inspect @server.error%>
11 |
12 | - <%= @server.max_message_size_bytes%>
13 | - <%= @server.max_write_batch_size%>
14 | - <%= @server.max_bson_object_size%>
15 |
16 | - <%= inspect @server.set_name%>
17 | - <%= inspect @server.set_version%>
18 |
19 | - <%= @server.support_retryable_writes%>
20 | - <%= @server.read_only%>
21 |
22 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/views/error_helpers.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.ErrorHelpers do
  @moduledoc """
  Conveniences for translating and building error messages.
  """

  use Phoenix.HTML

  @doc """
  Generates tag for inlined form input errors.

  Builds one `<span class="invalid-feedback">` per error registered on
  `field` in the given form.
  """
  def error_tag(form, field) do
    form.errors
    |> Keyword.get_values(field)
    |> Enum.map(fn error ->
      content_tag(:span, translate_error(error),
        class: "invalid-feedback",
        phx_feedback_for: input_name(form, field)
      )
    end)
  end

  @doc """
  Translates an error message using gettext.
  """
  def translate_error({msg, opts}) do
    # When using gettext, we typically pass the strings we want
    # to translate as a static argument:
    #
    #     # Translate "is invalid" in the "errors" domain
    #     dgettext("errors", "is invalid")
    #
    #     # Translate the number of files with plural rules
    #     dngettext("errors", "1 file", "%{count} files", count)
    #
    # Because the error messages we show in our forms and APIs
    # are defined inside Ecto, we need to translate them dynamically.
    # This requires us to call the Gettext module passing our gettext
    # backend as first argument.
    #
    # Note we use the "errors" domain, which means translations
    # should be written to the errors.po file. The :count option is
    # set by Ecto and indicates we should also apply plural rules.
    case opts[:count] do
      nil ->
        Gettext.dgettext(InsightsWeb.Gettext, "errors", msg, opts)

      count ->
        Gettext.dngettext(InsightsWeb.Gettext, "errors", msg, msg, count, opts)
    end
  end
end
48 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/views/error_view.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.ErrorView do
  @moduledoc false

  use InsightsWeb, :view

  # If you want to customize a particular status code
  # for a certain format, you may uncomment below.
  # def render("500.html", _assigns) do
  #   "Internal Server Error"
  # end

  # By default, Phoenix returns the status message from
  # the template name. For example, "404.html" becomes
  # "Not Found".
  def template_not_found(template, _assigns) do
    Phoenix.Controller.status_message_from_template(template)
  end
end
17 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/views/layout_view.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.LayoutView do
  @moduledoc false

  use InsightsWeb, :view

  # Phoenix LiveDashboard is available only in development by default,
  # so we instruct Elixir to not warn if the dashboard route is missing.
  @compile {:no_warn_undefined, {Routes, :live_dashboard_path, 2}}
end
8 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/views/page_view.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.PageView do
  # View backing templates/page/*.heex; no custom helpers needed.
  @moduledoc false

  use InsightsWeb, :view
end
4 |
--------------------------------------------------------------------------------
/insights/lib/insights_web/views/topology_view.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.TopologyView do
  use InsightsWeb, :view

  @doc """
  Returns the fully-qualified module name of an event struct as a dotted
  string, e.g. a `Mongo.Events.ServerDescriptionChangedEvent` struct
  becomes `"Mongo.Events.ServerDescriptionChangedEvent"`.
  """
  def event_name(%{__struct__: name}) do
    # FIX: the original also bound the whole struct to an unused `struct`
    # variable, producing an "unused variable" compiler warning; only the
    # module name is needed here.
    name
    |> Module.split()
    |> Enum.join(".")
  end

  @doc """
  Support for tabs: returns the CSS class "active" when `tab` is the
  currently selected tab, otherwise an empty value (renders as nothing).
  """
  def tab_active(tab, current) do
    case tab == current do
      true -> "active"
      false -> []
    end
  end

  # Returns the "selected" CSS class for the currently selected event.
  # NOTE: despite the `?` suffix this returns a class value, not a boolean;
  # the name is kept because the templates call it as event_selected?/1.
  def event_selected?(true) do
    "selected"
  end

  def event_selected?(_) do
    []
  end
end
28 |
--------------------------------------------------------------------------------
/insights/mix.exs:
--------------------------------------------------------------------------------
defmodule Insights.MixProject do
  use Mix.Project

  def project do
    [
      app: :insights,
      version: "0.1.0",
      elixir: "~> 1.12",
      elixirc_paths: elixirc_paths(Mix.env()),
      compilers: [:gettext] ++ Mix.compilers(),
      start_permanent: Mix.env() == :prod,
      aliases: aliases(),
      deps: deps()
    ]
  end

  # Configuration for the OTP application.
  #
  # Type `mix help compile.app` for more information.
  def application do
    [
      mod: {Insights.Application, []},
      extra_applications: [:logger, :runtime_tools]
    ]
  end

  # Specifies which paths to compile per environment.
  defp elixirc_paths(:test), do: ["lib", "test/support"]
  defp elixirc_paths(_), do: ["lib"]

  # Specifies your project dependencies.
  #
  # Type `mix help deps` for examples and options.
  defp deps do
    [
      {:phoenix, "~> 1.6.5"},
      {:phoenix_html, "~> 3.0"},
      {:phoenix_live_reload, "~> 1.2", only: :dev},
      {:phoenix_live_view, "~> 0.17.5"},
      {:floki, ">= 0.30.0", only: :test},
      {:phoenix_live_dashboard, "~> 0.6"},
      {:esbuild, "~> 0.3", runtime: Mix.env() == :dev},
      {:swoosh, "~> 1.3"},
      {:telemetry_metrics, "~> 0.6"},
      {:telemetry_poller, "~> 1.0"},
      {:gettext, "~> 0.18"},
      {:jason, "~> 1.2"},
      {:plug_cowboy, "~> 2.5"},
      {:mongodb_driver, path: "..", override: true},
      {:prom_ex, "~> 1.7.1"}
    ]
  end

  # Aliases are shortcuts or tasks specific to the current project.
  # For example, to install project dependencies and perform other setup tasks, run:
  #
  #     $ mix setup
  #
  # See the documentation for `Mix` for more info on aliases.
  defp aliases do
    [
      # FIX: the generator-default "ecto.*" aliases were removed — this
      # project declares no Ecto dependency (see deps/0), so `mix setup`
      # and the `test` alias crashed with "The task ecto.create could not
      # be found". `mix test` now runs the built-in task directly.
      setup: ["deps.get"],
      "assets.deploy": ["esbuild default --minify", "phx.digest"]
    ]
  end
end
70 |
--------------------------------------------------------------------------------
/insights/priv/gettext/en/LC_MESSAGES/errors.po:
--------------------------------------------------------------------------------
1 | ## `msgid`s in this file come from POT (.pot) files.
2 | ##
3 | ## Do not add, change, or remove `msgid`s manually here as
4 | ## they're tied to the ones in the corresponding POT file
5 | ## (with the same domain).
6 | ##
7 | ## Use `mix gettext.extract --merge` or `mix gettext.merge`
8 | ## to merge POT files into PO files.
9 | msgid ""
10 | msgstr ""
11 | "Language: en\n"
12 |
13 | ## From Ecto.Changeset.cast/4
14 | msgid "can't be blank"
15 | msgstr ""
16 |
17 | ## From Ecto.Changeset.unique_constraint/3
18 | msgid "has already been taken"
19 | msgstr ""
20 |
21 | ## From Ecto.Changeset.put_change/3
22 | msgid "is invalid"
23 | msgstr ""
24 |
25 | ## From Ecto.Changeset.validate_acceptance/3
26 | msgid "must be accepted"
27 | msgstr ""
28 |
29 | ## From Ecto.Changeset.validate_format/3
30 | msgid "has invalid format"
31 | msgstr ""
32 |
33 | ## From Ecto.Changeset.validate_subset/3
34 | msgid "has an invalid entry"
35 | msgstr ""
36 |
37 | ## From Ecto.Changeset.validate_exclusion/3
38 | msgid "is reserved"
39 | msgstr ""
40 |
41 | ## From Ecto.Changeset.validate_confirmation/3
42 | msgid "does not match confirmation"
43 | msgstr ""
44 |
45 | ## From Ecto.Changeset.no_assoc_constraint/3
46 | msgid "is still associated with this entry"
47 | msgstr ""
48 |
49 | msgid "are still associated with this entry"
50 | msgstr ""
51 |
52 | ## From Ecto.Changeset.validate_length/3
53 | msgid "should have %{count} item(s)"
54 | msgid_plural "should have %{count} item(s)"
55 | msgstr[0] ""
56 | msgstr[1] ""
57 |
58 | msgid "should be %{count} character(s)"
59 | msgid_plural "should be %{count} character(s)"
60 | msgstr[0] ""
61 | msgstr[1] ""
62 |
63 | msgid "should be %{count} byte(s)"
64 | msgid_plural "should be %{count} byte(s)"
65 | msgstr[0] ""
66 | msgstr[1] ""
67 |
68 | msgid "should have at least %{count} item(s)"
69 | msgid_plural "should have at least %{count} item(s)"
70 | msgstr[0] ""
71 | msgstr[1] ""
72 |
73 | msgid "should be at least %{count} character(s)"
74 | msgid_plural "should be at least %{count} character(s)"
75 | msgstr[0] ""
76 | msgstr[1] ""
77 |
78 | msgid "should be at least %{count} byte(s)"
79 | msgid_plural "should be at least %{count} byte(s)"
80 | msgstr[0] ""
81 | msgstr[1] ""
82 |
83 | msgid "should have at most %{count} item(s)"
84 | msgid_plural "should have at most %{count} item(s)"
85 | msgstr[0] ""
86 | msgstr[1] ""
87 |
88 | msgid "should be at most %{count} character(s)"
89 | msgid_plural "should be at most %{count} character(s)"
90 | msgstr[0] ""
91 | msgstr[1] ""
92 |
93 | msgid "should be at most %{count} byte(s)"
94 | msgid_plural "should be at most %{count} byte(s)"
95 | msgstr[0] ""
96 | msgstr[1] ""
97 |
98 | ## From Ecto.Changeset.validate_number/3
99 | msgid "must be less than %{number}"
100 | msgstr ""
101 |
102 | msgid "must be greater than %{number}"
103 | msgstr ""
104 |
105 | msgid "must be less than or equal to %{number}"
106 | msgstr ""
107 |
108 | msgid "must be greater than or equal to %{number}"
109 | msgstr ""
110 |
111 | msgid "must be equal to %{number}"
112 | msgstr ""
113 |
--------------------------------------------------------------------------------
/insights/priv/gettext/errors.pot:
--------------------------------------------------------------------------------
1 | ## This is a PO Template file.
2 | ##
3 | ## `msgid`s here are often extracted from source code.
4 | ## Add new translations manually only if they're dynamic
5 | ## translations that can't be statically extracted.
6 | ##
7 | ## Run `mix gettext.extract` to bring this file up to
8 | ## date. Leave `msgstr`s empty as changing them here has no
9 | ## effect: edit them in PO (`.po`) files instead.
10 |
11 | ## From Ecto.Changeset.cast/4
12 | msgid "can't be blank"
13 | msgstr ""
14 |
15 | ## From Ecto.Changeset.unique_constraint/3
16 | msgid "has already been taken"
17 | msgstr ""
18 |
19 | ## From Ecto.Changeset.put_change/3
20 | msgid "is invalid"
21 | msgstr ""
22 |
23 | ## From Ecto.Changeset.validate_acceptance/3
24 | msgid "must be accepted"
25 | msgstr ""
26 |
27 | ## From Ecto.Changeset.validate_format/3
28 | msgid "has invalid format"
29 | msgstr ""
30 |
31 | ## From Ecto.Changeset.validate_subset/3
32 | msgid "has an invalid entry"
33 | msgstr ""
34 |
35 | ## From Ecto.Changeset.validate_exclusion/3
36 | msgid "is reserved"
37 | msgstr ""
38 |
39 | ## From Ecto.Changeset.validate_confirmation/3
40 | msgid "does not match confirmation"
41 | msgstr ""
42 |
43 | ## From Ecto.Changeset.no_assoc_constraint/3
44 | msgid "is still associated with this entry"
45 | msgstr ""
46 |
47 | msgid "are still associated with this entry"
48 | msgstr ""
49 |
50 | ## From Ecto.Changeset.validate_length/3
51 | msgid "should be %{count} character(s)"
52 | msgid_plural "should be %{count} character(s)"
53 | msgstr[0] ""
54 | msgstr[1] ""
55 |
56 | msgid "should have %{count} item(s)"
57 | msgid_plural "should have %{count} item(s)"
58 | msgstr[0] ""
59 | msgstr[1] ""
60 |
61 | msgid "should be at least %{count} character(s)"
62 | msgid_plural "should be at least %{count} character(s)"
63 | msgstr[0] ""
64 | msgstr[1] ""
65 |
66 | msgid "should have at least %{count} item(s)"
67 | msgid_plural "should have at least %{count} item(s)"
68 | msgstr[0] ""
69 | msgstr[1] ""
70 |
71 | msgid "should be at most %{count} character(s)"
72 | msgid_plural "should be at most %{count} character(s)"
73 | msgstr[0] ""
74 | msgstr[1] ""
75 |
76 | msgid "should have at most %{count} item(s)"
77 | msgid_plural "should have at most %{count} item(s)"
78 | msgstr[0] ""
79 | msgstr[1] ""
80 |
81 | ## From Ecto.Changeset.validate_number/3
82 | msgid "must be less than %{number}"
83 | msgstr ""
84 |
85 | msgid "must be greater than %{number}"
86 | msgstr ""
87 |
88 | msgid "must be less than or equal to %{number}"
89 | msgstr ""
90 |
91 | msgid "must be greater than or equal to %{number}"
92 | msgstr ""
93 |
94 | msgid "must be equal to %{number}"
95 | msgstr ""
96 |
--------------------------------------------------------------------------------
/insights/priv/repo/migrations/.formatter.exs:
--------------------------------------------------------------------------------
# Formatter configuration for the migrations directory.
#
# FIX: removed the generator-default `import_deps: [:ecto_sql]` —
# :ecto_sql is not a dependency of this project (see mix.exs), so
# `mix format` failed here with an unknown-dependency error.
[
  inputs: ["*.exs"]
]
5 |
--------------------------------------------------------------------------------
/insights/priv/repo/seeds.exs:
--------------------------------------------------------------------------------
1 | # Script for populating the database. You can run it as:
2 | #
3 | # mix run priv/repo/seeds.exs
4 | #
5 | # Inside the script, you can read and write to any of your
6 | # repositories directly:
7 | #
8 | # Insights.Repo.insert!(%Insights.SomeSchema{})
9 | #
10 | # We recommend using the bang functions (`insert!`, `update!`
11 | # and so on) as they will fail if something goes wrong.
12 |
--------------------------------------------------------------------------------
/insights/priv/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zookzook/elixir-mongodb-driver/83b19556c0f0b7ddd20a14a114bf0e01b86f9945/insights/priv/static/favicon.ico
--------------------------------------------------------------------------------
/insights/priv/static/images/phoenix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zookzook/elixir-mongodb-driver/83b19556c0f0b7ddd20a14a114bf0e01b86f9945/insights/priv/static/images/phoenix.png
--------------------------------------------------------------------------------
/insights/priv/static/robots.txt:
--------------------------------------------------------------------------------
1 | # See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file
2 | #
3 | # To ban all spiders from the entire site uncomment the next two lines:
4 | # User-agent: *
5 | # Disallow: /
6 |
--------------------------------------------------------------------------------
/insights/test/insights_web/controllers/page_controller_test.exs:
--------------------------------------------------------------------------------
defmodule InsightsWeb.PageControllerTest do
  use InsightsWeb.ConnCase

  # FIX: "/" is routed to InsightsWeb.TopologyLive (see router.ex), whose
  # index template renders a "Topology" heading — the generator-default
  # assertion on "Welcome to Phoenix!" could never match.
  test "GET /", %{conn: conn} do
    conn = get(conn, "/")
    assert html_response(conn, 200) =~ "Topology"
  end
end
9 |
--------------------------------------------------------------------------------
/insights/test/insights_web/views/error_view_test.exs:
--------------------------------------------------------------------------------
defmodule InsightsWeb.ErrorViewTest do
  use InsightsWeb.ConnCase, async: true

  # Bring render/3 and render_to_string/3 for testing custom views
  import Phoenix.View

  # These render the ErrorView directly (no endpoint round-trip); the
  # expected strings come from ErrorView.template_not_found/2, which maps
  # "404.html" -> "Not Found" etc. via Phoenix's status messages.
  test "renders 404.html" do
    assert render_to_string(InsightsWeb.ErrorView, "404.html", []) == "Not Found"
  end

  test "renders 500.html" do
    assert render_to_string(InsightsWeb.ErrorView, "500.html", []) == "Internal Server Error"
  end
end
15 |
--------------------------------------------------------------------------------
/insights/test/insights_web/views/layout_view_test.exs:
--------------------------------------------------------------------------------
defmodule InsightsWeb.LayoutViewTest do
  # Placeholder test module for LayoutView; no helper tests yet.
  use InsightsWeb.ConnCase, async: true

  # When testing helpers, you may want to import Phoenix.HTML and
  # use functions such as safe_to_string() to convert the helper
  # result into an HTML string.
  # import Phoenix.HTML
end
9 |
--------------------------------------------------------------------------------
/insights/test/insights_web/views/page_view_test.exs:
--------------------------------------------------------------------------------
defmodule InsightsWeb.PageViewTest do
  # Placeholder test module for PageView; no custom view code to test yet.
  use InsightsWeb.ConnCase, async: true
end
4 |
--------------------------------------------------------------------------------
/insights/test/support/channel_case.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.ChannelCase do
  @moduledoc """
  This module defines the test case to be used by
  channel tests.

  Such tests rely on `Phoenix.ChannelTest` and also
  import other functionality to make it easier
  to build common data structures and query the data layer.
  """

  use ExUnit.CaseTemplate

  using do
    quote do
      # Import conveniences for testing with channels
      import Phoenix.ChannelTest
      import InsightsWeb.ChannelCase

      # The default endpoint for testing
      @endpoint InsightsWeb.Endpoint
    end
  end

  # FIX: removed the generator-default Ecto SQL sandbox setup — this
  # project defines no Insights.Repo module (and no Ecto dependency in
  # mix.exs), so `Ecto.Adapters.SQL.Sandbox.start_owner!(Insights.Repo, ...)`
  # crashed every channel test before it could run.
  setup _tags do
    :ok
  end
end
37 |
--------------------------------------------------------------------------------
/insights/test/support/conn_case.ex:
--------------------------------------------------------------------------------
defmodule InsightsWeb.ConnCase do
  @moduledoc """
  This module defines the test case to be used by
  tests that require setting up a connection.

  Such tests rely on `Phoenix.ConnTest` and also
  import other functionality to make it easier
  to build common data structures and query the data layer.

  Finally, if the test case interacts with the database,
  we enable the SQL sandbox, so changes done to the database
  are reverted at the end of every test. If you are using
  PostgreSQL, you can even run database tests asynchronously
  by setting `use InsightsWeb.ConnCase, async: true`, although
  this option is not recommended for other databases.
  """

  use ExUnit.CaseTemplate

  using do
    quote do
      # Import conveniences for testing with connections
      import Plug.Conn
      import Phoenix.ConnTest
      import InsightsWeb.ConnCase

      alias InsightsWeb.Router.Helpers, as: Routes

      # The default endpoint for testing
      @endpoint InsightsWeb.Endpoint
    end
  end

  setup tags do
    # Check out a sandbox connection owned by this test (shared for sync
    # tests) and hand every test a fresh Plug connection in its context.
    pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Insights.Repo, shared: not tags[:async])
    on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end)
    {:ok, conn: Phoenix.ConnTest.build_conn()}
  end
end
40 |
--------------------------------------------------------------------------------
/insights/test/support/data_case.ex:
--------------------------------------------------------------------------------
defmodule Insights.DataCase do
  @moduledoc """
  This module defines the setup for tests requiring
  access to the application's data layer.

  You may define functions here to be used as helpers in
  your tests.

  Finally, if the test case interacts with the database,
  we enable the SQL sandbox, so changes done to the database
  are reverted at the end of every test. If you are using
  PostgreSQL, you can even run database tests asynchronously
  by setting `use Insights.DataCase, async: true`, although
  this option is not recommended for other databases.
  """

  use ExUnit.CaseTemplate

  using do
    quote do
      alias Insights.Repo

      import Ecto
      import Ecto.Changeset
      import Ecto.Query
      import Insights.DataCase
    end
  end

  setup tags do
    # Check out a sandbox connection owned by this test; shared for
    # sync tests, private for async ones.
    pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Insights.Repo, shared: not tags[:async])
    on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end)
    :ok
  end

  @doc """
  A helper that transforms changeset errors into a map of messages.

      assert {:error, changeset} = Accounts.create_user(%{password: "short"})
      assert "password is too short" in errors_on(changeset).password
      assert %{password: ["password is too short"]} = errors_on(changeset)

  """
  def errors_on(changeset) do
    Ecto.Changeset.traverse_errors(changeset, fn {message, opts} ->
      # Interpolate `%{key}` placeholders in the message with the matching
      # value from the error's options (e.g. %{count} -> 3); placeholders
      # without a matching option are left as the key text itself.
      Regex.replace(~r"%{(\w+)}", message, fn _, key ->
        opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string()
      end)
    end)
  end
end
52 |
--------------------------------------------------------------------------------
/insights/test/test_helper.exs:
--------------------------------------------------------------------------------
# Start ExUnit and put the Ecto repo into manual sandbox mode so each test
# explicitly checks out its own connection (see the *Case setup callbacks).
ExUnit.start()
Ecto.Adapters.SQL.Sandbox.mode(Insights.Repo, :manual)
3 |
--------------------------------------------------------------------------------
/lib/bson.ex:
--------------------------------------------------------------------------------
defmodule BSON do
  @moduledoc """
  Functions for encoding and decoding BSON documents.
  """

  @type t :: document | String.t() | atom | number | boolean | BSON.Binary.t() | BSON.ObjectId.t() | BSON.Regex.t() | BSON.JavaScript.t() | BSON.Timestamp.t() | BSON.LongNumber.t() | [t]
  @type document :: %{atom => BSON.t()} | %{String.t() => BSON.t()} | [{atom, BSON.t()}] | [{String.t(), BSON.t()}]

  @doc """
  Encode a BSON document to `iodata`.

  Accepts a plain map, a struct (converted to a key/value list first) or
  a keyword-style list of pairs.
  """
  @spec encode(document) :: iodata
  def encode(map) when is_map(map) do
    if Map.has_key?(map, :__struct__) do
      map
      |> Map.to_list()
      |> BSON.Encoder.encode()
    else
      BSON.Encoder.encode(map)
    end
  end

  def encode([{_, _} | _] = pairs) do
    BSON.Encoder.encode(pairs)
  end

  @doc """
  Decode `iodata` to a BSON document.
  """
  @spec decode(iodata) :: document
  def decode(iodata) do
    BSON.Decoder.decode(IO.iodata_to_binary(iodata))
  end
end
37 |
--------------------------------------------------------------------------------
/lib/bson/decimal128.ex:
--------------------------------------------------------------------------------
defmodule BSON.Decimal128 do
  @moduledoc """
  Encoding and decoding of the BSON Decimal128 type to/from `Decimal` structs.

  see https://en.wikipedia.org/wiki/Decimal128_floating-point_format
  """

  import Bitwise

  @signed_bit_mask 1 <<< 63
  @combination_mask 0x1F
  @combination_infinity 30
  @combination_nan 31
  @exponent_mask 0x3FFF
  @exponent_bias 6176
  @max_exponent 6111
  @min_exponent -6176
  @significand_mask (0x1 <<< 49) - 1
  @low_mask 0xFFFFFFFFFFFFFFFF

  @doc """
  Decodes a 16-byte little-endian Decimal128 binary into a `Decimal` struct.
  """
  def decode(<<_::little-64, high::little-64>> = bits) do
    is_negative = (high &&& @signed_bit_mask) == @signed_bit_mask
    # The 5-bit combination field encodes infinity/NaN and part of the exponent.
    combination = high >>> 58 &&& @combination_mask
    two_highest_bits_set = combination >>> 3 == 3
    is_infinity = two_highest_bits_set && combination == @combination_infinity
    is_nan = two_highest_bits_set && combination == @combination_nan
    exponent = exponent(high, two_highest_bits_set)

    value(
      %{is_negative: is_negative, is_infinity: is_infinity, is_nan: is_nan, two_highest_bits_set: two_highest_bits_set},
      coef(bits),
      exponent
    )
  end

  @doc """
  Encodes a `Decimal` struct as a 16-byte Decimal128 binary.

  Special values use the combination field of the high word:

      s 11110 xx...x ±infinity
      s 11111 0x...x a quiet NaN
      s 11111 1x...x a signalling NaN

  Raises `ArgumentError` when the exponent is outside the representable
  range (#{@min_exponent}..#{@max_exponent}).
  """
  def encode(%Decimal{sign: -1, coef: :inf}) do
    low = 0
    # sign bit set + infinity combination
    high = 0x3E <<< 58
    <<low::little-64, high::little-64>>
  end

  def encode(%Decimal{coef: :inf}) do
    low = 0
    high = 0x1E <<< 58
    <<low::little-64, high::little-64>>
  end

  def encode(%Decimal{coef: :NaN}) do
    low = 0
    high = 0x1F <<< 58
    <<low::little-64, high::little-64>>
  end

  def encode(%Decimal{sign: sign, coef: significand, exp: exponent}) when exponent >= @min_exponent and exponent <= @max_exponent do
    biased_exponent = exponent + @exponent_bias
    low = significand &&& @low_mask
    ## mask max significand
    high = significand >>> 64 &&& @significand_mask
    high = bor(high, biased_exponent <<< 49)

    high =
      case sign do
        1 -> high
        _ -> bor(high, @signed_bit_mask)
      end

    <<low::little-64, high::little-64>>
  end

  def encode(%Decimal{exp: exponent}) do
    message = "Exponent is out of range for Decimal128 encoding, #{exponent}"
    raise ArgumentError, message
  end

  # When the two highest combination bits are set the biased exponent starts
  # two bits lower in the high word.
  defp exponent(high, true) do
    biased_exponent = high >>> 47 &&& @exponent_mask
    biased_exponent - @exponent_bias
  end

  defp exponent(high, _two_highest_bits_not_set) do
    biased_exponent = high >>> 49 &&& @exponent_mask
    biased_exponent - @exponent_bias
  end

  defp value(%{is_negative: true, is_infinity: true}, _, _) do
    %Decimal{sign: -1, coef: :inf}
  end

  defp value(%{is_negative: false, is_infinity: true}, _, _) do
    %Decimal{coef: :inf}
  end

  defp value(%{is_nan: true}, _, _) do
    %Decimal{coef: :NaN}
  end

  # Non-canonical representation (high combination bits set, not inf/NaN):
  # treated as zero.
  defp value(%{two_highest_bits_set: true}, _, _) do
    %Decimal{sign: 0, coef: 0, exp: 0}
  end

  defp value(%{is_negative: true}, coef, exponent) do
    %Decimal{sign: -1, coef: coef, exp: exponent}
  end

  defp value(_, coef, exponent) do
    %Decimal{coef: coef, exp: exponent}
  end

  # The coefficient is the low 64-bit word plus the low 49 bits of the high word.
  defp coef(<<low::little-64, high::little-64>>) do
    bor((high &&& 0x1FFFFFFFFFFFF) <<< 64, low)
  end
end
116 |
--------------------------------------------------------------------------------
/lib/bson/utils.ex:
--------------------------------------------------------------------------------
defmodule BSON.Utils do
  @moduledoc false

  # Injects the BSON element-type tag constants (per the BSON spec byte
  # tags) plus the binary helpers into the using module.
  defmacro __using__(_) do
    quote do
      import BSON.Utils
      import Mongo.BinaryUtils

      @type_float 0x01
      @type_string 0x02
      @type_document 0x03
      @type_array 0x04
      @type_binary 0x05
      @type_undefined 0x06
      @type_objectid 0x07
      @type_bool 0x08
      @type_datetime 0x09
      @type_null 0x0A
      @type_regex 0x0B
      @type_js 0x0D
      @type_symbol 0x0E
      @type_js_scope 0x0F
      @type_int32 0x10
      @type_timestamp 0x11
      @type_int64 0x12
      @type_decimal128 0x13
      @type_min 0xFF
      @type_max 0x7F
    end
  end

  @int32_min -2_147_483_648
  @int32_max 2_147_483_647
  @int64_min -9_223_372_036_854_775_808
  @int64_max 9_223_372_036_854_775_807

  # Guard-safe check that `value` is an integer fitting in a signed 32-bit range.
  defmacro is_int32(value) do
    quote do
      is_integer(unquote(value)) and
        unquote(value) in unquote(@int32_min)..unquote(@int32_max)
    end
  end

  # Guard-safe check that `value` is an integer fitting in a signed 64-bit range.
  defmacro is_int64(value) do
    quote do
      is_integer(unquote(value)) and
        unquote(value) in unquote(@int64_min)..unquote(@int64_max)
    end
  end
end
51 |
--------------------------------------------------------------------------------
/lib/mongo/app.ex:
--------------------------------------------------------------------------------
defmodule Mongo.App do
  @moduledoc false

  use Application

  # Starts the driver-wide services: the ObjectId generator, the PBKDF2
  # cache and a duplicate registry used for event broadcasting.
  def start(_type, _args) do
    registry_spec = %{
      id: Registry,
      start: {Registry, :start_link, [:duplicate, :events_registry]},
      type: :supervisor
    }

    children = [
      {Mongo.IdServer, []},
      {Mongo.PBKDF2Cache, []},
      registry_spec
    ]

    Supervisor.start_link(children, strategy: :one_for_one, name: Mongo.Supervisor)
  end
end
21 |
--------------------------------------------------------------------------------
/lib/mongo/auth.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Auth do
  @moduledoc false

  alias Mongo.PasswordSafe

  # Authenticates the connection using the mechanism derived from the
  # connection state. On failure the socket is closed before the error
  # is returned.
  def run(opts, state) do
    mech = mechanism(state)
    auth_source = opts[:auth_source]

    # Authentication may target a different database (authSource).
    auth_state =
      if auth_source != nil and state.wire_version > 0 do
        Map.put(state, :database, auth_source)
      else
        state
      end

    case mech.auth(credentials(opts), state.database, auth_state) do
      :ok ->
        {:ok, state}

      error ->
        {mod, socket} = state.connection
        mod.close(socket)
        error
    end
  end

  # Username comes from the options; the password is kept encrypted in the
  # password safe and fetched on demand.
  defp credentials(opts) do
    {opts[:username], PasswordSafe.get_password(opts[:pw_safe])}
  end

  defp mechanism(%{wire_version: version, auth_mechanism: :x509}) when version >= 3, do: Mongo.Auth.X509

  defp mechanism(%{wire_version: version, auth_mechanism: :plain}) when version >= 3, do: Mongo.Auth.PLAIN

  defp mechanism(%{wire_version: version}) when version >= 3, do: Mongo.Auth.SCRAM

  defp mechanism(_), do: Mongo.Auth.CR
end
54 |
--------------------------------------------------------------------------------
/lib/mongo/auth/cr.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Auth.CR do
  @moduledoc false
  alias Mongo.MongoDBConnection.Utils

  # No credentials configured: nothing to do.
  def auth({nil, nil}, _db, _s) do
    :ok
  end

  def auth({username, password}, _db, s) do
    case Utils.command(-2, [getnonce: 1], s) do
      {:ok, _flags, message} ->
        nonce(message, username, password, s)

      error ->
        error
    end
  end

  # Note that we use numeric comparisons in guards (e.g., `... when ok == 1`)
  # instead of pattern matching below. This is to accommodate responses that
  # return either integer or float values. Pattern matching treats 1 and 1.0,
  # and 0, 0.0 and -0.0 (OTP 27+), as distinct values due to their different
  # types/internal representation. By using numeric comparisons, we can ensure
  # correct behavior regardless of the numeric type returned.
  defp nonce(%{"nonce" => nonce, "ok" => ok}, username, password, s) when ok == 1 do
    digest = Utils.digest(nonce, username, password)
    request = [authenticate: 1, user: username, nonce: nonce, key: digest]

    case Utils.command(-3, request, s) do
      {:ok, _flags, %{"ok" => ok}} when ok == 1 ->
        :ok

      {:ok, _flags, %{"ok" => ok, "errmsg" => reason, "code" => code}} when ok == 0 ->
        {:error, Mongo.Error.exception(message: "auth failed for '#{username}': #{reason}", code: code)}

      {:ok, _flags, nil} ->
        {:error, Mongo.Error.exception(message: "auth failed for '#{username}'")}

      error ->
        error
    end
  end
end
39 |
--------------------------------------------------------------------------------
/lib/mongo/auth/plain.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Auth.PLAIN do
  @moduledoc false
  alias Mongo.MongoDBConnection.Utils

  # No credentials configured: nothing to do.
  def auth({nil, nil}, _db, _s) do
    :ok
  end

  def auth({username, password}, _db, s) do
    request = [saslStart: 1, mechanism: "PLAIN", payload: plain_payload(username, password)]

    case Utils.command(-3, request, s) do
      {:ok, _flags, %{"ok" => ok, "done" => true}} when ok == 1 ->
        :ok

      {:ok, _flags, %{"ok" => ok, "errmsg" => reason, "code" => code}} when ok == 0 ->
        {:error, Mongo.Error.exception(message: "auth failed for user #{username}: #{reason}", code: code)}

      error ->
        error
    end
  end

  # https://www.ietf.org/rfc/rfc4616.txt
  # Null-separated list of authorization ID (blank), username and password,
  # sent as raw UTF-8 wrapped in a BSON binary.
  defp plain_payload(username, password) do
    %BSON.Binary{binary: "\0#{username}\0#{password}"}
  end
end
32 |
--------------------------------------------------------------------------------
/lib/mongo/auth/x509.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Auth.X509 do
  @moduledoc false
  alias Mongo.MongoDBConnection.Utils

  # X.509 authentication derives the identity from the client certificate,
  # but still requires a username in the options.
  def auth({nil, _password}, _db, _s) do
    {:error, "X509 auth needs a username!"}
  end

  def auth({username, _password}, _db, s) do
    cmd = [authenticate: 1, user: username, mechanism: "MONGODB-X509"]
    result = Utils.command(-2, cmd, s)

    if match?({:ok, _flags, _message}, result) do
      :ok
    else
      {:error, "X509 auth failed"}
    end
  end
end
21 |
--------------------------------------------------------------------------------
/lib/mongo/binary_utils.ex:
--------------------------------------------------------------------------------
defmodule Mongo.BinaryUtils do
  @moduledoc false

  # Each macro expands to a bitstring type-modifier expression (e.g.
  # `signed-little-64`) for use inside binary patterns/constructors, so
  # wire-format code can write `<<x::int32()>>` instead of repeating the
  # modifiers. BSON integers and floats are little-endian.

  defmacro int64 do
    quote do: signed - little - 64
  end

  defmacro int32 do
    quote do: signed - little - 32
  end

  defmacro int16 do
    quote do: signed - little - 16
  end

  defmacro uint16 do
    quote do: unsigned - little - 16
  end

  defmacro int8 do
    quote do: signed - little - 8
  end

  defmacro uint8 do
    quote do: unsigned - little - 8
  end

  defmacro float64 do
    quote do: float - little - 64
  end

  defmacro float32 do
    quote do: float - little - 32
  end

  # Fixed-size binary segment of `size` bytes.
  defmacro binary(size) do
    quote do: binary - size(unquote(size))
  end
end
40 |
--------------------------------------------------------------------------------
/lib/mongo/bulk_ops.ex:
--------------------------------------------------------------------------------
defmodule Mongo.BulkOps do
  @moduledoc """

  This module defines bulk operation for insert, update and delete. A bulk operation is a tuple of two elements

  1. an atom, which specify the type `:insert`, `:update` and `:delete`
  2. a document or another tuple which contains all parameters of the operation.

  You use these function in streams:

  ## Example

  ```
  alias Mongo.UnorderedBulk
  alias Mongo.BulkOps

  File.stream!("large.csv")
  |> Stream.map(&String.trim(&1))
  |> Stream.map(&String.split(&1,","))
  |> Stream.map(fn [firstname | [lastname | _]] -> %{firstname: firstname, lastname: lastname} end)
  |> Stream.map(fn doc -> BulkOps.get_insert_one(doc) end)
  |> UnorderedBulk.write(:mongo, "bulk", 1_000)
  |> Stream.run()
  ```

  """

  @type bulk_op ::
          {atom, BSON.document()}
          | {atom, {BSON.document(), Keyword.t()}}
          | {atom, {BSON.document(), BSON.document(), Keyword.t()}}

  import Mongo.Utils

  @doc """
  Returns an `insert_one` operation tuple for appending to a bulk. Used to perform stream bulk writes.

  Example
  ```
  Mongo.BulkOps.get_insert_one(%{name: "Waldo"})

  {:insert, %{name: "Waldo"}}
  ```
  """
  @spec get_insert_one(BSON.document()) :: bulk_op
  def get_insert_one(doc), do: {:insert, doc}

  @doc """
  Returns a `delete_one` operation tuple for appending to a bulk. Used to perform stream bulk writes.

  Example

  ```
  Mongo.BulkOps.get_delete_one(%{name: "Waldo"})

  {:delete, {%{name: "Waldo"}, [limit: 1]}}
  ```
  """
  @spec get_delete_one(BSON.document()) :: bulk_op
  def get_delete_one(doc), do: {:delete, {doc, [limit: 1]}}

  @doc """
  Returns a `delete_many` operation for appending to a bulk. Used to perform stream bulk writes.

  Example

  ```
  Mongo.BulkOps.get_delete_many(%{name: "Waldo"})

  {:delete, {%{name: "Waldo"}, [limit: 0]}}
  ```
  """
  @spec get_delete_many(BSON.document()) :: bulk_op
  def get_delete_many(doc), do: {:delete, {doc, [limit: 0]}}

  @doc """
  Returns an `update_one` operation for appending to a bulk. Used to perform stream bulk writes.

  Example

  ```
  Mongo.BulkOps.get_update_one(%{name: "Waldo"}, %{"$set" : %{name: "Greta", kind: "dog"}})

  {:update,
  {%{name: "Waldo"}, %{"$set": %{kind: "dog", name: "Greta"}}, [multi: false]}}
  ```
  """
  @spec get_update_one(BSON.document(), BSON.document(), Keyword.t()) :: bulk_op
  def get_update_one(filter, update, opts \\ []) do
    # validates that `update` only contains `$`-modifier keys
    _ = modifier_docs(update, :update)
    {:update, {filter, update, Keyword.put(opts, :multi, false)}}
  end

  @doc """
  Returns an `update_many` operation for appending to a bulk. Used to perform stream bulk writes.

  Example

  ```
  Mongo.BulkOps.get_update_many(%{name: "Waldo"}, %{"$set" : %{name: "Greta", kind: "dog"}})

  {:update,
  {%{name: "Waldo"}, %{"$set": %{kind: "dog", name: "Greta"}}, [multi: true]}}
  ```
  """
  @spec get_update_many(BSON.document(), BSON.document(), Keyword.t()) :: bulk_op
  def get_update_many(filter, update, opts \\ []) do
    _ = modifier_docs(update, :update)
    {:update, {filter, update, Keyword.put(opts, :multi, true)}}
  end

  @doc """
  Returns a `replace_one` operation for appending to a bulk. Used to perform stream bulk writes.

  Example

  ```
  Mongo.BulkOps.get_replace_one(%{name: "Waldo"}, %{name: "Greta", kind: "dog"})

  {:update, {%{name: "Waldo"}, %{kind: "dog", name: "Greta"}, [multi: false]}}
  ```
  """
  @spec get_replace_one(BSON.document(), BSON.document(), Keyword.t()) :: bulk_op
  def get_replace_one(filter, replacement, opts \\ []) do
    # validates that `replacement` contains no `$`-modifier keys
    _ = modifier_docs(replacement, :replace)
    {:update, {filter, replacement, Keyword.put(opts, :multi, false)}}
  end
end
129 |
--------------------------------------------------------------------------------
/lib/mongo/compressor.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Compressor do
  @moduledoc false

  # Wire-protocol compression. zlib is always available (Erlang stdlib);
  # zstd clauses are only compiled when the optional :ezstd dependency is
  # present. Clauses accept either the wire compressor id (integer) or the
  # compressor name (atom).

  @zlib_compressor_id 2
  if Code.ensure_loaded?(:ezstd) do
    @zstd_compressor_id 3
  end

  # NOTE: clauses of the same function are grouped together here; the
  # original interleaving produced "clauses ... should be grouped" compiler
  # warnings. Clause patterns are disjoint, so grouping preserves behavior.
  def compress(binary, :zlib) do
    {@zlib_compressor_id, :zlib.compress(binary)}
  end

  if Code.ensure_loaded?(:ezstd) do
    def compress(binary, :zstd) when is_binary(binary) do
      {@zstd_compressor_id, :ezstd.compress(binary)}
    end

    # :ezstd only accepts binaries, so iodata is flattened first.
    def compress(iodata, :zstd) when is_list(iodata) do
      {@zstd_compressor_id,
       iodata
       |> IO.iodata_to_binary()
       |> :ezstd.compress()}
    end
  end

  def uncompress(binary, @zlib_compressor_id) do
    :zlib.uncompress(binary)
  end

  def uncompress(binary, :zlib) do
    :zlib.uncompress(binary)
  end

  if Code.ensure_loaded?(:ezstd) do
    def uncompress(binary, @zstd_compressor_id) do
      :ezstd.decompress(binary)
    end

    def uncompress(binary, :zstd) do
      :ezstd.decompress(binary)
    end
  end
end
46 |
--------------------------------------------------------------------------------
/lib/mongo/encoder.ex:
--------------------------------------------------------------------------------
defprotocol Mongo.Encoder do
  @moduledoc """
  Protocol for converting custom data types into a BSON-encodable map.

  No fallback implementation is provided: types without an explicit
  `defimpl` raise `Protocol.UndefinedError`.
  """

  @fallback_to_any false

  @spec encode(t) :: map()
  def encode(value)
end
7 |
--------------------------------------------------------------------------------
/lib/mongo/event_handler.ex:
--------------------------------------------------------------------------------
defmodule Mongo.EventHandler do
  @moduledoc false

  require Logger

  @all [:commands, :topology]

  # Spawns a process that subscribes to the requested topics and logs
  # every broadcast event it receives.
  def start(opts \\ [topics: [:commands]]) do
    spawn(__MODULE__, :register, [opts])
  end

  # Registers the current process for each topic; only enters the receive
  # loop when every registration succeeded (returns false otherwise).
  def register(opts) do
    registered? =
      (opts[:topics] || @all)
      |> Enum.all?(fn topic ->
        match?({:ok, _}, Registry.register(:events_registry, topic, []))
      end)

    case registered? do
      true ->
        listen(opts)
        :ok

      false ->
        false
    end
  end

  # Receive loop: logs commands (hello/isMaster only when explicitly
  # enabled) and stops on any unrecognized message.
  def listen(opts) do
    receive do
      {:broadcast, :commands, %{command_name: cmd} = message} when cmd != :isMaster and cmd != :hello ->
        Logger.info("Received command: " <> inspect(message))
        listen(opts)

      {:broadcast, :commands, hello} ->
        if (opts[:is_master] || opts[:hello]) == true do
          Logger.info("Received hello:" <> inspect(hello))
        end

        listen(opts)

      {:broadcast, topic, message} ->
        Logger.info("Received #{topic}: " <> inspect(message))
        listen(opts)

      other ->
        Logger.info("Stopping EventHandler received unknown message:" <> inspect(other))
    end
  end
end
48 |
--------------------------------------------------------------------------------
/lib/mongo/grid_fs/download.ex:
--------------------------------------------------------------------------------
defmodule Mongo.GridFs.Download do
  @moduledoc """
  The main entry point for downloading files from the grid-fs specified by the bucket struct.
  """

  alias BSON.ObjectId
  alias Mongo.GridFs.Bucket

  @type result :: {:error, :unknown} | {:error, :length_is_zero} | {:error, :not_found} | {:ok, Mongo.cursor()}

  @doc """
  Opens a Stream from which the application can read the contents of the stored file
  specified by fileId. The fileId can be a string, an ObjectId or just a map with the
  keys `length` and `_id`. In case of the map the function tries to stream the chunks
  described by the `length` and the `_id` values.

  Returns a Stream.
  """
  @spec open_download_stream(Bucket.t(), String.t() | BSON.ObjectId.t() | map()) :: result
  # String file id: decoded to an ObjectId first (raises on invalid hex).
  def open_download_stream(%Bucket{topology_pid: topology_pid, opts: opts} = bucket, file_id) when is_binary(file_id) do
    topology_pid
    |> Mongo.find_one(Bucket.files_collection_name(bucket), %{"_id" => ObjectId.decode!(file_id)}, opts)
    |> stream_chunk(bucket)
  end

  def open_download_stream(%Bucket{topology_pid: topology_pid, opts: opts} = bucket, %BSON.ObjectId{} = oid) do
    topology_pid
    |> Mongo.find_one(Bucket.files_collection_name(bucket), %{"_id" => oid}, opts)
    |> stream_chunk(bucket)
  end

  # Caller already has the files-collection document: skip the lookup.
  def open_download_stream(bucket, %{"length" => _, "_id" => _} = file) do
    stream_chunk(file, bucket)
  end

  @doc """
  Same as above, but returns also the file document.
  """
  @spec find_and_stream(Bucket.t(), String.t()) :: {result, BSON.document()}
  def find_and_stream(bucket, file_id)

  def find_and_stream(%Bucket{topology_pid: topology_pid, opts: opts} = bucket, file_id) when is_binary(file_id) do
    file = Mongo.find_one(topology_pid, Bucket.files_collection_name(bucket), %{"_id" => ObjectId.decode!(file_id)}, opts)
    {stream_chunk(file, bucket), file}
  end

  def find_and_stream(%Bucket{topology_pid: topology_pid, opts: opts} = bucket, file_id) do
    file = Mongo.find_one(topology_pid, Bucket.files_collection_name(bucket), %{"_id" => file_id}, opts)
    {stream_chunk(file, bucket), file}
  end

  # Looks up a single files-collection document by filename ...
  def find_one_file(%Bucket{topology_pid: topology_pid, opts: opts} = bucket, filename) when is_binary(filename) do
    Mongo.find_one(topology_pid, Bucket.files_collection_name(bucket), %{"filename" => filename}, opts)
  end

  # ... or by ObjectId.
  def find_one_file(%Bucket{topology_pid: topology_pid, opts: opts} = bucket, %BSON.ObjectId{} = file_id) do
    Mongo.find_one(topology_pid, Bucket.files_collection_name(bucket), %{"_id" => file_id}, opts)
  end

  ##
  # In case that the file map is nil we return :error
  #
  defp stream_chunk(nil, _bucket), do: {:error, :not_found}

  ##
  # However, when downloading a zero length stored file the driver MUST NOT issue a query against the chunks
  # collection, since that query is not necessary. For a zero length file, drivers return either an empty
  # stream or send nothing to the provided stream (depending on the download method).
  #
  # NOTE: clause order matters here — this must match before the `_id` clause below.
  ##
  defp stream_chunk(%{"length" => 0}, _bucket), do: {:error, :length_is_zero}

  ##
  # Streaming the chunks with `file_id` sorted ascending by n
  #
  defp stream_chunk(%{"_id" => id}, %Bucket{topology_pid: topology_pid, opts: opts} = bucket) do
    opts = Keyword.merge(opts, sort: [n: 1])

    stream =
      topology_pid
      |> Mongo.find(Bucket.chunks_collection_name(bucket), %{files_id: id}, opts)
      |> Stream.map(fn map -> map["data"].binary end)

    {:ok, stream}
  end

  ##
  # catch up for other cases
  #
  defp stream_chunk(_, _bucket), do: {:error, :unknown}
end
91 |
--------------------------------------------------------------------------------
/lib/mongo/grid_fs/upload.ex:
--------------------------------------------------------------------------------
defmodule Mongo.GridFs.Upload do
  @moduledoc """
  The main entry point for uploading files into the grid-fs specified by the bucket struct.
  """

  alias Mongo.GridFs.UploadStream

  @doc """
  Opens a stream that the application can write the contents of the file to.
  The driver generates the file id if not provided.

  `meta` is user data for the 'metadata' field of the files collection document.
  """
  @spec open_upload_stream(Mongo.GridFs.Bucket.t(), String.t(), BSON.document() | nil, UploadStream.file_id() | nil) :: UploadStream.t()
  def open_upload_stream(bucket, filename, meta \\ nil, file_id \\ nil) do
    UploadStream.new(bucket, filename, meta, file_id)
  end
end
19 |
--------------------------------------------------------------------------------
/lib/mongo/id_server.ex:
--------------------------------------------------------------------------------
defmodule Mongo.IdServer do
  @moduledoc false

  # An ObjectId consists of a machine id, process id, seconds since unix epoch
  # and a counter. The counter is used to differentiate between generated
  # ObjectIds during a single second.
  #
  # A counter is generated for each second in an hour, the counter is
  # initialized to a random number based on MongoDB documentation's
  # recommendation. Each time a new ObjectId is generated we take the counter
  # for the current second and increment it.
  #
  # To keep the counters random and to make sure they don't grow infinitely they
  # need to be reset. Care needs to be taken to ensure a counter is not reset
  # during its second's window during which it is being used. Once each minute
  # ~60 counters should be reset, only counters that will be used ~30 minutes in
  # the future are reset to ensure the current second's counter is not touched.

  use GenServer

  @name __MODULE__
  @num_counters 3600
  @reset_timer 60_000
  @counter_max 16_777_216
  @gs_epoch :calendar.datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}})

  def start_link(_ \\ nil) do
    GenServer.start_link(__MODULE__, [], name: @name)
  end

  def init([]) do
    # Public named ETS table so new/0 can generate ids from any process.
    @name = :ets.new(@name, [:named_table, :public, write_concurrency: true])
    true = :ets.insert(@name, machineprocid: {machine_id(), process_id()})
    true = :ets.insert(@name, gen_counters(0..@num_counters))

    _ = Process.send_after(self(), :reset_counters, @reset_timer)

    {:ok, opposite_on_window(:calendar.universal_time())}
  end

  # Periodically re-randomize the counters on the opposite side of the
  # hour window (i.e. ~30 minutes away from the currently used second).
  def handle_info(:reset_counters, last_reset) do
    new_reset = opposite_on_window(:calendar.universal_time())

    # The window wraps around each hour, so the range may run backwards.
    step =
      case last_reset + 1 <= new_reset do
        true ->
          1

        false ->
          -1
      end

    :ets.insert(@name, gen_counters((last_reset + 1)..new_reset//step))
    Process.send_after(self(), :reset_counters, @reset_timer)

    {:noreply, new_reset}
  end

  @doc false
  # Generates a new ObjectId from the cached machine/process ids, the
  # current unix time and the incremented per-second counter.
  def new() do
    {machine_id, proc_id} = :ets.lookup_element(@name, :machineprocid, 2)
    now = :calendar.universal_time()
    secs = :calendar.datetime_to_gregorian_seconds(now) - @gs_epoch
    counter = :ets.update_counter(@name, in_window(now), 1)
    # counters are 3 bytes wide, wrap around at 2^24
    counter = rem(counter, @counter_max)

    BSON.ObjectId.new(machine_id, proc_id, secs, counter)
  end

  # One {index, random_start} entry per second of the window.
  defp gen_counters(range) do
    for ix <- range do
      {ix, :rand.uniform(@counter_max) - 1}
    end
  end

  # ETS key for the counter belonging to the current second of the hour.
  defp in_window(now) do
    secs = :calendar.datetime_to_gregorian_seconds(now)
    window = @num_counters

    rem(secs, window)
  end

  # Index half a window (~30 min) away from the current second.
  defp opposite_on_window(now) do
    secs = :calendar.datetime_to_gregorian_seconds(now)
    window = @num_counters
    half_window = div(window, 2)

    rem(secs + half_window, window)
  end

  # First 3 bytes of the MD5 of the hostname — the ObjectId machine-id
  # field is 3 bytes (see the module header comment).
  defp machine_id() do
    {:ok, hostname} = :inet.gethostname()
    <<machine_id::unsigned-big-24, _rest::binary>> = :crypto.hash(:md5, hostname)
    machine_id
  end

  defp process_id() do
    :os.getpid() |> List.to_integer()
  end
end
100 |
--------------------------------------------------------------------------------
/lib/mongo/keywords.ex:
--------------------------------------------------------------------------------
defmodule Keywords do
  @moduledoc false

  # Drops every entry whose value is nil from a keyword list.
  def filter_nils(keyword) when is_list(keyword) do
    for {key, value} <- keyword, not is_nil(value), do: {key, value}
  end

  # Drops every entry whose value is nil from a map.
  def filter_nils(map) when is_map(map) do
    for {key, value} <- map, not is_nil(value), into: %{}, do: {key, value}
  end
end
13 |
--------------------------------------------------------------------------------
/lib/mongo/password_safe.ex:
--------------------------------------------------------------------------------
defmodule Mongo.PasswordSafe do
  @moduledoc """
  The password safe stores the password while parsing the url and/or the options to avoid it from logging while the sasl logger is activated.

  The password is encrypted before storing in the GenServer's state. It will be encrypted before returning. This should help, that the password
  is not stored as plain text in the memory.
  """

  @me __MODULE__

  use GenServer

  @doc """
  Starts an unnamed password safe process.
  """
  def start_link() do
    GenServer.start_link(@me, [])
  end

  @doc """
  Stores `password` (encrypted) in the safe identified by `pid`.
  """
  def set_password(pid, password) do
    GenServer.cast(pid, {:set, password})
  end

  @doc """
  Returns the decrypted password, or nil when no safe was started.
  """
  def get_password(nil), do: nil

  def get_password(pid) do
    GenServer.call(pid, :get)
  end

  def init([]) do
    # A fresh random 256-bit key per safe; the key never leaves this process.
    {:ok, %{key: generate_key(), pw: nil}}
  end

  def handle_cast({:set, password}, %{key: key} = data) do
    {:noreply, %{data | pw: password |> encrypt(key)}}
  end

  def handle_call(:get, _from, %{key: key, pw: password} = data) do
    {:reply, password |> decrypt(key), data}
  end

  # OTP < 22 only offers the old :crypto.block_encrypt/block_decrypt API.
  if String.to_integer(System.otp_release()) < 22 do
    @aad "AES256GCM"

    defp encrypt(plaintext, key) do
      # create random Initialisation Vector
      iv = :crypto.strong_rand_bytes(16)
      {ciphertext, tag} = :crypto.block_encrypt(:aes_gcm, key, iv, {@aad, to_string(plaintext), 16})
      # "return" iv with the cipher tag & ciphertext
      iv <> tag <> ciphertext
    end

    defp decrypt(ciphertext, key) do
      # payload layout: 16-byte iv, 16-byte GCM tag, then the ciphertext
      <<iv::binary-16, tag::binary-16, ciphertext::binary>> = ciphertext
      :crypto.block_decrypt(:aes_gcm, key, iv, {@aad, ciphertext, tag})
    end
  else
    defp encrypt(plaintext, key) do
      # create random Initialisation Vector
      iv = :crypto.strong_rand_bytes(16)
      ciphertext = :crypto.crypto_one_time(:aes_256_ctr, key, iv, plaintext, true)
      # "return" iv & ciphertext
      iv <> ciphertext
    end

    defp decrypt(ciphertext, key) do
      # payload layout: 16-byte iv, then the ciphertext
      <<iv::binary-16, ciphertext::binary>> = ciphertext
      :crypto.crypto_one_time(:aes_256_ctr, key, iv, ciphertext, false)
    end
  end

  defp generate_key() do
    :crypto.strong_rand_bytes(32)
  end
end
73 |
--------------------------------------------------------------------------------
/lib/mongo/pbkdf2.ex:
--------------------------------------------------------------------------------
defmodule Mongo.PBKDF2 do
  # From https://github.com/elixir-lang/plug/blob/ef616a9db9c87ec392dd8a0949bc52fafcf37005/lib/plug/crypto/key_generator.ex
  # with modifications

  @moduledoc """
  `PBKDF2` implements PBKDF2 (Password-Based Key Derivation Function 2),
  part of PKCS #5 v2.0 (Password-Based Cryptography Specification).
  It can be used to derive a number of keys for various purposes from a given
  secret. This lets applications have a single secure secret, but avoid reusing
  that key in multiple incompatible contexts.
  see http://tools.ietf.org/html/rfc2898#section-5.2
  """

  import Bitwise
  # RFC 2898 bounds the derived key length; we cap it at 2^32 - 1 octets.
  @max_length bsl(1, 32) - 1

  @doc """
  Returns a derived key suitable for use.
  ## Options
    * `:iterations` - defaults to 1000 (increase to at least 2^16 if used for
      passwords)
    * `:length`     - a length in octets for the derived key. Defaults to 32
    * `:digest`     - an hmac function to use as the pseudo-random function.
      Defaults to `:sha256`
  """
  def generate(secret, salt, opts \\ []) do
    iterations = Keyword.get(opts, :iterations, 1000)
    length = Keyword.get(opts, :length, 32)
    digest = Keyword.get(opts, :digest, :sha256)

    if length > @max_length do
      raise ArgumentError, "length must be less than or equal to #{@max_length}"
    else
      generate(mac_fun(digest, secret), salt, iterations, length, 1, [], 0)
    end
  end

  # Enough blocks accumulated: concatenate them and truncate to the requested
  # length (RFC 2898 step 4). The binary pattern here was garbled in the
  # source and is reconstructed.
  defp generate(_fun, _salt, _iterations, max_length, _block_index, acc, length) when length >= max_length do
    key = acc |> Enum.reverse() |> IO.iodata_to_binary()
    <<bin::binary-size(max_length), _::binary>> = key
    bin
  end

  # Computes block T_i = U_1 xor U_2 xor ... xor U_c, where
  # U_1 = PRF(secret, salt || INT_32_BE(i)) — see RFC 2898 section 5.2.
  defp generate(fun, salt, iterations, max_length, block_index, acc, length) do
    initial = fun.(<<salt::binary, block_index::unsigned-big-integer-size(32)>>)
    block = iterate(fun, iterations - 1, initial, initial)
    generate(fun, salt, iterations, max_length, block_index + 1, [block | acc], byte_size(block) + length)
  end

  defp iterate(_fun, 0, _prev, acc), do: acc

  defp iterate(fun, iteration, prev, acc) do
    next = fun.(prev)
    iterate(fun, iteration - 1, next, :crypto.exor(next, acc))
  end

  # :crypto.hmac/3 was removed in newer OTP releases in favour of :crypto.mac/4.
  if Code.ensure_loaded?(:crypto) and function_exported?(:crypto, :hmac, 3) do
    defp mac_fun(digest, secret) do
      &:crypto.hmac(digest, secret, &1)
    end
  else
    defp mac_fun(digest, secret) do
      &:crypto.mac(:hmac, digest, secret, &1)
    end
  end
end
67 |
--------------------------------------------------------------------------------
/lib/mongo/pbkdf2_cache.ex:
--------------------------------------------------------------------------------
defmodule Mongo.PBKDF2Cache do
  @moduledoc false
  use GenServer
  @name __MODULE__

  def start_link(_ \\ nil) do
    GenServer.start_link(__MODULE__, [], name: @name)
  end

  # Derives (or looks up) the salted password for the given parameters.
  # Identical requests are deduplicated and the result is cached forever.
  def pbkdf2(password, salt, iterations, digest) do
    GenServer.call(@name, {password, salt, iterations, digest})
  end

  def init([]) do
    # :pending maps a key to the callers waiting for its in-flight derivation;
    # :cache maps a key to its already-computed result.
    {:ok, %{pending: %{}, cache: %{}}}
  end

  def handle_call(key, from, state) do
    cond do
      # Cached: answer immediately.
      salted_password = state.cache[key] ->
        {:reply, salted_password, state}

      # Already being computed: queue this caller behind the others.
      waiting = state.pending[key] ->
        {:noreply, put_in(state.pending[key], [from | waiting])}

      # First request: kick off the derivation task.
      true ->
        _ = run_task(key)
        {:noreply, put_in(state.pending[key], [from])}
    end
  end

  # Task finished: reply to every waiting caller and cache the result.
  def handle_info({ref, {key, result}}, state) when is_reference(ref) do
    Enum.each(state.pending[key], &GenServer.reply(&1, result))

    state =
      state
      |> update_in([:pending], &Map.delete(&1, key))
      |> put_in([:cache, key], result)

    {:noreply, state}
  end

  # Normal task exit; nothing to do.
  def handle_info({:DOWN, _ref, :process, _pid, :normal}, state) do
    {:noreply, state}
  end

  # Key lengths follow the digest: 32 bytes for SHA-256, 20 for SHA-1.
  defp run_task({password, salt, iterations, digest} = key) when digest in [:sha256, :sha] do
    length = if digest == :sha256, do: 32, else: 20

    Task.async(fn ->
      {key, Mongo.PBKDF2.generate(password, salt, iterations: iterations, length: length, digest: digest)}
    end)
  end
end
60 |
--------------------------------------------------------------------------------
/lib/mongo/query.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Query do
  @moduledoc """
  This is the query implementation for the Query Protocol

  Encoding and decoding does not take place at this point, but is directly performed
  into the functions of Mongo.MongoDBConnection.Utils.
  """
  # `action` carries the operation to run; no query text is stored here.
  defstruct action: nil
end
10 |
# DBConnection requires these callbacks, but this driver encodes/decodes
# elsewhere, so every callback is a pass-through.
defimpl DBConnection.Query, for: Mongo.Query do
  # coveralls-ignore-start
  # gets never called
  def parse(query, _opts), do: query
  # gets never called
  def describe(query, _opts), do: query
  # coveralls-ignore-stop
  def encode(_query, params, _opts), do: params
  def decode(_query, reply, _opts), do: reply
end
21 |
--------------------------------------------------------------------------------
/lib/mongo/read_preference.ex:
--------------------------------------------------------------------------------
defmodule Mongo.ReadPreference do
  import Keywords

  @moduledoc ~S"""
  Determines which servers are considered suitable for read operations

  A read preference consists of a mode and optional `tags`, max_staleness_ms, and `hedge`.
  The mode prioritizes between primaries and secondaries to produce either a single suitable server or a list of candidate servers.
  If tags and maxStalenessSeconds are set, they determine which candidate servers are eligible for selection.
  If hedge is set, it configures how server hedged reads are used.

  The default mode is `:primary`.
  The default tags is a list with an empty tag set: [{}].
  The default max_staleness_ms is unset.
  The default hedge is unset.

  ## mode

  * `:primary` Only an available primary is suitable.
  * `:secondary` All secondaries (and only secondaries) are candidates, but only eligible candidates (i.e. after applying tags and maxStalenessSeconds) are suitable.
  * `:primary_preferred` If a primary is available, only the primary is suitable. Otherwise, all secondaries are candidates,
    but only eligible secondaries are suitable.
  * `:secondary_preferred` All secondaries are candidates. If there is at least one eligible secondary, only eligible secondaries are suitable.
    Otherwise, when there are no eligible secondaries, the primary is suitable.
  * `:nearest` The primary and all secondaries are candidates, but only eligible candidates are suitable.

  """

  @primary %{
    mode: :primary,
    tags: [],
    max_staleness_ms: 0
  }

  @doc """
  Merge default values to the read preferences and converts deprecated tag_sets to tags
  """
  def merge_defaults(%{tag_sets: tags} = map) do
    map =
      map
      |> Map.delete(:tag_sets)
      |> Map.put(:tags, tags)

    Map.merge(@primary, map)
  end

  def merge_defaults(map) when is_map(map) do
    Map.merge(@primary, map)
  end

  def merge_defaults(_other) do
    @primary
  end

  @doc """
  Add read preference to the cmd
  """
  def add_read_preference(cmd, opts) do
    case Keyword.get(opts, :read_preference) do
      nil ->
        cmd

      pref ->
        cmd ++ ["$readPreference": pref]
    end
  end

  @doc """
  Converts the preference to the mongodb format for replica sets
  """
  def to_replica_set(%{:mode => :primary}) do
    %{mode: :primary}
  end

  def to_replica_set(config) do
    mode = translate_mode(config[:mode])

    case config[:tags] do
      tags when tags in [nil, []] ->
        %{mode: mode}

      tags ->
        %{mode: mode, tags: [tags]}
    end
  end

  @doc """
  Converts the preference to the mongodb format for mongos
  """
  def to_mongos(%{mode: :primary}) do
    nil
  end

  # for the others we should use the read preferences
  def to_mongos(config) do
    mode = translate_mode(config[:mode])

    max_staleness_seconds =
      case config[:max_staleness_ms] do
        i when is_integer(i) ->
          div(i, 1000)

        nil ->
          nil
      end

    base = %{mode: mode, maxStalenessSeconds: max_staleness_seconds, hedge: config[:hedge]}

    read_preference =
      case config[:tags] do
        tags when tags in [nil, []] ->
          base

        tags ->
          Map.put(base, :tags, [tags])
      end

    # nil entries (unset maxStalenessSeconds/hedge) must not be sent to the server
    filter_nils(read_preference)
  end

  def to_topology_single_type({_, %{replica?: true} = _server_description}), do: %{mode: :primaryPreferred}
  def to_topology_single_type(_), do: nil

  # Translates the driver's snake_case modes to the server's camelCase names;
  # shared by to_replica_set/1 and to_mongos/1.
  defp translate_mode(:primary_preferred), do: :primaryPreferred
  defp translate_mode(:secondary_preferred), do: :secondaryPreferred
  defp translate_mode(other), do: other
end
148 |
--------------------------------------------------------------------------------
/lib/mongo/stable_version.ex:
--------------------------------------------------------------------------------
defmodule Mongo.StableVersion do
  @moduledoc false

  defmodule ServerAPI do
    @moduledoc false

    defstruct version: "1",
              strict: false,
              deprecation_errors: false
  end

  # Applies the stable-API fields to the command when a full configuration
  # (a ServerAPI struct or an equivalent map) is given.
  def merge_stable_api(command, %{version: version, strict: strict, deprecation_errors: deprecation_errors}) do
    [apiVersion: version, apiStrict: strict, apiDeprecationErrors: deprecation_errors]
    |> Enum.reduce(command, fn {key, value}, acc -> Keyword.put(acc, key, value) end)
  end

  # No stable-API configuration: the command passes through unchanged.
  def merge_stable_api(command, _other) do
    command
  end
end
23 |
--------------------------------------------------------------------------------
/lib/mongo/version.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Version do
  @moduledoc """
  This module contains the constant of all wire versions.

  see https://github.com/mongodb/mongo/blob/master/src/mongo/db/wire_version.h
  """

  # Name/number pairs mirroring the server's wire_version.h constants.
  wire_versions = [
    # everything before version tracking started
    release_2_4_and_before: 0,
    # aggregation can return cursors
    agg_returns_cursors: 1,
    # insert/update/delete batch commands
    batch_commands: 2,
    # SCRAM-SHA1, listIndexes, listCollections, new explain
    release_2_7_7: 3,
    # find and getMore commands, OP_COMMAND in mongod (not mongos)
    find_command: 4,
    # all write commands accept a write concern
    commands_accept_write_concern: 5,
    # OP_MSG wire protocol (3.6+)
    supports_op_msg: 6,
    # replica set transactions (4.0+)
    replica_set_transactions: 7,
    # sharded transactions (4.2+)
    sharded_transactions: 8,
    # resumable initial sync (4.4+)
    resumable_initial_sync: 9,
    # features from 4.7 onwards
    wire_version_47: 10,
    # features from 4.8 onwards
    wire_version_48: 11,
    # features from 4.9 onwards
    wire_version_49: 12,
    # features from 5.0 onwards
    wire_version_50: 13,
    # features from 5.1 onwards
    wire_version_51: 14,
    # features from 5.2 onwards
    wire_version_52: 15,
    # features from 5.3 onwards
    wire_version_53: 16,
    # features from 6.0 onwards
    wire_version_60: 17,
    # features from 6.1 onwards
    wire_version_61: 18
  ]

  # Generate encode/1 (name -> number) and decode/1 (number -> name) clauses.
  Enum.each(wire_versions, fn {name, number} ->
    def encode(unquote(name)), do: unquote(number)
    def decode(unquote(number)), do: unquote(name)
  end)
end
54 |
--------------------------------------------------------------------------------
/lib/mongo/write_concern.ex:
--------------------------------------------------------------------------------
defmodule Mongo.WriteConcern do
  @moduledoc false

  import Keywords

  # Builds the write-concern document from the :w, :j and :wtimeout options,
  # returning nil when none of them are set.
  @spec write_concern(keyword) :: nil | map
  def write_concern(opts) do
    [:w, :j, :wtimeout]
    |> Map.new(fn key -> {key, Keyword.get(opts, key)} end)
    |> filter_nils()
    |> filter_empty()
  end

  @spec filter_empty(map) :: nil | map
  defp filter_empty(map) when map == %{}, do: nil
  defp filter_empty(%{} = map), do: map

  # A write concern is unacknowledged exactly when :w is set to 0.
  @spec acknowledged?(nil | keyword | map) :: boolean
  def acknowledged?(nil), do: true

  def acknowledged?(%{} = write_concern) do
    case Map.fetch(write_concern, :w) do
      {:ok, 0} -> false
      _other -> true
    end
  end

  def acknowledged?(write_concern) when is_list(write_concern) do
    Keyword.get(write_concern, :w) != 0
  end
end
28 |
--------------------------------------------------------------------------------
/lib/session/server_session.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Session.ServerSession do
  @moduledoc """
  This module represents the server-side session. There are three fields:

  * `last_use` - The timestamp for the last use of this server session
  * `txn_num` - The current transaction number
  * `session_id` - The session id of this server session

  When a transaction is active, all operations in that transaction
  use the same transaction number.

  Transaction number is also used outside of transactions for
  retryable writes. In this case, each write operation has its own
  transaction number, but retries of a write operation use the same
  transaction number as the first write (which is how the server
  knows that subsequent writes are retries and should be ignored if
  the first write succeeded on the server but was not read by the
  client, for example).
  """

  alias Mongo.Session.ServerSession

  @type t :: %__MODULE__{
          last_use: integer,
          txn_num: non_neg_integer,
          session_id: BSON.Binary.t()
        }

  defstruct last_use: 0, txn_num: 0, session_id: nil

  @doc """
  Create a new server session.
  """
  @spec new() :: ServerSession.t()
  def new() do
    %ServerSession{last_use: System.monotonic_time(:second), session_id: Mongo.uuid()}
  end

  @doc """
  Update the last_use attribute of the server session to now.
  """
  @spec set_last_use(ServerSession.t()) :: ServerSession.t()
  def set_last_use(%ServerSession{} = session) do
    %ServerSession{session | last_use: System.monotonic_time(:second)}
  end

  @doc """
  Increment the current transaction number and return the new value.
  """
  @spec next_txn_num(ServerSession.t()) :: ServerSession.t()
  def next_txn_num(%ServerSession{} = session) do
    %ServerSession{session | txn_num: session.txn_num + 1}
  end

  @doc """
  Return true, if the server session will time out. In this case the session
  can be removed from the queue.
  """
  @spec about_to_expire?(ServerSession.t(), integer) :: boolean
  @compile {:inline, about_to_expire?: 2}
  def about_to_expire?(%ServerSession{last_use: last_use}, logical_session_timeout) do
    idle_seconds = System.monotonic_time(:second) - last_use
    idle_seconds >= logical_session_timeout
  end

  defimpl Inspect, for: ServerSession do
    def inspect(%ServerSession{last_use: last_use, txn_num: txn, session_id: session_id}, _opts) do
      "#ServerSession(#{inspect(DateTime.from_unix(last_use))}, #{txn}, session_id: #{inspect(session_id)})"
    end
  end
end
71 |
--------------------------------------------------------------------------------
/lib/session/session_pool.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Session.SessionPool do
  @moduledoc """

  A FIFO cache for sessions. To get a new session, call `checkout`. This returns a new session or a cached session.
  After running the operation call `checkin(session)` to put the session into the FIFO cache for reuse.

  The MongoDB specifications allows to generate the uuid from the client. That means, that we can
  just create server sessions and use them for logicial sessions. If they expire then we drop these sessions,
  otherwise we can reuse the server sessions.
  """

  alias Mongo.Session.ServerSession

  @type session_pool() :: %{:pool_size => any, :queue => [ServerSession.t()], :timeout => any, optional(any) => any}

  # `logical_session_timeout` is in minutes; it is stored in seconds with a
  # one-minute safety margin subtracted.
  def new(logical_session_timeout, opts \\ []) do
    size = Keyword.get(opts, :session_pool, 1000)

    %{
      timeout: logical_session_timeout * 60 - 60,
      queue: for(_ <- 1..size, do: ServerSession.new()),
      pool_size: size
    }
  end

  @doc """
  Return a server session. If the session timeout is not reached, then a cached server session is return for reuse.
  Otherwise a newly created server session is returned.
  """
  @spec checkout(session_pool()) :: {ServerSession.t(), session_pool()}
  @compile {:inline, checkout: 1}
  def checkout(%{queue: queue, timeout: timeout, pool_size: size} = pool) do
    {session, remaining} = find_session(queue, timeout, size)
    {session, %{pool | queue: remaining}}
  end

  @doc """
  Checkin a used server session. It if is already expired, the server session is dropped. Otherwise the server session
  is cache for reuse, until it expires due of being cached all the time.
  """
  @spec checkin(session_pool(), ServerSession.t()) :: session_pool()
  @compile {:inline, checkin: 2}
  def checkin(%{queue: queue, timeout: timeout} = pool, session) do
    if ServerSession.about_to_expire?(session, timeout) do
      %{pool | queue: queue}
    else
      %{pool | queue: [session | queue]}
    end
  end

  ##
  # Walks the queue for the first non-expired session, dropping expired ones
  # along the way. An exhausted queue is refilled with `size` fresh sessions.
  #
  @compile {:inline, find_session: 3}
  defp find_session([], _timeout, size) do
    {ServerSession.new(), for(_ <- 1..size, do: ServerSession.new())}
  end

  defp find_session([candidate | rest], timeout, size) do
    if ServerSession.about_to_expire?(candidate, timeout) do
      find_session(rest, timeout, size)
    else
      {candidate, rest}
    end
  end
end
73 |
--------------------------------------------------------------------------------
/lib/tasks/gen/migration.ex:
--------------------------------------------------------------------------------
defmodule Mix.Tasks.Mongo.Gen.Migration do
  @moduledoc false

  use Mix.Task

  import Macro, only: [camelize: 1, underscore: 1]
  import Mix.Generator

  alias Mongo.Migration

  @shortdoc "Generates a new migration for Mongo"

  # Creates a timestamped migration file and returns the timestamp as integer.
  # Usage: `mix mongo.gen.migration <name> [<topology>]`.
  @spec run([String.t()]) :: integer()
  def run(args) do
    {name, topology} =
      case args do
        [name | [topology | _xs]] ->
          {name, topology}

        [name | _xs] ->
          {name, Migration.get_config()[:topology]}

        _other ->
          Mix.raise("Filename is missing")
      end

    migrations_path = migration_file_path(topology)

    base_name = "#{underscore(name)}.exs"
    current_timestamp = timestamp()
    file = Path.join(migrations_path, "#{current_timestamp}_#{base_name}")
    unless File.dir?(migrations_path), do: create_directory(migrations_path)
    # match any timestamp prefix so the same migration name cannot be created twice
    fuzzy_path = Path.join(migrations_path, "*_#{base_name}")

    if Path.wildcard(fuzzy_path) != [] do
      Mix.raise("Migration can't be created, there is already a migration file with name #{name}.")
    end

    assigns = [mod: Module.concat([Mongo, Migrations, camelize(to_string(topology)), camelize(name)])]
    create_file(file, migration_template(assigns))
    String.to_integer(current_timestamp)
  end

  @doc """
  Returns the private repository path relative to the source.
  """
  def migration_file_path(topology) do
    path = "priv/#{topology}/#{Migration.get_config()[:path]}"
    otp_app = Migration.get_config()[:otp_app]
    Path.join(Mix.Project.deps_paths()[otp_app] || File.cwd!(), path)
  end

  # UTC timestamp like "20240131120000"; each component is zero-padded.
  defp timestamp do
    {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time()
    "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}"
  end

  # Zero-pads a single calendar component to two digits.
  # (The binary in the source was garbled; reconstructed as "0" <> digit.)
  defp pad(i) when i < 10, do: <<?0, ?0 + i>>
  defp pad(i), do: to_string(i)

  embed_template(:migration, """
  defmodule <%= inspect @mod %> do
    def up() do
      # The `up` functions will be executed when running `mix mongo.migrate`
      #
      # indexes = [[key: [files_id: 1, n: 1], name: "files_n_index", unique: true]]
      # Mongo.create_indexes(<%= inspect(Mongo.Migration.get_config()[:topology]) %>, "my_collection", indexes)
    end

    def down() do
      # The `down` functions will be executed when running `mix mongo.drop`
      #
      # Mongo.drop_collection(<%= inspect(Mongo.Migration.get_config()[:topology]) %>, "my_collection")
    end
  end
  """)
end
78 |
--------------------------------------------------------------------------------
/lib/utils.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Utils do
  @moduledoc false

  # Runs each document through its Mongo.Encoder implementation (when one
  # exists) and makes sure every document carries an _id, returning
  # {ids, docs} as two parallel lists.
  def assign_ids(list) when is_list(list) do
    list
    |> Enum.map(fn item ->
      case Mongo.Encoder.impl_for(item) do
        nil ->
          item

        _ ->
          Mongo.Encoder.encode(item)
      end
    end)
    |> Enum.map(fn item -> assign_id(item) end)
    |> Enum.unzip()
  end

  defp assign_id(%{_id: id} = map) when id != nil, do: {id, map}
  defp assign_id(%{"_id" => id} = map) when id != nil, do: {id, map}

  defp assign_id([{_, _} | _] = keyword) do
    case Keyword.take(keyword, [:_id, "_id"]) do
      [{_key, id} | _] when id != nil -> {id, keyword}
      [] -> add_id(keyword)
    end
  end

  defp assign_id(map) when is_map(map) do
    map
    |> Map.to_list()
    |> add_id()
  end

  ##
  # Inserts an ID to the document. A distinction is made as to whether binaries or atoms are used as keys.
  #
  defp add_id(doc) do
    id = Mongo.IdServer.new()
    {id, add_id(doc, id)}
  end

  defp add_id([{key, _} | _] = list, id) when is_atom(key), do: [{:_id, id} | list]
  defp add_id([{key, _} | _] = list, id) when is_binary(key), do: [{"_id", id} | list]
  defp add_id([], id), do: [{"_id", id}]

  # Validates whether document keys match the operation type: update documents
  # must only contain atomic ($-prefixed) modifiers, replace documents none.
  def modifier_docs([{key, _} | _], type), do: key |> key_to_string |> modifier_key(type)
  def modifier_docs(map, _type) when is_map(map) and map_size(map) == 0, do: :ok
  def modifier_docs(map, type) when is_map(map), do: Enum.at(map, 0) |> elem(0) |> key_to_string |> modifier_key(type)
  def modifier_docs(list, type) when is_list(list), do: Enum.map(list, &modifier_docs(&1, type))

  # The "$" prefix patterns were garbled in the source; reconstructed as <<?$, ...>>.
  defp modifier_key(<<?$, _::binary>> = other, :replace), do: raise(ArgumentError, "replace does not allow atomic modifiers, got: #{other}")
  defp modifier_key(<<?$, _::binary>>, :update), do: :ok
  defp modifier_key(<<_, _::binary>> = other, :update), do: raise(ArgumentError, "update only allows atomic modifiers, got: #{other}")
  defp modifier_key(_, _), do: :ok

  defp key_to_string(key) when is_atom(key), do: Atom.to_string(key)
  defp key_to_string(key) when is_binary(key), do: key
end
60 |
--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
defmodule Mongodb.Mixfile do
  use Mix.Project

  @source_url "https://github.com/zookzook/elixir-mongodb-driver"
  @version "1.5.4"

  def project() do
    [
      app: :mongodb_driver,
      version: @version,
      elixirc_paths: elixirc_paths(Mix.env()),
      elixir: "~> 1.15",
      name: "mongodb-driver",
      deps: deps(),
      docs: docs(),
      package: package(),
      # protocol consolidation is disabled in tests so that test-only protocol
      # implementations can still be compiled
      consolidate_protocols: Mix.env() != :test
    ]
  end

  # test builds additionally compile the shared helpers under test/support
  defp elixirc_paths(:test), do: ["lib", "test/support"]
  defp elixirc_paths(_), do: ["lib"]

  def application do
    [
      env: [],
      extra_applications: [:logger, :crypto, :ssl],
      mod: {Mongo.App, []}
    ]
  end

  defp deps do
    [
      {:telemetry, "~> 1.0"},
      {:db_connection, "~> 2.6"},
      {:decimal, "~> 2.1 and >= 2.1.1"},
      {:patch, "~> 0.12.0", only: [:dev, :test]},
      {:jason, "~> 1.3", only: [:dev, :test]},
      {:credo, "~> 1.7.0", only: [:dev, :test], runtime: false},
      {:ex_doc, "~> 0.32.2", only: :dev, runtime: false},
      # optional dependency enabling zstd wire-protocol compression
      {:ezstd, "~> 1.1", optional: true}
    ]
  end

  defp docs do
    [
      extras: [
        "README.md",
        "CHANGELOG.md",
        "LICENSE"
      ],
      main: "readme",
      source_url: @source_url,
      source_ref: "v#{@version}",
      formatters: ["html"]
    ]
  end

  # Hex package metadata
  defp package do
    [
      description: "The MongoDB driver for Elixir",
      maintainers: ["Michael Maier"],
      licenses: ["Apache-2.0"],
      links: %{
        "Changelog" => "https://hexdocs.pm/mongodb_driver/changelog.html",
        "GitHub" => @source_url
      }
    ]
  end
end
71 |
--------------------------------------------------------------------------------
/test/bson/decimal128_test.exs:
--------------------------------------------------------------------------------
1 | defmodule BSON.Decimal128Test do
2 | use ExUnit.Case, async: true
3 |
4 | @nan_binaries <<00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 124>>
5 | @inf_binaries <<00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 120>>
6 | @neg_inf_binaries <<00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 248>>
7 | @binaries_0 <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 48>>
8 | @binaries_0_neg_expo <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 122, 43>>
9 | @binaries_neg_0_0 <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 62, 176>>
10 | @binaries_1_e_3 <<1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 48>>
11 | @binaries_0_001234 <<210, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52, 48>>
12 | @binaries_0_00123400000 <<64, 239, 90, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 48>>
13 | @binaries_0_1234567890123456789012345678901234 <<242, 175, 150, 126, 208, 92, 130, 222, 50, 151, 255, 111, 222, 60, 252, 47>>
14 | @binaries_regular_largest <<242, 175, 150, 126, 208, 92, 130, 222, 50, 151, 255, 111, 222, 60, 64, 48>>
15 | @binaries_scientific_tiniest <<255, 255, 255, 255, 99, 142, 141, 55, 192, 135, 173, 190, 9, 237, 1, 0>>
16 | @binaries_scientific_tiny <<1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>
17 | @binaries_neg_tiny <<1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128>>
18 |
# Decoding raw decimal128 binaries into Decimal structs.
# Tagged :mongo_3_4 — presumably gated to servers supporting decimal128; confirm with the test runner config.
@tag :mongo_3_4
test "BSON.Decimal128.decode/1" do
  # Special values: NaN and +/- infinity map to :NaN / :inf coefficients.
  assert_decimal(@nan_binaries, %Decimal{coef: :NaN})
  assert_decimal(@inf_binaries, %Decimal{coef: :inf})
  assert_decimal(@neg_inf_binaries, %Decimal{sign: -1, coef: :inf})
  # Zeros with different exponents and signs.
  assert_decimal(@binaries_0, %Decimal{coef: 0})
  assert_decimal(@binaries_0_neg_expo, %Decimal{coef: 0, exp: -611})
  assert_decimal(@binaries_neg_0_0, %Decimal{sign: -1, coef: 0, exp: -1})
  # Ordinary finite values with positive and negative exponents.
  assert_decimal(@binaries_1_e_3, %Decimal{coef: 1, exp: 3})
  assert_decimal(@binaries_0_001234, %Decimal{coef: 1234, exp: -6})
  assert_decimal(@binaries_0_00123400000, %Decimal{coef: 123_400_000, exp: -11})

  assert_decimal(@binaries_0_1234567890123456789012345678901234, %Decimal{
    coef: 1_234_567_890_123_456_789_012_345_678_901_234,
    exp: -34
  })

  assert_decimal(@binaries_regular_largest, %Decimal{
    coef: 1_234_567_890_123_456_789_012_345_678_901_234,
    exp: 0
  })

  # Boundary cases: maximum coefficient and minimum exponent (-6176).
  assert_decimal(@binaries_scientific_tiniest, %Decimal{coef: 9_999_999_999_999_999_999_999_999_999_999_999, exp: -6176})
  assert_decimal(@binaries_scientific_tiny, %Decimal{coef: 1, exp: -6176})
  assert_decimal(@binaries_neg_tiny, %Decimal{sign: -1, coef: 1, exp: -6176})
end
45 |
# Round-trip test: each Decimal is encoded and decoded again via
# assert_decimal/1 and must come back unchanged. Covers the same value
# classes as the decode test above.
@tag :mongo_3_4
test "BSON.Decimal128.encode/1" do
  assert_decimal(%Decimal{coef: :NaN})
  assert_decimal(%Decimal{sign: -1, coef: :inf})
  assert_decimal(%Decimal{coef: :inf})
  assert_decimal(%Decimal{coef: 0, exp: -611})
  assert_decimal(%Decimal{sign: -1, coef: 0, exp: -1})
  assert_decimal(%Decimal{coef: 1, exp: 3})
  assert_decimal(%Decimal{coef: 1234, exp: -6})
  assert_decimal(%Decimal{coef: 123_400_000, exp: -11})
  assert_decimal(%Decimal{coef: 1_234_567_890_123_456_789_012_345_678_901_234, exp: -34})
  assert_decimal(%Decimal{coef: 1_234_567_890_123_456_789_012_345_678_901_234, exp: 0})
  assert_decimal(%Decimal{coef: 9_999_999_999_999_999_999_999_999_999_999_999, exp: -6176})
  assert_decimal(%Decimal{coef: 1, exp: -6176})
  assert_decimal(%Decimal{sign: -1, coef: 1, exp: -6176})
end
62 |
# Round-trips a Decimal through BSON.Decimal128 encode/decode and asserts
# that the result equals the original value.
defp assert_decimal(expected_decimal) do
  encoded = BSON.Decimal128.encode(expected_decimal)
  assert BSON.Decimal128.decode(encoded) == expected_decimal
end
71 |
# Asserts that a raw 16-byte decimal128 binary decodes to the expected Decimal.
defp assert_decimal(binaries, expected_decimal) do
  assert BSON.Decimal128.decode(binaries) == expected_decimal
end
75 | end
76 |
--------------------------------------------------------------------------------
/test/bson/decoder_test.exs:
--------------------------------------------------------------------------------
# Test-only decoder variant: records each document's original key order under
# the :original_order key instead of the default :__order__.
defmodule BSON.DecoderTest.CustomPreserveOrderDecoder do
  use BSON.DecoderGenerator, preserve_order: :original_order
end
4 |
defmodule BSON.DecoderTest.MapWithOrder do
  @moduledoc false

  # Converts maps annotated with an order key (as produced by the
  # preserve-order decoders) into `{key, value}` tuple lists, recursively,
  # preserving the recorded key order. `order_key` names the annotation key.
  def to_list(doc, order_key \\ :__order__) do
    convert(doc, order_key)
  end

  # Structs pass through untouched (they are maps, so this clause must
  # precede the generic map clause).
  defp convert(%{__struct__: _} = struct_value, _order_key), do: struct_value

  # Plain maps: expand in the recorded order; when no order annotation is
  # present, fall back to whatever order Map.keys/1 yields.
  defp convert(doc, order_key) when is_map(doc) do
    keys = Map.get(doc, order_key, Map.keys(doc))
    for key <- keys, do: {key, convert(Map.get(doc, key), order_key)}
  end

  # Lists are converted element-wise.
  defp convert(values, order_key) when is_list(values) do
    for value <- values, do: convert(value, order_key)
  end

  # Scalars (numbers, binaries, atoms, ...) are returned as-is.
  defp convert(other, _order_key), do: other
end
28 |
defmodule BSON.DecoderTest do
  use ExUnit.Case, async: true

  # BSON encoding of the document below; used by every test in this module.
  # {
  #   "key1": {
  #     "a": 1,
  #     "b": 2,
  #     "c": 3
  #   },
  #   "key2": {
  #     "x": 4,
  #     "y": 5
  #   }
  # }
  @bson_document <<62, 0, 0, 0, 3, 107, 101, 121, 49, 0, 26, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 16, 98, 0, 2, 0, 0, 0, 16, 99, 0, 3, 0, 0, 0, 0, 3, 107, 101, 121, 50, 0, 19, 0, 0, 0, 16, 120, 0, 4, 0, 0, 0, 16, 121, 0, 5, 0, 0, 0, 0, 0>>

  describe "BSON.Decoder.decode/1" do
    # The default decoder yields plain maps without ordering metadata.
    test "decodes binary data into a map" do
      assert BSON.Decoder.decode(@bson_document) == %{
               "key1" => %{
                 "a" => 1,
                 "b" => 2,
                 "c" => 3
               },
               "key2" => %{
                 "x" => 4,
                 "y" => 5
               }
             }
    end
  end

  describe "BSON.PreserveOrderDecoder.decode/1" do
    # The preserve-order decoder additionally records each (sub)document's
    # key order under the default :__order__ key.
    test "decodes binary data into a map with :__order__" do
      assert BSON.PreserveOrderDecoder.decode(@bson_document) == %{
               "key1" => %{
                 "a" => 1,
                 "b" => 2,
                 "c" => 3,
                 __order__: ["a", "b", "c"]
               },
               "key2" => %{
                 "x" => 4,
                 "y" => 5,
                 __order__: ["x", "y"]
               },
               __order__: ["key1", "key2"]
             }
    end

    # Same as above, but using the generated decoder that stores the order
    # under :original_order (see CustomPreserveOrderDecoder at the top).
    test "decodes binary data into a map with custom key" do
      assert BSON.DecoderTest.CustomPreserveOrderDecoder.decode(@bson_document) == %{
               "key1" => %{
                 "a" => 1,
                 "b" => 2,
                 "c" => 3,
                 original_order: ["a", "b", "c"]
               },
               "key2" => %{
                 "x" => 4,
                 "y" => 5,
                 original_order: ["x", "y"]
               },
               original_order: ["key1", "key2"]
             }
    end
  end

  # End-to-end check of the MapWithOrder helper defined above: annotated maps
  # flatten into ordered {key, value} lists, with structs left untouched.
  test "annotated maps can be converted to lists" do
    ordered_list =
      %{
        "_id" => BSON.ObjectId.new(1, 2, 3, 4),
        "user" => %{
          "name" => "John Doe",
          "age" => 42,
          __order__: ["name", "age"]
        },
        __order__: ["_id", "user"]
      }
      |> BSON.DecoderTest.MapWithOrder.to_list()

    assert ordered_list == [
             {"_id", BSON.ObjectId.new(1, 2, 3, 4)},
             {"user", [{"name", "John Doe"}, {"age", 42}]}
           ]
  end
end
116 |
--------------------------------------------------------------------------------
/test/bson/encoder_test.exs:
--------------------------------------------------------------------------------
defmodule BSON.EncoderTest do
  use ExUnit.Case, async: true

  # Encoding an unsupported term ({:error, _} tuple) must raise Mongo.Error.
  # NOTE(review): the pattern match inside the anonymous function is never
  # reached when BSON.encode/1 raises, so only the exception type is checked
  # here — the expected message text is not actually asserted.
  test "return error in the case of encoder issues" do
    assert_raise Mongo.Error, fn -> %{message: "invalid document: {:error, \"some error\"}"} = BSON.encode(%{"field" => {:error, "some error"}}) end
  end

  # Millisecond values outside DateTime's representable range are clamped to
  # the maximum (9999-12-31) / minimum (-9999-01-01) supported instants
  # instead of raising during decode.
  test "while decoding use max unix time range for invalid time ranges" do
    assert %{"ts" => ~U[9999-12-31 23:59:59.999Z]} == BSON.decode([<<17, 0, 0, 0>>, ["", 9, ["ts", 0], <<6_312_846_085_200_000::signed-little-64>>], 0])
    assert %{"ts" => ~U[-9999-01-01 00:00:00.000Z]} == BSON.decode([<<17, 0, 0, 0>>, ["", 9, ["ts", 0], <<-6_312_846_085_200_000::signed-little-64>>], 0])
  end
end
13 |
--------------------------------------------------------------------------------
/test/bson/types_test.exs:
--------------------------------------------------------------------------------
defmodule BSON.TypesTest do
  use ExUnit.Case, async: true

  # Custom Inspect implementations for the BSON wrapper structs.
  test "inspect BSON.Binary" do
    # Generic binaries render as hex.
    value = %BSON.Binary{binary: <<1, 2, 3>>}
    assert inspect(value) == "#BSON.Binary<010203>"

    # The :uuid subtype renders in canonical 8-4-4-4-12 UUID form.
    value = %BSON.Binary{binary: <<132, 142, 144, 233, 87, 80, 78, 10, 171, 115, 102, 172, 107, 50, 130, 66>>, subtype: :uuid}
    assert inspect(value) == "#BSON.UUID<848e90e9-5750-4e0a-ab73-66ac6b328242>"
  end

  # Shared fixtures: one ObjectId, its hex encodings, and the timestamp
  # embedded in its first four bytes.
  @objectid %BSON.ObjectId{value: <<29, 32, 69, 244, 101, 119, 228, 28, 61, 24, 21, 215>>}
  @string "1d2045f46577e41c3d1815d7"
  @string_uppercase "1D2045F46577E41C3D1815D7"
  @timestamp DateTime.from_unix!(488_654_324)

  test "inspect BSON.ObjectId" do
    assert inspect(@objectid) == "#BSON.ObjectId<#{@string}>"
  end

  # On old Elixir versions the invalid-input branch is exercised as well;
  # presumably newer versions changed the raised error, hence the split.
  if Version.match?(System.version(), "<= 1.8.0") do
    test "BSON.ObjectId.encode!/1" do
      assert BSON.ObjectId.encode!(@objectid) == @string

      assert_raise FunctionClauseError, fn ->
        BSON.ObjectId.encode!("")
      end
    end
  else
    test "BSON.ObjectId.encode!/1" do
      assert BSON.ObjectId.encode!(@objectid) == @string
    end
  end

  test "BSON.ObjectId.decode!/1" do
    assert BSON.ObjectId.decode!(@string) == @objectid

    # Invalid input (wrong length) raises.
    assert_raise FunctionClauseError, fn ->
      BSON.ObjectId.decode!("")
    end
  end

  # Decoding is case-insensitive for hex digits.
  test "BSON.ObjectId.decode!/1 for uppercase HEX" do
    assert BSON.ObjectId.decode!(@string_uppercase) == @objectid

    assert_raise FunctionClauseError, fn ->
      BSON.ObjectId.decode!("")
    end
  end

  # Non-bang variants return tagged tuples / :error instead of raising.
  test "BSON.ObjectId.encode/1" do
    assert BSON.ObjectId.encode(@objectid) == {:ok, @string}
    assert BSON.ObjectId.encode("") == :error
  end

  test "BSON.ObjectId.decode/1" do
    assert BSON.ObjectId.decode(@string) == {:ok, @objectid}
    assert BSON.ObjectId.decode("") == :error
  end

  # String.Chars implementation yields the hex form.
  test "to_string BSON.ObjectId" do
    assert to_string(@objectid) == @string
  end

  # Same Elixir-version split as for encode!/1 above.
  if Version.match?(System.version(), "<= 1.8.0") do
    test "BSON.ObjectId.get_timestamp!/1" do
      value = BSON.ObjectId.get_timestamp!(@objectid)
      assert DateTime.compare(value, @timestamp) == :eq

      assert_raise FunctionClauseError, fn ->
        BSON.ObjectId.get_timestamp!("")
      end
    end
  else
    test "BSON.ObjectId.get_timestamp!/1" do
      value = BSON.ObjectId.get_timestamp!(@objectid)
      assert DateTime.compare(value, @timestamp) == :eq
    end
  end

  test "BSON.ObjectId.get_timestamp/1" do
    assert {:ok, value} = BSON.ObjectId.get_timestamp(@objectid)
    assert DateTime.compare(value, @timestamp) == :eq
    assert BSON.ObjectId.get_timestamp("") == :error
  end

  test "inspect BSON.Regex" do
    # Options default to the empty string.
    value = %BSON.Regex{pattern: "abc"}
    assert inspect(value) == "#BSON.Regex<\"abc\", \"\">"

    value = %BSON.Regex{pattern: "abc", options: "i"}
    assert inspect(value) == "#BSON.Regex<\"abc\", \"i\">"
  end

  test "inspect BSON.JavaScript" do
    value = %BSON.JavaScript{code: "this === null"}
    assert inspect(value) == "#BSON.JavaScript<\"this === null\">"

    # With a scope, the scope map is included in the output.
    value = %BSON.JavaScript{code: "this === value", scope: %{value: nil}}
    assert inspect(value) == "#BSON.JavaScript<\"this === value\", %{value: nil}>"
  end

  # Also covers the is_after/is_before ordering helpers.
  # NOTE(review): the is_ prefix is conventionally reserved for guards, but the
  # names are part of the public API and are kept as-is here.
  test "inspect BSON.Timestamp" do
    value = %BSON.Timestamp{value: 1_412_180_887, ordinal: 12}
    assert inspect(value) == "#BSON.Timestamp<1412180887:12>"

    {:ok, datetime} = DateTime.now("Etc/UTC")
    date_1 = %BSON.Timestamp{value: DateTime.to_unix(datetime), ordinal: 1}
    date_2 = %BSON.Timestamp{value: DateTime.to_unix(DateTime.add(datetime, 10)), ordinal: 1}

    assert BSON.Timestamp.is_after(date_1, date_2) == false
    assert BSON.Timestamp.is_before(date_1, date_2) == true
  end

  test "inspect BSON.LongNumber" do
    value = %BSON.LongNumber{value: 1_412_180_887}
    assert inspect(value) == "#BSON.LongNumber<1412180887>"
  end
end
120 |
--------------------------------------------------------------------------------
/test/bson/uuid_test.exs:
--------------------------------------------------------------------------------
defmodule BSON.UUIDTest do
  use ExUnit.Case

  # Parsing canonical UUID strings into %BSON.Binary{subtype: :uuid}, for both
  # the raising (uuid!/1) and tagged-tuple (uuid/1) variants.
  test "converting uuids" do
    assert %BSON.Binary{binary: <<132, 142, 144, 233, 87, 80, 78, 10, 171, 115, 102, 172, 107, 50, 130, 66>>, subtype: :uuid} = Mongo.uuid!("848e90e9-5750-4e0a-ab73-66ac6b328242")
    # Trailing garbage and a misplaced dash are both rejected.
    assert_raise ArgumentError, fn -> Mongo.uuid!("848e90e9-5750-4e0a-ab73-66ac6b328242x") end
    assert_raise ArgumentError, fn -> Mongo.uuid!("848e90e9-5750-4e0a-ab73-66ac6-328242") end

    assert {:ok, %BSON.Binary{binary: <<132, 142, 144, 233, 87, 80, 78, 10, 171, 115, 102, 172, 107, 50, 130, 66>>, subtype: :uuid}} = Mongo.uuid("848e90e9-5750-4e0a-ab73-66ac6b328242")
    assert {:error, %ArgumentError{}} = Mongo.uuid("848e90e9-5750-4e0a-ab73-66ac6b328242x")
    assert {:error, %ArgumentError{}} = Mongo.uuid("848e90e9-5750-4e0a-ab73-66ac6-328242")
  end

  # Generated UUIDs round-trip through their inspect output.
  # (Fixed typo in the test name: "uudis" -> "uuids".)
  test "creating uuids" do
    assert %BSON.Binary{binary: _value, subtype: :uuid} = value_1 = Mongo.uuid()
    # inspect/1 renders "#BSON.UUID<...>"; slicing 11..46 extracts the 36-char
    # canonical UUID string, which must parse back to the same value.
    value_2 = inspect(value_1) |> String.slice(11..46) |> Mongo.uuid!()
    assert value_1 == value_2
  end
end
20 |
--------------------------------------------------------------------------------
/test/data/test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zookzook/elixir-mongodb-driver/83b19556c0f0b7ddd20a14a114bf0e01b86f9945/test/data/test.jpg
--------------------------------------------------------------------------------
/test/mongo/batch_size_text.exs:
--------------------------------------------------------------------------------
defmodule Mongo.BatchSizeTest do
  @moduledoc false

  # Removed `require Logger`: Logger is never referenced in this module.
  use CollectionCase

  # Inserts 10_000 documents and reads them all back with batch_size: 100,
  # then checks how many getMore commands the event catcher observed.
  test "find, using :batch_size of 100 options", %{pid: top, catcher: catcher} do
    coll = unique_collection()
    n = 10_000
    Mongo.delete_many(top, coll, %{})

    # w: 0 (unacknowledged writes) keeps the 10k inserts fast.
    Enum.each(1..n, fn i ->
      Mongo.insert_one(top, coll, %{index: i}, w: 0)
    end)

    assert {:ok, n} == Mongo.count(top, coll, %{})

    # Draining the cursor must yield every document despite the small batches.
    assert n ==
             top
             |> Mongo.find(coll, %{}, batch_size: 100)
             |> Enum.to_list()
             |> Enum.count()

    # Count the getMore commands issued while draining the cursor.
    get_mores =
      catcher
      |> EventCatcher.succeeded_events()
      |> Enum.map(fn event -> event.command_name end)
      |> Enum.filter(fn command_name -> command_name == :getMore end)
      |> Enum.count()

    # 10_000 docs at 100 per batch: the first batch arrives with the find
    # reply, the rest via getMore — observed as exactly 100 getMores here.
    assert 100 == get_mores
  end
end
33 |
--------------------------------------------------------------------------------
/test/mongo/collection_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.CollectionTest do
  use CollectionCase

  # Renaming a populated collection keeps its documents and updates the
  # collection listing.
  test "rename_collection", %{pid: top} do
    coll = unique_collection()
    new_coll = "this_is_my_new_collection"

    Mongo.insert_one(top, coll, %{name: "Greta", age: 10})
    Mongo.insert_one(top, coll, %{name: "Tom", age: 13})
    Mongo.insert_one(top, coll, %{name: "Waldo", age: 5})
    Mongo.insert_one(top, coll, %{name: "Oska", age: 3})

    assert {:ok, 4} == Mongo.count(top, coll, %{})

    # The original name shows up in the collection listing.
    names = top |> Mongo.show_collections() |> Enum.to_list()
    assert Enum.find(names, fn name -> name == coll end)

    assert :ok = Mongo.rename_collection(top, "mongodb_test.#{coll}", "mongodb_test.#{new_coll}")

    # All documents survive the rename, and the new name is listed.
    assert {:ok, 4} == Mongo.count(top, new_coll, %{})

    names = top |> Mongo.show_collections() |> Enum.to_list()
    assert Enum.find(names, fn name -> name == new_coll end)
  end

  test "create collection", %{pid: top} do
    coll = unique_collection()

    # Absent before create, present after.
    assert nil == Enum.find(Mongo.show_collections(top), fn name -> name == coll end)
    assert :ok == Mongo.create(top, coll)
    assert nil != Enum.find(Mongo.show_collections(top), fn name -> name == coll end)
  end

  test "drop collection", %{pid: top} do
    coll = unique_collection()

    # Absent -> created -> dropped again.
    assert nil == Enum.find(Mongo.show_collections(top), fn name -> name == coll end)
    assert :ok == Mongo.create(top, coll)
    assert nil != Enum.find(Mongo.show_collections(top), fn name -> name == coll end)
    assert :ok == Mongo.drop_collection(top, coll)
    assert nil == Enum.find(Mongo.show_collections(top), fn name -> name == coll end)
  end
end
48 |
--------------------------------------------------------------------------------
/test/mongo/cursor_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.CursorTest do
  use CollectionCase, async: false

  # Tailable + awaitData cursor on a capped collection: a reader task blocks
  # on the cursor and must also receive the documents inserted after the
  # find started.
  test "tailable cursors with awaitData", c do
    coll = "tailable_cursors"
    init_docs = Stream.cycle([%{"foo" => 42}]) |> Enum.take(5)
    tail_docs = Stream.cycle([%{"foo" => 10}]) |> Enum.take(10)

    # Tailable cursors require a capped collection.
    assert :ok = Mongo.create(c.pid, coll, capped: true, size: 1_000_000)
    assert {:ok, _} = Mongo.insert_many(c.pid, coll, init_docs)

    tailing_task =
      Task.async(fn ->
        Mongo.find(c.pid, coll, %{}, tailable: true, await_data: true)
        |> Enum.take(15)
      end)

    # Feed the tail while the task is already consuming the cursor; the sleep
    # spaces the inserts out so the cursor actually has to wait for data.
    Enum.each(tail_docs, fn doc ->
      Process.sleep(100)
      Mongo.insert_one(c.pid, coll, doc)
    end)

    # The task must deliver all 15 documents in insertion order (_id stripped).
    expected_docs = init_docs ++ tail_docs
    assert ^expected_docs = Task.await(tailing_task) |> Enum.map(fn m -> Map.pop(m, "_id") |> elem(1) end)
  end

  # Taking only 2 of 1000 documents abandons the server-side cursor; the
  # driver must clean it up with a killCursors command.
  test "checking if killCursor is called properly", c do
    coll = "kill_cursors"
    catcher = c.catcher
    ## forcing to get a cursor id
    docs = Stream.cycle([%{foo: 42}]) |> Enum.take(1000)

    assert {:ok, _} = Mongo.insert_many(c.pid, coll, docs)
    assert [%{"foo" => 42}, %{"foo" => 42}] = Mongo.find(c.pid, coll, %{}) |> Enum.take(2) |> Enum.map(fn m -> Map.pop(m, "_id") |> elem(1) end)
    # The most recent succeeded command must be the killCursors cleanup.
    assert [:killCursors | _] = EventCatcher.succeeded_events(catcher) |> Enum.map(fn event -> event.command_name end)
  end

  # issue #35: Crash executing find function without enough permission
  # An invalid query operator must surface as {:error, %Mongo.Error{}} from
  # the stream API instead of crashing the caller.
  test "matching errors in the next function of the stream api", c do
    assert {:error,
            %Mongo.Error{
              __exception__: true,
              code: 2,
              error_labels: ~c"",
              fail_command: false,
              host: nil,
              message: "unknown operator: $gth",
              resumable: false,
              retryable_reads: false,
              retryable_writes: false,
              not_writable_primary_or_recovering: false
            }} ==
             Mongo.find(c.pid, "test", _id: ["$gth": 1])
  end
end
56 |
--------------------------------------------------------------------------------
/test/mongo/errors_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.ErrorsTest do
  @moduledoc false

  use ExUnit.Case, async: false

  alias Mongo.Error

  # Server error codes the driver must treat as resumable.
  @host_unreachable 6
  @host_not_found 7
  @network_timeout 89
  @shutdown_in_progress 91
  @primary_stepped_down 189
  @exceeded_time_limit 262
  @socket_exception 9001
  @not_master 10_107
  @interrupted_at_shutdown 11_600
  @interrupted_due_to_repl_state_change 11_602
  @not_master_no_slaveok 13_435
  @not_master_or_secondary 13_436
  @stale_shard_version 63
  @stale_epoch 150
  # @stale_config 13388
  @retry_change_stream 234
  @failed_to_satisfy_read_preference 133

  # Fixed: @interrupted_at_shutdown was listed twice; each code now appears once.
  @resumable [
    @host_unreachable,
    @host_not_found,
    @network_timeout,
    @shutdown_in_progress,
    @primary_stepped_down,
    @exceeded_time_limit,
    @socket_exception,
    @not_master,
    @interrupted_at_shutdown,
    @interrupted_due_to_repl_state_change,
    @not_master_no_slaveok,
    @not_master_or_secondary,
    @stale_shard_version,
    # @stale_config,
    @stale_epoch,
    @retry_change_stream,
    @failed_to_satisfy_read_preference
  ]

  setup_all do
    assert {:ok, pid} = Mongo.TestConnection.connect()
    {:ok, [pid: pid]}
  end

  # For every code above: inject it via the failCommand fail point and check
  # the resulting error is flagged as resumable. Requires a replica set.
  @tag :rs_required
  test "resumable errors", %{pid: top} do
    @resumable
    |> Enum.map(fn code ->
      fail_cmd = [
        configureFailPoint: "failCommand",
        mode: %{times: 1},
        data: [errorCode: code, failCommands: ["find"]]
      ]

      assert {:ok, _} = Mongo.admin_command(top, fail_cmd)
      assert {:error, msg} = Mongo.find_one(top, "test", %{})
      assert msg.resumable == true
    end)

    # The ResumableChangeStreamError label must also mark an otherwise
    # non-resumable code (2) as resumable.
    fail_cmd = [configureFailPoint: "failCommand", mode: [times: 1], data: [failCommands: ["find"], errorCode: 2, errorLabels: ["ResumableChangeStreamError"]]]

    assert {:ok, _} = Mongo.admin_command(top, fail_cmd)
    assert {:error, msg} = Mongo.find_one(top, "test", %{})

    assert msg.resumable == true
  end

  # A plain connection error is neither a stepdown nor retryable for reads.
  test "handle connection error" do
    the_error = %DBConnection.ConnectionError{}
    assert false == Error.not_writable_primary_or_recovering?(the_error, [])
    assert false == Error.should_retry_read(the_error, [ping: 1], [])
  end

  # Schema-validation failures must carry the server's errInfo details
  # (code 121 = DocumentValidationFailure) in the WriteError.
  test "error info", %{pid: top} do
    cmd = [
      collMod: "validated",
      validator: [
        "$jsonSchema": %{
          "bsonType" => "object",
          "properties" => %{
            "_id" => %{
              "bsonType" => "objectId"
            },
            "text" => %{
              "bsonType" => "string"
            },
            "isDone" => %{
              "bsonType" => "bool"
            }
          },
          "required" => ["text", "isDone"],
          "additionalProperties" => false
        }
      ]
    ]

    # Let's play safe
    Mongo.drop_collection(top, "validated")
    Mongo.create(top, "validated")

    Mongo.command!(top, cmd)

    assert match?({:error, %Mongo.WriteError{write_errors: [%{"code" => 121, "errInfo" => %{"details" => _}}]}}, Mongo.insert_one(top, "validated", %{"text" => 11}))
  end
end
111 |
--------------------------------------------------------------------------------
/test/mongo/find_one_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.FindOneTest do
  use CollectionCase

  # find_one with a :sort option must return the first document of the
  # requested ordering.
  test "find_one, using :sort options", %{pid: top} do
    coll = unique_collection()

    Mongo.insert_one(top, coll, %{name: "Greta", age: 10})
    Mongo.insert_one(top, coll, %{name: "Tom", age: 13})
    Mongo.insert_one(top, coll, %{name: "Waldo", age: 5})
    Mongo.insert_one(top, coll, %{name: "Oska", age: 3})

    assert {:ok, 4} == Mongo.count(top, coll, %{})

    # Helper: fetch the first document for the given sort and keep its name.
    first_name = fn sort ->
      top |> Mongo.find_one(coll, %{}, sort: sort) |> Map.take(["name"])
    end

    assert %{"name" => "Greta"} == first_name.(%{name: 1})
    assert %{"name" => "Waldo"} == first_name.(%{name: -1})
    assert %{"name" => "Oska"} == first_name.(%{age: 1})
    assert %{"name" => "Tom"} == first_name.(%{age: -1})
  end
end
20 |
--------------------------------------------------------------------------------
/test/mongo/grid_fs/download_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.GridFs.DownloadTest do
  use ExUnit.Case

  alias Mongo.GridFs.Bucket
  alias Mongo.GridFs.Upload
  alias Mongo.GridFs.Download
  alias BSON.ObjectId

  # Uploads ./test/data/test.jpg once into a GridFS bucket; every test below
  # downloads it again through a different lookup variant.
  setup_all do
    assert {:ok, pid} = Mongo.TestConnection.connect()
    bucket = Bucket.new(pid)

    upload_stream = Upload.open_upload_stream(bucket, "test.jpg", nil)
    src_filename = "./test/data/test.jpg"
    # Stream the source file into GridFS in 512-byte chunks.
    File.stream!(src_filename, [], 512) |> Stream.into(upload_stream) |> Stream.run()
    file_id = upload_stream.id
    assert file_id != nil

    {:ok, [pid: pid, bucket: bucket, id: file_id]}
  end

  # Lookup by the hex-encoded object-id string.
  # NOTE(review): the ".jps" extension of the scratch path looks like a typo
  # for ".jpg", but it is only a temp file name and does not affect the tests.
  test "open_download_stream - binary", c do
    dest_filename = "/tmp/my-test-file.jps"
    File.rm(dest_filename)

    with {:ok, stream} <- Download.open_download_stream(c.bucket, ObjectId.encode!(c.id)) do
      stream
      |> Stream.into(File.stream!(dest_filename))
      |> Stream.run()
    end

    assert true == File.exists?(dest_filename)
  end

  # Lookup by the raw %BSON.ObjectId{} value.
  test "open_download_stream - object id", c do
    dest_filename = "/tmp/my-test-file.jps"
    File.rm(dest_filename)

    with {:ok, stream} <- Download.open_download_stream(c.bucket, c.id) do
      stream
      |> Stream.into(File.stream!(dest_filename))
      |> Stream.run()
    end

    assert true == File.exists?(dest_filename)
  end

  # Lookup by the fs.files document returned from find_one_file/2.
  test "open_download_stream - map ", c do
    assert c.id != nil
    file = Download.find_one_file(c.bucket, c.id)

    dest_filename = "/tmp/my-test-file.jps"
    File.rm(dest_filename)

    with {:ok, stream} <- Download.open_download_stream(c.bucket, file) do
      stream
      |> Stream.into(File.stream!(dest_filename))
      |> Stream.run()
    end

    assert true == File.exists?(dest_filename)
  end

  # find_and_stream/2 returns the stream together with the file-info document;
  # exercised with both the ObjectId and its hex-string form.
  test "find_and_stream", c do
    dest_filename = "/tmp/my-test-file.jps"
    File.rm(dest_filename)

    with {{:ok, stream}, file_info} <- Download.find_and_stream(c.bucket, c.id) do
      stream
      |> Stream.into(File.stream!(dest_filename))
      |> Stream.run()

      assert file_info["filename"] == "test.jpg"
    end

    assert true == File.exists?(dest_filename)

    File.rm(dest_filename)

    with {{:ok, stream}, file_info} <- Download.find_and_stream(c.bucket, ObjectId.encode!(c.id)) do
      stream
      |> Stream.into(File.stream!(dest_filename))
      |> Stream.run()

      assert file_info["filename"] == "test.jpg"
    end

    assert true == File.exists?(dest_filename)
  end

  # Lookup by original filename instead of id.
  test "find_one_file - filename ", c do
    assert c.id != nil
    file = Download.find_one_file(c.bucket, "test.jpg")

    dest_filename = "/tmp/my-test-file.jps"
    File.rm(dest_filename)

    with {:ok, stream} <- Download.open_download_stream(c.bucket, file) do
      stream
      |> Stream.into(File.stream!(dest_filename))
      |> Stream.run()
    end

    assert true == File.exists?(dest_filename)
  end
end
107 |
--------------------------------------------------------------------------------
/test/mongo/migration_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.MigrationTest do
  use CollectionCase
  use Patch

  alias Mongo.Migration

  # Patches Migration.get_config/1 so the migration lock uses this test's
  # topology and a throw-away "migrations" collection.
  test "test lock and unlock", %{pid: top} do
    Mongo.drop_collection(top, "migrations")
    Patch.patch(Mongo.Migration, :get_config, fn _ -> [topology: top, collection: "migrations", path: "migrations", otp_app: :mongodb_driver] end)
    # Locking twice must fail the second time; same for unlocking.
    assert :locked == Migration.lock()
    assert {:error, :already_locked} == Migration.lock()
    assert :unlocked == Migration.unlock()
    assert {:error, :not_locked} == Migration.unlock()
  end

  # Locks are scoped per database: "one" and "two" hold independent locks.
  test "test lock and unlock with database options", %{pid: top} do
    Mongo.drop_collection(top, "migrations", database: "one")
    Mongo.drop_collection(top, "migrations", database: "two")
    Patch.patch(Mongo.Migration, :get_config, fn _ -> [topology: top, collection: "migrations", path: "migrations", otp_app: :mongodb_driver] end)
    assert :locked == Migration.lock(database: "one")
    assert :locked == Migration.lock(database: "two")
    assert {:error, :already_locked} == Migration.lock(database: "one")
    assert {:error, :already_locked} == Migration.lock(database: "two")
    assert :unlocked == Migration.unlock(database: "one")
    assert :unlocked == Migration.unlock(database: "two")
    assert {:error, :not_locked} == Migration.unlock(database: "one")
    assert {:error, :not_locked} == Migration.unlock(database: "two")
  end
end
30 |
--------------------------------------------------------------------------------
/test/mongo/not_writable_primary_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.NotWritablePrimaryTest do
  use ExUnit.Case, async: false

  setup_all do
    assert {:ok, top} = Mongo.TestConnection.connect()
    # w: 3 waits for write acknowledgement from three replica-set members.
    Mongo.drop_database(top, nil, w: 3)
    %{pid: top}
  end

  test "not writable primary", c do
    top = c.pid

    # Fail point: the next insert command returns error code 10107
    # ("not writable primary") without closing the connection.
    cmd = [
      configureFailPoint: "failCommand",
      mode: [times: 1],
      data: [errorCode: 10_107, failCommands: ["insert"], closeConnection: false]
    ]

    assert {:ok, %Mongo.InsertOneResult{}} = Mongo.insert_one(top, "users", %{name: "Greta1"})
    Mongo.admin_command(top, cmd)
    # The second insert still succeeds — presumably the driver retries after
    # the not-writable-primary error; TODO confirm against the retry logic.
    assert {:ok, %Mongo.InsertOneResult{}} = Mongo.insert_one(top, "users", %{name: "Greta2"})
  end
end
24 |
--------------------------------------------------------------------------------
/test/mongo/password_safe_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.PasswordSafeTest do
  @moduledoc false

  use ExUnit.Case, async: false
  alias Mongo.UrlParser
  alias Mongo.PasswordSafe

  # The safe must hold the password encrypted in its process state while
  # get_password/1 returns the plain text again.
  test "encrypted password" do
    pw = "my-secret-password"
    {:ok, pid} = PasswordSafe.start_link()
    PasswordSafe.set_password(pid, pw)
    # Peek at the process state: the stored value must differ from the input.
    %{key: _key, pw: enc_pw} = :sys.get_state(pid)
    assert enc_pw != pw
    assert pw == PasswordSafe.get_password(pid)
  end

  #
  # When the sasl logger is activated like `--logger-sasl-reports true` then the supervisor reports all parameters when it starts a process. So, the password should not
  # used in the options
  #
  # The parser must mask the password in the returned options ("*****") and
  # park the URL-decoded original in the password safe instead.
  test "encoded password" do
    url = "mongodb://myDBReader:D1fficultP%40ssw0rd@mongodb0.example.com:27017/admin"
    opts = UrlParser.parse_url(url: url)
    assert "*****" == Keyword.get(opts, :password)
    assert "D1fficultP@ssw0rd" == PasswordSafe.get_password(Keyword.get(opts, :pw_safe))
  end
end
28 |
--------------------------------------------------------------------------------
/test/mongo/read_preferences_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.ReadPreferencesTest do
  use CollectionCase, async: false

  # Each supported read-preference mode must still find the document; the
  # sleep gives the secondaries time to replicate the inserts.
  @tag :rs_required
  test "find_one, using read_preferences options", %{pid: top} do
    coll = unique_collection()

    Mongo.insert_one(top, coll, %{name: "Greta", age: 10})
    Mongo.insert_one(top, coll, %{name: "Tom", age: 13})
    Mongo.insert_one(top, coll, %{name: "Waldo", age: 5})
    Mongo.insert_one(top, coll, %{name: "Oskar", age: 3})

    assert {:ok, 4} == Mongo.count(top, coll, %{})

    Process.sleep(1000)

    prefs = %{
      mode: :secondary_preferred,
      max_staleness_ms: 120_000
    }

    assert %{"name" => "Oskar"} == Mongo.find_one(top, coll, %{name: "Oskar"}, read_preference: prefs) |> Map.take(["name"])

    prefs = %{
      mode: :secondary,
      max_staleness_ms: 120_000
    }

    assert %{"name" => "Oskar"} == Mongo.find_one(top, coll, %{name: "Oskar"}, read_preference: prefs) |> Map.take(["name"])

    prefs = %{
      mode: :primary
    }

    assert %{"name" => "Oskar"} == Mongo.find_one(top, coll, %{name: "Oskar"}, read_preference: prefs) |> Map.take(["name"])

    prefs = %{
      mode: :primary_preferred,
      max_staleness_ms: 120_000
    }

    assert %{"name" => "Oskar"} == Mongo.find_one(top, coll, %{name: "Oskar"}, read_preference: prefs) |> Map.take(["name"])
  end

  @doc """

  This test case needs a special deployment like this:

  conf = rs.conf();
  conf.members[0].tags = { "dc": "east", "usage": "production" };
  conf.members[1].tags = { "dc": "east", "usage": "reporting" };
  conf.members[2].tags = { "dc": "west", "usage": "production" };
  rs.reconfig(conf);

  """
  @tag :tag_set
  @tag :rs_required
  test "find_one, using read_preferences options, tag_set", %{pid: top, catcher: catcher} do
    coll = unique_collection()

    Mongo.insert_one(top, coll, %{name: "Greta", age: 10})
    Mongo.insert_one(top, coll, %{name: "Tom", age: 13})
    Mongo.insert_one(top, coll, %{name: "Waldo", age: 5})
    Mongo.insert_one(top, coll, %{name: "Oskar", age: 3})

    assert {:ok, 4} == Mongo.count(top, coll, %{})

    Process.sleep(1000)

    # Tag sets that match a member (see the deployment described above).
    prefs = %{
      mode: :secondary,
      max_staleness_ms: 120_000,
      tags: [dc: "west", usage: "production"]
    }

    assert %{"name" => "Oskar"} == Mongo.find_one(top, coll, %{name: "Oskar"}, read_preference: prefs) |> Map.take(["name"])

    prefs = %{
      mode: :nearest,
      max_staleness_ms: 120_000,
      tags: [dc: "east", usage: "production"]
    }

    assert %{"name" => "Oskar"} == Mongo.find_one(top, coll, %{name: "Oskar"}, read_preference: prefs) |> Map.take(["name"])
    ## this configuration results in an empty selection
    prefs = %{
      mode: :secondary,
      max_staleness_ms: 120_000,
      tags: [dc: "south", usage: "production"]
    }

    # No member matches dc: "south": checkout times out and the catcher must
    # have recorded empty-selection events.
    assert catch_exit(Mongo.find_one(top, coll, %{name: "Oskar"}, read_preference: prefs, checkout_timeout: 500))
    assert [:checkout_session | _xs] = EventCatcher.empty_selection_events(catcher) |> Enum.map(fn event -> event.action end)
  end

  # A topology started with a default read preference must also honour an
  # explicit read_preference option on administrative helpers.
  @tag :rs_required
  test "find_one, using primary_preferred options" do
    prefs = %{
      mode: :primary_preferred
    }

    assert {:ok, top} = Mongo.start_link(database: "mongodb_test", seeds: ["127.0.0.1:27017"], read_preference: prefs, show_sensitive_data_on_connection_error: true)
    # Make sure no fail point from a previous test is still active.
    Mongo.admin_command(top, configureFailPoint: "failCommand", mode: "off")

    Mongo.insert_one(top, "dogs", %{name: "Greta"})
    Mongo.insert_one(top, "dogs", %{name: "Tom"})
    Mongo.insert_one(top, "dogs", %{name: "Gustav"})

    assert :ok = Mongo.create_indexes(top, "dogs", [%{key: %{name: 1}, name: "name_index"}])
    assert :ok = Mongo.create_indexes(top, "dogs", [%{key: %{name: 1}, name: "name_index"}], read_preference: prefs)
  end
end
113 |
--------------------------------------------------------------------------------
/test/mongo/retryable_reads_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.RetryableReadsTest do
  @moduledoc false

  use CollectionCase

  alias Mongo.Error
  alias Mongo.Session

  # Fail-point command: make the server reject the next `command` exactly once
  # with error code 6 (a retryable error).
  defp fail_once(command) do
    [
      configureFailPoint: "failCommand",
      mode: [times: 1],
      data: [errorCode: 6, failCommands: [command]]
    ]
  end

  # Seed the collection with the four documents used by every test below.
  defp seed(top, coll) do
    Enum.each(
      [{"Greta", 10}, {"Tom", 13}, {"Waldo", 5}, {"Oska", 3}],
      fn {name, age} -> Mongo.insert_one(top, coll, %{name: name, age: age}) end
    )
  end

  @tag :rs_required
  test "find_one", %{pid: top, catcher: catcher} do
    coll = unique_collection()

    seed(top, coll)

    assert {:ok, 4} == Mongo.count(top, coll, %{})

    cmd = fail_once("find")

    # Without retryable reads, the induced error surfaces to the caller.
    Mongo.admin_command(top, cmd)
    {:error, %Error{code: 6, retryable_reads: true}} = Mongo.find_one(top, coll, %{"name" => "Waldo"})

    # With retryable reads enabled, the second attempt succeeds.
    Mongo.admin_command(top, cmd)
    assert %{"_id" => _id, "age" => 5, "name" => "Waldo"} = Mongo.find_one(top, coll, %{"name" => "Waldo"}, retryable_reads: true)

    assert [:find | _] = catcher |> EventCatcher.retryable_read_events() |> Enum.map(& &1.command_name)
  end

  @tag :rs_required
  test "find_one in transaction", %{pid: top, catcher: catcher} do
    coll = unique_collection()

    seed(top, coll)

    assert {:ok, 4} == Mongo.count(top, coll, %{})

    cmd = fail_once("find")

    {:ok, session} = Session.start_session(top, :read, [])

    # Inside a session the driver must not retry, even when asked to,
    # so the induced error is returned unchanged.
    Mongo.admin_command(top, cmd)
    {:error, %Error{code: 6, retryable_reads: true}} = Mongo.find_one(top, coll, %{"name" => "Waldo"}, retryable_reads: true, session: session)

    Session.end_session(top, session)

    # No retry events must have been emitted.
    assert [] = catcher |> EventCatcher.retryable_read_events() |> Enum.map(& &1.command_name)
  end

  @tag :rs_required
  test "count", %{pid: top, catcher: catcher} do
    coll = unique_collection()

    seed(top, coll)

    assert {:ok, 4} == Mongo.count(top, coll, %{})

    cmd = fail_once("count")

    # Without retryable reads, the induced error surfaces to the caller.
    Mongo.admin_command(top, cmd)
    {:error, %Error{code: 6, retryable_reads: true}} = Mongo.count(top, coll, %{})

    # With retryable reads enabled, the count succeeds on the retry.
    Mongo.admin_command(top, cmd)
    assert {:ok, 4} == Mongo.count(top, coll, %{}, retryable_reads: true)

    assert [:count | _] = catcher |> EventCatcher.retryable_read_events() |> Enum.map(& &1.command_name)
  end
end
84 |
--------------------------------------------------------------------------------
/test/mongo/retryable_writes_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.RetryableWritesTest do
  @moduledoc false

  use CollectionCase

  alias Mongo.Error

  # Fail-point command: make the server reject the next `command` exactly once
  # with error code 6 (a retryable error).
  defp fail_once(command) do
    [
      configureFailPoint: "failCommand",
      mode: [times: 1],
      data: [errorCode: 6, failCommands: [command]]
    ]
  end

  @tag :rs_required
  test "retryable writes: insert one", %{pid: top, catcher: catcher} do
    coll = unique_collection()
    cmd = fail_once("insert")

    # Retries disabled: the induced failure is surfaced to the caller.
    assert {:ok, _} = Mongo.admin_command(top, cmd)
    assert {:error, %Error{code: 6, retryable_writes: true}} = Mongo.insert_one(top, coll, %{"name" => "Waldo"}, retryable_writes: false)

    # Retries enabled (the default): the write succeeds on the second attempt.
    assert {:ok, _} = Mongo.admin_command(top, cmd)
    assert {:ok, _} = Mongo.insert_one(top, coll, %{"name" => "Waldo"})

    assert [:insert | _] = catcher |> EventCatcher.retry_write_events() |> Enum.map(& &1.command_name)
  end

  @tag :rs_required
  test "retryable writes: delete one", %{pid: top, catcher: catcher} do
    coll = unique_collection()

    Mongo.insert_one(top, coll, %{"name" => "Waldo"})

    cmd = fail_once("delete")

    # Retries disabled: the induced failure is surfaced to the caller.
    assert {:ok, _} = Mongo.admin_command(top, cmd)
    assert {:error, %Error{code: 6, retryable_writes: true}} = Mongo.delete_one(top, coll, %{"name" => "Waldo"}, retryable_writes: false)

    # Retries enabled (the default): the delete succeeds on the second attempt.
    assert {:ok, _} = Mongo.admin_command(top, cmd)
    assert {:ok, _} = Mongo.delete_one(top, coll, %{"name" => "Waldo"})

    assert [:delete | _] = catcher |> EventCatcher.retry_write_events() |> Enum.map(& &1.command_name)
  end
end
46 |
--------------------------------------------------------------------------------
/test/mongo/topology_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.TopologyTest do
  @moduledoc false

  # DO NOT MAKE ASYNCHRONOUS: all tests share one replica-set connection.
  use ExUnit.Case

  setup_all do
    assert {:ok, pid} = Mongo.TestConnection.connect()
    %{pid: pid}
  end

  @modes [:secondary, :secondary_preferred, :primary, :primary_preferred]

  @tag :rs_required
  test "replica set selection", %{pid: mongo_pid} do
    Enum.each(@modes, fn mode ->
      # Write with w: 3 so every replica-set member holds the document before reading.
      assert {:ok, %Mongo.InsertOneResult{inserted_id: new_id}} = Mongo.insert_one(mongo_pid, "test", %{topology_test: 1}, w: 3)

      read_pref = Mongo.ReadPreference.merge_defaults(%{mode: mode})
      slave_ok? = mode in [:secondary, :secondary_preferred]

      # The document must be found regardless of the selected read mode.
      assert [%{"_id" => ^new_id, "topology_test" => 1}] =
               mongo_pid
               |> Mongo.find("test", %{_id: new_id}, read_preference: read_pref, slave_ok: slave_ok?)
               |> Enum.to_list()

      assert {:ok, %Mongo.DeleteResult{deleted_count: 1}} = Mongo.delete_one(mongo_pid, "test", %{_id: new_id})
    end)
  end
end
28 |
--------------------------------------------------------------------------------
/test/mongo/update_hint_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.UpdateHintTest do
  @moduledoc false

  use CollectionCase

  test "update_one, using :hint options", %{pid: top} do
    coll = unique_collection()

    members = [
      %{_id: 1, member: "abc123", status: "P", points: 0, misc1: nil, misc2: nil},
      %{_id: 2, member: "xyz123", status: "A", points: 60, misc1: "reminder: ping me at 100pts", misc2: "Some random comment"},
      %{_id: 3, member: "lmn123", status: "P", points: 0, misc1: nil, misc2: nil},
      %{_id: 4, member: "pqr123", status: "D", points: 20, misc1: "Deactivated", misc2: nil},
      %{_id: 5, member: "ijk123", status: "P", points: 0, misc1: nil, misc2: nil},
      %{_id: 6, member: "cde123", status: "A", points: 86, misc1: "reminder: ping me at 100pts", misc2: "Some random comment"}
    ]

    Enum.each(members, fn member -> Mongo.insert_one(top, coll, member) end)

    # Two secondary indexes that can be referenced by the :hint option.
    assert :ok = Mongo.create_indexes(top, coll, [%{key: %{status: 1}, name: "status_index"}, %{key: %{points: 1}, name: "points_index"}])

    filter = %{points: %{"$lte": 20}, status: "P"}
    update = %{"$set": %{misc1: "Need to activate"}}

    # A hint naming an existing index is accepted; three members match the filter.
    assert {:ok,
            %Mongo.UpdateResult{
              acknowledged: true,
              matched_count: 3,
              modified_count: 3,
              upserted_ids: []
            }} = Mongo.update_many(top, coll, filter, update, hint: %{status: 1})

    # A hint naming a non-existing index is rejected by the server with a write error.
    assert {:error, %{write_errors: [%{"code" => 2, "index" => 0}]}} = Mongo.update_many(top, coll, filter, update, hint: %{email: 1})
  end
end
27 |
--------------------------------------------------------------------------------
/test/specification_tests/crud_test.exs:
--------------------------------------------------------------------------------
defmodule Mongo.SpecificationTests.CRUDTest do
  use Mongo.SpecificationCase
  import Mongo.Specification.CRUD.Helpers
  require Mongo.Specification.CRUD

  # The generated spec tests consult min_server_version?/1 before running;
  # this suite accepts every version, so the check always passes.
  def min_server_version?(_all), do: true

  setup_all do
    {:ok, pid} = Mongo.start_link(database: "mongodb_test")

    %{mongo: pid}
  end

  # At compile time: for every CRUD specification JSON file (collected into
  # @crud_tests by Mongo.SpecificationCase) generate one `describe` block
  # whose tests are produced by the Mongo.Specification.CRUD.create_tests/1
  # macro.
  Enum.map(@crud_tests, fn file ->
    json = file |> File.read!() |> Jason.decode!()

    # Use the file's base name (without directory and ".json" suffix) as the
    # collection name, so every spec file operates on its own collection.
    [file_no_suffix, _suffix] =
      file
      |> String.split("/")
      |> List.last()
      |> String.split(".")

    describe file do
      setup %{mongo: mongo} do
        # Empty the collection before each generated test.
        collection = unquote(Macro.escape(file_no_suffix))
        Mongo.delete_many!(mongo, collection, %{})
        %{collection: collection}
      end

      Mongo.Specification.CRUD.create_tests(json)
    end
  end)
end
34 |
--------------------------------------------------------------------------------
/test/support/collection_case.ex:
--------------------------------------------------------------------------------
defmodule CollectionCase do
  @moduledoc false

  use ExUnit.CaseTemplate

  # Seed list for the local test deployment.
  @seeds ["127.0.0.1:27017"]

  # One shared topology per test module. Any fail point left over from a
  # previous run is switched off and the test database is dropped, so every
  # module starts from a clean state.
  setup_all do
    assert {:ok, pid} = Mongo.start_link(database: "mongodb_test", seeds: @seeds, show_sensitive_data_on_connection_error: true)
    Mongo.admin_command(pid, configureFailPoint: "failCommand", mode: "off")
    Mongo.drop_database(pid, nil, w: 3)
    {:ok, [pid: pid]}
  end

  # Each test gets its own EventCatcher so event assertions cannot leak
  # between tests; the catcher is stopped when the test exits.
  setup do
    {:ok, catcher} = EventCatcher.start_link()
    on_exit(fn -> EventCatcher.stop(catcher) end)
    [catcher: catcher]
  end

  using do
    quote do
      import CollectionCase
    end
  end

  # Expands at compile time to a collection name derived from the calling
  # module and test function (e.g. "elixir_mongo_sometest_test_find_one").
  # Spaces, dots, and colons are replaced with underscores so the result is
  # a safe MongoDB collection name even when the test name contains ":".
  defmacro unique_collection do
    {function, _arity} = __CALLER__.function

    "#{__CALLER__.module}.#{function}"
    |> String.replace(" ", "_")
    |> String.replace(".", "_")
    |> String.replace(":", "_")
    |> String.downcase()
  end
end
37 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/aggregate-collation.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": "ping"
6 | }
7 | ],
8 | "minServerVersion": "3.4",
9 | "tests": [
10 | {
11 | "description": "Aggregate with collation",
12 | "operation": {
13 | "name": "aggregate",
14 | "arguments": {
15 | "pipeline": [
16 | {
17 | "$match": {
18 | "x": "PING"
19 | }
20 | }
21 | ],
22 | "collation": {
23 | "locale": "en_US",
24 | "strength": 2
25 | }
26 | }
27 | },
28 | "outcome": {
29 | "result": [
30 | {
31 | "_id": 1,
32 | "x": "ping"
33 | }
34 | ]
35 | }
36 | }
37 | ]
38 | }
39 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/aggregate-out.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": 11
6 | },
7 | {
8 | "_id": 2,
9 | "x": 22
10 | },
11 | {
12 | "_id": 3,
13 | "x": 33
14 | }
15 | ],
16 | "minServerVersion": "2.6",
17 | "tests": [
18 | {
19 | "description": "Aggregate with $out",
20 | "operation": {
21 | "name": "aggregate",
22 | "arguments": {
23 | "pipeline": [
24 | {
25 | "$sort": {
26 | "x": 1
27 | }
28 | },
29 | {
30 | "$match": {
31 | "_id": {
32 | "$gt": 1
33 | }
34 | }
35 | },
36 | {
37 | "$out": "other_test_collection"
38 | }
39 | ],
40 | "batchSize": 2
41 | }
42 | },
43 | "outcome": {
44 | "result": [
45 | {
46 | "_id": 2,
47 | "x": 22
48 | },
49 | {
50 | "_id": 3,
51 | "x": 33
52 | }
53 | ],
54 | "collection": {
55 | "name": "other_test_collection",
56 | "data": [
57 | {
58 | "_id": 2,
59 | "x": 22
60 | },
61 | {
62 | "_id": 3,
63 | "x": 33
64 | }
65 | ]
66 | }
67 | }
68 | },
69 | {
70 | "description": "Aggregate with $out and batch size of 0",
71 | "operation": {
72 | "name": "aggregate",
73 | "arguments": {
74 | "pipeline": [
75 | {
76 | "$sort": {
77 | "x": 1
78 | }
79 | },
80 | {
81 | "$match": {
82 | "_id": {
83 | "$gt": 1
84 | }
85 | }
86 | },
87 | {
88 | "$out": "other_test_collection"
89 | }
90 | ],
91 | "batchSize": 0
92 | }
93 | },
94 | "outcome": {
95 | "result": [
96 | {
97 | "_id": 2,
98 | "x": 22
99 | },
100 | {
101 | "_id": 3,
102 | "x": 33
103 | }
104 | ],
105 | "collection": {
106 | "name": "other_test_collection",
107 | "data": [
108 | {
109 | "_id": 2,
110 | "x": 22
111 | },
112 | {
113 | "_id": 3,
114 | "x": 33
115 | }
116 | ]
117 | }
118 | }
119 | }
120 | ]
121 | }
122 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/aggregate.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": 11
6 | },
7 | {
8 | "_id": 2,
9 | "x": 22
10 | },
11 | {
12 | "_id": 3,
13 | "x": 33
14 | }
15 | ],
16 | "tests": [
17 | {
18 | "description": "Aggregate with multiple stages",
19 | "operation": {
20 | "name": "aggregate",
21 | "arguments": {
22 | "pipeline": [
23 | {
24 | "$sort": {
25 | "x": 1
26 | }
27 | },
28 | {
29 | "$match": {
30 | "_id": {
31 | "$gt": 1
32 | }
33 | }
34 | }
35 | ],
36 | "batchSize": 2
37 | }
38 | },
39 | "outcome": {
40 | "result": [
41 | {
42 | "_id": 2,
43 | "x": 22
44 | },
45 | {
46 | "_id": 3,
47 | "x": 33
48 | }
49 | ]
50 | }
51 | }
52 | ]
53 | }
54 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/count-collation.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": "PING"
6 | }
7 | ],
8 | "minServerVersion": "3.4",
9 | "tests": [
10 | {
11 | "description": "Count documents with collation",
12 | "operation": {
13 | "name": "countDocuments",
14 | "arguments": {
15 | "filter": {
16 | "x": "ping"
17 | },
18 | "collation": {
19 | "locale": "en_US",
20 | "strength": 2
21 | }
22 | }
23 | },
24 | "outcome": {
25 | "result": 1
26 | }
27 | },
28 | {
29 | "description": "Deprecated count with collation",
30 | "operation": {
31 | "name": "count",
32 | "arguments": {
33 | "filter": {
34 | "x": "ping"
35 | },
36 | "collation": {
37 | "locale": "en_US",
38 | "strength": 2
39 | }
40 | }
41 | },
42 | "outcome": {
43 | "result": 1
44 | }
45 | }
46 | ]
47 | }
48 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/count.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": 11
6 | },
7 | {
8 | "_id": 2,
9 | "x": 22
10 | },
11 | {
12 | "_id": 3,
13 | "x": 33
14 | }
15 | ],
16 | "tests": [
17 | {
18 | "description": "Estimated document count",
19 | "operation": {
20 | "name": "estimatedDocumentCount",
21 | "arguments": {}
22 | },
23 | "outcome": {
24 | "result": 3
25 | }
26 | },
27 | {
28 | "description": "Count documents without a filter",
29 | "operation": {
30 | "name": "countDocuments",
31 | "arguments": {
32 | "filter": {}
33 | }
34 | },
35 | "outcome": {
36 | "result": 3
37 | }
38 | },
39 | {
40 | "description": "Count documents with a filter",
41 | "operation": {
42 | "name": "countDocuments",
43 | "arguments": {
44 | "filter": {
45 | "_id": {
46 | "$gt": 1
47 | }
48 | }
49 | }
50 | },
51 | "outcome": {
52 | "result": 2
53 | }
54 | },
55 | {
56 | "description": "Count documents with skip and limit",
57 | "operation": {
58 | "name": "countDocuments",
59 | "arguments": {
60 | "filter": {},
61 | "skip": 1,
62 | "limit": 3
63 | }
64 | },
65 | "outcome": {
66 | "result": 2
67 | }
68 | },
69 | {
70 | "description": "Deprecated count without a filter",
71 | "operation": {
72 | "name": "count",
73 | "arguments": {
74 | "filter": {}
75 | }
76 | },
77 | "outcome": {
78 | "result": 3
79 | }
80 | },
81 | {
82 | "description": "Deprecated count with a filter",
83 | "operation": {
84 | "name": "count",
85 | "arguments": {
86 | "filter": {
87 | "_id": {
88 | "$gt": 1
89 | }
90 | }
91 | }
92 | },
93 | "outcome": {
94 | "result": 2
95 | }
96 | },
97 | {
98 | "description": "Deprecated count with skip and limit",
99 | "operation": {
100 | "name": "count",
101 | "arguments": {
102 | "filter": {},
103 | "skip": 1,
104 | "limit": 3
105 | }
106 | },
107 | "outcome": {
108 | "result": 2
109 | }
110 | }
111 | ]
112 | }
113 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/distinct-collation.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "string": "PING"
6 | },
7 | {
8 | "_id": 2,
9 | "string": "ping"
10 | }
11 | ],
12 | "minServerVersion": "3.4",
13 | "tests": [
14 | {
15 | "description": "Distinct with a collation",
16 | "operation": {
17 | "name": "distinct",
18 | "arguments": {
19 | "fieldName": "string",
20 | "collation": {
21 | "locale": "en_US",
22 | "strength": 2
23 | }
24 | }
25 | },
26 | "outcome": {
27 | "result": [
28 | "PING"
29 | ]
30 | }
31 | }
32 | ]
33 | }
34 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/distinct.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": 11
6 | },
7 | {
8 | "_id": 2,
9 | "x": 22
10 | },
11 | {
12 | "_id": 3,
13 | "x": 33
14 | }
15 | ],
16 | "tests": [
17 | {
18 | "description": "Distinct without a filter",
19 | "operation": {
20 | "name": "distinct",
21 | "arguments": {
22 | "fieldName": "x",
23 | "filter": {}
24 | }
25 | },
26 | "outcome": {
27 | "result": [
28 | 11,
29 | 22,
30 | 33
31 | ]
32 | }
33 | },
34 | {
35 | "description": "Distinct with a filter",
36 | "operation": {
37 | "name": "distinct",
38 | "arguments": {
39 | "fieldName": "x",
40 | "filter": {
41 | "_id": {
42 | "$gt": 1
43 | }
44 | }
45 | }
46 | },
47 | "outcome": {
48 | "result": [
49 | 22,
50 | 33
51 | ]
52 | }
53 | }
54 | ]
55 | }
56 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/find-collation.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": "ping"
6 | }
7 | ],
8 | "minServerVersion": "3.4",
9 | "tests": [
10 | {
11 | "description": "Find with a collation",
12 | "operation": {
13 | "name": "find",
14 | "arguments": {
15 | "filter": {
16 | "x": "PING"
17 | },
18 | "collation": {
19 | "locale": "en_US",
20 | "strength": 2
21 | }
22 | }
23 | },
24 | "outcome": {
25 | "result": [
26 | {
27 | "_id": 1,
28 | "x": "ping"
29 | }
30 | ]
31 | }
32 | }
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/test/support/crud_tests/read/find.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "_id": 1,
5 | "x": 11
6 | },
7 | {
8 | "_id": 2,
9 | "x": 22
10 | },
11 | {
12 | "_id": 3,
13 | "x": 33
14 | },
15 | {
16 | "_id": 4,
17 | "x": 44
18 | },
19 | {
20 | "_id": 5,
21 | "x": 55
22 | }
23 | ],
24 | "tests": [
25 | {
26 | "description": "Find with filter",
27 | "operation": {
28 | "name": "find",
29 | "arguments": {
30 | "filter": {
31 | "_id": 1
32 | }
33 | }
34 | },
35 | "outcome": {
36 | "result": [
37 | {
38 | "_id": 1,
39 | "x": 11
40 | }
41 | ]
42 | }
43 | },
44 | {
45 | "description": "Find with filter, sort, skip, and limit",
46 | "operation": {
47 | "name": "find",
48 | "arguments": {
49 | "filter": {
50 | "_id": {
51 | "$gt": 2
52 | }
53 | },
54 | "sort": {
55 | "_id": 1
56 | },
57 | "skip": 2,
58 | "limit": 2
59 | }
60 | },
61 | "outcome": {
62 | "result": [
63 | {
64 | "_id": 5,
65 | "x": 55
66 | }
67 | ]
68 | }
69 | },
70 | {
71 | "description": "Find with limit, sort, and batchsize",
72 | "operation": {
73 | "name": "find",
74 | "arguments": {
75 | "filter": {},
76 | "sort": {
77 | "_id": 1
78 | },
79 | "limit": 4,
80 | "batch_size": 2
81 | }
82 | },
83 | "outcome": {
84 | "result": [
85 | {
86 | "_id": 1,
87 | "x": 11
88 | },
89 | {
90 | "_id": 2,
91 | "x": 22
92 | },
93 | {
94 | "_id": 3,
95 | "x": 33
96 | },
97 | {
98 | "_id": 4,
99 | "x": 44
100 | }
101 | ]
102 | }
103 | }
104 | ]
105 | }
106 |
--------------------------------------------------------------------------------
/test/support/event_catcher.ex:
--------------------------------------------------------------------------------
defmodule EventCatcher do
  @moduledoc false

  use GenServer

  require Logger

  alias Mongo.Events.CommandFailedEvent
  alias Mongo.Events.CommandSucceededEvent
  alias Mongo.Events.RetryReadEvent
  alias Mongo.Events.RetryWriteEvent
  alias Mongo.Events.ServerSelectionEmptyEvent

  @all [:commands, :topology]

  ## Client API

  def start_link(topics \\ @all) do
    GenServer.start_link(__MODULE__, topics)
  end

  def stop(pid), do: GenServer.cast(pid, :stop)

  # All recorded events, newest first.
  def events(pid), do: GenServer.call(pid, :events)

  def succeeded_events(pid), do: GenServer.call(pid, :succeeded_events)

  def failed_events(pid), do: GenServer.call(pid, :failed_events)

  def retryable_read_events(pid), do: GenServer.call(pid, :retryable_read_events)

  def retry_write_events(pid), do: GenServer.call(pid, :retry_write_events)

  def empty_selection_events(pid), do: GenServer.call(pid, :empty_selection_events)

  ## Server callbacks

  def init(topics) do
    # Subscribe to the requested topics of the driver's event registry.
    Enum.each(topics, fn topic -> Registry.register(:events_registry, topic, []) end)
    {:ok, []}
  end

  def terminate(_reason, _state) do
    :ok
  end

  def handle_cast(:stop, state) do
    {:stop, :normal, state}
  end

  def handle_call(:events, _from, state) do
    {:reply, state, state}
  end

  def handle_call(:succeeded_events, _from, state) do
    {:reply, of_type(state, CommandSucceededEvent), state}
  end

  def handle_call(:failed_events, _from, state) do
    {:reply, of_type(state, CommandFailedEvent), state}
  end

  def handle_call(:retryable_read_events, _from, state) do
    {:reply, of_type(state, RetryReadEvent), state}
  end

  def handle_call(:retry_write_events, _from, state) do
    {:reply, of_type(state, RetryWriteEvent), state}
  end

  def handle_call(:empty_selection_events, _from, state) do
    {:reply, of_type(state, ServerSelectionEmptyEvent), state}
  end

  # Every event published on the :commands topic is recorded ...
  def handle_info({:broadcast, :commands, msg}, state) do
    {:noreply, [msg | state]}
  end

  # ... while from the :topology topic only empty-selection events are kept.
  def handle_info({:broadcast, :topology, %ServerSelectionEmptyEvent{} = msg}, state) do
    {:noreply, [msg | state]}
  end

  def handle_info(_ignored, state) do
    {:noreply, state}
  end

  # Keeps only the events that are structs of the given module
  # (order is preserved: newest first, as stored).
  defp of_type(events, module) do
    Enum.filter(events, fn
      %{__struct__: ^module} -> true
      _other -> false
    end)
  end
end
122 |
--------------------------------------------------------------------------------
/test/support/specification_case.ex:
--------------------------------------------------------------------------------
defmodule Mongo.SpecificationCase do
  @moduledoc false

  use ExUnit.CaseTemplate

  # Every module that `use`s this case gets @crud_tests (the list of CRUD
  # specification JSON files, resolved at the using module's compile time)
  # plus the shared test helper imports.
  using do
    quote do
      @crud_tests Path.wildcard("test/support/crud_tests/**/*.json")

      import MongoTest.Case
      import Mongo.SpecificationCase
    end
  end
end
15 |
--------------------------------------------------------------------------------
/test/support/specifications/crud.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Specification.CRUD do
  # credo:disable-for-this-file
  @moduledoc false

  # Generates one ExUnit test per entry in the specification's "tests" list.
  # `json` is a decoded specification file; each generated test
  #   * seeds the collection with the file's "data" documents,
  #   * runs the described operation via Mongo.Specification.CRUD.Helpers,
  #   * compares the result (and, when given, a target collection's contents)
  #     against the expected "outcome".
  # The caller's module must define min_server_version?/1 and import
  # Mongo.Specification.CRUD.Helpers.
  defmacro create_tests(json) do
    quote bind_quoted: [json: json] do
      Enum.map(json["tests"], fn t ->
        @tag :specification
        test t["description"], %{mongo: mongo, collection: collection} do
          test_json = unquote(Macro.escape(t))
          json = unquote(Macro.escape(json))

          # Skip silently when the server is older than "minServerVersion".
          if min_server_version?(json["minServerVersion"]) do
            data = json["data"]
            operation = test_json["operation"]
            outcome = test_json["outcome"]

            Mongo.insert_many!(mongo, collection, data)

            name = operation_name(operation["name"])
            arguments = operation["arguments"]

            expected = outcome["result"]
            actual = apply(Mongo.Specification.CRUD.Helpers, name, [mongo, collection, arguments])

            assert match_operation_result?(expected, actual)

            # Some specs (e.g. aggregate with $out) also state the expected
            # contents of a target collection.
            if outcome["collection"] do
              data =
                mongo
                |> Mongo.find(outcome["collection"]["name"], %{})
                |> Enum.to_list()

              assert ^data = outcome["collection"]["data"]
            end
          end
        end
      end)
    end
  end
end
42 |
--------------------------------------------------------------------------------
/test/support/specifications/crud/helpers.ex:
--------------------------------------------------------------------------------
defmodule Mongo.Specification.CRUD.Helpers do
  @moduledoc false

  # Turns the string-keyed argument map into a keyword list of driver options.
  # String.to_existing_atom/1 ensures specification files cannot create atoms.
  defp atomize_keys(map) do
    Enum.map(map, fn {key, value} -> {String.to_existing_atom(key), value} end)
  end

  # Drops the positional argument keys and converts the rest to options.
  defp opts_without(arguments, keys) do
    arguments
    |> Map.drop(keys)
    |> atomize_keys()
  end

  def find(pid, collection, arguments) do
    opts = opts_without(arguments, ["filter"])

    pid
    |> Mongo.find(collection, arguments["filter"], opts)
    |> Enum.to_list()
  end

  def distinct(pid, collection, arguments) do
    # The filter is optional in distinct specifications.
    filter = arguments["filter"] || %{}
    opts = opts_without(arguments, ["fieldName", "filter"])

    {:ok, values} = Mongo.distinct(pid, collection, arguments["fieldName"], filter, opts)
    values
  end

  def estimated_document_count(pid, collection, arguments) do
    {:ok, result} = Mongo.estimated_document_count(pid, collection, atomize_keys(arguments))
    result
  end

  def count_documents(pid, collection, arguments) do
    opts = opts_without(arguments, ["filter"])

    {:ok, result} = Mongo.count_documents(pid, collection, arguments["filter"], opts)
    result
  end

  def count(pid, collection, arguments) do
    opts = opts_without(arguments, ["filter"])

    {:ok, result} = Mongo.count(pid, collection, arguments["filter"], opts)
    result
  end

  def aggregate(pid, collection, arguments) do
    opts = opts_without(arguments, ["pipeline"])

    pid
    |> Mongo.aggregate(collection, arguments["pipeline"], opts)
    |> Enum.to_list()
  end

  # An empty actual result is always accepted as a match (some operations
  # return nothing comparable); otherwise expected and actual must be equal.
  def match_operation_result?(expected, actual) do
    actual == [] or expected == actual
  end

  # Maps a specification operation name to the helper function implementing it.
  def operation_name("estimatedDocumentCount"), do: :estimated_document_count
  def operation_name("countDocuments"), do: :count_documents
  def operation_name(name), do: String.to_existing_atom(name)
end
84 |
--------------------------------------------------------------------------------
/test/support/test_connection.ex:
--------------------------------------------------------------------------------
defmodule Mongo.TestConnection do
  @moduledoc false

  # Seed list for the local test deployment.
  @seeds ["127.0.0.1:27017"]

  # Opens a topology against the local "mongodb_test" database; returns the
  # result of Mongo.start_link/1.
  def connect do
    opts = [
      database: "mongodb_test",
      seeds: @seeds,
      show_sensitive_data_on_connection_error: true
    ]

    Mongo.start_link(opts)
  end
end
10 |
--------------------------------------------------------------------------------
/test/test_helper.exs:
--------------------------------------------------------------------------------
# Exclude tests that need an SSL-enabled or socket-reachable server; those
# prerequisites are not available in CI.
ExUnit.configure(exclude: [ssl: true, socket: true])
ExUnit.start()
4 |
defmodule MongoTest.Case do
  use ExUnit.CaseTemplate

  using do
    quote do
      import MongoTest.Case
    end
  end

  # Expands at compile time to a collection name derived from the calling
  # module and test function, e.g. "elixir_mongotest_test_find".
  # Spaces, dots, AND colons are replaced with underscores — colons occur in
  # test names such as "retryable writes: insert one" and are not safe in
  # MongoDB namespaces. This matches CollectionCase.unique_collection/0,
  # which already performed the colon replacement.
  defmacro unique_collection do
    {function, _arity} = __CALLER__.function

    "#{__CALLER__.module}.#{function}"
    |> String.replace(" ", "_")
    |> String.replace(".", "_")
    |> String.replace(":", "_")
    |> String.downcase()
  end
end
23 |
--------------------------------------------------------------------------------