├── .env.example
├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .rspec
├── CHANGELOG.md
├── Gemfile
├── Gemfile.lock
├── LICENSE
├── README.md
├── Rakefile
├── bin
│   ├── console
│   └── setup
├── lib
│   ├── qdrant.rb
│   └── qdrant
│       ├── aliases.rb
│       ├── base.rb
│       ├── client.rb
│       ├── clusters.rb
│       ├── collections.rb
│       ├── error.rb
│       ├── points.rb
│       ├── service.rb
│       ├── snapshots.rb
│       └── version.rb
├── qdrant.gemspec
├── sig
│   └── qdrant.rbs
└── spec
    ├── fixtures
    │   ├── aliases.json
    │   ├── batch_delete_object.json
    │   ├── cluster.json
    │   ├── collection.json
    │   ├── collection_cluster.json
    │   ├── collections.json
    │   ├── count_points.json
    │   ├── locks.json
    │   ├── point.json
    │   ├── points.json
    │   ├── snapshot.json
    │   ├── snapshots.json
    │   └── status_response.json
    ├── qdrant
    │   ├── aliases_spec.rb
    │   ├── client_spec.rb
    │   ├── clusters_spec.rb
    │   ├── collections_spec.rb
    │   ├── points_spec.rb
    │   ├── service_spec.rb
    │   └── snapshots_spec.rb
    ├── qdrant_spec.rb
    └── spec_helper.rb
/.env.example:
--------------------------------------------------------------------------------
1 | QDRANT_URL=
2 | QDRANT_API_KEY=
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "bundler" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - "*"
7 | push:
8 | branches:
9 | - master
10 | jobs:
11 | tests:
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | ruby: ["2.7", "3.0", "3.1", "3.2"]
16 |
17 | steps:
18 | - uses: actions/checkout@master
19 |
20 | - name: Set up Ruby
21 | uses: ruby/setup-ruby@v1
22 | with:
23 | ruby-version: ${{ matrix.ruby }}
24 | bundler: default
25 | bundler-cache: true
26 |
27 | - name: StandardRb check
28 | run: bundle exec standardrb
29 |
30 | - name: Run tests
31 | run: |
32 | bundle exec rspec
33 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.bundle/
2 | /.yardoc
3 | /_yardoc/
4 | /coverage/
5 | /doc/
6 | /pkg/
7 | /spec/reports/
8 | /tmp/
9 |
10 | # rspec failure tracking
11 | .rspec_status
12 |
--------------------------------------------------------------------------------
/.rspec:
--------------------------------------------------------------------------------
1 | --format documentation
2 | --color
3 | --require spec_helper
4 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## [Unreleased]
2 |
3 | ## [0.9.9] - 2024-04-11
4 | - Allow sparse_vectors to be passed to collections.create
5 | - Support query endpoint
6 |
7 | ## [0.9.8] - 2024-10-01
8 | - Qdrant::Client constructor accepts a custom logger: to be passed in
9 |
10 | ## [0.9.7] - 2024-01-19
11 | - fix: points/delete api
12 |
13 | ## [0.9.6] - 2024-01-13
14 | - Updated Points#delete(): `points:` is no longer required; an error is raised if neither `points:` nor `filter:` is provided, in line with the delete_points API documentation
15 |
16 | ## [0.9.5] - 2024-01-12
17 | - Bugfix: ArgumentError for filter in points delete
18 | ## [0.9.4] - 2023-08-31
19 | - Introduce `Points#get_all()` method
20 |
21 | ## [0.9.0] - 2023-04-08
22 | - Initial release
23 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | source "https://rubygems.org"
4 |
5 | # Specify your gem's dependencies in qdrant.gemspec
6 | gemspec
7 |
8 | gem "rake", "~> 13.2"
9 |
10 | gem "rspec", "~> 3.13"
11 | gem "standard", "~> 1.28.5"
12 |
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | PATH
2 | remote: .
3 | specs:
4 | qdrant-ruby (0.9.9)
5 | faraday (>= 2.0.1, < 3)
6 |
7 | GEM
8 | remote: https://rubygems.org/
9 | specs:
10 | ast (2.4.2)
11 | base64 (0.2.0)
12 | byebug (11.1.3)
13 | coderay (1.1.3)
14 | diff-lcs (1.5.1)
15 | faraday (2.8.1)
16 | base64
17 | faraday-net_http (>= 2.0, < 3.1)
18 | ruby2_keywords (>= 0.0.4)
19 | faraday-net_http (3.0.2)
20 | json (2.7.4)
21 | language_server-protocol (3.17.0.3)
22 | lint_roller (1.1.0)
23 | method_source (1.0.0)
24 | parallel (1.24.0)
25 | parser (3.3.5.0)
26 | ast (~> 2.4.1)
27 | racc
28 | pry (0.14.2)
29 | coderay (~> 1.1)
30 | method_source (~> 1.0)
31 | pry-byebug (3.10.1)
32 | byebug (~> 11.0)
33 | pry (>= 0.13, < 0.15)
34 | racc (1.8.1)
35 | rainbow (3.1.1)
36 | rake (13.2.1)
37 | regexp_parser (2.9.2)
38 | rexml (3.3.9)
39 | rspec (3.13.0)
40 | rspec-core (~> 3.13.0)
41 | rspec-expectations (~> 3.13.0)
42 | rspec-mocks (~> 3.13.0)
43 | rspec-core (3.13.2)
44 | rspec-support (~> 3.13.0)
45 | rspec-expectations (3.13.3)
46 | diff-lcs (>= 1.2.0, < 2.0)
47 | rspec-support (~> 3.13.0)
48 | rspec-mocks (3.13.2)
49 | diff-lcs (>= 1.2.0, < 2.0)
50 | rspec-support (~> 3.13.0)
51 | rspec-support (3.13.1)
52 | rubocop (1.50.2)
53 | json (~> 2.3)
54 | parallel (~> 1.10)
55 | parser (>= 3.2.0.0)
56 | rainbow (>= 2.2.2, < 4.0)
57 | regexp_parser (>= 1.8, < 3.0)
58 | rexml (>= 3.2.5, < 4.0)
59 | rubocop-ast (>= 1.28.0, < 2.0)
60 | ruby-progressbar (~> 1.7)
61 | unicode-display_width (>= 2.4.0, < 3.0)
62 | rubocop-ast (1.30.0)
63 | parser (>= 3.2.1.0)
64 | rubocop-performance (1.16.0)
65 | rubocop (>= 1.7.0, < 2.0)
66 | rubocop-ast (>= 0.4.0)
67 | ruby-progressbar (1.13.0)
68 | ruby2_keywords (0.0.5)
69 | standard (1.28.5)
70 | language_server-protocol (~> 3.17.0.2)
71 | lint_roller (~> 1.0)
72 | rubocop (~> 1.50.2)
73 | standard-custom (~> 1.0.0)
74 | standard-performance (~> 1.0.1)
75 | standard-custom (1.0.2)
76 | lint_roller (~> 1.0)
77 | rubocop (~> 1.50)
78 | standard-performance (1.0.1)
79 | lint_roller (~> 1.0)
80 | rubocop-performance (~> 1.16.0)
81 | unicode-display_width (2.6.0)
82 |
83 | PLATFORMS
84 | arm64-darwin-23
85 | arm64-darwin-24
86 | x86_64-darwin-19
87 | x86_64-darwin-21
88 | x86_64-linux
89 |
90 | DEPENDENCIES
91 | pry-byebug (~> 3.9)
92 | qdrant-ruby!
93 | rake (~> 13.2)
94 | rspec (~> 3.13)
95 | standard (~> 1.28.5)
96 |
97 | BUNDLED WITH
98 | 2.4.0
99 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Qdrant
2 |
9 | Ruby wrapper for the Qdrant vector search database API.
10 |
11 | Part of the [Langchain.rb](https://github.com/andreibondarev/langchainrb) stack.
12 |
13 | 
14 | [](https://badge.fury.io/rb/qdrant-ruby)
15 | [](http://rubydoc.info/gems/qdrant-ruby)
16 | [](https://github.com/andreibondarev/qdrant-ruby/blob/main/LICENSE.txt)
17 | [](https://discord.gg/WDARp7J2n8)
18 |
19 | ## Installation
20 |
21 | Install the gem and add to the application's Gemfile by executing:
22 |
23 | $ bundle add qdrant-ruby
24 |
25 | If bundler is not being used to manage dependencies, install the gem by executing:
26 |
27 | $ gem install qdrant-ruby
28 |
29 | ## Usage
30 |
31 | ### Instantiating API client
32 |
33 | ```ruby
34 | require 'qdrant'
35 |
36 | client = Qdrant::Client.new(
37 | url: ENV["QDRANT_URL"],
38 | api_key: ENV["QDRANT_API_KEY"]
39 | )
40 | ```
41 |
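Only `url:` is required. The other constructor options can be used, for example, to raise exceptions on 4xx/5xx responses and to log requests somewhere other than `$stdout` (the log file name below is just illustrative):

```ruby
require "logger"

client = Qdrant::Client.new(
  url: ENV["QDRANT_URL"],
  api_key: ENV["QDRANT_API_KEY"],
  raise_error: true,                 # raise Faraday errors for 4xx/5xx responses
  logger: Logger.new("qdrant.log")   # defaults to Logger.new($stdout)
)
```
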
42 | ### Collections
43 |
44 | ```ruby
45 | # Get the names of all existing collections
46 | client.collections.list
47 |
48 | # Get detailed information about specified existing collection
49 | client.collections.get(collection_name: "string")
50 |
51 | # Create new collection with given parameters
52 | client.collections.create(
53 | collection_name: "string", # required
54 | vectors: {}, # required
55 | shard_number: nil,
56 | replication_factor: nil,
57 | write_consistency_factor: nil,
58 | on_disk_payload: nil,
59 | hnsw_config: nil,
60 | wal_config: nil,
61 | optimizers_config: nil,
62 | init_from: nil,
63 | quantization_config: nil
64 | )
65 |
66 | # Update parameters of the existing collection
67 | client.collections.update(
68 | collection_name: "string", # required
69 | optimizers_config: nil,
70 | params: nil
71 | )
72 |
73 | # Drop collection and all associated data
74 | client.collections.delete(collection_name: "string")
75 |
76 | # Get list of all aliases (for a collection)
77 | client.collections.aliases(
78 | collection_name: "string" # required
79 | )
80 |
81 | # Update aliases of the collections
82 | client.collections.update_aliases(
83 | actions: [{
84 | # one of `create_alias`, `delete_alias` or `rename_alias` is required
85 | create_alias: {
86 | collection_name: "string", # required
87 | alias_name: "string" # required
88 | }
89 | }]
90 | )
91 |
92 | # Create index for field in collection
93 | client.collections.create_index(
94 | collection_name: "string", # required
95 | field_name: "string", # required
96 | field_schema: "string"
97 | )
100 |
101 | # Delete field index for collection
102 | client.collections.delete_index(
103 | collection_name: "string", # required
104 | field_name: "string" # required
105 | )
108 |
109 | # Get cluster information for a collection
110 | client.collections.cluster_info(
111 | collection_name: "test_collection" # required
112 | )
113 |
114 | # Update collection cluster setup
115 | client.collections.update_cluster(
116 | collection_name: "string", # required
117 | move_shard: { # required
118 | shard_id: "int",
119 | to_peer_id: "int",
120 | from_peer_id: "int"
121 | }
122 | )
124 |
125 | # Create new snapshot for a collection
126 | client.collections.create_snapshot(
127 | collection_name: "string", # required
128 | )
129 |
130 | # Get list of snapshots for a collection
131 | client.collections.list_snapshots(
132 | collection_name: "string", # required
133 | )
134 |
135 | # Delete snapshot for a collection
136 | client.collections.delete_snapshot(
137 | collection_name: "string", # required
138 | snapshot_name: "string" # required
139 | )
140 |
141 | # Recover local collection data from a snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created.
142 | client.collections.restore_snapshot(
143 | collection_name: "string", # required
144 | filepath: "string", # required
145 | wait: "boolean",
146 | priority: "string"
147 | )
148 |
149 | # Download specified snapshot from a collection as a file
150 | client.collections.download_snapshot(
151 | collection_name: "string", # required
152 | snapshot_name: "string", # required
153 | filepath: "/dir/filename.snapshot" # required
154 | )
155 | ```
156 |
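As a concrete sketch (the collection name, vector size and field name are placeholders), creating a small collection and indexing a payload field looks like this:

```ruby
# Create a collection of 4-dimensional vectors compared by cosine distance
client.collections.create(
  collection_name: "my_collection",
  vectors: { size: 4, distance: "Cosine" }
)

# Index the "city" payload field so filters on it are fast
client.collections.create_index(
  collection_name: "my_collection",
  field_name: "city",
  field_schema: "keyword"
)

# Responses follow the standard Qdrant envelope
client.collections.get(collection_name: "my_collection").dig("result", "status") # => "green"
```
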
157 | ### Points
158 | ```ruby
159 | # Retrieve full information of single point by id
160 | client.points.get(
161 | collection_name: "string", # required
162 | id: "int/string", # required
163 | consistency: "int"
164 | )
165 |
166 | # Retrieve full information of points by ids
167 | client.points.get_all(
168 | collection_name: "string", # required
169 | ids: "[int]", # required
170 | with_payload: "boolean",
171 | with_vector: "boolean"
172 | )
173 |
174 | # Retrieve multiple points by their IDs. The data will be returned as an array of point objects.
175 | client.points.list(
176 | collection_name: "string", # required
177 | ids: "[int/string]", # required
178 | with_payload: nil,
179 | with_vector: nil,
180 | consistency: nil
181 |
182 | )
183 |
184 | # Perform insert + update on points. If a point with the given ID already exists, it will be overwritten.
185 | client.points.upsert(
186 | collection_name: "string", # required
187 | batch: {}, # either `batch:` or `points:` is required
188 | wait: "boolean",
189 | ordering: "string"
190 | )
191 |
192 | # Delete points
193 | client.points.delete(
194 | collection_name: "string", # required
195 | points: "[int/string]", # either `points:` or `filter:` required
196 | filter: {},
197 | wait: "boolean",
198 | ordering: "string"
199 | )
200 |
201 | # Set payload values for points
202 | client.points.set_payload(
203 | collection_name: "string", # required
204 | payload: { # required
205 | "property name" => "value"
206 | },
207 | points: "[int/string]", # `points:` or `filter:` are required
208 | filter: {},
209 | wait: "boolean",
210 | ordering: "string"
211 | )
212 |
213 | # Replace full payload of points with new one
214 | client.points.overwrite_payload(
215 | collection_name: "string", # required
216 | payload: {}, # required
217 | wait: "boolean",
218 | ordering: "string",
219 | points: "[int/string]",
220 | filter: {}
221 | )
222 |
223 | # Delete specified key payload for points
224 | client.points.clear_payload_keys(
225 | collection_name: "string", # required
226 | keys: "[string]", # required
227 | points: "[int/string]",
228 | filter: {},
229 | wait: "boolean",
230 | ordering: "string"
231 | )
232 |
233 | # Remove all payload for specified points
234 | client.points.clear_payload(
235 | collection_name: "string", # required
236 | points: "[int/string]", # `points:` or `filter:` required
237 | wait: "boolean",
238 | ordering: "string"
239 | )
240 |
241 | # Scroll request - paginate over all points which matches given filtering condition
242 | client.points.scroll(
243 | collection_name: "string", # required
244 | limit: "int",
245 | filter: {},
246 | offset: "string",
247 | with_payload: "boolean",
248 | with_vector: "boolean",
249 | consistency: "int/string"
250 | )
251 |
252 | # Retrieve closest points based on vector similarity and given filtering conditions
253 | client.points.search(
254 | collection_name: "string", # required
255 | limit: "int", # required
256 | vector: "[int]", # required
257 | filter: {},
258 | params: {},
259 | offset: "int",
260 | with_payload: "boolean",
261 | with_vector: "boolean",
262 | score_threshold: "float"
263 | )
264 |
265 |
266 | # Retrieve by batch the closest points based on vector similarity and given filtering conditions
267 | client.points.batch_search(
268 | collection_name: "string", # required
269 | searches: [{}], # required
270 | consistency: "int/string"
271 | )
272 |
273 | # Look for the points which are closer to stored positive examples and at the same time further to negative examples.
274 | client.points.recommend(
275 | collection_name: "string", # required
276 | positive: "[int/string]", # required; array of point IDs
277 | limit: "int", # required
278 | negative: "[int/string]",
279 | filter: {},
280 | params: {},
281 | offset: "int",
282 | with_payload: "boolean",
283 | with_vector: "boolean",
284 | score_threshold: "float",
285 | using: "string",
286 | lookup_from: {},
287 | )
288 |
289 | # Look for the points which are closer to stored positive examples and at the same time further to negative examples.
290 | client.points.batch_recommend(
291 | collection_name: "string", # required
292 | searches: [{}], # required
293 | consistency: "string"
294 | )
295 |
296 | # Count points which matches given filtering condition
297 | client.points.count(
298 | collection_name: "string", # required
299 | filter: {},
300 | exact: "boolean"
301 | )
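
# Universally query points. Covers search/recommend capabilities plus hybrid and multi-stage queries.
# (Mirrors Points#query; every keyword argument below except collection_name: is optional.)
client.points.query(
collection_name: "string", # required
query: {}, # e.g. a vector, a point ID, or a query clause such as { nearest: [...] }
prefetch: [{}],
using: "string",
filter: {},
params: {},
score_threshold: "float",
limit: "int",
offset: "int",
with_payload: "boolean",
with_vector: "boolean",
lookup_from: {},
shard_key: "string",
consistency: "int/string",
timeout: "int"
)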
302 | ```
303 |
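A minimal round trip, assuming an existing collection named `my_collection` whose vectors have size 4 (IDs, vectors and payloads below are made up):

```ruby
# Insert two points with payloads
client.points.upsert(
  collection_name: "my_collection",
  points: [
    { id: 1, vector: [0.05, 0.61, 0.76, 0.74], payload: { city: "Berlin" } },
    { id: 2, vector: [0.19, 0.81, 0.75, 0.11], payload: { city: "London" } }
  ],
  wait: true
)

# Retrieve the 3 closest points to a query vector, including payloads
response = client.points.search(
  collection_name: "my_collection",
  vector: [0.2, 0.1, 0.9, 0.7],
  limit: 3,
  with_payload: true
)
response["result"] # => array of hashes with "id", "score" and "payload" keys
```
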
304 | ### Snapshots
305 | ```ruby
306 | # Get list of snapshots of the whole storage
307 | client.snapshots.list
308 |
309 | # Create new snapshot of the whole storage
310 | client.snapshots.create
311 |
312 | # Delete snapshot of the whole storage
313 | client.snapshots.delete(
314 | snapshot_name: "string" # required
315 | )
316 |
317 | # Download specified snapshot of the whole storage as a file
318 | client.snapshots.download(
319 | snapshot_name: "string", # required
320 | filepath: "~/Downloads/backup.snapshot" # required
321 | )
328 |
347 | ```
348 |
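For example, to back up the whole storage and save the snapshot locally (the target path is illustrative):

```ruby
snapshot = client.snapshots.create
name = snapshot.dig("result", "name")

client.snapshots.download(
  snapshot_name: name,
  filepath: "./#{name}"
)
```
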
349 | ### Cluster
350 | ```ruby
351 | # Get information about the current state and composition of the cluster
352 | client.clusters.info
353 |
354 | # Tries to recover current peer Raft state.
355 | client.clusters.recover
356 |
357 | # Tries to remove peer from the cluster. Will return an error if peer has shards on it.
358 | client.clusters.remove_peer(
359 | peer_id: "int", # required
360 | force: "boolean"
361 | )
362 | ```
363 |
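On a single-node deployment, for instance, the reported status is simply `"disabled"`:

```ruby
client.clusters.info.dig("result", "status") # => "disabled" unless distributed mode is enabled
```
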
364 | ### Service
365 | ```ruby
366 | # Collect telemetry data including app info, system info, collections info, cluster info, configs and statistics
367 | client.telemetry(
368 | anonymize: "boolean" # optional
369 | )
370 |
371 | # Collect metrics data including app info, collections info, cluster info and statistics
372 | client.metrics(
373 | anonymize: "boolean" # optional
374 | )
375 |
376 | # Get lock options. If write is locked, all write operations and collection creation are forbidden
377 | client.locks
378 |
379 | # Set lock options. If write is locked, all write operations and collection creation are forbidden. Returns previous lock options
380 | client.set_lock(
381 | write: "boolean", # required
382 | error_message: "string"
383 | )
384 | ```
385 |
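For example, writes can be locked while maintenance is performed and unlocked afterwards (the error message is illustrative):

```ruby
# Forbid write operations and collection creation
client.set_lock(write: true, error_message: "Maintenance in progress")

client.locks # => { "result" => { "write" => true, "error_message" => "Maintenance in progress" }, ... }

# Re-enable writes
client.set_lock(write: false)
```
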
386 | ## Development
387 |
388 | After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
389 |
390 | To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the `.gem` file to [rubygems.org](https://rubygems.org).
391 |
392 | ## Contributing
393 |
394 | Bug reports and pull requests are welcome on GitHub at https://github.com/andreibondarev/qdrant-ruby.
395 |
396 | ## License
397 |
398 | qdrant-ruby is licensed under the Apache License, Version 2.0. See the LICENSE file for the full license text.
399 |
400 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "bundler/gem_tasks"
4 | require "rspec/core/rake_task"
5 |
6 | RSpec::Core::RakeTask.new(:spec)
7 |
8 | task default: :spec
9 |
--------------------------------------------------------------------------------
/bin/console:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 | # frozen_string_literal: true
3 |
4 | require "bundler/setup"
5 | require "qdrant"
6 |
7 | # You can add fixtures and/or initialization code here to make experimenting
8 | # with your gem easier. You can also use a different console, if you like.
9 |
10 | # (If you use this, don't forget to add pry to your Gemfile!)
11 | require "pry"
12 | # Pry.start
13 |
14 | client = Qdrant::Client.new(
15 | url: ENV["QDRANT_URL"],
16 | api_key: ENV["QDRANT_API_KEY"]
17 | )
18 |
19 | require "irb"
20 | IRB.start(__FILE__)
21 |
--------------------------------------------------------------------------------
/bin/setup:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euo pipefail
3 | IFS=$'\n\t'
4 | set -vx
5 |
6 | bundle install
7 |
8 | # Do any other automated setup that you need to do here
9 |
--------------------------------------------------------------------------------
/lib/qdrant.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative "qdrant/version"
4 |
5 | module Qdrant
6 | autoload :Aliases, "qdrant/aliases"
7 | autoload :Base, "qdrant/base"
8 | autoload :Client, "qdrant/client"
9 | autoload :Clusters, "qdrant/clusters"
10 | autoload :Collections, "qdrant/collections"
11 | autoload :Error, "qdrant/error"
12 | autoload :Points, "qdrant/points"
13 | autoload :Service, "qdrant/service"
14 | autoload :Snapshots, "qdrant/snapshots"
15 | end
16 |
--------------------------------------------------------------------------------
/lib/qdrant/aliases.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Aliases < Base
5 | PATH = "aliases"
6 |
7 | # Get list of all aliases (for a collection)
8 | def list
9 | response = client.connection.get(PATH)
10 | response.body
11 | end
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/lib/qdrant/base.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Base
5 | attr_reader :client
6 |
7 | def initialize(client:)
8 | @client = client
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/lib/qdrant/client.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "faraday"
4 | require "forwardable"
5 | require "logger"
6 | module Qdrant
7 | class Client
8 | extend Forwardable
9 |
10 | attr_reader :url, :api_key, :adapter, :raise_error, :logger
11 |
12 | def_delegators :service, :telemetry, :metrics, :locks, :set_lock
13 |
14 | def initialize(
15 | url:,
16 | api_key: nil,
17 | adapter: Faraday.default_adapter,
18 | raise_error: false,
19 | logger: nil
20 | )
21 | @url = url
22 | @api_key = api_key
23 | @adapter = adapter
24 | @raise_error = raise_error
25 | @logger = logger || Logger.new($stdout)
26 | end
27 |
28 | def connection
29 | @connection ||= Faraday.new(url: url) do |faraday|
30 | if api_key
31 | faraday.headers["api-key"] = api_key
32 | end
33 | faraday.request :json
34 | faraday.response :logger, @logger, {headers: true, bodies: true, errors: true}
35 | faraday.response :raise_error if raise_error
36 | faraday.response :json, content_type: /\bjson$/
37 | faraday.adapter adapter
38 | end
39 | end
40 |
41 | def aliases
42 | @aliases ||= Qdrant::Aliases.new(client: self).list
43 | end
44 |
45 | def collections
46 | @collections ||= Qdrant::Collections.new(client: self)
47 | end
48 |
49 | def snapshots
50 | @snapshots ||= Qdrant::Snapshots.new(client: self)
51 | end
52 |
53 | def service
54 | @service ||= Qdrant::Service.new(client: self)
55 | end
56 |
57 | def clusters
58 | @clusters ||= Qdrant::Clusters.new(client: self)
59 | end
60 |
61 | def points
62 | @points ||= Qdrant::Points.new(client: self)
63 | end
64 | end
65 | end
66 |
--------------------------------------------------------------------------------
/lib/qdrant/clusters.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Clusters < Base
5 | PATH = "cluster"
6 |
7 | # Get information about the current state and composition of the cluster
8 | def info
9 | response = client.connection.get(PATH)
10 | response.body
11 | end
12 |
13 | # Tries to recover current peer Raft state.
14 | def recover
15 | response = client.connection.post("#{PATH}/recover")
16 | response.body
17 | end
18 |
19 | # Remove peer from the cluster
20 | def remove_peer(
21 | peer_id:,
22 | force: nil
23 | )
24 | response = client.connection.delete("#{PATH}/peer/#{peer_id}") do |req|
25 | req.params["force"] = force if force
26 | end
27 | response.body
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/lib/qdrant/collections.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Collections < Base
5 | PATH = "collections"
6 |
7 | # Get the names of all existing collections
8 | def list
9 | response = client.connection.get(PATH)
10 | response.body
11 | end
12 |
13 | # Get detailed information about specified existing collection
14 | def get(collection_name:)
15 | response = client.connection.get("#{PATH}/#{collection_name}")
16 | response.body
17 | end
18 |
19 | # Create new collection with given parameters
20 | def create(
21 | collection_name:,
22 | vectors:,
23 | sparse_vectors: nil,
24 | shard_number: nil,
25 | replication_factor: nil,
26 | write_consistency_factor: nil,
27 | on_disk_payload: nil,
28 | hnsw_config: nil,
29 | wal_config: nil,
30 | optimizers_config: nil,
31 | init_from: nil,
32 | quantization_config: nil
33 | )
34 | response = client.connection.put("#{PATH}/#{collection_name}") do |req|
35 | req.body = {}
36 | req.body["vectors"] = vectors
37 | req.body["sparse_vectors"] = sparse_vectors unless sparse_vectors.nil?
38 | req.body["shard_number"] = shard_number unless shard_number.nil?
39 | req.body["replication_factor"] = replication_factor unless replication_factor.nil?
40 | req.body["write_consistency_factor"] = write_consistency_factor unless write_consistency_factor.nil?
41 | req.body["on_disk_payload"] = on_disk_payload unless on_disk_payload.nil?
42 | req.body["hnsw_config"] = hnsw_config unless hnsw_config.nil?
43 | req.body["wal_config"] = wal_config unless wal_config.nil?
44 | req.body["optimizers_config"] = optimizers_config unless optimizers_config.nil?
45 | req.body["init_from"] = init_from unless init_from.nil?
46 | req.body["quantization_config"] = quantization_config unless quantization_config.nil?
47 | end
48 |
49 | response.body
50 | end
51 |
52 | # Update parameters of the existing collection
53 | def update(
54 | collection_name:,
55 | optimizers_config: nil,
56 | params: nil
57 | )
58 | response = client.connection.patch("#{PATH}/#{collection_name}") do |req|
59 | req.body = {}
60 | req.body["optimizers_config"] = optimizers_config unless optimizers_config.nil?
61 | req.body["params"] = params unless params.nil?
62 | end
63 |
64 | response.body
65 | end
66 |
67 | # Drop collection and all associated data
68 | def delete(collection_name:)
69 | response = client.connection.delete("#{PATH}/#{collection_name}")
70 | response.body
71 | end
72 |
73 | # Get list of all aliases for a collection
74 | def aliases(collection_name:)
75 | response = client.connection.get("#{PATH}/#{collection_name}/aliases")
76 | response.body
77 | end
78 |
79 | # Update aliases of the collections
80 | def update_aliases(actions:)
81 | response = client.connection.post("#{PATH}/aliases") do |req|
82 | req.body = {
83 | actions: actions
84 | }
85 | end
86 |
87 | response.body
88 | end
89 |
90 | # Create index for field in collection.
91 | def create_index(
92 | collection_name:,
93 | field_name:,
94 | field_schema: nil
95 | )
96 | response = client.connection.put("#{PATH}/#{collection_name}/index") do |req|
97 | req.body = {
98 | field_name: field_name
99 | }
100 | req.body["field_schema"] = field_schema unless field_schema.nil?
101 | end
102 |
103 | response.body
104 | end
105 |
106 | # Delete field index for collection
107 | def delete_index(
108 | collection_name:,
109 | field_name:
110 | )
111 | response = client.connection.delete("#{PATH}/#{collection_name}/index/#{field_name}")
112 | response.body
113 | end
114 |
115 | # Get cluster information for a collection
116 | def cluster_info(collection_name:)
117 | response = client.connection.get("#{PATH}/#{collection_name}/cluster")
118 | response.body
119 | end
120 |
121 | # Update collection cluster setup
122 | def update_cluster(
123 | collection_name:,
124 | move_shard:
125 | )
126 | response = client.connection.post("#{PATH}/#{collection_name}/cluster") do |req|
127 | req.body = {
128 | move_shard: move_shard
129 | }
130 | end
131 | response.body
132 | end
133 |
134 | # Download specified snapshot from a collection as a file
135 | def download_snapshot(
136 | collection_name:,
137 | snapshot_name:,
138 | filepath:
139 | )
140 | response = client.connection.get("#{PATH}/#{collection_name}/snapshots/#{snapshot_name}")
141 | File.open(File.expand_path(filepath), "wb+") { |fp| fp.write(response.body) }
142 | end
143 |
144 | # Delete snapshot for a collection
145 | def delete_snapshot(
146 | collection_name:,
147 | snapshot_name:
148 | )
149 | response = client.connection.delete("#{PATH}/#{collection_name}/snapshots/#{snapshot_name}")
150 | response.body
151 | end
152 |
153 | # Create new snapshot for a collection
154 | def create_snapshot(
155 | collection_name:
156 | )
157 | response = client.connection.post("#{PATH}/#{collection_name}/snapshots")
158 | response.body
159 | end
160 |
161 | # Get list of snapshots for a collection
162 | def list_snapshots(
163 | collection_name:
164 | )
165 | response = client.connection.get("collections/#{collection_name}/snapshots")
166 | response.body
167 | end
168 |
169 | # Recover local collection data from a snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created.
170 | def restore_snapshot(
171 | collection_name:,
172 | filepath:,
173 | priority: nil,
174 | wait: nil
175 | )
176 | response = client.connection.post("#{PATH}/#{collection_name}/snapshots/recover") do |req|
177 | req.params["wait"] = wait unless wait.nil?
178 |
179 | req.body = {
180 | location: filepath
181 | }
182 | req.body["priority"] = priority unless priority.nil?
183 | end
184 | response.body
185 | end
186 | end
187 | end
188 |
--------------------------------------------------------------------------------
/lib/qdrant/error.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Error < StandardError
5 | end
6 | end
7 |
--------------------------------------------------------------------------------
/lib/qdrant/points.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Points < Base
5 | PATH = "points"
6 |
7 | # Retrieve multiple points by their IDs. The data will be returned as an array of point objects.
8 | def list(
9 | collection_name:,
10 | ids: nil,
11 | with_payload: nil,
12 | with_vector: nil,
13 | consistency: nil
14 | )
15 | response = client.connection.post("collections/#{collection_name}/#{PATH}") do |req|
16 | req.params["consistency"] = consistency unless consistency.nil?
17 |
18 | req.body = {}
19 | req.body["ids"] = ids
20 | req.body["with_payload"] = with_payload unless with_payload.nil?
21 | req.body["with_vector"] = with_vector unless with_vector.nil?
22 | end
23 | response.body
24 | end
25 |
26 | # Perform insert + updates on points. If point with given ID already exists - it will be overwritten.
27 | def upsert(
28 | collection_name:,
29 | wait: nil,
30 | ordering: nil,
31 | batch: nil,
32 | points: nil
33 | )
34 | response = client.connection.put("collections/#{collection_name}/#{PATH}") do |req|
35 | req.params = {}
36 | req.params["wait"] = wait unless wait.nil?
37 | req.params["ordering"] = ordering unless ordering.nil?
38 |
39 | req.body = {}
40 | req.body["batch"] = batch unless batch.nil?
41 | req.body["points"] = points unless points.nil?
42 | end
43 | response.body
44 | end
45 |
46 | # Delete points
47 | def delete(
48 | collection_name:,
49 | points: nil,
50 | wait: nil,
51 | ordering: nil,
52 | filter: nil
53 | )
54 |
55 | raise ArgumentError, "Either points or filter should be provided" if points.nil? && filter.nil?
56 |
57 | response = client.connection.post("collections/#{collection_name}/#{PATH}/delete") do |req|
58 | req.params["wait"] = wait unless wait.nil?
59 | req.params["ordering"] = ordering unless ordering.nil?
60 |
61 | req.body = {}
62 |
63 | req.body["points"] = points unless points.nil?
64 | req.body["filter"] = filter unless filter.nil?
65 | end
66 | response.body
67 | end
68 |
69 | # Retrieve full information of single point by id
70 | def get(
71 | collection_name:,
72 | id:,
73 | consistency: nil
74 | )
75 | response = client.connection.get("collections/#{collection_name}/#{PATH}/#{id}") do |req|
76 | req.params["consistency"] = consistency unless consistency.nil?
77 | end
78 | response.body
79 | end
80 |
81 | # Retrieve full information of points by ids
82 | def get_all(
83 | collection_name:,
84 | ids:,
85 | consistency: nil,
86 | with_payload: nil,
87 | with_vector: nil
88 | )
89 | response = client.connection.post("collections/#{collection_name}/#{PATH}") do |req|
90 | req.params["consistency"] = consistency unless consistency.nil?
91 |
92 | req.body = {}
93 | req.body["ids"] = ids
94 | req.body["with_payload"] = with_payload unless with_payload.nil?
95 | req.body["with_vector"] = with_vector unless with_vector.nil?
96 | end
97 | response.body
98 | end
99 |
100 | # Set payload values for points
101 | def set_payload(
102 | collection_name:,
103 | payload:,
104 | wait: nil,
105 | ordering: nil,
106 | points: nil,
107 | filter: nil
108 | )
109 | response = client.connection.post("collections/#{collection_name}/#{PATH}/payload") do |req|
110 | req.params["wait"] = wait unless wait.nil?
111 | req.params["ordering"] = ordering unless ordering.nil?
112 |
113 | req.body = {}
114 | req.body["payload"] = payload
115 | req.body["points"] = points unless points.nil?
116 | req.body["filter"] = filter unless filter.nil?
117 | end
118 | response.body
119 | end
120 |
121 | # Replace full payload of points with new one
122 | def overwrite_payload(
123 | collection_name:,
124 | payload:, wait: nil,
125 | ordering: nil,
126 | points: nil,
127 | filter: nil
128 | )
129 | response = client.connection.put("collections/#{collection_name}/#{PATH}/payload") do |req|
130 | req.params["wait"] = wait unless wait.nil?
131 | req.params["ordering"] = ordering unless ordering.nil?
132 |
133 | req.body = {}
134 | req.body["payload"] = payload
135 | req.body["points"] = points unless points.nil?
136 | req.body["filter"] = filter unless filter.nil?
137 | end
138 | response.body
139 | end
140 |
141 | # Delete specified key payload for points
142 | def clear_payload_keys(
143 | collection_name:,
144 | keys:, wait: nil,
145 | ordering: nil,
146 | points: nil,
147 | filter: nil
148 | )
149 | response = client.connection.post("collections/#{collection_name}/#{PATH}/payload/delete") do |req|
150 | req.params["wait"] = wait unless wait.nil?
151 | req.params["ordering"] = ordering unless ordering.nil?
152 |
153 | req.body = {}
154 | req.body["keys"] = keys
155 | req.body["points"] = points unless points.nil?
156 | req.body["filter"] = filter unless filter.nil?
157 | end
158 | response.body
159 | end
160 |
161 | # Remove all payload for specified points
162 | def clear_payload(
163 | collection_name:,
164 | wait: nil,
165 | ordering: nil,
166 | points: nil,
167 | filter: nil
168 | )
169 | response = client.connection.post("collections/#{collection_name}/#{PATH}/payload/clear") do |req|
170 | req.params["wait"] = wait unless wait.nil?
171 | req.params["ordering"] = ordering unless ordering.nil?
172 |
173 | req.body = {}
174 | req.body["points"] = points unless points.nil?
175 | req.body["filter"] = filter unless filter.nil?
176 | end
177 | response.body
178 | end
179 |
180 | # Scroll request - paginate over all points which matches given filtering condition
181 | def scroll(
182 | collection_name:,
183 | limit:,
184 | filter: nil,
185 | offset: nil,
186 | with_payload: nil,
187 | with_vector: nil,
188 | consistency: nil
189 | )
190 | response = client.connection.post("collections/#{collection_name}/#{PATH}/scroll") do |req|
191 | req.params["consistency"] = consistency unless consistency.nil?
192 |
193 | req.body = {}
194 | req.body["limit"] = limit
195 | req.body["filter"] = filter unless filter.nil?
196 | req.body["offset"] = offset unless offset.nil?
197 | req.body["with_payload"] = with_payload unless with_payload.nil?
198 | req.body["with_vector"] = with_vector unless with_vector.nil?
199 | end
200 | response.body
201 | end
202 |
203 | # Retrieve closest points based on vector similarity and given filtering conditions
204 | def search(
205 | collection_name:,
206 | vector:,
207 | limit:,
208 | filter: nil,
209 | params: nil,
210 | offset: nil,
211 | with_payload: nil,
212 | with_vector: nil,
213 | score_threshold: nil,
214 | consistency: nil
215 | )
216 | response = client.connection.post("collections/#{collection_name}/#{PATH}/search") do |req|
217 | req.params["consistency"] = consistency unless consistency.nil?
218 |
219 | req.body = {}
220 | req.body["vector"] = vector
221 | req.body["limit"] = limit
222 | req.body["filter"] = filter unless filter.nil?
223 | req.body["params"] = params unless params.nil?
224 | req.body["offset"] = offset unless offset.nil?
225 | req.body["with_payload"] = with_payload unless with_payload.nil?
226 | req.body["with_vector"] = with_vector unless with_vector.nil?
227 | req.body["score_threshold"] = score_threshold unless score_threshold.nil?
228 | end
229 | response.body
230 | end
231 |
232 | # Retrieve by batch the closest points based on vector similarity and given filtering conditions
233 | def batch_search(
234 | collection_name:,
235 | searches:,
236 | consistency: nil
237 | )
238 | response = client.connection.post("collections/#{collection_name}/#{PATH}/search/batch") do |req|
239 | req.params["consistency"] = consistency unless consistency.nil?
240 |
241 | req.body = {}
242 | req.body["searches"] = searches
243 | end
244 | response.body
245 | end
246 |
247 | # Look for the points which are closer to stored positive examples and at the same time further to negative examples.
248 | def recommend(
249 | collection_name:,
250 | positive:,
251 | limit:,
252 | negative: nil,
253 | filter: nil,
254 | params: nil,
255 | offset: nil,
256 | with_payload: nil,
257 | with_vector: nil,
258 | score_threshold: nil,
259 | using: nil,
260 | lookup_from: nil,
261 | consistency: nil
262 | )
263 | response = client.connection.post("collections/#{collection_name}/#{PATH}/recommend") do |req|
264 | req.params["consistency"] = consistency unless consistency.nil?
265 |
266 | req.body = {}
267 | req.body["positive"] = positive
268 | req.body["negative"] = negative unless negative.nil?
269 | req.body["limit"] = limit
270 | req.body["filter"] = filter unless filter.nil?
271 | req.body["params"] = params unless params.nil?
272 | req.body["offset"] = offset unless offset.nil?
273 | req.body["with_payload"] = with_payload unless with_payload.nil?
274 | req.body["with_vector"] = with_vector unless with_vector.nil?
275 | req.body["score_threshold"] = score_threshold unless score_threshold.nil?
276 | req.body["using"] = using unless using.nil?
277 | req.body["lookup_from"] = lookup_from unless lookup_from.nil?
278 | end
279 | response.body
280 | end
281 |
282 | # Look for the points which are closer to stored positive examples and at the same time further to negative examples.
283 | def batch_recommend(
284 | collection_name:,
285 | searches:,
286 | consistency: nil
287 | )
288 | response = client.connection.post("collections/#{collection_name}/#{PATH}/recommend/batch") do |req|
289 | req.params["consistency"] = consistency unless consistency.nil?
290 |
291 | req.body = {}
292 | req.body["searches"] = searches
293 | end
294 | response.body
295 | end
296 |
297 | # Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
298 | def query(
299 | collection_name:,
300 | consistency: nil,
301 | timeout: nil,
302 | shard_key: nil,
303 | prefetch: nil,
304 | query: nil,
305 | using: nil,
306 | filter: nil,
307 | params: nil,
308 | score_threshold: nil,
309 | limit: nil,
310 | offset: nil,
311 | with_vector: nil,
312 | with_payload: nil,
313 | lookup_from: nil
314 | )
315 | response = client.connection.post("collections/#{collection_name}/#{PATH}/query") do |req|
316 | req.params["consistency"] = consistency unless consistency.nil?
317 | req.params["timeout"] = timeout unless timeout.nil?
318 |
319 | req.body = {}
320 | req.body["shard_key"] = shard_key unless shard_key.nil?
321 | req.body["prefetch"] = prefetch unless prefetch.nil?
322 | req.body["query"] = query unless query.nil?
323 | req.body["using"] = using unless using.nil?
324 | req.body["filter"] = filter unless filter.nil?
325 | req.body["params"] = params unless params.nil?
326 | req.body["score_threshold"] = score_threshold unless score_threshold.nil?
327 | req.body["limit"] = limit unless limit.nil?
328 | req.body["offset"] = offset unless offset.nil?
329 | req.body["with_vector"] = with_vector unless with_vector.nil?
330 | req.body["with_payload"] = with_payload unless with_payload.nil?
331 | req.body["lookup_from"] = lookup_from unless lookup_from.nil?
332 | end
333 | response.body
334 | end
335 |
336 | # Count points which matches given filtering condition
337 | def count(
338 | collection_name:,
339 | filter: nil,
340 | exact: nil
341 | )
342 | response = client.connection.post("collections/#{collection_name}/#{PATH}/count") do |req|
343 | req.body = {}
344 | req.body["filter"] = filter unless filter.nil?
345 | req.body["exact"] = filter unless exact.nil?
346 | end
347 | response.body
348 | end
349 | end
350 | end
351 |
--------------------------------------------------------------------------------
/lib/qdrant/service.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Service < Base
5 | # Collect telemetry data including app info, system info, collections info, cluster info, configs and statistics
6 | def telemetry(
7 | anonymize: nil
8 | )
9 | response = client.connection.get("telemetry") do |req|
10 | req.params["anonymize"] = anonymize if anonymize
11 | end
12 | response.body
13 | end
14 |
15 | # Collect metrics data including app info, collections info, cluster info and statistics
16 | def metrics(
17 | anonymize: nil
18 | )
19 | response = client.connection.get("metrics") do |req|
20 | req.params["anonymize"] = anonymize if anonymize
21 | end
22 | response.body
23 | end
24 |
25 | # Set lock options. If write is locked, all write operations and collection creation are forbidden. Returns previous lock options
26 | def set_lock(
27 | write:,
28 | error_message: nil
29 | )
30 | response = client.connection.post("locks") do |req|
31 | req.body = {
32 | write: write
33 | }
34 | req.body["error_message"] = error_message if error_message
35 | end
36 | response.body
37 | end
38 |
39 | # Get lock options. If write is locked, all write operations and collection creation are forbidden
40 | def locks
41 | response = client.connection.get("locks")
42 | response.body
43 | end
44 | end
45 | end
46 |
--------------------------------------------------------------------------------
/lib/qdrant/snapshots.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | class Snapshots < Base
5 | PATH = "snapshots"
6 |
7 | # Get list of snapshots of the whole storage
8 | def list
9 | response = client.connection.get(PATH)
10 | response.body
11 | end
12 |
13 | # Create new snapshot of the whole storage
14 | def create
15 | response = client.connection.post(PATH)
16 | response.body
17 | end
18 |
19 | # Delete snapshot of the whole storage
20 | def delete(
21 | snapshot_name:
22 | )
23 | response = client.connection.delete("#{PATH}/#{snapshot_name}")
24 | response.body
25 | end
26 |
27 | # Download specified snapshot of the whole storage as a file
28 | def download(
29 | snapshot_name:,
30 | filepath:
31 | )
32 | response = client.connection.get("#{PATH}/#{snapshot_name}")
33 | File.open(File.expand_path(filepath), "wb+") { |fp| fp.write(response.body) }
34 | end
35 | end
36 | end
37 |
--------------------------------------------------------------------------------
/lib/qdrant/version.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Qdrant
4 | VERSION = "0.9.9"
5 | end
6 |
--------------------------------------------------------------------------------
/qdrant.gemspec:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative "lib/qdrant/version"
4 |
5 | Gem::Specification.new do |spec|
6 | spec.name = "qdrant-ruby"
7 | spec.version = Qdrant::VERSION
8 | spec.authors = ["Andrei Bondarev"]
9 | spec.email = ["andrei@sourcelabs.io", "andrei.bondarev13@gmail.com"]
10 |
11 | spec.summary = "Ruby wrapper for the Qdrant vector search database API"
12 | spec.description = "Ruby wrapper for the Qdrant vector search database API"
13 | spec.homepage = "https://github.com/andreibondarev/qdrant-ruby"
14 | spec.license = "Apache-2.0"
15 | spec.required_ruby_version = ">= 2.6.0"
16 |
17 | spec.metadata["homepage_uri"] = spec.homepage
18 | spec.metadata["source_code_uri"] = "https://github.com/andreibondarev/qdrant-ruby"
19 | spec.metadata["changelog_uri"] = "https://github.com/andreibondarev/qdrant-ruby/CHANGELOG.md"
20 |
21 | # Specify which files should be added to the gem when it is released.
22 | # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
23 | spec.files = Dir.chdir(__dir__) do
24 | `git ls-files -z`.split("\x0").reject do |f|
25 | (f == __FILE__) || f.match(%r{\A(?:(?:bin|test|spec|features)/|\.(?:git|circleci)|appveyor)})
26 | end
27 | end
28 | spec.bindir = "exe"
29 | spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
30 | spec.require_paths = ["lib"]
31 |
32 | # Uncomment to register a new dependency of your gem
33 | # spec.add_dependency "example-gem", "~> 1.0"
34 |
35 | # For more information and examples about making a new gem, check out our
36 | # guide at: https://bundler.io/guides/creating_gem.html
37 |
38 | spec.add_dependency "faraday", ">= 2.0.1", "< 3"
39 | spec.add_development_dependency "pry-byebug", "~> 3.9"
40 | end
41 |
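42 | # Consuming applications can pull this gem in with a Gemfile entry along these
43 | # lines (the version constraint is illustrative):
44 | #
45 | #   gem "qdrant-ruby", "~> 0.9.9"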
--------------------------------------------------------------------------------
/sig/qdrant.rbs:
--------------------------------------------------------------------------------
1 | module Qdrant
2 | VERSION: String
3 | # See the writing guide of rbs: https://github.com/ruby/rbs#guides
4 | end
5 |
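6 | # A hypothetical sketch of a signature that could be added here for the client
7 | # constructor exercised in the specs (argument optionality is an assumption):
8 | #
9 | #   module Qdrant
10 | #     class Client
11 | #       def initialize: (url: String, ?api_key: String?, ?logger: ::Logger?) -> void
12 | #     end
13 | #   end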
--------------------------------------------------------------------------------
/spec/fixtures/aliases.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "aliases": [
4 | {
5 | "alias_name": "alias_test_collection",
6 | "collection_name": "test_collection"
7 | }
8 | ]
9 | },
10 | "status": "ok",
11 | "time": 7.267e-06
12 | }
13 |
--------------------------------------------------------------------------------
/spec/fixtures/batch_delete_object.json:
--------------------------------------------------------------------------------
1 | {
2 | "dryRun": false,
3 | "match": {
4 | "class": "Question",
5 | "where": {
6 | "operands": null,
7 | "operator": "Equal",
8 | "path": ["id"],
9 | "valueString": "1"
10 | }
11 | },
12 | "output": "minimal",
13 | "results": {
14 | "failed": 0,
15 | "limit": 10000,
16 | "matches": 1,
17 | "objects": null,
18 | "successful": 1
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/spec/fixtures/cluster.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "status": "disabled"
4 | },
5 | "status": "ok",
6 | "time": 1.591e-06
7 | }
--------------------------------------------------------------------------------
/spec/fixtures/collection.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "status": "green",
4 | "optimizer_status": "ok",
5 | "vectors_count": 0,
6 | "indexed_vectors_count": 0,
7 | "points_count": 0,
8 | "segments_count": 2,
9 | "config": {
10 | "params": {
11 | "vectors": {
12 | "size": 4,
13 | "distance": "Dot"
14 | },
15 | "shard_number": 1,
16 | "replication_factor": 1,
17 | "write_consistency_factor": 1,
18 | "on_disk_payload": true
19 | },
20 | "hnsw_config": {
21 | "m": 16,
22 | "ef_construct": 100,
23 | "full_scan_threshold": 10000,
24 | "max_indexing_threads": 0,
25 | "on_disk": false
26 | },
27 | "optimizer_config": {
28 | "deleted_threshold": 0.2,
29 | "vacuum_min_vector_number": 1000,
30 | "default_segment_number": 0,
31 | "max_segment_size": null,
32 | "memmap_threshold": null,
33 | "indexing_threshold": 20000,
34 | "flush_interval_sec": 5,
35 | "max_optimization_threads": 1
36 | },
37 | "wal_config": {
38 | "wal_capacity_mb": 32,
39 | "wal_segments_ahead": 0
40 | },
41 | "quantization_config": null
42 | },
43 | "payload_schema": {}
44 | },
45 | "status": "ok",
46 | "time": 3.3369e-05
47 | }
48 |
--------------------------------------------------------------------------------
/spec/fixtures/collection_cluster.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "peer_id": 111,
4 | "shard_count": 1,
5 | "local_shards": [
6 | {
7 | "shard_id": 0,
8 | "points_count": 0,
9 | "state": "Active"
10 | }
11 | ],
12 | "remote_shards": [],
13 | "shard_transfers": []
14 | },
15 | "status": "ok",
16 | "time": 1.2886e-05
17 | }
18 |
--------------------------------------------------------------------------------
/spec/fixtures/collections.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "collections": [
4 | {
5 | "name": "test_collection"
6 | }
7 | ]
8 | },
9 | "status": "ok",
10 | "time": 3.385e-06
11 | }
--------------------------------------------------------------------------------
/spec/fixtures/count_points.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "count": 5
4 | },
5 | "status": "ok",
6 | "time": 1.7854e-05
7 | }
--------------------------------------------------------------------------------
/spec/fixtures/locks.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "error_message": "my error msg",
4 | "write": true
5 | },
6 | "status": "ok",
7 | "time": 1.77e-06
8 | }
9 |
--------------------------------------------------------------------------------
/spec/fixtures/point.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "id": 1,
4 | "payload": {
5 | "city": "Berlin"
6 | },
7 | "vector": [
8 | 0.05,
9 | 0.61,
10 | 0.76,
11 | 0.74
12 | ]
13 | },
14 | "status": "ok",
15 | "time": 5.824e-05
16 | }
17 |
--------------------------------------------------------------------------------
/spec/fixtures/points.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": [
3 | {
4 | "id": 4,
5 | "version": 2,
6 | "score": 1.362,
7 | "payload": null,
8 | "vector": null
9 | },
10 | {
11 | "id": 1,
12 | "version": 2,
13 | "score": 1.273,
14 | "payload": null,
15 | "vector": null
16 | },
17 | {
18 | "id": 2,
19 | "version": 2,
20 | "score": 0.871,
21 | "payload": null,
22 | "vector": null
23 | },
24 | {
25 | "id": 5,
26 | "version": 2,
27 | "score": 0.572,
28 | "payload": null,
29 | "vector": null
30 | },
31 | {
32 | "id": 6,
33 | "version": 2,
34 | "score": 0.485,
35 | "payload": null,
36 | "vector": null
37 | }
38 | ],
39 | "status": "ok",
40 | "time": 8.1504e-05
41 | }
42 |
--------------------------------------------------------------------------------
/spec/fixtures/snapshot.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": {
3 | "name": "test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot",
4 | "creation_time": "2023-04-06T20:43:03",
5 | "size": 67214848
6 | },
7 | "status": "ok",
8 | "time": 0.317997872
9 | }
10 |
--------------------------------------------------------------------------------
/spec/fixtures/snapshots.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": [
3 | {
4 | "name": "full-snapshot-2023-04-07-14-32-20.snapshot",
5 | "creation_time": "2023-04-07T14:32:20",
6 | "size": 67217408
7 | },
8 | {
9 | "name": "full-snapshot-2023-04-06-20-59-10.snapshot",
10 | "creation_time": "2023-04-06T20:59:10",
11 | "size": 67217408
12 | }
13 | ],
14 | "status": "ok",
15 | "time": 0.000277949
16 | }
17 |
--------------------------------------------------------------------------------
/spec/fixtures/status_response.json:
--------------------------------------------------------------------------------
1 | {
2 | "result": true,
3 | "status": "ok",
4 | "time": 0.098916343
5 | }
--------------------------------------------------------------------------------
/spec/qdrant/aliases_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "spec_helper"
4 |
5 | RSpec.describe Qdrant::Aliases do
6 | let(:client) {
7 | Qdrant::Client.new(
8 | url: "localhost:8080",
9 | api_key: "123"
10 | )
11 | }
12 |
13 | let(:aliases_fixture) { JSON.parse(File.read("spec/fixtures/aliases.json")) }
14 |
15 | describe "#list" do
16 | let(:response) {
17 | OpenStruct.new(body: aliases_fixture)
18 | }
19 |
20 | before do
21 | allow_any_instance_of(Faraday::Connection).to receive(:get)
22 | .with(Qdrant::Aliases::PATH)
23 | .and_return(response)
24 | end
25 |
26 | it "return the nodes info" do
27 | expect(client.aliases).to be_a(Hash)
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/spec/qdrant/client_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "spec_helper"
4 |
5 | RSpec.describe Qdrant::Client do
6 | let(:client) {
7 | Qdrant::Client.new(
8 | url: "localhost:8080",
9 | api_key: "123"
10 | )
11 | }
12 |
13 | describe "#initialize" do
14 | it "creates a client" do
15 | expect(client).to be_a(Qdrant::Client)
16 | end
17 |
18 | it "accepts a custom logger" do
19 | logger = Logger.new($stdout)
20 | client = Qdrant::Client.new(
21 | url: "localhost:8080",
22 | api_key: "123",
23 | logger: logger
24 | )
25 | expect(client.logger).to eq(logger)
26 | end
27 | end
28 |
29 | describe "#points" do
30 | it "returns an objects client" do
31 | expect(client.points).to be_a(Qdrant::Points)
32 | end
33 | end
34 |
35 | describe "#snapshots" do
36 | it "returns a backups client" do
37 | expect(client.snapshots).to be_a(Qdrant::Snapshots)
38 | end
39 | end
40 |
41 | describe "#clusters" do
42 | it "returns a clusters client" do
43 | expect(client.clusters).to be_a(Qdrant::Clusters)
44 | end
45 | end
46 |
47 | describe "#collections" do
48 | it "returns a collections client" do
49 | expect(client.collections).to be_a(Qdrant::Collections)
50 | end
51 | end
52 |
53 | describe "#service" do
54 | it "returns a services client" do
55 | expect(client.service).to be_a(Qdrant::Service)
56 | end
57 | end
58 | end
59 |
--------------------------------------------------------------------------------
/spec/qdrant/clusters_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "spec_helper"
4 |
5 | RSpec.describe Qdrant::Clusters do
6 | let(:client) {
7 | Qdrant::Client.new(
8 | url: "localhost:8080",
9 | api_key: "123"
10 | )
11 | }
12 |
13 | let(:cluster_fixture) { JSON.parse(File.read("spec/fixtures/cluster.json")) }
14 |
15 | let(:response) {
16 | OpenStruct.new(body: cluster_fixture)
17 | }
18 |
19 | describe "#info" do
20 | before do
21 | allow_any_instance_of(Faraday::Connection).to receive(:get)
22 | .with("cluster")
23 | .and_return(response)
24 | end
25 |
26 | it "return the cluster info" do
27 | expect(client.clusters.info.dig("status")).to eq("ok")
28 | end
29 | end
30 |
31 | describe "#recover" do
32 | before do
33 | allow_any_instance_of(Faraday::Connection).to receive(:post)
34 | .with("cluster/recover")
35 | .and_return(response)
36 | end
37 |
38 | it "return the data" do
39 | expect(client.clusters.recover.dig("status")).to eq("ok")
40 | end
41 | end
42 |
43 | xdescribe "#remove_peer" do
44 | end
45 | end
46 |
--------------------------------------------------------------------------------
/spec/qdrant/collections_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "spec_helper"
4 |
5 | RSpec.describe Qdrant::Collections do
6 | let(:client) {
7 | Qdrant::Client.new(
8 | url: "localhost:8080",
9 | api_key: "123"
10 | )
11 | }
12 | let(:collections) { client.collections }
13 | let(:status_response_fixture) { JSON.parse(File.read("spec/fixtures/status_response.json")) }
14 | let(:collection_fixture) { JSON.parse(File.read("spec/fixtures/collection.json")) }
15 | let(:collections_fixture) { JSON.parse(File.read("spec/fixtures/collections.json")) }
16 | let(:aliases_fixture) { JSON.parse(File.read("spec/fixtures/aliases.json")) }
17 |
18 | describe "#list" do
19 | let(:response) { OpenStruct.new(body: collections_fixture) }
20 |
21 | before do
22 | allow_any_instance_of(Faraday::Connection).to receive(:get)
23 | .with(Qdrant::Collections::PATH)
24 | .and_return(response)
25 | end
26 |
27 | it "returns collections" do
28 | expect(collections.list.dig("result", "collections").count).to eq(1)
29 | end
30 | end
31 |
32 | describe "#get" do
33 | let(:response) { OpenStruct.new(body: collection_fixture) }
34 |
35 | before do
36 | allow_any_instance_of(Faraday::Connection).to receive(:get)
37 | .with("collections/test_collection")
38 | .and_return(response)
39 | end
40 |
41 | it "returns the collection" do
42 | response = collections.get(collection_name: "test_collection")
43 | expect(response.dig("result", "status")).to eq("green")
44 | end
45 | end
46 |
47 | describe "#create" do
48 | let(:response) { OpenStruct.new(body: status_response_fixture) }
49 |
50 | before do
51 | allow_any_instance_of(Faraday::Connection).to receive(:put)
52 | .with("collections/test_collection")
53 | .and_return(response)
54 | end
55 |
56 | it "returns the status" do
57 | response = collections.create(
58 | collection_name: "test_collection",
59 | vectors: {
60 | size: 4,
61 | distance: "Dot"
62 | }
63 | )
64 | expect(response.dig("status")).to eq("ok")
65 | expect(response.dig("result")).to eq(true)
66 | end
67 | end
68 |
69 | describe "#delete" do
70 | let(:response) { OpenStruct.new(body: status_response_fixture) }
71 |
72 | before do
73 | allow_any_instance_of(Faraday::Connection).to receive(:delete)
74 | .with("collections/test_collection")
75 | .and_return(response)
76 | end
77 |
78 | it "returns the schema" do
79 | response = collections.delete(collection_name: "test_collection")
80 | expect(response.dig("status")).to eq("ok")
81 | expect(response.dig("result")).to eq(true)
82 | end
83 | end
84 |
85 | describe "#update" do
86 | let(:response) { OpenStruct.new(body: status_response_fixture) }
87 |
88 | before do
89 | allow_any_instance_of(Faraday::Connection).to receive(:patch)
90 | .with("collections/test_collection")
91 | .and_return(response)
92 | end
93 |
94 | it "returns the schema" do
95 | response = collections.update(
96 | collection_name: "test_collection",
97 | params: {
98 | replication_factor: 1
99 | }
100 | )
101 | expect(response.dig("status")).to eq("ok")
102 | expect(response.dig("result")).to eq(true)
103 | end
104 | end
105 |
106 | describe "#update_aliases" do
107 | let(:response) { OpenStruct.new(body: status_response_fixture) }
108 |
109 | before do
110 | allow_any_instance_of(Faraday::Connection).to receive(:post)
111 | .with("collections/aliases")
112 | .and_return(response)
113 | end
114 |
115 | it "returns the schema" do
116 | response = collections.update_aliases(
117 | actions: [{
118 | create_alias: {
119 | collection_name: "test_collection",
120 | alias_name: "alias_test_collection"
121 | }
122 | }]
123 | )
124 | expect(response.dig("status")).to eq("ok")
125 | expect(response.dig("result")).to eq(true)
126 | end
127 | end
128 |
129 | describe "#aliases" do
130 | let(:response) { OpenStruct.new(body: aliases_fixture) }
131 |
132 | before do
133 | allow_any_instance_of(Faraday::Connection).to receive(:get)
134 | .with("collections/test_collection/aliases")
135 | .and_return(response)
136 | end
137 |
138 | it "returns the schema" do
139 | response = collections.aliases(collection_name: "test_collection")
140 | expect(response.dig("result", "aliases").count).to eq(1)
141 | end
142 | end
143 |
144 | describe "#create_index" do
145 | let(:response) { OpenStruct.new(body: status_response_fixture) }
146 |
147 | before do
148 | allow_any_instance_of(Faraday::Connection).to receive(:put)
149 | .with("collections/test_collection/index")
150 | .and_return(response)
151 | end
152 |
153 | it "returns the schema" do
154 | response = collections.create_index(
155 | collection_name: "test_collection",
156 | field_name: "description",
157 | field_schema: "text"
158 | )
159 | expect(response.dig("status")).to eq("ok")
160 | expect(response.dig("result")).to eq(true)
161 | end
162 | end
163 |
164 | describe "#delete_index" do
165 | let(:response) { OpenStruct.new(body: status_response_fixture) }
166 |
167 | before do
168 | allow_any_instance_of(Faraday::Connection).to receive(:delete)
169 | .with("collections/test_collection/index/description")
170 | .and_return(response)
171 | end
172 |
173 | it "returns the schema" do
174 | response = collections.delete_index(
175 | collection_name: "test_collection",
176 | field_name: "description"
177 | )
178 | expect(response.dig("status")).to eq("ok")
179 | expect(response.dig("result")).to eq(true)
180 | end
181 | end
182 |
183 | describe "#cluster_info" do
184 | let(:response) { OpenStruct.new(body: JSON.parse(File.read("spec/fixtures/collection_cluster.json"))) }
185 |
186 | before do
187 | allow_any_instance_of(Faraday::Connection).to receive(:get)
188 | .with("collections/test_collection/cluster")
189 | .and_return(response)
190 | end
191 |
192 | it "returns the cluster info" do
193 | response = collections.cluster_info(
194 | collection_name: "test_collection"
195 | )
196 | expect(response.dig("result", "peer_id")).to eq(111)
197 | end
198 | end
199 |
200 | describe "#update_cluster" do
201 | let(:response) { OpenStruct.new(body: status_response_fixture) }
202 |
203 | before do
204 | allow_any_instance_of(Faraday::Connection).to receive(:post)
205 | .with("collections/test_collection/cluster")
206 | .and_return(response)
207 | end
208 |
209 | it "returns the schema" do
210 | response = collections.update_cluster(
211 | collection_name: "test_collection",
212 | move_shard: {
213 | shard_id: 0,
214 | to_peer_id: 222,
215 | from_peer_id: 111
216 | }
217 | )
218 | expect(response.dig("status")).to eq("ok")
219 | expect(response.dig("result")).to eq(true)
220 | end
221 | end
222 |
223 | describe "#list_snapshots" do
224 | let(:response) { OpenStruct.new(body: JSON.parse(File.read("spec/fixtures/snapshots.json"))) }
225 |
226 | before do
227 | allow_any_instance_of(Faraday::Connection).to receive(:get)
228 | .with("collections/test_collection/snapshots")
229 | .and_return(response)
230 | end
231 |
232 | it "returns the cluster info" do
233 | response = collections.list_snapshots(
234 | collection_name: "test_collection"
235 | )
236 | expect(response.dig("result").count).to eq(2)
237 | end
238 | end
239 |
240 | let(:snapshot_fixture) { JSON.parse(File.read("spec/fixtures/snapshot.json")) }
241 |
242 | describe "#create_snapshot" do
243 | let(:response) { OpenStruct.new(body: snapshot_fixture) }
244 |
245 | before do
246 | allow_any_instance_of(Faraday::Connection).to receive(:post)
247 | .with("collections/test_collection/snapshots")
248 | .and_return(response)
249 | end
250 |
251 | it "returns the schema" do
252 | response = collections.create_snapshot(
253 | collection_name: "test_collection"
254 | )
255 | expect(response.dig("status")).to eq("ok")
256 | expect(response.dig("result", "name")).to eq("test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot")
257 | end
258 | end
259 |
260 | describe "#delete_snapshot" do
261 | let(:response) { OpenStruct.new(body: status_response_fixture) }
262 |
263 | before do
264 | allow_any_instance_of(Faraday::Connection).to receive(:delete)
265 | .with("collections/test_collection/snapshots/test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot")
266 | .and_return(response)
267 | end
268 |
269 | it "returns the schema" do
270 | response = collections.delete_snapshot(
271 | collection_name: "test_collection",
272 | snapshot_name: "test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot"
273 | )
274 | expect(response.dig("status")).to eq("ok")
275 | expect(response.dig("result")).to eq(true)
276 | end
277 | end
278 |
279 | describe "#download_snapshot" do
280 | let(:response) { OpenStruct.new(body: status_response_fixture) }
281 |
282 | before do
283 | allow_any_instance_of(Faraday::Connection).to receive(:get)
284 | .with("collections/test_collection/snapshots/test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot")
285 | .and_return(response)
286 |
287 | allow(File).to receive(:open).with("/dir/snapshot.txt", "wb+").and_return(999)
288 | end
289 |
290 | it "returns the schema" do
291 | response = collections.download_snapshot(
292 | collection_name: "test_collection",
293 | snapshot_name: "test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot",
294 | filepath: "/dir/snapshot.txt"
295 | )
296 | expect(response).to eq(999)
297 | end
298 | end
299 |
300 | describe "#restore_snapshot" do
301 | let(:response) { OpenStruct.new(body: status_response_fixture) }
302 |
303 | before do
304 | allow_any_instance_of(Faraday::Connection).to receive(:post)
305 | .with("collections/test_collection/snapshots/recover")
306 | .and_return(response)
307 | end
308 |
309 | it "returns the schema" do
310 | response = collections.restore_snapshot(
311 | collection_name: "test_collection",
312 | filepath: "test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot"
313 | )
314 | expect(response.dig("status")).to eq("ok")
315 | expect(response.dig("result")).to eq(true)
316 | end
317 | end
318 | end
319 |
--------------------------------------------------------------------------------
/spec/qdrant/points_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "spec_helper"
4 |
5 | RSpec.describe Qdrant::Points do
6 | let(:client) {
7 | Qdrant::Client.new(
8 | url: "localhost:8080",
9 | api_key: "123"
10 | )
11 | }
12 | let(:points) { client.points }
13 | let(:point_fixture) { JSON.parse(File.read("spec/fixtures/point.json")) }
14 | let(:points_fixture) { JSON.parse(File.read("spec/fixtures/points.json")) }
15 | let(:status_response_fixture) { JSON.parse(File.read("spec/fixtures/status_response.json")) }
16 |
17 | describe "#upsert" do
18 | let(:response) {
19 | OpenStruct.new(body: status_response_fixture)
20 | }
21 |
22 | before do
23 | allow_any_instance_of(Faraday::Connection).to receive(:put)
24 | .with("collections/test_collection/points")
25 | .and_return(response)
26 | end
27 |
28 | it "return the data" do
29 | response = client.points.upsert(
30 | collection_name: "test_collection",
31 | points: [
32 | {id: 1, vector: [0.05, 0.61, 0.76, 0.74], payload: {city: "Berlin"}},
33 | {id: 2, vector: [0.19, 0.81, 0.75, 0.11], payload: {city: ["Berlin", "London"]}}
34 | ]
35 | )
36 | expect(response.dig("status")).to eq("ok")
37 | end
38 | end
39 |
40 | describe "#get_all" do
41 | let(:response) {
42 | OpenStruct.new(body: points_fixture)
43 | }
44 |
45 | before do
46 | allow_any_instance_of(Faraday::Connection).to receive(:post)
47 | .with("collections/test_collection/points")
48 | .and_return(response)
49 | end
50 |
51 | it "return the data" do
52 | response = client.points.get_all(
53 | collection_name: "test_collection",
54 | ids: [4, 1, 2, 5, 6]
55 | )
56 | expect(response.dig("result").count).to eq(5)
57 | end
58 | end
59 |
60 | describe "#delete" do
61 | let(:response) {
62 | OpenStruct.new(body: status_response_fixture)
63 | }
64 |
65 | before do
66 | allow_any_instance_of(Faraday::Connection).to receive(:post)
67 | .with("collections/test_collection/points/delete")
68 | .and_return(response)
69 | end
70 |
71 | it "return the data" do
72 | response = client.points.delete(
73 | collection_name: "test_collection",
74 | points: [3],
75 | filter: []
76 | )
77 | expect(response.dig("status")).to eq("ok")
78 | end
79 | end
80 |
81 | describe "#search" do
82 | let(:response) {
83 | OpenStruct.new(body: points_fixture)
84 | }
85 |
86 | before do
87 | allow_any_instance_of(Faraday::Connection).to receive(:post)
88 | .with("collections/test_collection/points/search")
89 | .and_return(response)
90 | end
91 |
92 | it "return the data" do
93 | response = client.points.search(
94 | collection_name: "test_collection",
95 | vector: [0.05, 0.61, 0.76, 0.74],
96 | limit: 10
97 | )
98 | expect(response.dig("result").count).to eq(5)
99 | end
100 | end
101 |
102 | let(:count_response_fixture) { JSON.parse(File.read("spec/fixtures/count_points.json")) }
103 |
104 | describe "#count" do
105 | let(:response) {
106 | OpenStruct.new(body: count_response_fixture)
107 | }
108 |
109 | before do
110 | allow_any_instance_of(Faraday::Connection).to receive(:post)
111 | .with("collections/test_collection/points/count")
112 | .and_return(response)
113 | end
114 |
115 | it "return the data" do
116 | response = client.points.count(
117 | collection_name: "test_collection"
118 | )
119 | expect(response.dig("result", "count")).to eq(5)
120 | end
121 | end
122 |
123 | describe "#batch_search" do
124 | let(:response) {
125 | OpenStruct.new(body: points_fixture)
126 | }
127 |
128 | before do
129 | allow_any_instance_of(Faraday::Connection).to receive(:post)
130 | .with("collections/test_collection/points/search/batch")
131 | .and_return(response)
132 | end
133 |
134 | it "return the data" do
135 | response = client.points.batch_search(
136 | collection_name: "test_collection",
137 | searches: [{
138 | vectors: [[0.05, 0.61, 0.76, 0.74], [0.19, 0.81, 0.75, 0.11]],
139 | limit: 10
140 | }]
141 | )
142 | expect(response.dig("result").count).to eq(5)
143 | end
144 | end
145 |
146 | describe "#recommend" do
147 | let(:response) {
148 | OpenStruct.new(body: points_fixture)
149 | }
150 |
151 | before do
152 | allow_any_instance_of(Faraday::Connection).to receive(:post)
153 | .with("collections/test_collection/points/recommend")
154 | .and_return(response)
155 | end
156 |
157 | it "return the data" do
158 | response = client.points.recommend(
159 | collection_name: "test_collection",
160 | positive: [1, 2],
161 | limit: 5
162 | )
163 | expect(response.dig("result").count).to eq(5)
164 | end
165 | end
166 |
167 | describe "#batch_recommend" do
168 | let(:response) {
169 | OpenStruct.new(body: points_fixture)
170 | }
171 |
172 | before do
173 | allow_any_instance_of(Faraday::Connection).to receive(:post)
174 | .with("collections/test_collection/points/recommend/batch")
175 | .and_return(response)
176 | end
177 |
178 | it "return the data" do
179 | response = client.points.batch_recommend(
180 | collection_name: "test_collection",
181 | searches: [{
182 | positive: [1, 2],
183 | limit: 5
184 | }]
185 | )
186 | expect(response.dig("result").count).to eq(5)
187 | end
188 | end
189 |
190 | describe "#scroll" do
191 | let(:response) {
192 | OpenStruct.new(body: points_fixture)
193 | }
194 |
195 | before do
196 | allow_any_instance_of(Faraday::Connection).to receive(:post)
197 | .with("collections/test_collection/points/scroll")
198 | .and_return(response)
199 | end
200 |
201 | it "return the data" do
202 | response = client.points.scroll(
203 | collection_name: "test_collection",
204 | limit: 5
205 | )
206 | expect(response.dig("result").count).to eq(5)
207 | end
208 | end
209 |
210 | describe "#list" do
211 | let(:response) {
212 | OpenStruct.new(body: points_fixture)
213 | }
214 |
215 | before do
216 | allow_any_instance_of(Faraday::Connection).to receive(:post)
217 | .with("collections/test_collection/points")
218 | .and_return(response)
219 | end
220 |
221 | it "return the data" do
222 | response = client.points.list(
223 | collection_name: "test_collection",
224 | ids: [4, 5, 1, 2, 6]
225 | )
226 | expect(response.dig("result").count).to eq(5)
227 | end
228 | end
229 |
230 | describe "#set_payload" do
231 | let(:response) {
232 | OpenStruct.new(body: status_response_fixture)
233 | }
234 |
235 | before do
236 | allow_any_instance_of(Faraday::Connection).to receive(:post)
237 | .with("collections/test_collection/points/payload")
238 | .and_return(response)
239 | end
240 |
241 | it "return the data" do
242 | response = client.points.set_payload(
243 | collection_name: "test_collection",
244 | payload: {
245 | city: "Berlin"
246 | },
247 | points: [1]
248 | )
249 | expect(response.dig("status")).to eq("ok")
250 | end
251 | end
252 |
253 | describe "#clear_payload" do
254 | let(:response) {
255 | OpenStruct.new(body: status_response_fixture)
256 | }
257 |
258 | before do
259 | allow_any_instance_of(Faraday::Connection).to receive(:post)
260 | .with("collections/test_collection/points/payload/clear")
261 | .and_return(response)
262 | end
263 |
264 | it "return the data" do
265 | response = client.points.clear_payload(
266 | collection_name: "test_collection",
267 | points: [1]
268 | )
269 | expect(response.dig("status")).to eq("ok")
270 | end
271 | end
272 |
273 | describe "#clear_payload_keys" do
274 | let(:response) {
275 | OpenStruct.new(body: status_response_fixture)
276 | }
277 |
278 | before do
279 | allow_any_instance_of(Faraday::Connection).to receive(:post)
280 | .with("collections/test_collection/points/payload/delete")
281 | .and_return(response)
282 | end
283 |
284 | it "return the data" do
285 | response = client.points.clear_payload_keys(
286 | collection_name: "test_collection",
287 | keys: ["city"],
288 | points: [1]
289 | )
290 | expect(response.dig("status")).to eq("ok")
291 | end
292 | end
293 |
294 | describe "#query" do
295 | let(:response) {
296 | OpenStruct.new(body: points_fixture)
297 | }
298 |
299 | before do
300 | allow_any_instance_of(Faraday::Connection).to receive(:post)
301 | .with("collections/test_collection/points/query")
302 | .and_return(response)
303 | end
304 |
305 | it "returns the data" do
306 | response = client.points.query(
307 | collection_name: "test_collection",
308 | query: [0.05, 0.61, 0.76, 0.74],
309 | limit: 10
310 | )
311 | expect(response.dig("result").count).to eq(5)
312 | end
313 | end
314 | end
315 |
--------------------------------------------------------------------------------
/spec/qdrant/service_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "spec_helper"
4 |
5 | RSpec.describe Qdrant::Service do
6 | let(:client) {
7 | Qdrant::Client.new(
8 | url: "localhost:8080",
9 | api_key: "123"
10 | )
11 | }
12 |
13 | describe "#telemetry" do
14 | let(:response) {
15 | OpenStruct.new(body: {
16 | result: {
17 | id: "11111",
18 | app: {
19 | name: "qdrant",
20 | version: "1.1.0"
21 | }
22 | }
23 | })
24 | }
25 |
26 | before do
27 | allow_any_instance_of(Faraday::Connection).to receive(:get)
28 | .with("telemetry")
29 | .and_return(response)
30 | end
31 |
32 | it "return the data" do
33 | expect(client.telemetry.dig(:result, :id)).to eq("11111")
34 | end
35 | end
36 |
37 | describe "#metrics" do
38 | let(:response) { OpenStruct.new(body: "metrics") }
39 |
40 | before do
41 | allow_any_instance_of(Faraday::Connection).to receive(:get)
42 | .with("metrics")
43 | .and_return(response)
44 | end
45 |
46 | it "returns the data" do
47 | expect(client.metrics).to eq("metrics")
48 | end
49 | end
50 |
51 | let(:locks_fixture) { JSON.parse(File.read("spec/fixtures/locks.json")) }
52 |
53 | describe "#set_lock" do
54 | let(:response) { OpenStruct.new(body: locks_fixture) }
55 |
56 | before do
57 | allow_any_instance_of(Faraday::Connection).to receive(:post)
58 | .with("locks")
59 | .and_return(response)
60 | end
61 |
62 | it "returns the data" do
63 | response = client.set_lock(
64 | write: true,
65 | error_message: "my error msg"
66 | )
67 | expect(response.dig("result", "error_message")).to eq("my error msg")
68 | expect(response.dig("result", "write")).to eq(true)
69 | end
70 | end
71 |
72 | describe "#locks" do
73 | let(:response) { OpenStruct.new(body: locks_fixture) }
74 |
75 | before do
76 | allow_any_instance_of(Faraday::Connection).to receive(:get)
77 | .with("locks")
78 | .and_return(response)
79 | end
80 |
81 | it "returns the data" do
82 | response = client.locks
83 | expect(response.dig("result", "error_message")).to eq("my error msg")
84 | expect(response.dig("result", "write")).to eq(true)
85 | end
86 | end
87 | end
88 |
--------------------------------------------------------------------------------
/spec/qdrant/snapshots_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "spec_helper"
4 |
5 | RSpec.describe Qdrant::Snapshots do
6 | let(:client) {
7 | Qdrant::Client.new(
8 | url: "localhost:8080",
9 | api_key: "123"
10 | )
11 | }
12 |
13 | let(:snapshots) { client.snapshots }
14 | let(:snapshot_fixture) { JSON.parse(File.read("spec/fixtures/snapshot.json")) }
15 | let(:snapshots_fixture) { JSON.parse(File.read("spec/fixtures/snapshots.json")) }
16 | let(:status_response_fixture) { JSON.parse(File.read("spec/fixtures/status_response.json")) }
17 |
18 | describe "#create" do
19 | let(:response) { OpenStruct.new(body: snapshot_fixture) }
20 |
21 | before do
22 | allow_any_instance_of(Faraday::Connection).to receive(:post)
23 | .with(Qdrant::Snapshots::PATH)
24 | .and_return(response)
25 | end
26 |
27 | it "creates the backup" do
28 | response = snapshots.create
29 | expect(response.dig("result", "name")).to eq("test_collection-6106351684939824381-2023-04-06-20-43-03.snapshot")
30 | expect(response["status"]).to eq("ok")
31 | end
32 | end
33 |
34 | describe "#list" do
35 | let(:response) { OpenStruct.new(body: snapshots_fixture) }
36 |
37 | before do
38 | allow_any_instance_of(Faraday::Connection).to receive(:get)
39 | .with(Qdrant::Snapshots::PATH)
40 | .and_return(response)
41 | end
42 |
43 | it "restores the backup" do
44 | response = snapshots.list
45 | expect(response["result"].count).to eq(2)
46 | expect(response["status"]).to eq("ok")
47 | end
48 | end
49 |
50 | describe "#delete" do
51 | let(:response) { OpenStruct.new(body: status_response_fixture) }
52 |
53 | before do
54 | allow_any_instance_of(Faraday::Connection).to receive(:delete)
55 | .with("snapshots/my-snapshot")
56 | .and_return(response)
57 | end
58 |
59 | it "returns the restore status" do
60 | response = snapshots.delete(
61 | snapshot_name: "my-snapshot"
62 | )
63 | expect(response["result"]).to eq(true)
64 | expect(response["status"]).to eq("ok")
65 | end
66 | end
67 |
68 | describe "#download" do
69 | before do
70 | allow_any_instance_of(Faraday::Connection).to receive(:get)
71 | .with("snapshots/my-snapshot")
72 | .and_return("01010101001")
73 |
74 | allow(File).to receive(:open).with("/dir/snapshot.txt", "wb+").and_return(999)
75 | end
76 |
77 | it "returns the restore status" do
78 | response = snapshots.download(
79 | snapshot_name: "my-snapshot",
80 | filepath: "/dir/snapshot.txt"
81 | )
82 | expect(response).to eq(999) # Random number of bytes written
83 | end
84 | end
85 | end
86 |
--------------------------------------------------------------------------------
/spec/qdrant_spec.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | RSpec.describe Qdrant do
4 | it "has a version number" do
5 | expect(Qdrant::VERSION).not_to be nil
6 | end
7 | end
8 |
--------------------------------------------------------------------------------
/spec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "json"
4 | require "ostruct"
5 | require "qdrant"
6 | require "faraday"
7 |
8 | RSpec.configure do |config|
9 | # Enable flags like --only-failures and --next-failure
10 | config.example_status_persistence_file_path = ".rspec_status"
11 |
12 | # Disable RSpec exposing methods globally on `Module` and `main`
13 | config.disable_monkey_patching!
14 |
15 | config.expect_with :rspec do |c|
16 | c.syntax = :expect
17 | end
18 | end
19 |
--------------------------------------------------------------------------------