├── .gitignore ├── .rustfmt.toml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── docker-compose.yml ├── docs └── logo-sofa.png └── src ├── client.rs ├── database.rs ├── document.rs ├── error.rs ├── lib.rs ├── model.rs └── types ├── document.rs ├── find.rs ├── index.rs ├── mod.rs └── system.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/rust 2 | 3 | ### Rust ### 4 | # Generated by Cargo 5 | # will have compiled files and executables 6 | /target/ 7 | 8 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 9 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 10 | Cargo.lock 11 | 12 | # These are backup files generated by rustfmt 13 | **/*.rs.bk 14 | 15 | 16 | # End of https://www.gitignore.io/api/rust -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | unstable_features = true 2 | use_field_init_shorthand = true 3 | indent_style = "Block" 4 | match_block_trailing_comma = true 5 | reorder_impl_items = true 6 | combine_control_expr = true 7 | comment_width = 120 8 | wrap_comments = true 9 | condense_wildcard_suffixes = true 10 | imports_layout = "HorizontalVertical" 11 | merge_imports = true 12 | max_width = 120 13 | newline_style = "Unix" 14 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 6 | and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
7 | 8 | ## [Unreleased] 9 | 10 | ## [0.6.0] - 2018-08-23 11 | 12 | ### Added 13 | 14 | - Added `failure` dependency 15 | - Added `Client::make_db` 16 | - Added `docker-compose.yml` 17 | - Added `.rustfmt.toml` 18 | 19 | ### Changed 20 | 21 | - Optimized memory consumption by moving `iter()` calls to `into_iter()` where needed 22 | - Changed `SofaError` to derive `failure` 23 | - Changed `Client::check_status` signature to remove potentially panicking `unwrap()` calls 24 | - Changed `Client::create_path` signature to remove potentially panicking `unwrap()` calls 25 | - Changed `Client::db` signature to remove potentially panicking `unwrap()` calls 26 | - Changed `Client::delete` signature to remove potentially panicking `unwrap()` calls 27 | - Changed `Client::destroy_db` signature to remove potentially panicking `unwrap()` calls 28 | - Changed `Client::get` signature to remove potentially panicking `unwrap()` calls 29 | - Changed `Client::gzip` signature to remove potentially panicking `unwrap()` calls 30 | - Changed `Client::head` signature to remove potentially panicking `unwrap()` calls 31 | - Changed `Client::list_dbs` signature to remove potentially panicking `unwrap()` calls 32 | - Changed `Client::new` signature to remove potentially panicking `unwrap()` calls 33 | - Changed `Client::post` signature to remove potentially panicking `unwrap()` calls 34 | - Changed `Client::put` signature to remove potentially panicking `unwrap()` calls 35 | - Changed `Client::req` signature to remove potentially panicking `unwrap()` calls 36 | - Changed `Client::timeout` signature to remove potentially panicking `unwrap()` calls 37 | - Changed `Database::create` signature to remove potentially panicking `unwrap()` calls 38 | - Changed `Database::ensure_index` signature to remove potentially panicking `unwrap()` calls 39 | - Changed `Database::find` signature to remove potentially panicking `unwrap()` calls 40 | - Changed `Database::get` signature to remove potentially
panicking `unwrap()` calls 41 | - Changed `Database::insert_index` signature to remove potentially panicking `unwrap()` calls 42 | - Changed `Database::read_indexes` signature to remove potentially panicking `unwrap()` calls 43 | - Changed `Database::save` signature to remove potentially panicking `unwrap()` calls 44 | 45 | ### Removed 46 | 47 | - Removed env files that were necessary for single-threaded test run. Added section in README to reflect that. 48 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sofa" 3 | version = "0.6.0" 4 | authors = ["Mathieu Amiot "] 5 | license = "MIT/Apache-2.0" 6 | description = "Sofa - CouchDB for Rust" 7 | readme = "README.md" 8 | documentation = "https://docs.rs/sofa" 9 | homepage = "https://github.com/YellowInnovation/sofa" 10 | repository = "https://github.com/YellowInnovation/sofa" 11 | keywords = ["couchdb", "orm", "database", "nosql"] 12 | categories = ["database"] 13 | include = [ 14 | "**/*.rs", 15 | "Cargo.toml" 16 | ] 17 | 18 | [dependencies] 19 | failure = "0.1" 20 | serde = "1.0" 21 | serde_derive = "1.0" 22 | serde_json = "1.0" 23 | reqwest = "0.8" 24 | 25 | [dev-dependencies] 26 | pretty_assertions = "0.5" 27 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | 2 | Version 2.0, January 2004 3 | https://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2015-2019 Yellow Innovation 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 
182 | You may obtain a copy of the License at 183 | 184 | https://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015-2019 Yellow Innovation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Sofa - CouchDB for Rust 2 | 3 | [![Crates.io](https://img.shields.io/crates/v/sofa.svg)](https://crates.io/crates/sofa) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa?ref=badge_shield) 4 | 5 | [![docs.rs](https://docs.rs/sofa/badge.svg)](https://docs.rs/sofa) 6 | 7 | ![sofa-logo](https://raw.githubusercontent.com/YellowInnovation/sofa/master/docs/logo-sofa.png "Logo Sofa") 8 | 9 | ## Documentation 10 | 11 | Here: [http://docs.rs/sofa](http://docs.rs/sofa) 12 | 13 | ## Installation 14 | 15 | ```toml 16 | [dependencies] 17 | sofa = "0.6" 18 | ``` 19 | 20 | ## Description 21 | 22 | This crate is an interface to CouchDB HTTP REST API. Works with stable Rust. 23 | 24 | Does not support `#![no_std]` 25 | 26 | After trying most crates for CouchDB in Rust (`chill`, `couchdb` in particular), none of them fit our needs hence the need to create our own. 27 | 28 | No async I/O (yet), uses a mix of Reqwest and Serde under the hood, with a few nice abstractions out there. 29 | 30 | **NOT 1.0 YET, so expect changes** 31 | 32 | **Supports CouchDB 2.0 and up.** 33 | 34 | Be sure to check [CouchDB's Documentation](http://docs.couchdb.org/en/latest/index.html) in detail to see what's possible. 35 | 36 | ## Running tests 37 | 38 | Make sure that you have an instance of CouchDB 2.0+ running, either via the supplied `docker-compose.yml` file or by yourself. It must be listening on the default port. 39 | 40 | And then 41 | `cargo test -- --test-threads=1` 42 | 43 | Single-threading the tests is very important because we need to make sure that the basic features are working before actually testing features on dbs/documents. 
44 | 45 | ## Why the name "Sofa" 46 | 47 | CouchDB has a nice name, and I wanted to reflect that. 48 | 49 | ## License 50 | 51 | Licensed under either of these: 52 | 53 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 54 | [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) 55 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 56 | [https://opensource.org/licenses/MIT](https://opensource.org/licenses/MIT)) 57 | 58 | 59 | [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2FYellowInnovation%2Fsofa?ref=badge_large) 60 | 61 | ## Yellow Innovation 62 | 63 | Yellow Innovation is the innovation laboratory of the French postal service: La Poste. 64 | 65 | We create innovative user experiences and journeys through services with a focus on IoT lately. 66 | 67 | [Yellow Innovation's website and works](http://yellowinnovation.fr/en/) -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | 5 | couchdb: 6 | image: couchdb:2.2 7 | restart: always 8 | ports: 9 | - '5984:5984' 10 | -------------------------------------------------------------------------------- /docs/logo-sofa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/66Origin/sofa/93d91e9c45972aaa3e97eece83f8af6ef1208c9f/docs/logo-sofa.png -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::time::Duration; 3 | use failure::Error; 4 | use serde_json::from_reader; 5 | 6 | use reqwest::{self, Url, Method, RequestBuilder, StatusCode}; 7 | 8 | use ::database::*; 9 
| use ::types::*; 10 | use ::error::SofaError; 11 | 12 | /// Client handles the URI manipulation logic and the HTTP calls to the CouchDB REST API. 13 | /// It is also responsible for the creation/access/destruction of databases. 14 | #[derive(Debug, Clone)] 15 | pub struct Client { 16 | _client: reqwest::Client, 17 | dbs: Vec<&'static str>, 18 | _gzip: bool, 19 | _timeout: u8, 20 | pub uri: String, 21 | pub db_prefix: String 22 | } 23 | 24 | impl Client { 25 | pub fn new(uri: String) -> Result { 26 | let client = reqwest::Client::builder() 27 | .gzip(true) 28 | .timeout(Duration::new(4, 0)) 29 | .build()?; 30 | 31 | Ok(Client { 32 | _client: client, 33 | uri: uri, 34 | _gzip: true, 35 | _timeout: 4, 36 | dbs: Vec::new(), 37 | db_prefix: String::new() 38 | }) 39 | } 40 | 41 | fn create_client(&self) -> Result { 42 | let client = reqwest::Client::builder() 43 | .gzip(self._gzip) 44 | .timeout(Duration::new(self._timeout as u64, 0)) 45 | .build()?; 46 | 47 | Ok(client) 48 | } 49 | 50 | pub fn get_self(&mut self) -> &mut Self { 51 | self 52 | } 53 | 54 | pub fn set_uri(&mut self, uri: String) -> &Self { 55 | self.uri = uri; 56 | self 57 | } 58 | 59 | pub fn set_prefix(&mut self, prefix: String) -> &Self { 60 | self.db_prefix = prefix; 61 | self 62 | } 63 | 64 | pub fn gzip(&mut self, enabled: bool) -> Result<&Self, Error> { 65 | self._gzip = enabled; 66 | self._client = self.create_client()?; 67 | 68 | Ok(self) 69 | } 70 | 71 | pub fn timeout(&mut self, to: u8) -> Result<&Self, Error> { 72 | self._timeout = to; 73 | self._client = self.create_client()?; 74 | 75 | Ok(self) 76 | } 77 | 78 | pub fn list_dbs(&self) -> Result, Error> { 79 | let mut response = self.get(String::from("/_all_dbs"), None)?.send()?; 80 | let data = response.json::>()?; 81 | 82 | Ok(data) 83 | } 84 | 85 | fn build_dbname(&self, dbname: &'static str) -> String { 86 | self.db_prefix.clone() + dbname 87 | } 88 | 89 | pub fn db(&self, dbname: &'static str) -> Result { 90 | let name = 
self.build_dbname(dbname); 91 | 92 | let db = Database::new(name.clone(), self.clone()); 93 | 94 | let path = self.create_path(name, None)?; 95 | 96 | let head_response = self._client.head(&path) 97 | .header(reqwest::header::ContentType::json()) 98 | .send()?; 99 | 100 | match head_response.status() { 101 | StatusCode::Ok => Ok(db), 102 | _ => self.make_db(dbname), 103 | } 104 | } 105 | 106 | pub fn make_db(&self, dbname: &'static str) -> Result { 107 | let name = self.build_dbname(dbname); 108 | 109 | let db = Database::new(name.clone(), self.clone()); 110 | 111 | let path = self.create_path(name, None)?; 112 | 113 | let put_response = self._client.put(&path) 114 | .header(reqwest::header::ContentType::json()) 115 | .send()?; 116 | 117 | let s: CouchResponse = from_reader(put_response)?; 118 | 119 | match s.ok { 120 | Some(true) => Ok(db), 121 | Some(false) | _ => { 122 | let err = s.error.unwrap_or(s!("unspecified error")); 123 | Err(SofaError(err).into()) 124 | }, 125 | } 126 | } 127 | 128 | pub fn destroy_db(&self, dbname: &'static str) -> Result { 129 | let path = self.create_path(self.build_dbname(dbname), None)?; 130 | let response = self._client.delete(&path) 131 | .header(reqwest::header::ContentType::json()) 132 | .send()?; 133 | 134 | let s: CouchResponse = from_reader(response)?; 135 | 136 | Ok(s.ok.unwrap_or(false)) 137 | } 138 | 139 | pub fn check_status(&self) -> Result { 140 | let response = self._client.get(&self.uri) 141 | .header(reqwest::header::ContentType::json()) 142 | .send()?; 143 | 144 | let status = from_reader(response)?; 145 | 146 | Ok(status) 147 | } 148 | 149 | fn create_path(&self, 150 | path: String, 151 | args: Option> 152 | ) -> Result { 153 | let mut uri = Url::parse(&self.uri)?.join(&path)?; 154 | 155 | if let Some(ref map) = args { 156 | let mut qp = uri.query_pairs_mut(); 157 | for (k, v) in map { 158 | qp.append_pair(k, v); 159 | } 160 | } 161 | 162 | Ok(uri.into_string()) 163 | } 164 | 165 | pub fn req(&self, 166 | method: 
Method, 167 | path: String, 168 | opts: Option> 169 | ) -> Result { 170 | let uri = self.create_path(path, opts)?; 171 | let mut req = self._client.request(method, &uri); 172 | req.header(reqwest::header::Referer::new(uri.clone())); 173 | req.header(reqwest::header::ContentType::json()); 174 | 175 | Ok(req) 176 | } 177 | 178 | pub fn get(&self, path: String, args: Option>) -> Result { 179 | Ok(self.req(Method::Get, path, args)?) 180 | } 181 | 182 | pub fn post(&self, path: String, body: String) -> Result { 183 | let mut req = self.req(Method::Post, path, None)?; 184 | req.body(body); 185 | Ok(req) 186 | } 187 | 188 | pub fn put(&self, path: String, body: String) -> Result { 189 | let mut req = self.req(Method::Put, path, None)?; 190 | req.body(body); 191 | Ok(req) 192 | } 193 | 194 | pub fn head(&self, path: String, args: Option>) -> Result { 195 | Ok(self.req(Method::Head, path, args)?) 196 | } 197 | 198 | pub fn delete(&self, path: String, args: Option>) -> Result { 199 | Ok(self.req(Method::Delete, path, args)?) 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /src/database.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use reqwest::StatusCode; 4 | 5 | use failure::Error; 6 | use serde_json; 7 | use serde_json::{from_reader, to_string, Value}; 8 | 9 | use client::*; 10 | use document::*; 11 | use error::SofaError; 12 | use types::*; 13 | 14 | /// Database holds the logic of making operations on a CouchDB Database 15 | /// (sometimes called Collection in other NoSQL flavors such as MongoDB). 
16 | #[derive(Debug, Clone)] 17 | pub struct Database { 18 | _client: Client, 19 | name: String, 20 | } 21 | 22 | impl Database { 23 | pub fn new(name: String, client: Client) -> Database { 24 | Database { 25 | _client: client, 26 | name: name, 27 | } 28 | } 29 | 30 | fn create_document_path(&self, id: DocumentId) -> String { 31 | let mut result: String = self.name.clone(); 32 | result.push_str("/"); 33 | result.push_str(&id); 34 | result 35 | } 36 | 37 | #[allow(dead_code)] 38 | fn create_design_path(&self, id: DocumentId) -> String { 39 | let mut result: String = self.name.clone(); 40 | result.push_str("/_design/"); 41 | result.push_str(&id); 42 | result 43 | } 44 | 45 | fn create_compact_path(&self, design_name: &'static str) -> String { 46 | let mut result: String = self.name.clone(); 47 | result.push_str("/_compact/"); 48 | result.push_str(design_name); 49 | result 50 | } 51 | 52 | /// Launches the compact process 53 | pub fn compact(&self) -> bool { 54 | let mut path: String = self.name.clone(); 55 | path.push_str("/_compact"); 56 | 57 | let request = self._client.post(path, "".into()); 58 | 59 | request 60 | .and_then(|mut req| { 61 | Ok(req.send() 62 | .and_then(|res| Ok(res.status() == StatusCode::Accepted)) 63 | .unwrap_or(false)) 64 | }) 65 | .unwrap_or(false) 66 | } 67 | 68 | /// Starts the compaction of all views 69 | pub fn compact_views(&self) -> bool { 70 | let mut path: String = self.name.clone(); 71 | path.push_str("/_view_cleanup"); 72 | 73 | let request = self._client.post(path, "".into()); 74 | 75 | request 76 | .and_then(|mut req| { 77 | Ok(req.send() 78 | .and_then(|res| Ok(res.status() == StatusCode::Accepted)) 79 | .unwrap_or(false)) 80 | }) 81 | .unwrap_or(false) 82 | } 83 | 84 | /// Starts the compaction of a given index 85 | pub fn compact_index(&self, index: &'static str) -> bool { 86 | let request = self._client.post(self.create_compact_path(index), "".into()); 87 | 88 | request 89 | .and_then(|mut req| { 90 | Ok(req.send() 91 | 
.and_then(|res| Ok(res.status() == StatusCode::Accepted)) 92 | .unwrap_or(false)) 93 | }) 94 | .unwrap_or(false) 95 | } 96 | 97 | /// Checks if a document ID exists 98 | pub fn exists(&self, id: DocumentId) -> bool { 99 | let request = self._client.head(self.create_document_path(id), None); 100 | 101 | request 102 | .and_then(|mut req| { 103 | Ok(req.send() 104 | .and_then(|res| { 105 | Ok(match res.status() { 106 | StatusCode::Ok | StatusCode::NotModified => true, 107 | _ => false, 108 | }) 109 | }) 110 | .unwrap_or(false)) 111 | }) 112 | .unwrap_or(false) 113 | } 114 | 115 | /// Gets one document 116 | pub fn get(&self, id: DocumentId) -> Result { 117 | let response = self._client.get(self.create_document_path(id), None)?.send()?; 118 | 119 | Ok(Document::new(from_reader(response)?)) 120 | } 121 | 122 | /// Gets documents in bulk with provided IDs list 123 | pub fn get_bulk(&self, ids: Vec) -> Result { 124 | self.get_bulk_params(ids, None) 125 | } 126 | 127 | /// Gets documents in bulk with provided IDs list, with added params. Parameters description can be found here: http://docs.couchdb.org/en/latest/api/ddoc/views.html#api-ddoc-view 128 | pub fn get_bulk_params( 129 | &self, 130 | ids: Vec, 131 | params: Option>, 132 | ) -> Result { 133 | let mut options; 134 | if let Some(opts) = params { 135 | options = opts; 136 | } else { 137 | options = HashMap::new(); 138 | } 139 | 140 | options.insert(s!("include_docs"), s!("true")); 141 | 142 | let mut body = HashMap::new(); 143 | body.insert(s!("keys"), ids); 144 | 145 | let response = self._client 146 | .get(self.create_document_path("_all_docs".into()), Some(options))? 147 | .body(to_string(&body)?)
148 | .send()?; 149 | 150 | Ok(DocumentCollection::new(from_reader(response)?)) 151 | } 152 | 153 | /// Gets all the documents in database 154 | pub fn get_all(&self) -> Result { 155 | self.get_all_params(None) 156 | } 157 | 158 | /// Gets all the documents in database, with applied parameters. Parameters description can be found here: http://docs.couchdb.org/en/latest/api/ddoc/views.html#api-ddoc-view 159 | pub fn get_all_params(&self, params: Option>) -> Result { 160 | let mut options; 161 | if let Some(opts) = params { 162 | options = opts; 163 | } else { 164 | options = HashMap::new(); 165 | } 166 | 167 | options.insert(s!("include_docs"), s!("true")); 168 | 169 | let response = self._client 170 | .get(self.create_document_path("_all_docs".into()), Some(options))? 171 | .send()?; 172 | 173 | Ok(DocumentCollection::new(from_reader(response)?)) 174 | } 175 | 176 | /// Finds a document in the database through a Mango query. Parameters here http://docs.couchdb.org/en/latest/api/database/find.html 177 | pub fn find(&self, params: Value) -> Result { 178 | let path = self.create_document_path("_find".into()); 179 | let response = self._client.post(path, js!(¶ms))?.send()?; 180 | 181 | let data: FindResult = from_reader(response)?; 182 | if let Some(doc_val) = data.docs { 183 | let documents: Vec = doc_val 184 | .into_iter() 185 | .filter(|d| { 186 | // Remove _design documents 187 | let id: String = json_extr!(d["_id"]); 188 | !id.starts_with('_') 189 | }) 190 | .map(|v| Document::new(v.clone())) 191 | .collect(); 192 | 193 | Ok(DocumentCollection::new_from_documents(documents)) 194 | } else if let Some(err) = data.error { 195 | Err(SofaError(err).into()) 196 | } else { 197 | Ok(DocumentCollection::default()) 198 | } 199 | } 200 | 201 | /// Updates a document 202 | pub fn save(&self, doc: Document) -> Result { 203 | let id = doc._id.to_owned(); 204 | let raw = doc.get_data(); 205 | 206 | let response = self._client 207 | .put(self.create_document_path(id), 
to_string(&raw)?)? 208 | .send()?; 209 | 210 | let data: DocumentCreatedResult = from_reader(response)?; 211 | 212 | match data.ok { 213 | Some(true) => { 214 | let mut val = doc.get_data(); 215 | val["_rev"] = json!(data.rev); 216 | 217 | Ok(Document::new(val)) 218 | } 219 | Some(false) | _ => { 220 | let err = data.error.unwrap_or(s!("unspecified error")); 221 | return Err(SofaError(err).into()); 222 | } 223 | } 224 | } 225 | 226 | /// Creates a document from a raw JSON document Value. 227 | pub fn create(&self, raw_doc: Value) -> Result { 228 | let response = self._client.post(self.name.clone(), to_string(&raw_doc)?)?.send()?; 229 | 230 | let data: DocumentCreatedResult = from_reader(response)?; 231 | 232 | match data.ok { 233 | Some(true) => { 234 | let data_id = match data.id { 235 | Some(id) => id, 236 | _ => return Err(SofaError(s!("invalid id")).into()), 237 | }; 238 | 239 | let data_rev = match data.rev { 240 | Some(rev) => rev, 241 | _ => return Err(SofaError(s!("invalid rev")).into()), 242 | }; 243 | 244 | let mut val = raw_doc.clone(); 245 | val["_id"] = json!(data_id); 246 | val["_rev"] = json!(data_rev); 247 | 248 | Ok(Document::new(val)) 249 | } 250 | Some(false) | _ => { 251 | let err = data.error.unwrap_or(s!("unspecified error")); 252 | return Err(SofaError(err).into()); 253 | } 254 | } 255 | } 256 | 257 | /// Removes a document from the database. 
Returns success in a `bool` 258 | pub fn remove(&self, doc: Document) -> bool { 259 | let request = self._client.delete( 260 | self.create_document_path(doc._id.clone()), 261 | Some({ 262 | let mut h = HashMap::new(); 263 | h.insert(s!("rev"), doc._rev.clone()); 264 | h 265 | }), 266 | ); 267 | 268 | request 269 | .and_then(|mut req| { 270 | Ok(req.send() 271 | .and_then(|res| { 272 | Ok(match res.status() { 273 | StatusCode::Ok | StatusCode::Accepted => true, 274 | _ => false, 275 | }) 276 | }) 277 | .unwrap_or(false)) 278 | }) 279 | .unwrap_or(false) 280 | } 281 | 282 | /// Inserts an index in a naive way, if it already exists, will throw an 283 | /// `Err` 284 | pub fn insert_index(&self, name: String, spec: IndexFields) -> Result { 285 | let response = self._client 286 | .post( 287 | self.create_document_path("_index".into()), 288 | js!(json!({ 289 | "name": name, 290 | "index": spec 291 | })), 292 | )? 293 | .send()?; 294 | 295 | let data: IndexCreated = from_reader(response)?; 296 | 297 | if data.error.is_some() { 298 | let err = data.error.unwrap_or(s!("unspecified error")); 299 | Err(SofaError(err).into()) 300 | } else { 301 | Ok(data) 302 | } 303 | } 304 | 305 | /// Reads the database's indexes and returns them 306 | pub fn read_indexes(&self) -> Result { 307 | let response = self._client 308 | .get(self.create_document_path("_index".into()), None)? 309 | .send()?; 310 | 311 | Ok(from_reader(response)?) 312 | } 313 | 314 | /// Method to ensure an index is created on the database with the following 315 | /// spec. Returns `true` when we created a new one, or `false` when the 316 | /// index was already existing. 317 | pub fn ensure_index(&self, name: String, spec: IndexFields) -> Result { 318 | let db_indexes = self.read_indexes()?; 319 | 320 | // We look for our index 321 | for i in db_indexes.indexes.into_iter() { 322 | if i.name == name { 323 | // Found? 
Ok let's return 324 | return Ok(false); 325 | } 326 | } 327 | 328 | // Let's create it then 329 | let _ = self.insert_index(name, spec)?; 330 | 331 | // Created and alright 332 | Ok(true) 333 | } 334 | } 335 | -------------------------------------------------------------------------------- /src/document.rs: -------------------------------------------------------------------------------- 1 | use database::*; 2 | use serde_json; 3 | use serde_json::Value; 4 | use std::ops::{Index, IndexMut}; 5 | use types::*; 6 | 7 | /// Document abstracts the handling of JSON values and provides direct access 8 | /// and casting to the fields of your documents You can get access to the 9 | /// fields via the implementation of the `Index` and `IndexMut` traits 10 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 11 | pub struct Document { 12 | /// Document ID, provided by CouchDB 13 | #[serde(skip_serializing)] 14 | pub _id: DocumentId, 15 | 16 | /// Document Revision, provided by CouchDB, helps negotiating conflicts 17 | #[serde(skip_serializing)] 18 | pub _rev: String, 19 | 20 | doc: Value, 21 | } 22 | 23 | impl Document { 24 | pub fn new(doc: Value) -> Document { 25 | Document { 26 | _id: json_extr!(doc["_id"]), 27 | _rev: json_extr!(doc["_rev"]), 28 | doc: doc, 29 | } 30 | } 31 | 32 | /// Returns all document's keys 33 | pub fn get_keys(&self) -> Vec { 34 | let mut ret: Vec = Vec::new(); 35 | 36 | if let Some(obj) = self.doc.as_object() { 37 | for (k, _) in obj.into_iter() { 38 | ret.push(k.clone()); 39 | } 40 | } 41 | 42 | ret 43 | } 44 | 45 | /// Returns raw JSON data from document 46 | pub fn get_data(&self) -> Value { 47 | self.doc.clone() 48 | } 49 | 50 | /// Merges this document with a raw JSON value, useful to update data with 51 | /// a payload 52 | pub fn merge(&mut self, doc: Value) -> &Self { 53 | if let Some(obj) = doc.as_object() { 54 | for (k, v) in obj.into_iter() { 55 | match k.as_str() { 56 | "_id" | "_rev" => { 57 | continue; 58 | } 59 | _ => { 60 | 
self[k] = v.clone(); 61 | } 62 | } 63 | } 64 | } 65 | 66 | self 67 | } 68 | 69 | /// Recursively populates field (must be an array of IDs from another 70 | /// database) with provided database documents 71 | pub fn populate(&mut self, field: &String, db: Database) -> &Self { 72 | let ref val = self[field].clone(); 73 | if *val == Value::Null { 74 | return self; 75 | } 76 | 77 | let ids = val.as_array() 78 | .unwrap_or(&Vec::new()) 79 | .into_iter() 80 | .map(|v| s!(v.as_str().unwrap_or(""))) 81 | .collect(); 82 | 83 | let data = db.get_bulk(ids).and_then(|docs| Ok(docs.get_data())); 84 | 85 | match data { 86 | Ok(data) => { 87 | self[field] = data.into_iter() 88 | .filter_map(|d: Value| { 89 | let did = match d["_id"].as_str() { 90 | Some(did) => did, 91 | None => return None, 92 | }; 93 | 94 | if val[did] != Value::Null { 95 | Some(d.clone()) 96 | } else { 97 | None 98 | } 99 | }) 100 | .collect(); 101 | } 102 | Err(_) => { 103 | return self; 104 | } 105 | } 106 | 107 | self 108 | } 109 | } 110 | 111 | impl Index for Document 112 | where 113 | I: serde_json::value::Index, 114 | { 115 | type Output = Value; 116 | 117 | fn index(&self, index: I) -> &Value { 118 | &self.doc[index] 119 | } 120 | } 121 | 122 | impl IndexMut for Document 123 | where 124 | I: serde_json::value::Index, 125 | { 126 | fn index_mut(&mut self, index: I) -> &mut Value { 127 | &mut self.doc[index] 128 | } 129 | } 130 | 131 | /// Used inside a `DocumentCollection`, to wrap the document itself and 132 | /// facilitate lookups by Document ID. 
133 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 134 | pub struct DocumentCollectionItem { 135 | pub id: DocumentId, 136 | pub doc: Document, 137 | } 138 | 139 | impl DocumentCollectionItem { 140 | pub fn new(doc: Document) -> DocumentCollectionItem { 141 | let id = doc._id.clone(); 142 | DocumentCollectionItem { doc: doc, id: id } 143 | } 144 | } 145 | 146 | /// Memory-optimized, iterable document collection, mostly returned in calls 147 | /// that involve multiple documents results Can target a specific index through 148 | /// implementation of `Index` and `IndexMut` 149 | #[derive(Default, Serialize, Deserialize, PartialEq, Debug, Clone)] 150 | pub struct DocumentCollection { 151 | pub offset: u32, 152 | pub rows: Vec, 153 | pub total_rows: u32, 154 | } 155 | 156 | impl DocumentCollection { 157 | pub fn new(doc: Value) -> DocumentCollection { 158 | let rows: Vec = json_extr!(doc["rows"]); 159 | let items: Vec = rows.into_iter() 160 | .filter(|d| { 161 | // Remove _design documents 162 | let id: String = json_extr!(d["doc"]["_id"]); 163 | !id.starts_with('_') 164 | }) 165 | .map(|d| { 166 | let document: Value = json_extr!(d["doc"]); 167 | DocumentCollectionItem::new(Document::new(document)) 168 | }) 169 | .collect(); 170 | 171 | DocumentCollection { 172 | offset: json_extr!(doc["offset"]), 173 | total_rows: items.len() as u32, 174 | rows: items, 175 | } 176 | } 177 | 178 | pub fn new_from_documents(docs: Vec) -> DocumentCollection { 179 | let len = docs.len() as u32; 180 | 181 | DocumentCollection { 182 | offset: 0, 183 | total_rows: len, 184 | rows: docs.into_iter().map(|d| DocumentCollectionItem::new(d)).collect(), 185 | } 186 | } 187 | 188 | /// Returns raw JSON data from documents 189 | pub fn get_data(&self) -> Vec { 190 | self.rows.iter().map(|doc_item| doc_item.doc.get_data()).collect() 191 | } 192 | } 193 | 194 | impl Index for DocumentCollection { 195 | type Output = DocumentCollectionItem; 196 | 197 | fn index(&self, index: usize) -> 
&DocumentCollectionItem { 198 | &self.rows.get(index).unwrap() 199 | } 200 | } 201 | 202 | impl IndexMut for DocumentCollection { 203 | fn index_mut(&mut self, index: usize) -> &mut DocumentCollectionItem { 204 | self.rows.get_mut(index).unwrap() 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | #[derive(Fail, Debug)] 2 | #[fail(display = "Custom error: {}", _0)] 3 | pub struct SofaError(pub String); 4 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # Sofa - CouchDB for Rust 2 | //! 3 | //! [![Crates.io](https://img.shields.io/crates/v/sofa.svg)](https://crates.io/crates/sofa) 4 | //! [![docs.rs](https://docs.rs/sofa/badge.svg)](https://docs.rs/sofa) 5 | //! 6 | //! ![sofa-logo](https://raw.githubusercontent.com/YellowInnovation/sofa/master/docs/logo-sofa.png "Logo Sofa") 7 | //! 8 | //! ## Documentation 9 | //! 10 | //! Here: [http://docs.rs/sofa](http://docs.rs/sofa) 11 | //! 12 | //! ## Installation 13 | //! 14 | //! ```toml 15 | //! [dependencies] 16 | //! sofa = "0.6" 17 | //! ``` 18 | //! 19 | //! ## Description 20 | //! 21 | //! This crate is an interface to CouchDB HTTP REST API. Works with stable Rust. 22 | //! 23 | //! Does not support `#![no_std]` 24 | //! 25 | //! After trying most crates for CouchDB in Rust (`chill`, `couchdb` in particular), none of them fit our needs hence 26 | //! the need to create our own. 27 | //! 28 | //! No async I/O (yet), uses a mix of Reqwest and Serde under the hood, with a 29 | //! few nice abstractions out there. 30 | //! 31 | //! **NOT 1.0 YET, so expect changes** 32 | //! 33 | //! **Supports CouchDB 2.0 and up.** 34 | //! 35 | //! 
Be sure to check [CouchDB's Documentation](http://docs.couchdb.org/en/latest/index.html) in detail to see what's possible. 36 | //! 37 | //! ## Running tests 38 | //! 39 | //! Make sure that you have an instance of CouchDB 2.0+ running, either via the 40 | //! supplied `docker-compose.yml` file or by yourself. It must be listening on 41 | //! the default port. 42 | //! 43 | //! And then 44 | //! `cargo test -- --test-threads=1` 45 | //! 46 | //! Single-threading the tests is very important because we need to make sure 47 | //! that the basic features are working before actually testing features on 48 | //! dbs/documents. 49 | //! 50 | //! ## Why the name "Sofa" 51 | //! 52 | //! CouchDB has a nice name, and I wanted to reflect that. 53 | //! 54 | //! ## License 55 | //! 56 | //! Licensed under either of these: 57 | //! 58 | //! * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 59 | //! [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) 60 | //! * MIT license ([LICENSE-MIT](LICENSE-MIT) or 61 | //! [https://opensource.org/licenses/MIT](https://opensource.org/licenses/MIT)) 62 | //! 63 | //! ## Yellow Innovation 64 | //! 65 | //! Yellow Innovation is the innovation laboratory of the French postal 66 | //! service: La Poste. 67 | //! 68 | //! We create innovative user experiences and journeys through services with a 69 | //! focus on IoT lately. 70 | //! 71 | //! 
[Yellow Innovation's website and works](http://yellowinnovation.fr/en/) 72 | 73 | #[macro_use] 74 | extern crate failure; 75 | extern crate reqwest; 76 | extern crate serde; 77 | #[macro_use] 78 | extern crate serde_json; 79 | #[macro_use] 80 | extern crate serde_derive; 81 | 82 | #[cfg(test)] 83 | #[macro_use] 84 | extern crate pretty_assertions; 85 | 86 | /// Macros that the crate exports to facilitate most of the 87 | /// doc-to-json-to-string-related tasks 88 | #[allow(unused_macros)] 89 | #[macro_use] 90 | mod macros { 91 | /// Shortcut to `mod $mod; pub use mod::*;` 92 | macro_rules! mod_use { 93 | ($module:ident) => { 94 | mod $module; 95 | pub use self::$module::*; 96 | }; 97 | } 98 | 99 | /// Extracts a JSON Value to a defined Struct 100 | macro_rules! json_extr { 101 | ($e:expr) => { 102 | serde_json::from_value($e.to_owned()).unwrap() 103 | }; 104 | } 105 | 106 | /// Automatic call to serde_json::to_string() function, with prior 107 | /// Document::get_data() call to get documents' inner data 108 | macro_rules! dtj { 109 | ($e:expr) => { 110 | js!(&$e.get_data()) 111 | }; 112 | } 113 | 114 | /// Automatic call to serde_json::to_string() function 115 | macro_rules! js { 116 | ($e:expr) => { 117 | serde_json::to_string(&$e).unwrap() 118 | }; 119 | } 120 | 121 | /// String creation 122 | macro_rules! s { 123 | ($e:expr) => { 124 | String::from($e) 125 | }; 126 | } 127 | 128 | /// Gets milliseconds from timespec 129 | macro_rules! tspec_ms { 130 | ($tspec:ident) => {{ 131 | $tspec.sec * 1000 + $tspec.nsec as i64 / 1000000 132 | }}; 133 | } 134 | 135 | /// Gets current UNIX time in milliseconds 136 | macro_rules! 
msnow {
        () => {{
            let tm = time::now().to_timespec();
            tspec_ms!(tm)
        }};
    }
}

mod_use!(client);
mod_use!(database);
mod_use!(document);
mod_use!(error);
pub mod types;
mod_use!(model);

#[allow(unused_mut, unused_variables)]
#[cfg(test)]
mod sofa_tests {
    mod a_sys {
        use *;

        #[test]
        fn a_should_check_couchdbs_status() {
            let client = Client::new("http://localhost:5984".into()).unwrap();
            let status = client.check_status();
            assert!(status.is_ok());
        }

        #[test]
        fn b_should_create_sofa_test_db() {
            let client = Client::new("http://localhost:5984".into()).unwrap();
            let dbw = client.db("b_should_create_sofa_test_db");
            assert!(dbw.is_ok());

            // Best-effort cleanup; failure to destroy is not part of this test.
            let _ = client.destroy_db("b_should_create_sofa_test_db");
        }

        #[test]
        fn c_should_create_a_document() {
            let client = Client::new("http://localhost:5984".into()).unwrap();
            let dbw = client.db("c_should_create_a_document");
            assert!(dbw.is_ok());
            let db = dbw.unwrap();

            let ndoc_result = db.create(json!({
                "thing": true
            }));

            assert!(ndoc_result.is_ok());

            let mut doc = ndoc_result.unwrap();
            assert_eq!(doc["thing"], json!(true));

            let _ = client.destroy_db("c_should_create_a_document");
        }

        #[test]
        fn d_should_destroy_the_db() {
            let client = Client::new("http://localhost:5984".into()).unwrap();
            let _ = client.db("d_should_destroy_the_db");

            assert!(client.destroy_db("d_should_destroy_the_db").unwrap());
        }
    }

    mod b_db {
        use *;

        /// Creates a fresh database plus one `{"thing": true}` document in it.
        fn setup(dbname: &'static str) -> (Client, Database, Document) {
            let client = Client::new("http://localhost:5984".into()).unwrap();
            let dbw = client.db(dbname);
            assert!(dbw.is_ok());
            let db = dbw.unwrap();

            let ndoc_result = db.create(json!({
                "thing": true
            }));

            assert!(ndoc_result.is_ok());

            let mut doc = ndoc_result.unwrap();
            assert_eq!(doc["thing"], json!(true));

            (client, db, doc)
        }

        fn teardown(client: Client, dbname: &'static str) {
            assert!(client.destroy_db(dbname).unwrap())
        }

        #[test]
        fn a_should_update_a_document() {
            let (client, db, mut doc) = setup("a_should_update_a_document");

            doc["thing"] = json!(false);

            let save_result = db.save(doc);
            assert!(save_result.is_ok());
            let new_doc = save_result.unwrap();
            assert_eq!(new_doc["thing"], json!(false));

            teardown(client, "a_should_update_a_document");
        }

        #[test]
        fn b_should_remove_a_document() {
            let (client, db, doc) = setup("b_should_remove_a_document");
            assert!(db.remove(doc));

            teardown(client, "b_should_remove_a_document");
        }

        #[test]
        fn c_should_get_a_single_document() {
            let (client, ..) = setup("c_should_get_a_single_document");
            assert!(true);
            teardown(client, "c_should_get_a_single_document");
        }

        /// Like `setup`, but also registers a "thing-index" on the new database.
        fn setup_create_indexes(dbname: &'static str) -> (Client, Database, Document) {
            let (client, db, doc) = setup(dbname);

            let spec = types::IndexFields::new(vec![types::SortSpec::Simple(s!("thing"))]);

            let res = db.insert_index("thing-index".into(), spec);

            assert!(res.is_ok());

            (client, db, doc)
        }

        #[test]
        fn d_should_create_index_in_db() {
            let (client, db, _) = setup_create_indexes("d_should_create_index_in_db");
            assert!(true);
            teardown(client, "d_should_create_index_in_db");
        }

        #[test]
        fn e_should_list_indexes_in_db() {
            let (client, db, _) = setup_create_indexes("e_should_list_indexes_in_db");

            let index_list = db.read_indexes().unwrap();
            assert!(index_list.indexes.len() > 1);
            // Index 0 is CouchDB's built-in _all_docs index; ours comes second.
            let ref findex = index_list.indexes[1];

            assert_eq!(findex.name.as_str(), "thing-index");
            teardown(client, "e_should_list_indexes_in_db");
        }

        #[test]
        fn f_should_ensure_index_in_db() {
            let (client, db, _) = setup("f_should_ensure_index_in_db");

            let spec = types::IndexFields::new(vec![types::SortSpec::Simple(s!("thing"))]);

            let res = db.ensure_index("thing-index".into(), spec);
            assert!(res.is_ok());

            teardown(client, "f_should_ensure_index_in_db");
        }

        #[test]
        fn g_should_find_documents_in_db() {
            let (client, db, doc) = setup_create_indexes("g_should_find_documents_in_db");

            let documents_res = db.find(json!({
                "selector": {
                    "thing": true
                },
                "limit": 1,
                "sort": [{
                    "thing": "desc"
                }]
            }));

            assert!(documents_res.is_ok());
            let documents = documents_res.unwrap();
            assert_eq!(documents.rows.len(), 1);

teardown(client, "g_should_find_documents_in_db"); 317 | } 318 | } 319 | } 320 | -------------------------------------------------------------------------------- /src/model.rs: -------------------------------------------------------------------------------- 1 | use serde::de::DeserializeOwned; 2 | use serde::ser::Serialize; 3 | use serde_json::{from_value, to_value}; 4 | use ::document::*; 5 | use std::marker::Sized; 6 | 7 | /// Trait that provides methods that can be used to switch between abstract `Document` and concrete `Model` implementors (such as your custom data models) 8 | pub trait Model { 9 | fn from_document(d: Document) -> T { 10 | from_value(d.get_data()).unwrap() 11 | } 12 | 13 | fn to_document(&self) -> Document where Self: Serialize { 14 | Document::new(to_value(self).unwrap()) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/types/document.rs: -------------------------------------------------------------------------------- 1 | /// String that represents a Document ID in CouchDB 2 | pub type DocumentId = String; 3 | 4 | /// DocumentRef is an abstraction over populated/unpopulated data fields 5 | #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] 6 | #[serde(untagged)] 7 | pub enum DocumentRef { 8 | Ref(DocumentId), 9 | Populated(T) 10 | } 11 | 12 | /// Abstracted document creation result 13 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 14 | pub struct DocumentCreatedResult { 15 | pub id: Option, 16 | pub ok: Option, 17 | pub rev: Option, 18 | pub error: Option, 19 | pub reason: Option 20 | } 21 | -------------------------------------------------------------------------------- /src/types/find.rs: -------------------------------------------------------------------------------- 1 | use serde_json::{Value}; 2 | use std::collections::HashMap; 3 | 4 | /// Sort direction abstraction 5 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 6 | #[serde(untagged)] 
7 | pub enum SortDirection { 8 | Desc, 9 | Asc 10 | } 11 | 12 | impl From for SortDirection { 13 | fn from(original: String) -> SortDirection { 14 | match original.as_ref() { 15 | "desc" => SortDirection::Desc, 16 | "asc" | _ => SortDirection::Asc 17 | } 18 | } 19 | } 20 | 21 | /// Sort spec content abstraction 22 | pub type SortSpecContent = HashMap; 23 | 24 | /// Sort spec abstraction 25 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 26 | #[serde(untagged)] 27 | pub enum SortSpec { 28 | Simple(String), 29 | Complex(SortSpecContent) 30 | } 31 | 32 | /// Index spec abstraction 33 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 34 | #[serde(untagged)] 35 | pub enum IndexSpec { 36 | DesignDocument(String), 37 | IndexName((String, String)) 38 | } 39 | 40 | /// Find query abstraction 41 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 42 | pub struct FindQuery { 43 | pub selector: Value, 44 | pub limit: Option, 45 | pub skip: Option, 46 | pub sort: Option, 47 | pub fields: Option>, 48 | pub use_index: Option 49 | } 50 | 51 | /// Find result abstraction 52 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 53 | pub struct FindResult { 54 | pub docs: Option>, 55 | pub warning: Option, 56 | pub error: Option, 57 | pub reason: Option 58 | } 59 | 60 | /// Explain result abstraction 61 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 62 | pub struct ExplainResult { 63 | pub dbname: String, 64 | pub index: IndexSpec, 65 | pub selector: Value, 66 | pub opts: Value, 67 | pub limit: u32, 68 | pub skip: u64, 69 | pub fields: Vec, 70 | pub range: Value 71 | } 72 | -------------------------------------------------------------------------------- /src/types/index.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | /// Index fields abstraction 4 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 5 | pub struct IndexFields { 6 | pub fields: Vec 7 
| } 8 | 9 | impl IndexFields { 10 | pub fn new(fields: Vec) -> IndexFields { 11 | IndexFields { 12 | fields: fields 13 | } 14 | } 15 | } 16 | 17 | /// Index abstraction 18 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 19 | pub struct Index { 20 | pub ddoc: Option, 21 | pub name: String, 22 | #[serde(rename = "type")] 23 | pub index_type: String, 24 | pub def: IndexFields 25 | } 26 | 27 | /// Index created abstraction 28 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 29 | pub struct IndexCreated { 30 | pub result: Option, 31 | pub id: Option, 32 | pub name: Option, 33 | pub error: Option, 34 | pub reason: Option 35 | } 36 | 37 | /// Database index list abstraction 38 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 39 | pub struct DatabaseIndexList { 40 | pub total_rows: u32, 41 | pub indexes: Vec 42 | } 43 | -------------------------------------------------------------------------------- /src/types/mod.rs: -------------------------------------------------------------------------------- 1 | mod_use!(system); 2 | mod_use!(document); 3 | mod_use!(find); 4 | mod_use!(index); 5 | -------------------------------------------------------------------------------- /src/types/system.rs: -------------------------------------------------------------------------------- 1 | /// Couch vendor abstraction 2 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 3 | pub struct CouchVendor { 4 | pub name: String, 5 | pub version: Option 6 | } 7 | 8 | /// Couch status abstraction 9 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 10 | pub struct CouchStatus { 11 | pub couchdb: String, 12 | pub git_sha: Option, 13 | pub uuid: Option, 14 | pub version: String, 15 | pub vendor: CouchVendor 16 | } 17 | 18 | /// Couch response abstraction 19 | #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] 20 | pub struct CouchResponse { 21 | pub ok: Option, 22 | pub error: Option, 23 | pub reason: Option 24 | } 25 | 
--------------------------------------------------------------------------------