├── .gitignore
├── public
│   ├── logo.png
│   └── index.html
├── docs
│   ├── assets
│   │   ├── banner.png
│   │   ├── video.png
│   │   └── screenshot.png
│   └── protocol-study
│       ├── ours.png
│       ├── theirs.png
│       ├── packet-study.txt
│       └── their-packet-study.txt
├── tests
│   ├── fixtures
│   │   └── binaries
│   │       ├── large-update-1.bin
│   │       └── large-update-2.bin
│   ├── create_test.rs
│   ├── drop_database_test.rs
│   ├── build_info_test.rs
│   ├── list_indexes_test.rs
│   ├── delete_test.rs
│   ├── count_test.rs
│   ├── list_databases_test.rs
│   ├── create_indexes_test.rs
│   ├── get_parameters_test.rs
│   ├── find_and_modify_test.rs
│   ├── insert_test.rs
│   ├── common.rs
│   └── find_test.rs
├── .env.example
├── src
│   ├── lib.rs
│   ├── commands
│   │   ├── ping.rs
│   │   ├── whats_my_uri.rs
│   │   ├── get_cmd_line_opts.rs
│   │   ├── connection_status.rs
│   │   ├── drop.rs
│   │   ├── drop_database.rs
│   │   ├── aggregate
│   │   │   ├── match_stage.rs
│   │   │   ├── count_stage.rs
│   │   │   ├── group_id.rs
│   │   │   ├── group_stage.rs
│   │   │   ├── sql_statement.rs
│   │   │   └── mod.rs
│   │   ├── insert.rs
│   │   ├── is_master.rs
│   │   ├── hello.rs
│   │   ├── coll_stats.rs
│   │   ├── create_indexes.rs
│   │   ├── create.rs
│   │   ├── build_info.rs
│   │   ├── list_collections.rs
│   │   ├── mod.rs
│   │   ├── delete.rs
│   │   ├── db_stats.rs
│   │   ├── count.rs
│   │   ├── list_databases.rs
│   │   ├── update.rs
│   │   ├── find.rs
│   │   ├── list_indexes.rs
│   │   ├── find_and_modify.rs
│   │   └── get_parameter.rs
│   ├── wire
│   │   ├── op_reply.rs
│   │   ├── op_query.rs
│   │   ├── op_msg.rs
│   │   ├── mod.rs
│   │   └── util.rs
│   ├── runtime.js
│   ├── threadpool.rs
│   ├── deserializer.rs
│   ├── ui.rs
│   ├── serializer.rs
│   ├── main.rs
│   ├── server.rs
│   ├── parser
│   │   └── update_parser.rs
│   └── handler.rs
├── Makefile
├── Dockerfile.web
├── Dockerfile
├── scripts
│   ├── start.sh
│   ├── release-arm64
│   └── release-linux
├── .github
│   ├── release-drafter.yml
│   └── workflows
│       ├── release.yml
│       ├── release-drafter.yml
│       └── ci.yml
├── Cargo.toml
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | .env
3 | .DS_Store
4 | .vscode
--------------------------------------------------------------------------------
/public/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/public/logo.png
--------------------------------------------------------------------------------
/docs/assets/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/docs/assets/banner.png
--------------------------------------------------------------------------------
/docs/assets/video.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/docs/assets/video.png
--------------------------------------------------------------------------------
/docs/assets/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/docs/assets/screenshot.png
--------------------------------------------------------------------------------
/docs/protocol-study/ours.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/docs/protocol-study/ours.png
--------------------------------------------------------------------------------
/docs/protocol-study/theirs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/docs/protocol-study/theirs.png
--------------------------------------------------------------------------------
/tests/fixtures/binaries/large-update-1.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/tests/fixtures/binaries/large-update-1.bin
--------------------------------------------------------------------------------
/tests/fixtures/binaries/large-update-2.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fcoury/oxide/HEAD/tests/fixtures/binaries/large-update-2.bin
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | DATABASE_URL=postgresql://postgres:postgres@localhost/oxidedb
2 | TEST_DATABASE_URL=postgresql://postgres:postgres@localhost
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
 1 | pub mod commands;
 2 | pub mod deserializer;
 3 | pub mod handler;
 4 | pub mod parser;
 5 | pub mod pg;
 6 | pub mod serializer;
 7 | pub mod server;
 8 | pub mod shell;
 9 | pub mod threadpool;
10 | pub mod utils;
11 | pub mod wire;
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
 1 | NAME = oxide
 2 | 
 3 | check:
 4 | 	cargo check
 5 | 
 6 | start:
 7 | 	./scripts/start.sh
 8 | 
 9 | debug:
10 | 	./scripts/start.sh --debug
11 | 
12 | test:
13 | 	# cargo nextest run
14 | 	cargo test
15 | 
16 | web:
17 | 	./scripts/start.sh web
--------------------------------------------------------------------------------
/Dockerfile.web:
--------------------------------------------------------------------------------
 1 | ARG DATABASE_URL
 2 | 
 3 | FROM rust:1.62 AS builder
 4 | COPY . .
 5 | RUN cargo build --release
 6 | 
 7 | FROM debian:buster-slim
 8 | COPY --from=builder ./target/release/oxide ./target/release/oxide
 9 | 
10 | EXPOSE 8087
11 | CMD ["/target/release/oxide", "web", "--listen-addr", "0.0.0.0"]
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM rust:1.64 AS builder
 2 | COPY . .
 3 | RUN cargo build --release
 4 | 
 5 | FROM debian:bullseye-slim
 6 | COPY --from=builder ./target/release/oxide ./target/release/oxide
 7 | 
 8 | EXPOSE 27017 8087
 9 | CMD ["/target/release/oxide", "--web", "--listen-addr", "0.0.0.0", "--web-addr", "0.0.0.0:8087"]
--------------------------------------------------------------------------------
/scripts/start.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | if [ ! -f "`which cargo-watch`" ]; then
 4 |   if [ -f "`which cargo-binstall`" ]; then
 5 |     cargo binstall cargo-watch
 6 |   else
 7 |     echo Please accept the installation of cargo-watch next
 8 |     echo
 9 |     cargo install cargo-watch
10 |   fi
11 | fi
12 | echo "run -- $@"
13 | cargo watch -x "run -- $@"
--------------------------------------------------------------------------------
/src/commands/ping.rs:
--------------------------------------------------------------------------------
 1 | use crate::{
 2 |     commands::Handler,
 3 |     handler::{CommandExecutionError, Request},
 4 | };
 5 | use bson::{doc, Bson, Document};
 6 | 
 7 | pub struct Ping {}
 8 | 
 9 | impl Handler for Ping {
10 |     fn new() -> Self {
11 |         Ping {}
12 |     }
13 | 
14 |     fn handle(
15 |         &self,
16 |         _request: &Request,
17 |         _msg: &Vec<Document>,
18 |     ) -> Result<Document, CommandExecutionError> {
19 |         Ok(doc! {
20 |             "ok": Bson::Double(1.into())
21 |         })
22 |     }
23 | }
--------------------------------------------------------------------------------
/scripts/release-arm64:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | set -euo pipefail
 4 | 
 5 | LAST_RELEASE=`gh release list | head -1 | awk '{print $1}'`
 6 | cargo build --release
 7 | 
 8 | cd target/release
 9 | zip "oxide_${LAST_RELEASE}_arm64-apple-darwin.zip" oxide
10 | shasum -a 256 "oxide_${LAST_RELEASE}_arm64-apple-darwin.zip" > "oxide_${LAST_RELEASE}_arm64-apple-darwin.zip.sha256sum"
11 | 
12 | gh release upload "$LAST_RELEASE" "oxide_${LAST_RELEASE}_arm64-apple-darwin.zip"
13 | gh release upload "$LAST_RELEASE" "oxide_${LAST_RELEASE}_arm64-apple-darwin.zip.sha256sum"
14 | cd -
15 | 
16 | echo Done.
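--------------------------------------------------------------------------------
[illustrative sketch — not a repository file]
--------------------------------------------------------------------------------
The Ping handler above answers the `ping` command with { ok: 1.0 }. Assuming
oxide is listening on the default MongoDB port (the URI below is an
assumption), the driver-side round trip looks like this:

use mongodb::{bson::doc, sync::Client};

fn main() -> mongodb::error::Result<()> {
    // Connect with the regular MongoDB driver; oxide speaks the wire protocol.
    let client = Client::with_uri_str("mongodb://localhost:27017")?;
    let res = client.database("test").run_command(doc! { "ping": 1 }, None)?;
    // The handler returns Bson::Double(1.into()), i.e. ok: 1.0.
    assert_eq!(res.get_f64("ok").unwrap(), 1.0);
    Ok(())
}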
--------------------------------------------------------------------------------
/public/index.html:
--------------------------------------------------------------------------------
(HTML markup lost in extraction; only the page title — "OxideDB - Interface" — survives)
--------------------------------------------------------------------------------
/src/commands/whats_my_uri.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::CommandExecutionError;
 2 | use crate::{commands::Handler, handler::Request};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct WhatsMyUri {}
 6 | 
 7 | impl Handler for WhatsMyUri {
 8 |     fn new() -> Self {
 9 |         WhatsMyUri {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         req: &Request,
15 |         _msg: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         Ok(doc! {
18 |             "ok": Bson::Double(1.0),
19 |             "you": req.peer_addr().to_string(),
20 |         })
21 |     }
22 | }
--------------------------------------------------------------------------------
/scripts/release-linux:
--------------------------------------------------------------------------------
 1 | #!/bin/bash
 2 | 
 3 | set -euo pipefail
 4 | 
 5 | LAST_RELEASE=`gh release list | head -1 | awk '{print $1}'`
 6 | cargo build --release
 7 | 
 8 | cd target/release
 9 | zip "oxide_${LAST_RELEASE}_x86_64-unknown-linux-musl.zip" oxide
10 | shasum -a 256 "oxide_${LAST_RELEASE}_x86_64-unknown-linux-musl.zip" > "oxide_${LAST_RELEASE}_x86_64-unknown-linux-musl.zip.sha256sum"
11 | 
12 | gh release upload "$LAST_RELEASE" "oxide_${LAST_RELEASE}_x86_64-unknown-linux-musl.zip"
13 | gh release upload "$LAST_RELEASE" "oxide_${LAST_RELEASE}_x86_64-unknown-linux-musl.zip.sha256sum"
14 | cd -
15 | 
16 | echo Done.
--------------------------------------------------------------------------------
/src/commands/get_cmd_line_opts.rs:
--------------------------------------------------------------------------------
 1 | use crate::commands::Handler;
 2 | use crate::handler::{CommandExecutionError, Request};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct GetCmdLineOpts {}
 6 | 
 7 | impl Handler for GetCmdLineOpts {
 8 |     fn new() -> Self {
 9 |         GetCmdLineOpts {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         _request: &Request,
15 |         _msg: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         Ok(doc! {
18 |             "argv": Bson::Array(vec![Bson::String("oxidedb".to_string())]),
19 |             "parsed": doc!{},
20 |             "ok": Bson::Double(1.into())
21 |         })
22 |     }
23 | }
--------------------------------------------------------------------------------
/tests/create_test.rs:
--------------------------------------------------------------------------------
 1 | use bson::{doc, Document};
 2 | 
 3 | mod common;
 4 | 
 5 | #[test]
 6 | fn test_create_basic() {
 7 |     let ctx = common::setup();
 8 | 
 9 |     ctx.db()
10 |         .collection::<Document>(&ctx.collection)
11 |         .drop(None)
12 |         .unwrap();
13 | 
14 |     let names = ctx.db().list_collection_names(None).unwrap();
15 |     assert!(!names.contains(&ctx.collection));
16 | 
17 |     ctx.db()
18 |         .run_command(
19 |             doc! {
20 |                 "create": ctx.clone().collection,
21 |             },
22 |             None,
23 |         )
24 |         .unwrap();
25 | 
26 |     let names = ctx.db().list_collection_names(None).unwrap();
27 |     assert!(names.contains(&ctx.collection));
28 | }
--------------------------------------------------------------------------------
/src/commands/connection_status.rs:
--------------------------------------------------------------------------------
 1 | use crate::commands::Handler;
 2 | use crate::handler::{CommandExecutionError, Request};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct ConnectionStatus {}
 6 | 
 7 | impl Handler for ConnectionStatus {
 8 |     fn new() -> Self {
 9 |         ConnectionStatus {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         _request: &Request,
15 |         _msg: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         Ok(doc! {
18 |             "authInfo": {
19 |                 "authenticatedUsers": [],
20 |                 "authenticatedUserRoles": [],
21 |                 "authenticatedUserPrivileges": [],
22 |             },
23 |             "ok": Bson::Double(1.into())
24 |         })
25 |     }
26 | }
--------------------------------------------------------------------------------
/.github/release-drafter.yml:
--------------------------------------------------------------------------------
 1 | name-template: 'v$RESOLVED_VERSION 🌈'
 2 | tag-template: 'v$RESOLVED_VERSION'
 3 | categories:
 4 |   - title: '🚀 Features'
 5 |     labels:
 6 |       - 'feature'
 7 |       - 'enhancement'
 8 |   - title: '🐛 Bug Fixes'
 9 |     labels:
10 |       - 'fix'
11 |       - 'bugfix'
12 |       - 'bug'
13 |   - title: '🧰 Maintenance'
14 |     label: 'chore'
15 | change-template: '- $TITLE @$AUTHOR (#$NUMBER)'
16 | change-title-escapes: '\<*_&' # You can add # and @ to disable mentions, and add ` to disable code blocks.
17 | version-resolver:
18 |   major:
19 |     labels:
20 |       - 'major'
21 |   minor:
22 |     labels:
23 |       - 'minor'
24 |   patch:
25 |     labels:
26 |       - 'patch'
27 |   default: patch
28 | template: |
29 |   ## Changes
30 | 
31 |   $CHANGES
--------------------------------------------------------------------------------
/src/commands/drop.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::{CommandExecutionError, Request};
 2 | use crate::{commands::Handler, pg::SqlParam};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct Drop {}
 6 | 
 7 | impl Handler for Drop {
 8 |     fn new() -> Self {
 9 |         Drop {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         request: &Request,
15 |         docs: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         let mut client = request.get_client();
18 |         let sp = SqlParam::from(&docs[0], "drop");
19 |         client.drop_table(&sp).unwrap();
20 | 
21 |         Ok(doc! {
22 |             "nIndexesWas": Bson::Int32(1), // TODO
23 |             "ns": Bson::String(sp.to_string()),
24 |             "ok": Bson::Double(1.0),
25 |         })
26 |     }
27 | }
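--------------------------------------------------------------------------------
[illustrative sketch — not a repository file]
--------------------------------------------------------------------------------
Drop::handle above receives the raw command document; SqlParam::from is
presumably what splits it into a Postgres schema ("$db") and table (the value
under the command key). A minimal sketch of that mapping, with the field
extraction spelled out by hand:

use bson::doc;

fn main() {
    // What a driver sends for db.users.drop() in database "mydb" (shape
    // inferred from the handler above, not captured from the wire):
    let cmd = doc! { "drop": "users", "$db": "mydb" };

    let db = cmd.get_str("$db").unwrap();          // Postgres schema: "mydb"
    let collection = cmd.get_str("drop").unwrap(); // Postgres table: "users"
    println!("would drop table {}.{}", db, collection);
}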
--------------------------------------------------------------------------------
/src/commands/drop_database.rs:
--------------------------------------------------------------------------------
 1 | use crate::commands::Handler;
 2 | use crate::handler::{CommandExecutionError, Request};
 3 | use bson::{doc, Bson, Document};
 4 | use sql_lexer::sanitize_string;
 5 | 
 6 | pub struct DropDatabase {}
 7 | 
 8 | impl Handler for DropDatabase {
 9 |     fn new() -> Self {
10 |         DropDatabase {}
11 |     }
12 | 
13 |     fn handle(
14 |         &self,
15 |         request: &Request,
16 |         docs: &Vec<Document>,
17 |     ) -> Result<Document, CommandExecutionError> {
18 |         let mut client = request.get_client();
19 |         let db = sanitize_string(docs[0].get_str("$db").unwrap().to_string());
20 |         client.drop_schema(&db).unwrap();
21 | 
22 |         Ok(doc! {
23 |             "dropped": db,
24 |             "ok": Bson::Double(1.0),
25 |         })
26 |     }
27 | }
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
 1 | name: Release
 2 | 
 3 | on:
 4 |   release:
 5 |     types: [created]
 6 | 
 7 | jobs:
 8 |   release:
 9 |     name: release ${{ matrix.target }}
10 |     runs-on: ubuntu-latest
11 |     strategy:
12 |       fail-fast: false
13 |       matrix:
14 |         include:
15 |           - target: x86_64-pc-windows-gnu
16 |             archive: zip
17 |           - target: x86_64-unknown-linux-musl
18 |             archive: tar.gz tar.xz
19 |           - target: x86_64-apple-darwin
20 |             archive: zip
21 |     steps:
22 |       - uses: actions/checkout@master
23 |       - name: Compile and release
24 |         uses: rust-build/rust-build.action@v1.3.2
25 |         env:
26 |           GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
27 |         with:
28 |           RUSTTARGET: ${{ matrix.target }}
29 |           ARCHIVE_TYPES: ${{ matrix.archive }}
--------------------------------------------------------------------------------
/src/commands/aggregate/match_stage.rs:
--------------------------------------------------------------------------------
 1 | use crate::parser::parse;
 2 | use crate::utils::expand_fields;
 3 | use bson::Document;
 4 | use eyre::Result;
 5 | 
 6 | use super::sql_statement::SqlStatement;
 7 | 
 8 | pub fn process_match(doc: &Document) -> Result<SqlStatement> {
 9 |     let mut sql = SqlStatement::builder().build();
10 | 
11 |     let filter_doc = expand_fields(&doc)?;
12 |     let filter = parse(filter_doc)?;
13 |     if filter != "" {
14 |         sql.add_filter(&filter);
15 |     }
16 | 
17 |     Ok(sql)
18 | }
19 | 
20 | #[cfg(test)]
21 | mod tests {
22 |     use super::*;
23 | 
24 |     use bson::doc;
25 | 
26 |     #[test]
27 |     fn test_process_match() {
28 |         let doc = doc! { "age": { "$gt": 20 } };
29 |         let sql = process_match(&doc).unwrap();
30 | 
31 |         assert_eq!(
32 |             sql.filters[0],
33 |             r#"(jsonb_typeof(_jsonb->'age') = 'number' OR jsonb_typeof(_jsonb->'age'->'$f') = 'number') AND CASE WHEN (_jsonb->'age' ? '$f') THEN (_jsonb->'age'->>'$f')::numeric ELSE (_jsonb->'age')::numeric END > '20'"#
34 |         );
35 |     }
36 | }
--------------------------------------------------------------------------------
/src/commands/insert.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::{CommandExecutionError, Request};
 2 | use crate::{commands::Handler, pg::SqlParam};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct Insert {}
 6 | 
 7 | impl Handler for Insert {
 8 |     fn new() -> Self {
 9 |         Insert {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         request: &Request,
15 |         docs: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         let doc = &docs[0];
18 |         let db = doc.get_str("$db").unwrap();
19 |         let collection = doc.get_str("insert").unwrap();
20 |         let mut docs: Vec<Document> = doc.get_array("documents").unwrap().iter().map(|d| d.as_document().unwrap().clone()).collect();
21 | 
22 |         let mut client = request.get_client();
23 |         client.create_table_if_not_exists(db, collection).unwrap();
24 | 
25 |         let sp = SqlParam::new(db, collection);
26 |         let inserted = client.insert_docs(sp, &mut docs).unwrap();
27 | 
28 |         Ok(doc! {
29 |             "n": Bson::Int64(inserted.try_into().unwrap()),
30 |             "ok": Bson::Double(1.0),
31 |         })
32 |     }
33 | }
--------------------------------------------------------------------------------
/tests/drop_database_test.rs:
--------------------------------------------------------------------------------
 1 | use mongodb::bson::{doc, Document};
 2 | 
 3 | mod common;
 4 | 
 5 | #[test]
 6 | fn test_drop_database() {
 7 |     let ctx = common::setup();
 8 |     let db = ctx.mongodb().database("test_drop_database_1");
 9 |     let col = db.collection::<Document>("test");
10 | 
11 |     col.insert_one(doc! { "x": 1 }, None).unwrap();
12 | 
13 |     let dbs = ctx.mongodb().list_database_names(None, None).unwrap();
14 |     assert!(dbs.contains(&"test_drop_database_1".to_string()));
15 | 
16 |     db.drop(None).unwrap();
17 | 
18 |     let dbs = ctx.mongodb().list_database_names(None, None).unwrap();
19 |     assert!(!dbs.contains(&"test_drop_database_1".to_string()));
20 | }
21 | 
22 | #[test]
23 | fn test_drop_inexistent_database() {
24 |     let ctx = common::setup();
25 |     let db = ctx.mongodb().database("test_drop_database_2");
26 | 
27 |     let dbs = ctx.mongodb().list_database_names(None, None).unwrap();
28 |     assert!(!dbs.contains(&"test_drop_database_2".to_string()));
29 | 
30 |     db.drop(None).unwrap();
31 | 
32 |     let dbs = ctx.mongodb().list_database_names(None, None).unwrap();
33 |     assert!(!dbs.contains(&"test_drop_database_2".to_string()));
34 | }
--------------------------------------------------------------------------------
/src/commands/is_master.rs:
--------------------------------------------------------------------------------
 1 | use crate::commands::Handler;
 2 | use crate::handler::{CommandExecutionError, Request};
 3 | use crate::wire::{MAX_DOCUMENT_LEN, MAX_MSG_LEN};
 4 | use bson::{doc, Bson, Document};
 5 | use std::time::{SystemTime, UNIX_EPOCH};
 6 | 
 7 | pub struct IsMaster {}
 8 | 
 9 | impl Handler for IsMaster {
10 |     fn new() -> Self {
11 |         IsMaster {}
12 |     }
13 | 
14 |     fn handle(
15 |         &self,
16 |         _request: &Request,
17 |         _msg: &Vec<Document>,
18 |     ) -> Result<Document, CommandExecutionError> {
19 |         let local_time = SystemTime::now()
20 |             .duration_since(UNIX_EPOCH)
21 |             .unwrap()
22 |             .as_millis();
23 |         Ok(doc! {
24 |             "ismaster": Bson::Boolean(true),
25 |             "maxBsonObjectSize": MAX_DOCUMENT_LEN,
26 |             "maxMessageSizeBytes": MAX_MSG_LEN,
27 |             "maxWriteBatchSize": 100000,
28 |             "localTime": Bson::DateTime(bson::DateTime::from_millis(local_time.try_into().unwrap())),
29 |             "minWireVersion": 0,
30 |             "maxWireVersion": 13,
31 |             "readOnly": Bson::Boolean(false),
32 |             "ok": Bson::Double(1.into())
33 |         })
34 |     }
35 | }
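--------------------------------------------------------------------------------
[illustrative sketch — not a repository file]
--------------------------------------------------------------------------------
is_master.rs above (and hello.rs below) build "localTime" by converting epoch
milliseconds — a u128 from as_millis — into the i64 that bson::DateTime
expects; try_into().unwrap() is the narrowing step. In isolation:

use std::time::{SystemTime, UNIX_EPOCH};

fn main() {
    let millis = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis(); // u128
    // Narrow to i64; this only fails for timestamps far beyond any real clock.
    let dt = bson::DateTime::from_millis(millis.try_into().unwrap());
    println!("localTime = {}", dt);
}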
--------------------------------------------------------------------------------
/src/commands/hello.rs:
--------------------------------------------------------------------------------
 1 | use crate::commands::Handler;
 2 | use crate::handler::{CommandExecutionError, Request};
 3 | use crate::wire::{MAX_DOCUMENT_LEN, MAX_MSG_LEN};
 4 | use bson::{doc, Bson, Document};
 5 | use std::time::{SystemTime, UNIX_EPOCH};
 6 | 
 7 | pub struct Hello {}
 8 | 
 9 | impl Handler for Hello {
10 |     fn new() -> Self {
11 |         Hello {}
12 |     }
13 | 
14 |     fn handle(
15 |         &self,
16 |         _request: &Request,
17 |         _msg: &Vec<Document>,
18 |     ) -> Result<Document, CommandExecutionError> {
19 |         let local_time = SystemTime::now()
20 |             .duration_since(UNIX_EPOCH)
21 |             .unwrap()
22 |             .as_millis();
23 |         Ok(doc! {
24 |             "isWritablePrimary": Bson::Boolean(true),
25 |             "maxBsonObjectSize": MAX_DOCUMENT_LEN,
26 |             "maxMessageSizeBytes": MAX_MSG_LEN,
27 |             "maxWriteBatchSize": 100000,
28 |             "localTime": Bson::DateTime(bson::DateTime::from_millis(local_time.try_into().unwrap())),
29 |             "minWireVersion": 0,
30 |             "maxWireVersion": 13,
31 |             "readOnly": Bson::Boolean(false),
32 |             "ok": Bson::Double(1.into())
33 |         })
34 |     }
35 | }
--------------------------------------------------------------------------------
/tests/build_info_test.rs:
--------------------------------------------------------------------------------
 1 | use mongodb::bson::{doc, Bson};
 2 | use oxide::wire::MAX_DOCUMENT_LEN;
 3 | 
 4 | mod common;
 5 | 
 6 | #[test]
 7 | fn test_build_info() {
 8 |     let ctx = common::setup();
 9 | 
10 |     let res = ctx.db().run_command(doc! { "buildInfo": 1 }, None).unwrap();
11 |     assert_eq!(res.get_str("version").unwrap(), "5.0.42");
12 |     assert_eq!(
13 |         res.get_str("gitVersion").unwrap(),
14 |         "30cf72e1380e1732c0e24016f092ed58e38eeb58"
15 |     );
16 |     assert_eq!(res.get_array("modules").unwrap(), &[]);
17 |     assert_eq!(res.get_str("sysInfo").unwrap(), "deprecated");
18 |     assert_eq!(
19 |         res.get_array("versionArray").unwrap(),
20 |         &[
21 |             Bson::Int32(5),
22 |             Bson::Int32(0),
23 |             Bson::Int32(42),
24 |             Bson::Int32(0)
25 |         ]
26 |     );
27 |     assert_eq!(res.get_i32("bits").unwrap(), 64 as i32);
28 |     assert_eq!(res.get_bool("debug").unwrap(), false);
29 |     assert_eq!(
30 |         res.get_i32("maxBsonObjectSize").unwrap(),
31 |         MAX_DOCUMENT_LEN as i32
32 |     );
33 |     assert_eq!(res.get_document("buildEnvironment").unwrap(), &doc! {});
34 |     assert_eq!(res.get_f64("ok").unwrap(), 1.0);
35 | }
--------------------------------------------------------------------------------
/src/commands/coll_stats.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::CommandExecutionError;
 2 | use crate::pg::SqlParam;
 3 | use crate::{commands::Handler, handler::Request};
 4 | use bson::{doc, Bson, Document};
 5 | 
 6 | pub struct CollStats {}
 7 | 
 8 | impl Handler for CollStats {
 9 |     fn new() -> Self {
10 |         CollStats {}
11 |     }
12 | 
13 |     fn handle(
14 |         &self,
15 |         request: &Request,
16 |         docs: &Vec<Document>,
17 |     ) -> Result<Document, CommandExecutionError> {
18 |         let doc = &docs[0];
19 |         let sp = SqlParam::from(&doc, "collStats");
20 | 
21 |         let mut client = request.get_client();
22 |         let stats = client.schema_stats(&sp.db, Some(&sp.collection)).unwrap();
23 | 
24 |         Ok(doc! {
25 |             "ns": sp.to_string(),
26 |             "count": Bson::Int32(stats.get("RowCount")),
27 |             "size": Bson::Int32(stats.get("TotalSize")),
28 |             "storageSize": Bson::Int32(stats.get("RelationSize")),
29 |             "totalIndexSize": Bson::Int32(stats.get("IndexSize")),
30 |             "totalSize": Bson::Int32(stats.get("TotalSize")),
31 |             "scaleFactor": Bson::Int32(1),
32 |             "ok": Bson::Double(1.0),
33 |         })
34 |     }
35 | }
--------------------------------------------------------------------------------
/src/commands/create_indexes.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::{CommandExecutionError, Request};
 2 | use crate::{commands::Handler, pg::SqlParam};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct CreateIndexes {}
 6 | 
 7 | impl Handler for CreateIndexes {
 8 |     fn new() -> Self {
 9 |         CreateIndexes {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         request: &Request,
15 |         docs: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         let doc = &docs[0];
18 |         let db = doc.get_str("$db").unwrap();
19 |         let collection = doc.get_str("createIndexes").unwrap();
20 |         let indexes = doc.get_array("indexes").unwrap();
21 |         let sp = SqlParam::new(db, collection);
22 | 
23 |         let mut client = request.get_client();
24 | 
25 |         client.create_schema_if_not_exists(&sp.db).unwrap();
26 |         client
27 |             .create_table_if_not_exists(&sp.db, &sp.collection)
28 |             .unwrap();
29 | 
30 |         for index in indexes {
31 |             client
32 |                 .create_index(&sp, index.as_document().unwrap())
33 |                 .unwrap();
34 |         }
35 | 
36 |         Ok(doc! {
37 |             "ok": Bson::Double(1.0),
38 |         })
39 |     }
40 | }
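--------------------------------------------------------------------------------
[illustrative sketch — not a repository file]
--------------------------------------------------------------------------------
CreateIndexes::handle above walks doc["indexes"]; each element is a standard
createIndexes index spec. A hypothetical command body matching the fields the
handler reads (the layout follows MongoDB's command format, not a capture):

use bson::doc;

fn main() {
    let cmd = doc! {
        "createIndexes": "users",          // read via get_str("createIndexes")
        "$db": "mydb",                     // read via get_str("$db")
        "indexes": [                       // read via get_array("indexes")
            { "key": { "a": 1 }, "name": "a_1" },
            { "key": { "a.z": 1, "b.c.d": 1 }, "name": "a.z_1_b.c.d_1" },
        ],
    };
    assert_eq!(cmd.get_array("indexes").unwrap().len(), 2);
}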
--------------------------------------------------------------------------------
/tests/list_indexes_test.rs:
--------------------------------------------------------------------------------
 1 | use bson::doc;
 2 | 
 3 | mod common;
 4 | 
 5 | #[test]
 6 | fn test_list_indexes_basic() {
 7 |     let ctx = common::setup();
 8 | 
 9 |     ctx.col().insert_one(doc! { "x": 1 }, None).unwrap();
10 | 
11 |     let res = ctx
12 |         .db()
13 |         .run_command(
14 |             doc! {
15 |                 "listIndexes": ctx.clone().collection,
16 |             },
17 |             None,
18 |         )
19 |         .unwrap();
20 | 
21 |     let cursor = res.get_document("cursor").unwrap();
22 |     assert_eq!(cursor.get_array("firstBatch").unwrap(), &vec![]);
23 |     assert_eq!(cursor.get_i64("id").unwrap(), 0);
24 |     assert_eq!(
25 |         cursor.get_str("ns").unwrap(),
26 |         format!("{}.$cmd.listIndexes.{}", ctx.db, ctx.collection)
27 |     );
28 | }
29 | 
30 | #[test]
31 | fn test_list_indexes_collection_not_found() {
32 |     let ctx = common::setup();
33 | 
34 |     let res = ctx.db().run_command(
35 |         doc! {
36 |             "listIndexes": ctx.clone().collection,
37 |         },
38 |         None,
39 |     );
40 | 
41 |     assert!(res.is_err());
42 |     assert_eq!(
43 |         res.unwrap_err().to_string(),
44 |         format!(
45 |             r#"Command failed (CommandNotFound): Collection '{}' doesn't exist)"#,
46 |             ctx.collection
47 |         )
48 |     );
49 | }
--------------------------------------------------------------------------------
/src/commands/create.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::{CommandExecutionError, Request};
 2 | use crate::pg::CreateTableError;
 3 | use crate::{commands::Handler, pg::SqlParam};
 4 | use bson::{doc, Bson, Document};
 5 | 
 6 | pub struct Create {}
 7 | 
 8 | impl Handler for Create {
 9 |     fn new() -> Self {
10 |         Create {}
11 |     }
12 | 
13 |     fn handle(
14 |         &self,
15 |         request: &Request,
16 |         docs: &Vec<Document>,
17 |     ) -> Result<Document, CommandExecutionError> {
18 |         let doc = &docs[0];
19 |         let db = doc.get_str("$db").unwrap();
20 |         let collection = doc.get_str("create").unwrap();
21 |         let sp = SqlParam::new(db, collection);
22 | 
23 |         let mut client = request.get_client();
24 | 
25 |         client.create_schema_if_not_exists(&sp.db).unwrap();
26 | 
27 |         let r = client.create_table(sp.clone());
28 |         match r {
29 |             Ok(_) => Ok(doc! {
30 |                 "ok": Bson::Double(1.0),
31 |             }),
32 |             Err(e) => match e {
33 |                 CreateTableError::AlreadyExists(_) => Err(CommandExecutionError::new(format!(
34 |                     "a collection '{}' already exists",
35 |                     sp.clone()
36 |                 ))),
37 |                 CreateTableError::Other(e) => Err(CommandExecutionError::new(format!("{}", e))),
38 |             },
39 |         }
40 |     }
41 | }
--------------------------------------------------------------------------------
/src/commands/build_info.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::CommandExecutionError;
 2 | use crate::wire::MAX_DOCUMENT_LEN;
 3 | use crate::{commands::Handler, handler::Request};
 4 | use bson::{doc, Bson, Document};
 5 | 
 6 | const MONGO_DB_VERSION: &str = "5.0.42";
 7 | 
 8 | pub struct BuildInfo {}
 9 | 
10 | impl Handler for BuildInfo {
11 |     fn new() -> Self {
12 |         BuildInfo {}
13 |     }
14 | 
15 |     fn handle(
16 |         &self,
17 |         _request: &Request,
18 |         _msg: &Vec<Document>,
19 |     ) -> Result<Document, CommandExecutionError> {
20 |         Ok(doc! {
21 |             "version": MONGO_DB_VERSION,
22 |             "gitVersion": "30cf72e1380e1732c0e24016f092ed58e38eeb58", // FIXME: get this from git
23 |             "modules": Bson::Array(vec![]),
24 |             "sysInfo": "deprecated",
25 |             "versionArray": Bson::Array(vec![
26 |                 Bson::Int32(5),
27 |                 Bson::Int32(0),
28 |                 Bson::Int32(42),
29 |                 Bson::Int32(0),
30 |             ]),
31 |             "bits": Bson::Int32(64),
32 |             "debug": false,
33 |             "maxBsonObjectSize": Bson::Int32(MAX_DOCUMENT_LEN.try_into().unwrap()),
34 |             "buildEnvironment": doc!{},
35 | 
36 |             // our extensions
37 |             // "ferretdbVersion", version.Get().Version,
38 | 
39 |             "ok": Bson::Double(1.0)
40 |         })
41 |     }
42 | }
--------------------------------------------------------------------------------
/tests/delete_test.rs:
--------------------------------------------------------------------------------
 1 | use bson::{doc, Document};
 2 | 
 3 | mod common;
 4 | 
 5 | #[test]
 6 | fn test_delete_basic() {
 7 |     let ctx = common::setup();
 8 | 
 9 |     ctx.col()
10 |         .insert_many(
11 |             vec![doc! { "x": 1 }, doc! { "x": 2, "a": 1 }, doc! { "x": 3 }],
12 |             None,
13 |         )
14 |         .unwrap();
15 | 
16 |     ctx.col().delete_many(doc! { "a": 1 }, None).unwrap();
17 |     let cursor = ctx.col().find(doc! {}, None).unwrap();
18 |     let results = cursor.collect::<Vec<_>>();
19 |     assert_eq!(results.len(), 2);
20 | }
21 | 
22 | #[test]
23 | fn test_delete_one() {
24 |     // FIXME right now it only crashes
25 |     let ctx = common::setup();
26 | 
27 |     ctx.col()
28 |         .insert_many(
29 |             vec![doc! { "a": 1 }, doc! { "x": 2, "a": 1 }, doc! { "a": 1 }],
30 |             None,
31 |         )
32 |         .unwrap();
33 | 
34 |     ctx.col().delete_one(doc! { "a": 1 }, None).unwrap();
35 |     let cursor = ctx.col().find(doc! {}, None).unwrap();
36 |     let results = cursor.collect::<Vec<_>>();
37 |     assert_eq!(results.len(), 2);
38 | }
39 | 
40 | #[test]
41 | fn test_delete_inexistent() {
42 |     let ctx = common::setup();
43 | 
44 |     let res = ctx
45 |         .db()
46 |         .collection::<Document>("this_doesnt_exist")
47 |         .delete_many(doc! { "x": 1 }, None)
48 |         .unwrap();
49 | 
50 |     assert_eq!(res.deleted_count, 0);
51 | }
--------------------------------------------------------------------------------
/tests/count_test.rs:
--------------------------------------------------------------------------------
 1 | use mongodb::bson::doc;
 2 | 
 3 | mod common;
 4 | 
 5 | #[test]
 6 | fn test_count() {
 7 |     let ctx = common::setup();
 8 |     ctx.col()
 9 |         .insert_many(
10 |             vec![
11 |                 doc! {
12 |                     "name": "John",
13 |                     "age": 30,
14 |                     "city": "New York",
15 |                 },
16 |                 doc! {
17 |                     "name": "Paul",
18 |                     "age": 29,
19 |                     "city": "Ann Arbor",
20 |                 },
21 |                 doc! {
22 |                     "name": "Ella",
23 |                     "age": 33,
24 |                     "city": "Ann Arbor",
25 |                 },
26 |                 doc! {
27 |                     "name": "Jane",
28 |                     "age": 31,
29 |                     "city": "New York",
30 |                 },
31 |             ],
32 |             None,
33 |         )
34 |         .unwrap();
35 | 
36 |     let res = ctx
37 |         .db()
38 |         .run_command(doc! {"count": &ctx.collection}, None)
39 |         .unwrap();
40 | 
41 |     assert_eq!(res.get_i32("n").unwrap(), 4);
42 | }
43 | 
44 | #[test]
45 | fn test_count_for_non_existent_collection() {
46 |     let ctx = common::setup();
47 |     let res = ctx
48 |         .db()
49 |         .run_command(doc! {"count": "i-dont-exist"}, None)
50 |         .unwrap();
51 | 
52 |     assert_eq!(res.get_i32("n").unwrap(), 0);
53 | }
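--------------------------------------------------------------------------------
[illustrative sketch — not a repository file]
--------------------------------------------------------------------------------
test_count above counts a whole collection; the Count handler (later, in
src/commands/count.rs) also honors an optional "filter" sub-document. A
hypothetical driver call exercising that path (URI and collection name are
assumptions):

use mongodb::{bson::doc, sync::Client};

fn main() -> mongodb::error::Result<()> {
    let client = Client::with_uri_str("mongodb://localhost:27017")?;
    let res = client.database("test").run_command(
        doc! { "count": "people", "filter": { "city": "Ann Arbor" } },
        None,
    )?;
    // The handler answers with n as an Int32.
    println!("n = {}", res.get_i32("n").unwrap());
    Ok(())
}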
--------------------------------------------------------------------------------
/src/commands/list_collections.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::Request;
 2 | use crate::{commands::Handler, handler::CommandExecutionError};
 3 | use bson::{bson, doc, Bson, Document};
 4 | 
 5 | pub struct ListCollections {}
 6 | 
 7 | impl Handler for ListCollections {
 8 |     fn new() -> Self {
 9 |         ListCollections {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         request: &Request,
15 |         docs: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         let doc = &docs[0];
18 |         let mut client = request.get_client();
19 |         let db = doc.get_str("$db").unwrap();
20 |         let tables = client.get_tables(db);
21 |         let collections = tables
22 |             .into_iter()
23 |             .map(|t| {
24 |                 bson!({
25 |                     "name": t,
26 |                     "type": "collection",
27 |                     "options": {},
28 |                     "info": {
29 |                         "readOnly": false,
30 |                     },
31 |                     "idIndex": {},
32 |                 })
33 |             })
34 |             .collect();
35 | 
36 |         Ok(doc! {
37 |             "cursor": doc! {
38 |                 "id": Bson::Int64(0),
39 |                 "ns": Bson::String(format!("{}.$cmd.listCollections", db)),
40 |                 "firstBatch": Bson::Array(collections),
41 |             },
42 |             "ok": Bson::Double(1.0),
43 |         })
44 |     }
45 | }
--------------------------------------------------------------------------------
/tests/list_databases_test.rs:
--------------------------------------------------------------------------------
 1 | use mongodb::bson::doc;
 2 | 
 3 | mod common;
 4 | 
 5 | #[test]
 6 | fn test_list_database() {
 7 |     let ctx = common::setup_with_pg_db("test_list_1", true);
 8 | 
 9 |     // initially only public database is listed
10 |     let res = ctx.mongodb().list_databases(None, None).unwrap();
11 |     assert_eq!(res.len(), 1);
12 |     assert_eq!(res.get(0).unwrap().name, "public");
13 | 
14 |     ctx.col().insert_one(doc! { "x": 1 }, None).unwrap();
15 | 
16 |     // lists the newly created database
17 |     let res = ctx.mongodb().list_databases(None, None).unwrap();
18 |     assert_eq!(res.len(), 2);
19 |     let dbs = res.iter().map(|db| db.name.to_owned()).collect::<Vec<_>>();
20 |     assert!(dbs.contains(&"public".to_string()));
21 |     assert!(dbs.contains(&ctx.db));
22 | }
23 | 
24 | #[test]
25 | fn test_list_database_name_only() {
26 |     let ctx = common::setup_with_pg_db("test_list_2", true);
27 | 
28 |     let res = ctx.mongodb().list_database_names(None, None).unwrap();
29 |     assert_eq!(res.len(), 1);
30 |     assert_eq!(res.get(0).unwrap(), "public");
31 | }
32 | 
33 | #[test]
34 | fn test_list_database_with_table_with_spaces() {
35 |     let ctx = common::setup_with_pg_db("test_list_3", true);
36 | 
37 |     ctx.db()
38 |         .collection("my col")
39 |         .insert_one(doc! { "x": 1 }, None)
40 |         .unwrap();
41 | 
42 |     let res = ctx.mongodb().list_databases(None, None).unwrap();
43 |     assert_eq!(res.len(), 2);
44 | }
--------------------------------------------------------------------------------
/src/commands/aggregate/count_stage.rs:
--------------------------------------------------------------------------------
 1 | use super::sql_statement::SqlStatement;
 2 | use eyre::Result;
 3 | 
 4 | pub fn process_count(count_field: &str) -> Result<SqlStatement> {
 5 |     if count_field.contains(".") {
 6 |         return Err(eyre::eyre!("the count field cannot contain '.'"));
 7 |     }
 8 | 
 9 |     if count_field.contains("$") {
10 |         return Err(eyre::eyre!("the count field cannot be a $-prefixed path"));
11 |     }
12 | 
13 |     let sql = SqlStatement::builder()
14 |         .field(&format!(
15 |             "json_build_object('{}', COUNT(*))::jsonb AS _jsonb",
16 |             count_field
17 |         ))
18 |         .build();
19 |     Ok(sql)
20 | }
21 | 
22 | #[cfg(test)]
23 | mod tests {
24 |     use super::*;
25 | 
26 |     #[test]
27 |     fn test_process_count() {
28 |         let sql = process_count("total").unwrap();
29 | 
30 |         assert_eq!(
31 |             sql.fields[0],
32 |             r#"json_build_object('total', COUNT(*))::jsonb AS _jsonb"#
33 |         );
34 |     }
35 | 
36 |     #[test]
37 |     fn test_dot_error() {
38 |         let err = process_count("total.name").unwrap_err();
39 |         assert_eq!(err.to_string(), "the count field cannot contain '.'");
40 |     }
41 | 
42 |     #[test]
43 |     fn test_dollar_sign_error() {
44 |         let err = process_count("$name").unwrap_err();
45 |         assert_eq!(
46 |             err.to_string(),
47 |             "the count field cannot be a $-prefixed path"
48 |         );
49 |     }
50 | }
--------------------------------------------------------------------------------
/.github/workflows/release-drafter.yml:
--------------------------------------------------------------------------------
 1 | name: Release Drafter
 2 | 
 3 | on:
 4 |   push:
 5 |     # branches to consider in the event; optional, defaults to all
 6 |     branches:
 7 |       - master
 8 |   # pull_request event is required only for autolabeler
 9 |   pull_request:
10 |     # Only following types are handled by the action, but one can default to all as well
11 |     types: [opened, reopened, synchronize]
12 |   # pull_request_target event is required for autolabeler to support PRs from forks
13 |   # pull_request_target:
14 |   #   types: [opened, reopened, synchronize]
15 | 
16 | permissions:
17 |   contents: read
18 | 
19 | jobs:
20 |   update_release_draft:
21 |     permissions:
22 |       contents: write # for release-drafter/release-drafter to create a github release
23 |       pull-requests: write # for release-drafter/release-drafter to add label to PR
24 |     runs-on: ubuntu-latest
25 |     steps:
26 |       # (Optional) GitHub Enterprise requires GHE_HOST variable set
27 |       #- name: Set GHE_HOST
28 |       #  run: |
29 |       #    echo "GHE_HOST=${GITHUB_SERVER_URL##https:\/\/}" >> $GITHUB_ENV
30 | 
31 |       # Drafts your next Release notes as Pull Requests are merged into "master"
32 |       - uses: release-drafter/release-drafter@v5
33 |         # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
34 |         # with:
35 |         #   config-name: my-config.yml
36 |         #   disable-autolabeler: true
37 |         env:
38 |           GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
 1 | name: CI
 2 | 
 3 | # This workflow runs tests and builds for each push
 4 | 
 5 | on:
 6 |   push:
 7 |     branches:
 8 |       - master
 9 |       - 'feature-**'
10 |       - 'fix-**'
11 | 
12 | jobs:
13 | 
14 |   test_oxide:
15 |     runs-on: ubuntu-latest
16 |     services:
17 |       postgres:
18 |         image: postgres
19 |         env:
20 |           POSTGRES_USER: postgres
21 |           POSTGRES_PASSWORD: postgres
22 |         options: >-
23 |           --health-cmd pg_isready
24 |           --health-interval 10s
25 |           --health-timeout 5s
26 |           --health-retries 5
27 |         ports:
28 |           - 5432:5432
29 |     env:
30 |       DATABASE_URL: postgresql://postgres:postgres@localhost:5432/postgres
31 |       TEST_DATABASE_URL: postgresql://postgres:postgres@localhost:5432
32 |     steps:
33 |       - uses: actions/checkout@v2
34 | 
35 |       - name: Install cargo-nextest
36 |         uses: baptiste0928/cargo-install@v1
37 |         with:
38 |           crate: cargo-nextest
39 |           locked: true
40 | 
41 |       - name: Toolchain info
42 |         run: |
43 |           cargo --version --verbose
44 |           rustc --version
45 |           cargo clippy --version
46 | 
47 |       - name: Test
48 |         run: |
49 |           make test
50 | #   build_oxide:
51 | #     runs-on: ubuntu-latest
52 | #     steps:
53 | #       - uses: actions/checkout@v2
54 | 
55 | #       - name: Build Docker
56 | #         run: |
57 | #           make docker
58 | #       - name: Docker image info
59 | #         run: |
60 | #           docker images
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
 1 | [package]
 2 | description = "A database compatible with MongoDB Wire Protocol that uses PostgreSQL for backend storage."
 3 | edition = "2021"
 4 | name = "oxide"
 5 | version = "0.1.7"
 6 | 
 7 | [dependencies]
 8 | autoincrement = {version = "1", features = ["derive", "async"]}
 9 | bson = {version = "2.4.0", features = ["chrono-0_4"]}
10 | byteorder = "1.4.3"
11 | chrono = "0.4"
12 | clap = {version = "3.2.8", features = ["derive"]}
13 | dotenv = "0.15"
14 | env_logger = "0.9.0"
15 | futures = "0.3"
16 | indoc = "1.0.6"
17 | log = "0.4"
18 | # mongodb = {version = "2.1", features = ["tokio-sync"], default-features = false}
19 | # mongodb-language-model = {path = "../mongodb-language-model-rust"}
20 | color-eyre = "0.6"
21 | colored = "2.0.0"
22 | deno_core = "0.149.0"
23 | dirs = "4.0.0"
24 | eyre = "0.6"
25 | mongodb-language-model = "0.1.6"
26 | nickel = "0.11"
27 | portpicker = "0.1"
28 | postgres = {version = "0.19", features = ["with-serde_json-1"]}
29 | pretty-hex = "0.3.0"
30 | r2d2 = "0.8.10"
31 | r2d2_postgres = "0.18.1"
32 | regex = "1"
33 | rust-embed = "6.4.0"
34 | rustyline = "10.0.0"
35 | serde = "1"
36 | serde_json = {version = "1", features = ["preserve_order"]}
37 | serde_v8 = "0.62.0"
38 | sql_lexer = "0.9.3"
39 | tokio = {version = "1.19.2", features = ["full"]}
40 | 
41 | [dependencies.mongodb]
42 | default-features = false
43 | features = ["sync"]
44 | version = "2.3.0"
45 | 
46 | [dependencies.uuid]
47 | features = [
48 |     "v4",                # Lets you generate random UUIDs
49 |     "fast-rng",          # Use a faster (but still sufficiently random) RNG
50 |     "macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
51 | ]
52 | version = "1.1.2"
--------------------------------------------------------------------------------
/src/wire/op_reply.rs:
--------------------------------------------------------------------------------
 1 | use bson::{doc, ser, Bson, Document};
 2 | use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
 3 | use std::io::{BufRead, Cursor, Read, Write};
 4 | 
 5 | use super::{MsgHeader, Serializable};
 6 | 
 7 | #[derive(Debug, Clone)]
 8 | pub struct OpReply {
 9 |     pub header: MsgHeader,
10 |     pub flags: u32,
11 |     pub cursor_id: u64,
12 |     pub starting_from: u32,
13 |     pub number_returned: u32,
14 |     pub documents: Vec<Document>,
15 | }
16 | 
17 | impl OpReply {
18 |     pub fn new(
19 |         header: MsgHeader,
20 |         flags: u32,
21 |         cursor_id: u64,
22 |         starting_from: u32,
23 |         number_returned: u32,
24 |         documents: Vec<Document>,
25 |     ) -> Self {
26 |         OpReply {
27 |             header,
28 |             flags,
29 |             cursor_id,
30 |             starting_from,
31 |             number_returned,
32 |             documents,
33 |         }
34 |     }
35 | }
36 | 
37 | impl Serializable for OpReply {
38 |     fn to_vec(&self) -> Vec<u8> {
39 |         let mut writer = Cursor::new(Vec::new());
40 |         writer.write_all(&self.header.to_vec()).unwrap();
41 |         writer.write_u32::<LittleEndian>(self.flags).unwrap();
42 |         writer.write_u64::<LittleEndian>(self.cursor_id).unwrap();
43 |         writer
44 |             .write_u32::<LittleEndian>(self.starting_from)
45 |             .unwrap();
46 |         writer
47 |             .write_u32::<LittleEndian>(self.number_returned)
48 |             .unwrap();
49 | 
50 |         // FIXME support multiple documents
51 |         let bson_vec = ser::to_vec(&self.documents[0]).unwrap();
52 |         let bson_data: &[u8] = &bson_vec;
53 |         writer.write(bson_data).unwrap();
54 | 
55 |         writer.into_inner()
56 |     }
57 | }
--------------------------------------------------------------------------------
/src/commands/mod.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::{CommandExecutionError, Request};
 2 | use bson::Document;
 3 | 
 4 | mod aggregate;
 5 | mod build_info;
 6 | mod coll_stats;
 7 | mod connection_status;
 8 | mod count;
 9 | mod create;
10 | mod create_indexes;
11 | mod db_stats;
12 | mod delete;
13 | mod drop;
14 | mod drop_database;
15 | mod find;
16 | mod find_and_modify;
17 | mod get_cmd_line_opts;
18 | mod get_parameter;
19 | mod hello;
20 | mod insert;
21 | mod is_master;
22 | mod list_collections;
23 | mod list_databases;
24 | mod list_indexes;
25 | mod ping;
26 | mod update;
27 | mod whats_my_uri;
28 | 
29 | pub use self::aggregate::build_sql;
30 | pub use self::aggregate::Aggregate;
31 | pub use self::build_info::BuildInfo;
32 | pub use self::coll_stats::CollStats;
33 | pub use self::connection_status::ConnectionStatus;
34 | pub use self::count::Count;
35 | pub use self::create::Create;
36 | pub use self::create_indexes::CreateIndexes;
37 | pub use self::db_stats::DbStats;
38 | pub use self::delete::Delete;
39 | pub use self::drop::Drop;
40 | pub use self::drop_database::DropDatabase;
41 | pub use self::find::Find;
42 | pub use self::find_and_modify::FindAndModify;
43 | pub use self::get_cmd_line_opts::GetCmdLineOpts;
44 | pub use self::get_parameter::GetParameter;
45 | pub use self::hello::Hello;
46 | pub use self::insert::Insert;
47 | pub use self::is_master::IsMaster;
48 | pub use self::list_collections::ListCollections;
49 | pub use self::list_databases::ListDatabases;
50 | pub use self::list_indexes::ListIndexes;
51 | pub use self::ping::Ping;
52 | pub use self::update::Update;
53 | pub use self::whats_my_uri::WhatsMyUri;
54 | 
55 | pub trait Handler {
56 |     fn new() -> Self;
57 |     fn handle(
58 |         &self,
59 |         request: &Request,
60 |         msg: &Vec<Document>,
61 |     ) -> Result<Document, CommandExecutionError>;
62 | }
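--------------------------------------------------------------------------------
[illustrative sketch — not a repository file]
--------------------------------------------------------------------------------
Adding a command to oxide means implementing the Handler trait above and
re-exporting the type from this module. A minimal no-op handler inside the
crate (the dispatch wiring in handler.rs is not shown here):

use crate::commands::Handler;
use crate::handler::{CommandExecutionError, Request};
use bson::{doc, Bson, Document};

pub struct Noop {}

impl Handler for Noop {
    fn new() -> Self {
        Noop {}
    }

    fn handle(
        &self,
        _request: &Request,
        _msg: &Vec<Document>,
    ) -> Result<Document, CommandExecutionError> {
        // Every handler replies with at least { ok: 1.0 }.
        Ok(doc! { "ok": Bson::Double(1.0) })
    }
}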
--------------------------------------------------------------------------------
/src/commands/delete.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::{CommandExecutionError, Request};
 2 | use crate::{commands::Handler, pg::SqlParam};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct Delete {}
 6 | 
 7 | impl Handler for Delete {
 8 |     fn new() -> Self {
 9 |         Delete {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         request: &Request,
15 |         docs: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         let doc = &docs[0];
18 |         let db = doc.get_str("$db").unwrap();
19 |         let collection = doc.get_str("delete").unwrap();
20 |         let deletes = doc.get_array("deletes").unwrap();
21 |         let sp = SqlParam::new(db, collection);
22 |         let mut client = request.get_client();
23 | 
24 |         let exists = sp.exists(&mut client);
25 | 
26 |         match exists {
27 |             Ok(exists) => {
28 |                 if !exists {
29 |                     return Ok(doc! {
30 |                         "n": Bson::Int64(0),
31 |                         "ok": Bson::Double(1.0),
32 |                     });
33 |                 }
34 |             }
35 |             Err(e) => {
36 |                 return Err(CommandExecutionError::new(e.to_string()));
37 |             }
38 |         };
39 | 
40 |         if deletes.len() > 1 {
41 |             return Err(CommandExecutionError::new(
42 |                 "Only one delete operation is supported".to_string(),
43 |             ));
44 |         }
45 | 
46 |         let delete_doc = deletes[0].as_document().unwrap();
47 |         let filter = delete_doc.get_document("q").unwrap();
48 |         let limit: Option<i32> = delete_doc.get_i32("limit").ok();
49 | 
50 |         let n = client.delete(&sp, Some(filter), limit).unwrap();
51 | 
52 |         Ok(doc! {
53 |             "n": Bson::Int64(n as i64),
54 |             "ok": Bson::Double(1.0),
55 |         })
56 |     }
57 | }
--------------------------------------------------------------------------------
/src/commands/db_stats.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::Request;
 2 | use crate::{commands::Handler, handler::CommandExecutionError};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct DbStats {}
 6 | 
 7 | impl Handler for DbStats {
 8 |     fn new() -> Self {
 9 |         DbStats {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         request: &Request,
15 |         docs: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         let doc = &docs[0];
18 |         let db = doc.get_str("$db").unwrap();
19 |         let scale = doc.get_f64("scale").unwrap_or(1.0);
20 |         let mut client = request.get_client();
21 |         let stats = client.schema_stats(db, None).unwrap();
22 | 
23 |         let table_count: i32 = stats.get("TableCount");
24 |         let row_count: i32 = stats.get("RowCount");
25 |         let total_size: i32 = stats.get("TotalSize");
26 |         let index_size: i32 = stats.get("IndexSize");
27 |         let relation_size: i32 = stats.get("RelationSize");
28 |         let index_count: i32 = stats.get("IndexCount");
29 | 
30 |         let avg_obj_size = if row_count > 0 {
31 |             relation_size as f64 / row_count as f64
32 |         } else {
33 |             0.0
34 |         };
35 | 
36 |         Ok(doc! {
37 |             "db": db,
38 |             "collections": Bson::Int32(table_count.try_into().unwrap()),
39 |             "views": Bson::Int32(0), // TODO
40 |             "objects": Bson::Int32(row_count),
41 |             "avgObjSize": Bson::Double(avg_obj_size),
42 |             "dataSize": Bson::Double(relation_size as f64/scale),
43 |             "indexes": Bson::Int32(index_count),
44 |             "indexSize": Bson::Double(index_size as f64/scale),
45 |             "totalSize": Bson::Double(total_size as f64/scale),
46 |             "scaleFactor": scale,
47 |             "ok": Bson::Double(1.0),
48 |         })
49 |     }
50 | }
--------------------------------------------------------------------------------
/src/commands/count.rs:
--------------------------------------------------------------------------------
 1 | #![allow(dead_code)]
 2 | use crate::commands::Handler;
 3 | use crate::handler::{CommandExecutionError, Request};
 4 | use crate::pg::SqlParam;
 5 | use bson::{doc, Bson, Document};
 6 | 
 7 | pub struct Count {}
 8 | 
 9 | impl Handler for Count {
10 |     fn new() -> Self {
11 |         Count {}
12 |     }
13 | 
14 |     fn handle(
15 |         &self,
16 |         request: &Request,
17 |         docs: &Vec<Document>,
18 |     ) -> Result<Document, CommandExecutionError> {
19 |         let doc = &docs[0];
20 |         let db = doc.get_str("$db").unwrap();
21 |         let collection = doc.get_str("count").unwrap();
22 |         let sp = SqlParam::new(db, collection);
23 | 
24 |         let mut client = request.get_client();
25 | 
26 |         // returns empty if db or collection doesn't exist
27 |         if !client.table_exists(db, collection).unwrap() {
28 |             return Ok(doc! {
29 |                 "n": 0,
30 |                 "ok": Bson::Double(1.0),
31 |             });
32 |         }
33 | 
34 |         let filter = if doc.contains_key("filter") {
35 |             Some(doc.get_document("filter").unwrap().clone())
36 |         } else {
37 |             None
38 |         };
39 | 
40 |         let r = client.query("SELECT COUNT(*) FROM %table%", sp, filter, &[]);
41 |         match r {
42 |             Ok(rows) => {
43 |                 let row = rows.iter().next().unwrap();
44 |                 let n: i64 = row.get(0);
45 |                 Ok(doc! {
46 |                     "n": Bson::Int32(n as i32),
47 |                     "ok": Bson::Double(1.0),
48 |                 })
49 |             }
50 |             Err(error) => {
51 |                 log::error!("Error during count: {:?} - doc: {}", error, &doc);
52 |                 Err(CommandExecutionError::new(format!(
53 |                     "error during count: {:?}",
54 |                     error
55 |                 )))
56 |             }
57 |         }
58 |     }
59 | }
--------------------------------------------------------------------------------
/src/commands/list_databases.rs:
--------------------------------------------------------------------------------
 1 | use crate::handler::Request;
 2 | use crate::{commands::Handler, handler::CommandExecutionError};
 3 | use bson::{doc, Bson, Document};
 4 | 
 5 | pub struct ListDatabases {}
 6 | 
 7 | impl Handler for ListDatabases {
 8 |     fn new() -> Self {
 9 |         ListDatabases {}
10 |     }
11 | 
12 |     fn handle(
13 |         &self,
14 |         request: &Request,
15 |         docs: &Vec<Document>,
16 |     ) -> Result<Document, CommandExecutionError> {
17 |         let doc = &docs[0];
18 |         let name_only = doc.get_bool("nameOnly").unwrap_or(false);
19 |         let mut client = request.get_client();
20 | 
21 |         let mut total_size: i64 = 0;
22 |         let mut databases: Vec<Bson> = vec![];
23 |         for schema in client.get_schemas() {
24 |             if schema.starts_with("pg_") || schema == "information_schema" {
25 |                 continue;
26 |             }
27 |             if name_only {
28 |                 databases.push(doc!["name": schema].into());
29 |             } else {
30 |                 let mut size: i64 = 0;
31 |                 for table in client.get_tables(&schema) {
32 |                     let db_size = client.get_table_size(&schema, &table);
33 |                     size += db_size;
34 |                     total_size += db_size;
35 |                 }
36 |                 let empty = size <= 0;
37 |                 databases.push(doc!["name": schema, "sizeOnDisk": size, "empty": empty].into());
38 |             }
39 |         }
40 | 
41 |         let databases_doc = Bson::Array(databases);
42 |         if name_only {
43 |             Ok(doc! {
44 |                 "databases": databases_doc,
45 |                 "ok": Bson::Double(1.0),
46 |             })
47 |         } else {
48 |             Ok(doc! {
49 |                 "databases": databases_doc,
50 |                 "totalSize": Bson::Int64(total_size),
51 |                 "totalSizeMb": Bson::Int64(total_size/1024/1024),
52 |                 "ok": Bson::Double(1.0),
53 |             })
54 |         }
55 |     }
56 | }
--------------------------------------------------------------------------------
/src/commands/aggregate/group_id.rs:
--------------------------------------------------------------------------------
 1 | use crate::utils::field_to_jsonb;
 2 | 
 3 | use super::sql_statement::SqlStatement;
 4 | use bson::{Bson, Document};
 5 | use eyre::Result;
 6 | 
 7 | pub fn process_id(doc: &mut Document) -> Result<SqlStatement> {
 8 |     let field = doc.remove("_id").unwrap();
 9 | 
10 |     match field {
11 |         Bson::String(str) => process_id_str(str),
12 |         Bson::Document(doc) => process_id_doc(doc),
13 |         t => {
14 |             return Err(eyre::eyre!(
15 |                 "missing implementation for _id with type {:?}",
16 |                 t
17 |             ))
18 |         }
19 |     }
20 | }
21 | 
22 | fn process_id_str(field: String) -> Result<SqlStatement> {
23 |     if let Some(field) = field.strip_prefix("$") {
24 |         let field = field_to_jsonb(field);
25 |         Ok(SqlStatement::builder()
26 |             .field(&format!("{} AS _id", field))
27 |             .group(&"_id")
28 |             .build())
29 |     } else {
30 |         Err(eyre::eyre!("Invalid _id value for $group stage '{}'. Currently only fields with the '$field' notation are supported.", field))
31 |     }
32 | }
33 | 
34 | fn process_id_doc(doc: Document) -> Result<SqlStatement> {
35 |     // FIXME the doc must have exactly one key
36 |     // MongoServerError: An object representing an expression must have exactly one field: { $dateToString: { format: "%Y-%m-%d", date: "$date" }, $other: 1 }
37 |     let (key, value) = doc.iter().next().unwrap();
38 |     match key.as_str() {
39 |         "$dateToString" => {
40 |             let value = value.as_document().unwrap();
41 |             let field = value.get_str("date").unwrap().strip_prefix("$").unwrap();
42 |             let field = format!(
43 |                 "TO_CHAR(TO_TIMESTAMP(({}->>'$d')::numeric / 1000), 'YYYY-MM-DD') AS _id",
44 |                 field_to_jsonb(field)
45 |             );
46 | 
47 |             Ok(SqlStatement::builder().field(&field).group("_id").build())
48 |         }
49 |         _ => {
50 |             return Err(eyre::eyre!(
51 |                 "process_id_doc - unhandled _id operation {:?}",
52 |                 key
53 |             ))
54 |         }
55 |     }
56 | }
--------------------------------------------------------------------------------
/src/commands/update.rs:
--------------------------------------------------------------------------------
 1 | #![allow(dead_code)]
 2 | use crate::handler::{CommandExecutionError, Request};
 3 | use crate::parser::parse_update;
 4 | use crate::{commands::Handler, pg::SqlParam, pg::UpdateResult};
 5 | use bson::{doc, Bson, Document};
 6 | 
 7 | pub struct Update {}
 8 | 
 9 | impl Handler for Update {
10 |     fn new() -> Self {
11 |         Update {}
12 |     }
13 | 
14 |     fn handle(
15 |         &self,
16 |         request: &Request,
17 |         docs: &Vec<Document>,
18 |     ) -> Result<Document, CommandExecutionError> {
19 |         let doc = &docs[0];
20 |         let db = doc.get_str("$db").unwrap();
21 |         let collection = doc.get_str("update").unwrap();
22 |         let updates = doc.get_array("updates").unwrap();
23 |         let sp = SqlParam::new(db, collection);
24 | 
25 |         let mut client = request.get_client();
26 |         client.create_table_if_not_exists(db, collection).unwrap();
27 | 
28 |         let mut n = 0;
29 |         for update in updates {
30 |             let doc = update.as_document().unwrap();
31 |             let q = doc.get_document("q").unwrap();
32 |             let update_doc = parse_update(doc.get_document("u").unwrap());
33 |             let upsert = doc.get_bool("upsert").unwrap_or(false);
34 |             let multi = doc.get_bool("multi").unwrap_or(false);
35 | 
36 |             if update_doc.is_err() {
37 |                 return Err(CommandExecutionError::new(format!("{:?}", update_doc)));
38 |             }
39 | 
40 |             let result = client.update(
41 |                 &sp,
42 |                 Some(q),
43 |                 None,
44 |                 update_doc.unwrap(),
45 |                 upsert,
46 |                 multi,
47 |                 false,
48 |             );
49 | 
50 |             match result {
51 |                 Ok(UpdateResult::Count(total)) => n += total,
52 |                 Ok(UpdateResult::Document(_)) => n += 1,
53 |                 Err(err) => return Err(CommandExecutionError::new(format!("{:?}", err))),
54 |             }
55 |         }
56 | 
57 |         Ok(doc! {
58 |             "n": Bson::Int64(n.try_into().unwrap()),
59 |             "nModified": Bson::Int64(n as i64),
60 |             "ok": Bson::Double(1.0),
61 |         })
62 |     }
63 | }
--------------------------------------------------------------------------------
/tests/create_indexes_test.rs:
--------------------------------------------------------------------------------
 1 | use bson::doc;
 2 | use mongodb::IndexModel;
 3 | 
 4 | mod common;
 5 | 
 6 | #[test]
 7 | fn create_indexes_test() {
 8 |     let ctx = common::setup();
 9 | 
10 |     let model = IndexModel::builder().keys(doc! { "x": 1, "z": 1 }).build();
11 |     let options = None;
12 |     ctx.col().create_index(model, options).unwrap();
13 | 
14 |     let cursor = ctx.col().list_indexes(None).unwrap();
15 |     let indexes = cursor
16 |         .collect::<Vec<_>>()
17 |         .iter()
18 |         .map(|x| x.clone().unwrap())
19 |         .collect::<Vec<_>>();
20 |     let index = indexes
21 |         .iter()
22 |         .find(|x| x.keys == doc! { "x": 1, "z": 1 })
23 |         .unwrap();
24 |     assert_eq!(index.keys, doc! { "x": 1, "z": 1 });
25 | }
26 | 
27 | #[test]
28 | fn create_indexes_test_already_existing() {
29 |     let ctx = common::setup();
30 | 
31 |     let model = IndexModel::builder().keys(doc! { "a": 1, "b": 1 }).build();
32 |     let options = None;
33 |     ctx.col()
34 |         .create_index(model.clone(), options.clone())
35 |         .unwrap();
36 |     ctx.col().create_index(model, options).unwrap();
37 | 
38 |     let cursor = ctx.col().list_indexes(None).unwrap();
39 |     let indexes = cursor
40 |         .collect::<Vec<_>>()
41 |         .iter()
42 |         .map(|x| x.clone().unwrap())
43 |         .collect::<Vec<_>>();
44 |     let index = indexes
45 |         .iter()
46 |         .find(|x| x.keys == doc! { "a": 1, "b": 1 })
47 |         .unwrap();
48 |     assert_eq!(index.keys, doc! { "a": 1, "b": 1 });
49 | }
50 | 
51 | #[test]
52 | fn create_nested_index() {
53 |     let ctx = common::setup();
54 | 
55 |     let model = IndexModel::builder()
56 |         .keys(doc! { "a.z": 1, "b.c.d": 1 })
57 |         .build();
58 |     let options = None;
59 |     ctx.col().create_index(model, options).unwrap();
60 | 
61 |     let cursor = ctx.col().list_indexes(None).unwrap();
62 |     let indexes = cursor
63 |         .collect::<Vec<_>>()
64 |         .iter()
65 |         .map(|x| x.clone().unwrap())
66 |         .collect::<Vec<_>>();
67 |     let index = indexes
68 |         .iter()
69 |         .find(|x| x.keys == doc! { "a.z": 1, "b.c.d": 1 })
70 |         .unwrap();
71 |     assert_eq!(index.keys, doc! { "a.z": 1, "b.c.d": 1 });
72 | }
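--------------------------------------------------------------------------------
[illustrative sketch — not a repository file]
--------------------------------------------------------------------------------
Following process_id_str above, a $group _id of "$city" should contribute a
projected field plus a GROUP BY on the alias. Assuming field_to_jsonb renders
"city" as _jsonb->'city' (as the match_stage test earlier suggests), a test
would look like:

#[test]
fn sketch_process_id_str() {
    let mut doc = bson::doc! { "_id": "$city" };
    let sql = process_id(&mut doc).unwrap();
    // Expected rendering is an assumption based on field_to_jsonb's output
    // seen elsewhere in the aggregate module's tests.
    assert_eq!(sql.fields[0], "_jsonb->'city' AS _id");
}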
{ 56 | "firstBatch": res, 57 | "id": Bson::Int64(0), 58 | "ns": format!("{}.{}", db, collection), 59 | }, 60 | "ok": Bson::Double(1.0), 61 | }) 62 | } 63 | Err(error) => { 64 | log::error!("Error during find: {:?} - doc: {}", error, &doc); 65 | Err(CommandExecutionError::new(format!( 66 | "error during find: {:?}", 67 | error 68 | ))) 69 | } 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/runtime.js: -------------------------------------------------------------------------------- 1 | class Collection { 2 | static get(db, name) { 3 | return new Collection(db, name); 4 | } 5 | 6 | constructor(db, name) { 7 | this.db = db; 8 | this.name = name; 9 | } 10 | 11 | find(filter = {}) { 12 | return Deno.core.opSync("op_find", this, filter); 13 | } 14 | 15 | insertOne(doc) { 16 | return Deno.core.opSync("op_insert_one", this, doc); 17 | } 18 | 19 | insertMany(docs) { 20 | return Deno.core.opSync("op_insert_many", this, docs); 21 | } 22 | 23 | updateOne(doc, update) { 24 | return Deno.core.opSync("op_update_one", this, doc, update); 25 | } 26 | 27 | updateMany(docs, update) { 28 | return Deno.core.opSync("op_update_many", this, docs, update); 29 | } 30 | 31 | deleteOne(doc) { 32 | return Deno.core.opSync("op_delete_one", this, doc); 33 | } 34 | 35 | deleteMany(docs) { 36 | return Deno.core.opSync("op_delete_many", this, docs); 37 | } 38 | 39 | aggregate(pipeline) { 40 | return Deno.core.opSync("op_aggregate", this, pipeline); 41 | } 42 | } 43 | 44 | class Db { 45 | static get(global) { 46 | const { db, dbAddr, dbPort } = global._state; 47 | const target = new Db(db, dbAddr, dbPort); 48 | const handler = { 49 | get(target, prop, _receiver) { 50 | if ( 51 | !target.hasOwnProperty(prop) && 52 | typeof target[prop] !== "function" 53 | ) { 54 | return Collection.get(target, prop); 55 | } 56 | return Reflect.get(...arguments); 57 | }, 58 | }; 59 | 60 | return new Proxy(target, handler); 61 | } 62 | 63 | constructor(name = "test", addr, port) { 64 | this.name = name; 65 | this.addr = addr; 66 | this.port = port; 67 | } 68 | 69 | listCollections() { 70 | return Deno.core.opSync("op_list_collections", this); 71 | } 72 | } 73 | 74 | function ObjectId(value) { 75 | return { $oid: value }; 76 | } 77 | 78 | ((globalThis) => { 79 | const core = Deno.core; 80 | 81 | function argsToMessage(...args) { 82 | return args.map((arg) => JSON.stringify(arg)).join(" "); 83 | } 84 | 85 | const console = { 86 | log: (...args) => { 87 | core.print(`${argsToMessage(...args)}\n`, false); 88 | }, 89 | error: (...args) => { 90 | core.print(`${argsToMessage(...args)}\n`, true); 91 | }, 92 | }; 93 | 94 | globalThis.console = console; 95 | 96 | globalThis.__defineGetter__("db", () => { 97 | return Db.get(globalThis); 98 | }); 99 | 100 | globalThis.use = (name) => { 101 | globalThis._state = globalThis._state || {}; 102 | globalThis._state.db = name; 103 | return name; 104 | }; 105 | })(globalThis); 106 | -------------------------------------------------------------------------------- /src/threadpool.rs: -------------------------------------------------------------------------------- 1 | use std::sync::mpsc; 2 | use std::sync::Arc; 3 | use std::sync::Mutex; 4 | use std::thread; 5 | 6 | pub struct ThreadPool { 7 | workers: Vec, 8 | sender: mpsc::Sender, 9 | } 10 | 11 | type Job = Box; 12 | 13 | enum Message { 14 | NewJob(Job), 15 | Terminate, 16 | } 17 | 18 | impl ThreadPool { 19 | /// Create a new ThreadPool. 20 | /// 21 | /// The size is the number of threads in the pool. 
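/// A minimal usage sketch (illustrative, not from the original docs):
///
/// ```no_run
/// use oxide::threadpool::ThreadPool;
///
/// let pool = ThreadPool::new(4);
/// pool.execute(|| println!("running a job"));
/// ```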
22 | /// 23 | /// # Panics 24 | /// 25 | /// The `new` function will panic if the size is zero. 26 | pub fn new(size: usize) -> ThreadPool { 27 | assert!(size > 0); 28 | 29 | let (sender, receiver) = mpsc::channel(); 30 | 31 | let receiver = Arc::new(Mutex::new(receiver)); 32 | 33 | let mut workers = Vec::with_capacity(size); 34 | 35 | for id in 0..size { 36 | workers.push(Worker::new(id, Arc::clone(&receiver))); 37 | } 38 | 39 | ThreadPool { workers, sender } 40 | } 41 | 42 | pub fn execute(&self, f: F) 43 | where 44 | F: FnOnce() + Send + 'static, 45 | { 46 | let job = Box::new(f); 47 | 48 | self.sender.send(Message::NewJob(job)).unwrap(); 49 | } 50 | } 51 | 52 | impl Drop for ThreadPool { 53 | fn drop(&mut self) { 54 | log::trace!("Sending terminate message to all workers."); 55 | 56 | for _ in &self.workers { 57 | self.sender.send(Message::Terminate).unwrap(); 58 | } 59 | 60 | log::trace!("Shutting down all workers."); 61 | 62 | for worker in &mut self.workers { 63 | log::trace!("Shutting down worker {}", worker.id); 64 | 65 | if let Some(thread) = worker.thread.take() { 66 | thread.join().unwrap(); 67 | } 68 | } 69 | } 70 | } 71 | 72 | struct Worker { 73 | id: usize, 74 | thread: Option>, 75 | } 76 | 77 | impl Worker { 78 | fn new(id: usize, receiver: Arc>>) -> Worker { 79 | let thread = thread::spawn(move || loop { 80 | let message = receiver.lock().unwrap().recv().unwrap(); 81 | 82 | match message { 83 | Message::NewJob(job) => { 84 | log::trace!("Worker {} got a job; executing.", id); 85 | 86 | job(); 87 | } 88 | Message::Terminate => { 89 | log::trace!("Worker {} was told to terminate.", id); 90 | 91 | break; 92 | } 93 | } 94 | }); 95 | 96 | Worker { 97 | id, 98 | thread: Some(thread), 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/commands/list_indexes.rs: -------------------------------------------------------------------------------- 1 | use crate::handler::{CommandExecutionError, Request}; 2 | use crate::{commands::Handler, pg::SqlParam}; 3 | use bson::{doc, Bson, Document}; 4 | use regex::Regex; 5 | 6 | pub struct ListIndexes {} 7 | 8 | impl Handler for ListIndexes { 9 | fn new() -> Self { 10 | ListIndexes {} 11 | } 12 | 13 | fn handle( 14 | &self, 15 | request: &Request, 16 | docs: &Vec, 17 | ) -> Result { 18 | let doc = &docs[0]; 19 | let db = doc.get_str("$db").unwrap(); 20 | let collection = doc.get_str("listIndexes").unwrap(); 21 | let sp = SqlParam::new(db, collection); 22 | 23 | let mut client = request.get_client(); 24 | let tables = client.get_tables(&sp.db); 25 | 26 | if !tables.contains(&collection.to_string()) { 27 | return Err(CommandExecutionError::new(format!( 28 | "Collection '{}' doesn't exist", 29 | collection 30 | ))); 31 | } 32 | 33 | let mut indexes: Vec = vec![]; 34 | for table in tables { 35 | for row in &mut client.get_table_indexes(&sp.db, &table).unwrap() { 36 | let name: String = row.get("indexname"); 37 | let def: String = row.get("indexdef"); 38 | 39 | let mut keys: Document = doc! {}; 40 | for field in parse_index_definition(def.as_str()) { 41 | keys.insert(field, 1); 42 | } 43 | 44 | indexes.push(Bson::Document(doc! { 45 | "v": 2, 46 | "key": keys, 47 | "name": name, 48 | })); 49 | } 50 | } 51 | 52 | return Ok(doc! { 53 | "cursor": doc! 
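// Reply shape sketch, matching the fields assembled above:
// { cursor: { id: 0, ns: "<db>.$cmd.listIndexes.<collection>",
//             firstBatch: [{ v: 2, key: { "<field>": 1 }, name: "<index>" }] },
//   ok: 1.0 }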
{ 54 | "id": Bson::Int64(0), 55 | "ns": format!("{}.$cmd.listIndexes.{}", db, collection), 56 | "firstBatch": Bson::Array(indexes), 57 | }, 58 | "ok": Bson::Double(1.0), 59 | }); 60 | } 61 | } 62 | 63 | fn parse_index_definition(def: &str) -> Vec { 64 | let regex = Regex::new(r"\s->\s'(.*?)'").unwrap(); 65 | def.split("USING btree ") 66 | .nth(1) 67 | .unwrap() 68 | .split(", ") 69 | .map(|field| { 70 | regex 71 | .captures_iter(field) 72 | .map(|c| c[1].to_string()) 73 | .collect::>() 74 | .join(".") 75 | }) 76 | .collect::>() 77 | } 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | use super::*; 82 | 83 | #[test] 84 | fn test_parse_nested_index_definition() { 85 | let def = r#"CREATE INDEX a_z_1_b_c_d_1 ON db_test."test_27edecea-7d1d-44c3-8443-98b100371df7" USING btree ((((_jsonb -> 'a'::text) -> 'z'::text)), ((((_jsonb -> 'b'::text) -> 'c'::text) -> 'd'::text)))"#; 86 | let keys = parse_index_definition(def); 87 | assert_eq!(&keys[0], "a.z"); 88 | assert_eq!(&keys[1], "b.c.d"); 89 | } 90 | 91 | #[test] 92 | fn test_parse_simple_index_definition() { 93 | let def = r#"REATE INDEX a_1_b_1 ON db_test."test_74885191-7780-4f29-9133-f2ced35cbc40" USING btree (((_jsonb -> 'a'::text)), ((_jsonb -> 'b'::text)))"#; 94 | let keys = parse_index_definition(def); 95 | assert_eq!(&keys[0], "a"); 96 | assert_eq!(&keys[1], "b"); 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/deserializer.rs: -------------------------------------------------------------------------------- 1 | use bson::{ser, Bson}; 2 | use chrono::{TimeZone, Utc}; 3 | use serde_json::Value; 4 | 5 | pub trait PostgresJsonDeserializer { 6 | fn from_psql_json(&self) -> Bson; 7 | } 8 | 9 | impl PostgresJsonDeserializer for Value { 10 | fn from_psql_json(&self) -> Bson { 11 | match self { 12 | serde_json::Value::String(s) => Bson::String(s.to_string()), 13 | serde_json::Value::Number(n) => { 14 | let s = n.to_string(); 15 | if s.contains(".") { 16 | Bson::Double(n.as_f64().unwrap()) 17 | } else { 18 | if let Some(n) = n.as_i64() { 19 | Bson::Int32(n.try_into().unwrap()) 20 | } else if let Some(n) = n.as_f64() { 21 | Bson::Double(n) 22 | } else { 23 | panic!("Unsupported number type while attempting to deserialize Value::Number for {}", n); 24 | } 25 | } 26 | } 27 | serde_json::Value::Bool(b) => Bson::Boolean(b.to_owned()), 28 | serde_json::Value::Null => Bson::Null, 29 | serde_json::Value::Array(a) => { 30 | Bson::Array(a.into_iter().map(|v| v.from_psql_json()).collect()) 31 | } 32 | serde_json::Value::Object(o) => { 33 | if o.contains_key("$o") { 34 | return Bson::ObjectId( 35 | bson::oid::ObjectId::parse_str(o["$o"].as_str().unwrap().to_string()) 36 | .unwrap(), 37 | ); 38 | } 39 | if o.contains_key("$d") { 40 | return Bson::DateTime(Utc.timestamp_millis(o["$d"].as_i64().unwrap()).into()); 41 | } 42 | if o.contains_key("$f") { 43 | return Bson::Double(o["$f"].as_f64().unwrap()); 44 | } 45 | if o.contains_key("$j") { 46 | if o.contains_key("s") { 47 | return Bson::JavaScriptCodeWithScope(bson::JavaScriptCodeWithScope { 48 | code: o["$j"].as_str().unwrap().to_string(), 49 | scope: ser::to_document(&o["s"]).unwrap(), 50 | }); 51 | } else { 52 | return Bson::JavaScriptCode(o["$j"].as_str().unwrap().to_string()); 53 | } 54 | } 55 | if o.contains_key("$r") { 56 | return Bson::RegularExpression(bson::Regex { 57 | pattern: o["$r"].as_str().unwrap().to_string(), 58 | options: o["o"].as_str().unwrap().to_string(), 59 | }); 60 | } 61 | let mut m = bson::Document::new(); 62 | for (k, v) in o { 63 | 
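// No `$`-prefixed marker matched, so treat this as a plain JSON object
// and recursively convert each value to BSON.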
m.insert(k, v.from_psql_json()); 64 | } 65 | Bson::Document(m) 66 | } 67 | } 68 | } 69 | } 70 | 71 | #[cfg(test)] 72 | mod tests { 73 | use super::*; 74 | 75 | #[test] 76 | fn test_deserialize_date() { 77 | let json = r#"{"$d":1546300800000}"#; 78 | let bson: serde_json::Value = serde_json::from_str(json).unwrap(); 79 | let bson = bson.from_psql_json(); 80 | println!("{:?}", bson); 81 | assert_eq!(bson, Bson::DateTime(Utc.timestamp(1546300800, 0).into())); 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/wire/op_query.rs: -------------------------------------------------------------------------------- 1 | use bson::{doc, ser, Bson, Document}; 2 | use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; 3 | use std::ffi::CString; 4 | use std::io::{BufRead, Cursor, Read, Write}; 5 | 6 | use crate::handler::{Request, Response}; 7 | 8 | use super::{ 9 | MsgHeader, OpCode, OpReply, Replyable, Serializable, UnknownMessageKindError, HEADER_SIZE, 10 | OP_REPLY, 11 | }; 12 | 13 | #[derive(Debug, Clone)] 14 | pub struct OpQuery { 15 | pub header: MsgHeader, 16 | pub flags: u32, 17 | pub collection: String, 18 | pub number_to_skip: u32, 19 | pub number_to_return: u32, 20 | pub query: Document, 21 | pub return_fields: Option, 22 | } 23 | 24 | impl OpQuery { 25 | pub fn parse(header: MsgHeader, cursor: &mut Cursor<&[u8]>) -> OpQuery { 26 | let flags = cursor.read_u32::().unwrap(); 27 | 28 | // collection is a CString 29 | let mut buffer: Vec = vec![]; 30 | cursor.read_until(0, &mut buffer).unwrap(); 31 | let collection = unsafe { CString::from_vec_unchecked(buffer) } 32 | .to_string_lossy() 33 | .to_string(); 34 | 35 | let number_to_skip = cursor.read_u32::().unwrap(); 36 | let number_to_return = cursor.read_u32::().unwrap(); 37 | let mut new_cursor = cursor.clone(); 38 | new_cursor.set_position(cursor.position()); 39 | 40 | let len = cursor.get_ref().len(); 41 | if (cursor.position() as usize) < len - 1 { 42 | return OpQuery { 43 | header, 44 | flags, 45 | collection, 46 | number_to_skip, 47 | number_to_return, 48 | query: doc! 
{}, 49 | return_fields: None, 50 | }; 51 | } 52 | 53 | let query = Document::from_reader(cursor).unwrap(); 54 | let bson_vec = ser::to_vec(&query).unwrap(); 55 | let query_size: u64 = bson_vec.len().try_into().unwrap(); 56 | new_cursor.set_position(new_cursor.position() + query_size); 57 | let return_fields = match Document::from_reader(new_cursor) { 58 | Ok(doc) => Some(doc), 59 | Err(_) => None, 60 | }; 61 | 62 | OpQuery { 63 | header, 64 | flags, 65 | collection, 66 | number_to_skip, 67 | number_to_return, 68 | query, 69 | return_fields, 70 | } 71 | } 72 | } 73 | 74 | impl Replyable for OpQuery { 75 | fn reply(&self, res: Response) -> Result, UnknownMessageKindError> { 76 | // FIXME defer this logic to MsgHeader 77 | let bson_vec = ser::to_vec(&res.get_doc()).unwrap(); 78 | let bson_data: &[u8] = &bson_vec; 79 | let message_length = HEADER_SIZE + 20 + bson_data.len() as u32; 80 | 81 | if let OpCode::OpQuery(op_query) = res.get_op_code().to_owned() { 82 | let header = 83 | op_query 84 | .header 85 | .get_response_with_op_code(res.get_id(), message_length, OP_REPLY); 86 | let cursor_id = 0; 87 | let starting_from = 0; 88 | let number_returned = 1; 89 | let docs = vec![res.get_doc().to_owned()]; 90 | 91 | return Ok(OpReply::new( 92 | header, 93 | self.flags, 94 | cursor_id, 95 | starting_from, 96 | number_returned, 97 | docs, 98 | ) 99 | .to_vec()); 100 | } 101 | Err(UnknownMessageKindError) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /docs/protocol-study/packet-study.txt: -------------------------------------------------------------------------------- 1 | Length: 181 (0xb5) bytes 2 | 0000: b5 00 00 00 02 00 00 00 00 00 00 00 dd 07 00 00 ................ 3 | 0010: a5 00 00 00 08 69 73 6d 61 73 74 65 72 00 01 10 .....ismaster... 4 | 0020: 6d 61 78 42 73 6f 6e 4f 62 6a 65 63 74 53 69 7a maxBsonObjectSiz 5 | 0030: 65 00 00 00 00 01 10 6d 61 78 4d 65 73 73 61 67 e......maxMessag 6 | 0040: 65 53 69 7a 65 42 79 74 65 73 00 00 6c dc 02 10 eSizeBytes..l... 7 | 0050: 6d 61 78 57 72 69 74 65 42 61 74 63 68 53 69 7a maxWriteBatchSiz 8 | 0060: 65 00 a0 86 01 00 12 6c 6f 63 61 6c 54 69 6d 65 e......localTime 9 | 0070: 00 d6 4e 10 d4 81 01 00 00 10 6d 69 6e 57 69 72 ..N.......minWir 10 | 0080: 65 56 65 72 73 69 6f 6e 00 00 00 00 00 10 6d 61 eVersion......ma 11 | 0090: 78 57 69 72 65 56 65 72 73 69 6f 6e 00 0d 00 00 xWireVersion.... 12 | 00a0: 00 08 72 65 61 64 4f 6e 6c 79 00 00 10 6f 6b 00 ..readOnly...ok. 13 | 00b0: 01 00 00 00 00 ..... 14 | 15 | 0000: b5 00 00 00 02 00 00 00 00 00 00 00 dd 07 00 00 ................ 16 | 0010: a5 00 00 00 08 69 73 6d 61 73 74 65 72 00 01 10 .....ismaster... 17 | 0020: 6d 61 78 42 73 6f 6e 4f 62 6a 65 63 74 53 69 7a maxBsonObjectSiz 18 | 0030: 65 00 00 00 00 01 10 6d 61 78 4d 65 73 73 61 67 e......maxMessag 19 | 0040: 65 53 69 7a 65 42 79 74 65 73 00 00 6c dc 02 10 eSizeBytes..l... 20 | 0050: 6d 61 78 57 72 69 74 65 42 61 74 63 68 53 69 7a maxWriteBatchSiz 21 | 0060: 65 00 a0 86 01 00 12 6c 6f 63 61 6c 54 69 6d 65 e......localTime 22 | 0070: 00 cd 50 10 d4 81 01 00 00 10 6d 69 6e 57 69 72 ..P.......minWir 23 | 0080: 65 56 65 72 73 69 6f 6e 00 00 00 00 00 10 6d 61 eVersion......ma 24 | 0090: 78 57 69 72 65 56 65 72 73 69 6f 6e 00 0d 00 00 xWireVersion.... 25 | 00a0: 00 08 72 65 61 64 4f 6e 6c 79 00 00 10 6f 6b 00 ..readOnly...ok. 26 | 00b0: 01 00 00 00 00 ..... 
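Note: the two captures above differ only in the localTime int64 payload
(offsets 0x71-0x78); every other byte is identical. Unlike the reference
reply in their-packet-study.txt (190 bytes = 16 header + 4 flag bits +
1 section kind + 169 body), this message is 16 header + 165 BSON = 181
bytes, i.e. it carries no OP_MSG flag bits and no section kind byte.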
27 | 28 | 29 | 30 | Message Length = 181 bytes = 0xb5 (Little Endian b5 00 00 00) 31 | Request Id = 2 = 0x02 (Little Endian 02 00 00 00) 32 | Response To = 0 = 0x00 (Little Endian 00 00 00 00) 33 | OpCode = 2013 = 0x07dd (Little Endian dd 07 00 00) 34 | 35 | BSON Doc Size = 165 bytes = 0xa5 (Little Endian a5 00 00 00) 36 | Type Boolean = 0x08 - 37 | 69 73 6d 61 73 74 65 72 00 = ismaster + 0x00 38 | 01 = boolean true (0x01) 39 | Type int32 = 0x10 - 40 | 6d 61 78 42 73 6f 6e 4f 62 6a 65 63 74 53 69 7a 65 00 = maxBsonObjectSize + 0x00   41 | 00 00 00 01 = 0x01 00 00 00 = 16777216 42 | Type int32 = 0x10 - 43 | 6d 61 78 4d 65 73 73 61 67 65 53 69 7a 65 42 79 74 65 73 00 = maxMessagesBytes + 0x00 44 | 00 6c dc 02 = 0x00 6c dc 02 = 48000000 45 | Type int32 = 0x10 - 46 | 6d 61 78 57 72 69 74 65 42 61 74 63 68 53 69 7a 65 00 = maxWriteBatchSize + 0x00 47 | a0 86 01 00 = 0xa0 86 01 00 = 100000 48 | Type int64 = 0x12 - 49 | 6c 6f 63 61 6c 54 69 6d 65 00 = localTime + 0x00 50 | cd 50 10 d4 81 01 00 00 = 1657120248013 51 | Type int32 = 0x10 - 52 | 6d 69 6e 57 69 72 65 56 65 72 73 69 6f 6e 00 = minWireVersion + 0x00 53 | 00 00 00 00 = 0x00 00 00 00 = 0 54 | Type int32 = 0x10 - 55 | 78 57 69 72 65 56 65 72 73 69 6f 6e 00 = maxWireVersion + 0x00 56 | 0d 00 00 00 = 0x0d 00 00 00 = 13 57 | Type Boolean = 0x08 - 58 | 72 65 61 64 4f 6e 6c 79 00 = readOnly + 0x00 59 | 00 = boolean false (0x00) 60 | Type int32 = 0x10 - 61 | 6f 6b 00 01 00 00 00 00 = ok + 0x00 62 | 01 00 00 00 = 0x01 00 00 00 = 1 63 | 64 | Document end = 0x00 65 | 66 | 67 | -------------------------------------------------------------------------------- /tests/get_parameters_test.rs: -------------------------------------------------------------------------------- 1 | use mongodb::bson::{doc, Bson}; 2 | 3 | mod common; 4 | 5 | #[test] 6 | fn get_parameters_selected_params_test() { 7 | let ctx = common::setup(); 8 | 9 | let res = ctx 10 | .db() 11 | .run_command( 12 | doc! { "getParameter": 1, "acceptApiVersion2": 1, "authSchemaVersion": 1 }, 13 | None, 14 | ) 15 | .unwrap(); 16 | assert_eq!( 17 | res, 18 | doc! { 19 | "acceptApiVersion2": false, 20 | "authSchemaVersion": 5, 21 | "ok": Bson::Double(1.0), 22 | } 23 | ); 24 | } 25 | 26 | #[test] 27 | fn test_get_parameters_selected_params_with_details() { 28 | let ctx = common::setup(); 29 | 30 | let res = ctx 31 | .db() 32 | .run_command( 33 | doc! { 34 | "getParameter": doc! { "showDetails":true }, 35 | "featureCompatibilityVersion": 1, 36 | "quiet": 1 37 | }, 38 | None, 39 | ) 40 | .unwrap(); 41 | assert_eq!( 42 | res, 43 | doc! { 44 | "featureCompatibilityVersion": doc! { 45 | "value": Bson::Double(5.0), 46 | "settableAtRuntime": true, 47 | "settableAtStartup": true, 48 | }, 49 | "quiet": doc! { 50 | "value": false, 51 | "settableAtRuntime": true, 52 | "settableAtStartup": true, 53 | }, 54 | "ok": Bson::Double(1.0), 55 | } 56 | ); 57 | } 58 | 59 | #[test] 60 | fn test_get_parameters_all() { 61 | let ctx = common::setup(); 62 | 63 | let res = ctx 64 | .db() 65 | .run_command(doc! { "getParameter": "*" }, None) 66 | .unwrap(); 67 | assert_eq!( 68 | res, 69 | doc! { 70 | "acceptApiVersion2": false, 71 | "authSchemaVersion": 5, 72 | "tlsMode": "disabled", 73 | "sslMode": "disabled", 74 | "quiet": false, 75 | "featureCompatibilityVersion": Bson::Double(5.0), 76 | "ok": Bson::Double(1.0), 77 | } 78 | ); 79 | } 80 | 81 | #[test] 82 | fn test_get_parameters_all_with_details() { 83 | let ctx = common::setup(); 84 | 85 | let res = ctx 86 | .db() 87 | .run_command( 88 | doc! 
{ "getParameter": doc!{ "allParameters": true, "showDetails": true } }, 89 | None, 90 | ) 91 | .unwrap(); 92 | assert_eq!( 93 | res, 94 | doc! { 95 | "acceptApiVersion2": doc! { 96 | "value": false, 97 | "settableAtRuntime": true, 98 | "settableAtStartup": true, 99 | }, 100 | "authSchemaVersion": doc! { 101 | "value": Bson::Int32(5), 102 | "settableAtRuntime": true, 103 | "settableAtStartup": true, 104 | }, 105 | "tlsMode": doc! { 106 | "value": "disabled", 107 | "settableAtRuntime": true, 108 | "settableAtStartup": false, 109 | }, 110 | "sslMode": doc! { 111 | "value": "disabled", 112 | "settableAtRuntime": true, 113 | "settableAtStartup": false, 114 | }, 115 | "quiet": doc! { 116 | "value": false, 117 | "settableAtRuntime": true, 118 | "settableAtStartup": true, 119 | }, 120 | "featureCompatibilityVersion": doc! { 121 | "value": Bson::Double(5.0), 122 | "settableAtRuntime": true, 123 | "settableAtStartup": true, 124 | }, 125 | "ok": Bson::Double(1.0), 126 | } 127 | ); 128 | } 129 | -------------------------------------------------------------------------------- /docs/protocol-study/their-packet-study.txt: -------------------------------------------------------------------------------- 1 | 0000: be 00 00 00 01 00 00 00 00 00 00 00 dd 07 00 00 ................ 2 | 0010: 00 00 00 00 00 a9 00 00 00 08 69 73 6d 61 73 74 ..........ismast 3 | 0020: 65 72 00 01 10 6d 61 78 42 73 6f 6e 4f 62 6a 65 er...maxBsonObje 4 | 0030: 63 74 53 69 7a 65 00 00 00 00 01 10 6d 61 78 4d ctSize......maxM 5 | 0040: 65 73 73 61 67 65 53 69 7a 65 42 79 74 65 73 00 essageSizeBytes. 6 | 0050: 00 6c dc 02 10 6d 61 78 57 72 69 74 65 42 61 74 .l...maxWriteBat 7 | 0060: 63 68 53 69 7a 65 00 a0 86 01 00 09 6c 6f 63 61 chSize......loca 8 | 0070: 6c 54 69 6d 65 00 24 23 39 d4 81 01 00 00 10 6d lTime.$#9......m 9 | 0080: 69 6e 57 69 72 65 56 65 72 73 69 6f 6e 00 0d 00 inWireVersion... 10 | 0090: 00 00 10 6d 61 78 57 69 72 65 56 65 72 73 69 6f ...maxWireVersio 11 | 00a0: 6e 00 0d 00 00 00 08 72 65 61 64 4f 6e 6c 79 00 n......readOnly. 12 | 00b0: 00 01 6f 6b 00 00 00 00 00 00 00 f0 3f 00 ..ok........?. 13 | 14 | 15 | Message Length = 190 bytes = 0xbe (Little Endian be 00 00 00) 16 | Request Id = 1 = 0x01 (Little Endian 01 00 00 00) 17 | Response To = 0 = 0x00 (Little Endian 00 00 00 00) 18 | OpCode = 2013 = 0x07dd (Little Endian dd 07 00 00) 19 | 20 | Flag bits = 0 = 0x00 (Little Endian 00 00 00 00) 21 | Section Kind = 0 = 0x00 (byte 00) 22 | 23 | Body Size = 0x00a9 = 169 (Little Endian 00 00 a9 00) 24 | 25 | 190 - 4(len) - 4(reqid) - 4(respto) - 4(opcode) - 4(flagbits) - 1(kind) = 169 bytes 26 | 27 | Type Boolean = 0x08 - 28 | 69 73 6d 61 73 74 65 72 00 = ismaster + 0x00 29 | 01 = boolean true 30 | 31 | 32 | BSON Doc Size = 165 bytes = 0xa5 (Little Endian a5 00 00 00) 33 | 34 | Type Double = 0x01 35 | 6f 6b 00 = 0x6f6b00 = ok 0x00 36 | 00 00 00 00 00 00 f0 3f 37 | 38 | 00 00 00 00 00 A9 00 00 00 = 169 39 | 00 00 00 00 00 32 00 00 00 = 50 40 | 41 | 0 00 00 00 00 00 A9 00 00 00 08 69 73 6D 61 73 74 ..........ismast 42 | 16 65 72 00 01 10 6D 61 78 42 73 6F 6E 4F 62 6A 65 er...maxBsonObje 43 | 32 63 74 53 69 7A 65 00 00 00 00 01 10 6D 61 78 4D ctSize......maxM 44 | 48 65 73 73 61 67 65 53 69 7A 65 42 79 74 65 73 00 essageSizeBytes. 45 | 64 00 6C DC 02 10 6D 61 78 57 72 69 74 65 42 61 74 .l...maxWriteBat 46 | 80 63 68 53 69 7A 65 00 A0 86 01 00 09 6C 6F 63 61 chSize......loca 47 | 96 6C 54 69 6D 65 00 35 E1 73 D4 81 01 00 00 10 6D lTime.5.s......m 48 | 112 69 6E 57 69 72 65 56 65 72 73 69 6F 6E 00 00 00 inWireVersion... 
49 | 128 00 00 10 6D 61 78 57 69 72 65 56 65 72 73 69 6F ...maxWireVersio 50 | 144 6E 00 0D 00 00 00 08 72 65 61 64 4F 6E 6C 79 00 n......readOnly. 51 | 160 00 01 6F 6B 00 00 00 00 00 00 00 F0 3F 00 ..ok........?. 52 | 53 | 0 00 00 00 00 00 32 00 00 00 10 6C 69 73 74 44 61 .....2....listDa 54 | 16 74 61 62 61 73 65 73 00 01 00 00 00 08 6E 61 6D tabases......nam 55 | 32 65 4F 6E 6C 79 00 01 02 24 64 62 00 06 00 00 00 eOnly...$db..... 56 | 48 61 64 6D 69 6E 00 00 admin.. 57 | 58 | 10 6C 69 73 74 44 61 74 8 59 | 61 62 61 73 65 73 00 01 16 60 | 00 00 00 08 6E 61 6D 65 24 61 | 4F 6E 6C 79 00 01 02 24 32 62 | 64 62 00 06 00 00 00 61 40 63 | 64 6D 69 6E 00 00 46 64 | 65 | 08 69 73 6D 61 73 74 65 72 00 10 66 | 01 10 6D 61 78 42 73 6F 6E 4F 20 67 | 62 6A 65 63 74 53 69 7A 65 00 30 68 | 00 00 00 01 10 6D 61 78 4D 65 40 69 | 73 73 61 67 65 53 69 7A 65 42 50 70 | 79 74 65 73 00 00 6C DC 02 10 60 71 | 6D 61 78 57 72 69 74 65 42 61 70 72 | 74 63 68 53 69 7A 65 00 A0 86 80 73 | 01 00 09 6C 6F 63 61 6C 54 69 90 74 | 6D 65 00 35 E1 73 D4 81 01 00 100 75 | 00 10 6D 69 6E 57 69 72 65 56 110 76 | 65 72 73 69 6F 6E 00 00 00 00 120 77 | 00 10 6D 61 78 57 69 72 65 56 130 78 | 65 72 73 69 6F 6E 00 0D 00 00 140 79 | 00 08 72 65 61 64 4F 6E 6C 79 150 80 | 00 00 01 6F 6B 00 00 00 00 00 160 81 | 00 00 F0 3F 00 165 82 | -------------------------------------------------------------------------------- /tests/find_and_modify_test.rs: -------------------------------------------------------------------------------- 1 | use bson::doc; 2 | use mongodb::options::{FindOneAndUpdateOptions, ReturnDocument}; 3 | 4 | mod common; 5 | 6 | #[test] 7 | fn test_find_and_modify() { 8 | let col = insert! { 9 | doc! { 10 | "name": "John", 11 | "age": 32, 12 | }, 13 | doc! { 14 | "name": "Sheila", 15 | "age": 22, 16 | }, 17 | doc! { 18 | "name": "Mike", 19 | "age": 87, 20 | } 21 | }; 22 | 23 | let res = col 24 | .find_one_and_update( 25 | doc! { "name": "Mike" }, 26 | doc! { "$set": { "age": 44 } }, 27 | None, 28 | ) 29 | .unwrap(); 30 | let updated = res.unwrap(); 31 | 32 | assert_eq!(updated.get_str("name").unwrap(), "Mike"); 33 | assert_eq!(updated.get_i32("age").unwrap(), 44); 34 | 35 | let rows = common::get_rows(col.find(None, None).unwrap()); 36 | let mike = rows 37 | .iter() 38 | .find(|r| r.get_str("name").unwrap() == "Mike") 39 | .unwrap(); 40 | assert_eq!(mike.get_i32("age").unwrap(), 44); 41 | } 42 | 43 | #[test] 44 | fn test_find_and_modify_upsert() { 45 | let ctx = common::setup(); 46 | 47 | let res = ctx 48 | .col() 49 | .find_one_and_update( 50 | doc! { "name": "Mike", "active": "true" }, 51 | doc! { "$set": { "age": 44 } }, 52 | FindOneAndUpdateOptions::builder().upsert(true).build(), 53 | ) 54 | .unwrap(); 55 | assert_eq!(res, None); 56 | 57 | let rows = common::get_rows(ctx.col().find(None, None).unwrap()); 58 | let row = rows[0].clone(); 59 | assert_eq!(row.get_i32("age").unwrap(), 44); 60 | } 61 | 62 | #[test] 63 | fn test_find_and_modify_upsert_with_returning_new() { 64 | let ctx = common::setup(); 65 | 66 | let res = ctx 67 | .col() 68 | .find_one_and_update( 69 | doc! { "name": "Mike", "active": true }, 70 | doc! 
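// ReturnDocument::After (set below) asks the server for the post-update
// document, which is what the assertions on `res` rely on.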
{ "$set": { "age": 44 } }, 71 | FindOneAndUpdateOptions::builder() 72 | .upsert(true) 73 | .return_document(ReturnDocument::After) 74 | .build(), 75 | ) 76 | .unwrap() 77 | .unwrap(); 78 | assert_eq!(res.get_str("name").unwrap(), "Mike"); 79 | assert_eq!(res.get_bool("active").unwrap(), true); 80 | assert_eq!(res.get_i32("age").unwrap(), 44); 81 | 82 | let rows = common::get_rows(ctx.col().find(None, None).unwrap()); 83 | let row = rows[0].clone(); 84 | assert_eq!(row.get_i32("age").unwrap(), 44); 85 | } 86 | 87 | #[test] 88 | fn test_find_and_modify_empty() { 89 | let ctx = common::setup(); 90 | 91 | let res = ctx 92 | .col() 93 | .find_one_and_update( 94 | doc! {"name": "Mike"}, 95 | doc! { "$set": { "name": "Joe"}}, 96 | None, 97 | ) 98 | .unwrap(); 99 | 100 | assert_eq!(res, None); 101 | } 102 | 103 | #[test] 104 | fn test_find_and_modify_with_sort() { 105 | let col = insert! { 106 | doc! { 107 | "ext_id": 1, 108 | "name": "John", 109 | "age": 45, 110 | }, 111 | doc! { 112 | "ext_id": 2, 113 | "name": "John", 114 | "age": 22, 115 | }, 116 | doc! { 117 | "ext_id": 3, 118 | "name": "John", 119 | "age": 87, 120 | } 121 | }; 122 | 123 | let res = col 124 | .find_one_and_update( 125 | doc! { "name": "John" }, 126 | doc! { "$set": { "age": 44 } }, 127 | FindOneAndUpdateOptions::builder() 128 | .sort(doc! { "age": -1 }) 129 | .build(), 130 | ) 131 | .unwrap(); 132 | let updated = res.unwrap(); 133 | 134 | assert_eq!(updated.get_i32("age").unwrap(), 44); 135 | assert_eq!(updated.get_i32("ext_id").unwrap(), 3); 136 | } 137 | 138 | #[test] 139 | #[ignore = "this is not yet implemented"] 140 | fn test_find_and_modify_inexistent_table() {} 141 | -------------------------------------------------------------------------------- /src/commands/find_and_modify.rs: -------------------------------------------------------------------------------- 1 | use crate::handler::{CommandExecutionError, Request}; 2 | use crate::parser::parse_update; 3 | use crate::pg::UpdateResult; 4 | use crate::{commands::Handler, pg::SqlParam}; 5 | use bson::{doc, Bson, Document}; 6 | 7 | pub struct FindAndModify {} 8 | 9 | impl Handler for FindAndModify { 10 | fn new() -> Self { 11 | FindAndModify {} 12 | } 13 | 14 | fn handle( 15 | &self, 16 | request: &Request, 17 | docs: &Vec, 18 | ) -> Result { 19 | let doc = &docs[0]; 20 | let db = doc.get_str("$db").unwrap(); 21 | let collection = doc.get_str("findAndModify").unwrap(); 22 | let sp = SqlParam::new(db, collection); 23 | let query = doc.get_document("query").unwrap(); 24 | let sort = match doc.get_document("sort") { 25 | Ok(sort_doc) => Some(sort_doc), 26 | _ => None, 27 | }; 28 | let raw_update = doc.get_document("update").unwrap(); 29 | let update_doc = parse_update(raw_update); 30 | let upsert = doc.get_bool("upsert").unwrap_or(false); 31 | let new = doc.get_bool("new").unwrap_or(false); 32 | 33 | log::debug!("findAndModify doc = {:#?}", doc); 34 | 35 | let mut client = request.get_client(); 36 | client.create_table_if_not_exists(db, collection).unwrap(); 37 | 38 | let res = client 39 | .update( 40 | &sp, 41 | Some(query), 42 | sort, 43 | update_doc.unwrap(), 44 | false, 45 | false, 46 | true, 47 | ) 48 | .unwrap(); 49 | 50 | match res { 51 | UpdateResult::Count(total) => { 52 | if total == 0 { 53 | if upsert { 54 | let mut obj = query.clone(); 55 | obj.extend(extract_operator_values(&raw_update)); 56 | 57 | let res = client.insert_doc(sp, &obj).unwrap(); 58 | let mut doc = doc! 
{ 59 | "value": null, 60 | "lastErrorObject": { 61 | "updatedExisting": false, 62 | "upserted": res.get_object_id("_id").unwrap().to_string(), 63 | "n": 1, 64 | }, 65 | "ok": 1.0, 66 | }; 67 | if new { 68 | doc.insert("value", res.clone()); 69 | } 70 | 71 | return Ok(doc); 72 | } else { 73 | return Ok(doc! { 74 | "value": null, 75 | "ok": Bson::Double(1.0), 76 | }); 77 | } 78 | } else { 79 | unreachable!( 80 | "Unexpected numeric result for a findAndUpdate command: {:#?}", 81 | doc 82 | ); 83 | } 84 | } 85 | UpdateResult::Document(value) => Ok(doc! { 86 | "n": Bson::Int64(1), 87 | "value": value, 88 | "ok": Bson::Double(1.0), 89 | }), 90 | } 91 | } 92 | } 93 | 94 | fn extract_operator_values(doc: &Document) -> Document { 95 | let mut res = Document::new(); 96 | for (key, value) in doc { 97 | if key.starts_with("$") { 98 | if let Some(value) = value.as_document() { 99 | res.extend(value.clone()); 100 | } 101 | } 102 | } 103 | res 104 | } 105 | 106 | #[cfg(test)] 107 | mod tests { 108 | use super::*; 109 | 110 | #[test] 111 | fn test_extract_operator_values() { 112 | assert_eq!( 113 | extract_operator_values(&doc! { "$inc": { "score": 1 }, "$set": { "name": "abc" } }), 114 | doc! { "score": 1, "name": "abc" } 115 | ); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /tests/insert_test.rs: -------------------------------------------------------------------------------- 1 | use bson::Document; 2 | use indoc::indoc; 3 | use mongodb::bson::doc; 4 | use oxide::utils::hexdump_to_bytes; 5 | 6 | mod common; 7 | 8 | #[test] 9 | fn test_basic_insert() { 10 | let ctx = common::setup(); 11 | 12 | ctx.col() 13 | .insert_many(vec![doc! { "x": 1 }, doc! { "x": 2 }], None) 14 | .unwrap(); 15 | 16 | let mut cursor = ctx.col().find(None, None).unwrap(); 17 | let row1 = cursor.next().unwrap().unwrap(); 18 | assert_eq!(row1.get_i32("x").unwrap(), 1); 19 | let row2 = cursor.next().unwrap().unwrap(); 20 | assert_eq!(row2.get_i32("x").unwrap(), 2); 21 | assert!(cursor.next().is_none()); 22 | } 23 | 24 | #[test] 25 | fn test_insert_without_id() { 26 | let ctx = common::setup(); 27 | ctx.db() 28 | .run_command( 29 | doc! { 30 | "insert": &ctx.collection, 31 | "documents": vec![doc! { 32 | "name": "Felipe" 33 | }] 34 | }, 35 | None, 36 | ) 37 | .unwrap(); 38 | let doc = ctx 39 | .col() 40 | .find(doc! { "name": "Felipe" }, None) 41 | .unwrap() 42 | .next() 43 | .unwrap() 44 | .unwrap(); 45 | assert!(doc.contains_key("_id")); 46 | } 47 | 48 | #[test] 49 | fn test_raw_kind2_op_msg_insert() { 50 | let ctx = common::setup(); 51 | 52 | // FIXME count() is not working because it needs the aggregation pipeline 53 | // once its done replace this helper function with it 54 | 55 | fn count_documents(ctx: common::TestContext) -> usize { 56 | let cursor = ctx 57 | .mongodb() 58 | .database("test") 59 | .collection::("col") 60 | .find(None, None) 61 | .unwrap(); 62 | let rows: Vec> = cursor.collect(); 63 | rows.len() 64 | } 65 | 66 | let count = count_documents(ctx.clone()); 67 | 68 | let kind2insert = indoc! {" 69 | 0000 96 00 00 00 61 00 00 00 00 00 00 00 dd 07 00 00 ....a........... 70 | 0010 00 00 00 00 01 2f 00 00 00 64 6f 63 75 6d 65 6e ...../...documen 71 | 0020 74 73 00 21 00 00 00 07 5f 69 64 00 62 ce d6 9a ts.!...._id.b... 72 | 0030 33 78 79 a1 ac c2 9d 40 01 78 00 00 00 00 00 00 3xy....@.x...... 
73 | 0040 00 f0 3f 00 00 51 00 00 00 02 69 6e 73 65 72 74 ..?..Q....insert 74 | 0050 00 04 00 00 00 63 6f 6c 00 08 6f 72 64 65 72 65 .....col..ordere 75 | 0060 64 00 01 03 6c 73 69 64 00 1e 00 00 00 05 69 64 d...lsid......id 76 | 0070 00 10 00 00 00 04 e1 54 58 c6 4e 89 4c a3 81 0f .......TX.N.L... 77 | 0080 19 59 d3 a3 2c cf 00 02 24 64 62 00 05 00 00 00 .Y..,...$db..... 78 | 0090 74 65 73 74 00 00 test.. 79 | "}; 80 | 81 | ctx.send(&hexdump_to_bytes(kind2insert)); 82 | assert_eq!(count_documents(ctx), count + 1); 83 | } 84 | 85 | #[test] 86 | fn test_raw_jetbrains_idea_insert() { 87 | let ctx = common::setup(); 88 | let count = count_documents(ctx.clone()); 89 | 90 | fn count_documents(ctx: common::TestContext) -> usize { 91 | let cursor = ctx 92 | .mongodb() 93 | .database("test") 94 | .collection::("inventory") 95 | .find(None, None) 96 | .unwrap(); 97 | let rows: Vec> = cursor.collect(); 98 | rows.len() 99 | } 100 | 101 | let insert = indoc! {" 102 | 0000 c9 00 00 00 36 00 00 00 00 00 00 00 dd 07 00 00 ....6........... 103 | 0010 00 00 00 00 00 33 00 00 00 02 69 6e 73 65 72 74 .....3....insert 104 | 0020 00 0a 00 00 00 69 6e 76 65 6e 74 6f 72 79 00 08 .....inventory.. 105 | 0030 6f 72 64 65 72 65 64 00 01 02 24 64 62 00 05 00 ordered...$db... 106 | 0040 00 00 74 65 73 74 00 00 01 80 00 00 00 64 6f 63 ..test.......doc 107 | 0050 75 6d 65 6e 74 73 00 72 00 00 00 07 5f 69 64 00 uments.r...._id. 108 | 0060 63 0b ac 82 29 0b 4a 69 98 af 9c ac 02 69 74 65 c...).Ji.....ite 109 | 0070 6d 00 07 00 00 00 63 61 6e 76 61 73 00 10 71 74 m.....canvas..qt 110 | 0080 79 00 64 00 00 00 04 74 61 67 73 00 13 00 00 00 y.d....tags..... 111 | 0090 02 30 00 07 00 00 00 63 6f 74 74 6f 6e 00 00 03 .0.....cotton... 112 | 00a0 73 69 7a 65 00 23 00 00 00 10 68 00 1c 00 00 00 size.#....h..... 113 | 00b0 01 77 00 00 00 00 00 00 c0 41 40 02 75 6f 6d 00 .w.......A@.uom. 114 | 00c0 03 00 00 00 63 6d 00 00 00 ....cm... 115 | "}; 116 | 117 | let bytes = hexdump_to_bytes(insert); 118 | ctx.send(&bytes); 119 | 120 | assert_eq!(count_documents(ctx), count + 1); 121 | } 122 | -------------------------------------------------------------------------------- /src/commands/get_parameter.rs: -------------------------------------------------------------------------------- 1 | use crate::commands::Handler; 2 | use crate::handler::{CommandExecutionError, Request}; 3 | use bson::{doc, Bson, Document}; 4 | 5 | pub struct GetParameter {} 6 | 7 | fn get_params(doc: Document) -> (bool, bool) { 8 | if let Some(param) = doc.get("getParameter") { 9 | match param { 10 | Bson::String(str) => (str == "*", false), 11 | Bson::Document(doc) => ( 12 | doc.get_bool("allParameters").unwrap_or(false), 13 | doc.get_bool("showDetails").unwrap_or(false), 14 | ), 15 | _ => (false, false), 16 | } 17 | } else { 18 | (false, false) 19 | } 20 | } 21 | 22 | impl Handler for GetParameter { 23 | fn new() -> Self { 24 | GetParameter {} 25 | } 26 | 27 | fn handle( 28 | &self, 29 | _request: &Request, 30 | docs: &Vec, 31 | ) -> Result { 32 | let doc = docs[0].clone(); 33 | 34 | let data = doc! { 35 | "acceptApiVersion2": doc! { 36 | "value": false, 37 | "settableAtRuntime": true, 38 | "settableAtStartup": true, 39 | }, 40 | "authSchemaVersion": doc! { 41 | "value": Bson::Int32(5), 42 | "settableAtRuntime": true, 43 | "settableAtStartup": true, 44 | }, 45 | "tlsMode": doc! { 46 | "value": "disabled", 47 | "settableAtRuntime": true, 48 | "settableAtStartup": false, 49 | }, 50 | "sslMode": doc! 
{ 51 | "value": "disabled", 52 | "settableAtRuntime": true, 53 | "settableAtStartup": false, 54 | }, 55 | "quiet": doc! { 56 | "value": false, 57 | "settableAtRuntime": true, 58 | "settableAtStartup": true, 59 | }, 60 | "featureCompatibilityVersion": doc! { 61 | "value": Bson::Double(5.0), 62 | "settableAtRuntime": true, 63 | "settableAtStartup": true, 64 | }, 65 | }; 66 | 67 | let (all_params, show_details) = get_params(doc.clone()); 68 | let selected_keys = if all_params { data.keys() } else { doc.keys() }; 69 | 70 | if all_params && show_details { 71 | let mut doc = data.clone(); 72 | doc.insert("ok", Bson::Double(1.0)); 73 | return Ok(doc); 74 | } 75 | 76 | // determine what keys from doc we need to return 77 | let keys: Vec = selected_keys 78 | .into_iter() 79 | .filter(|k| { 80 | k.as_str() != "getParameter" && k.as_str() != "comment" && k.as_str() != "$db" 81 | }) 82 | .filter(|k| all_params || doc.get(k).is_some()) 83 | .map(|k| k.to_string()) 84 | .collect(); 85 | 86 | // filters the keys from data and if show_details is true returns the whole object 87 | // otherwise just the value of the key 88 | let mut res = doc! {}; 89 | for key in keys { 90 | if let Some(value) = data.get(key.clone()) { 91 | if show_details { 92 | res.insert(key.to_string(), value.clone()); 93 | } else { 94 | res.insert( 95 | key.to_string(), 96 | value.as_document().unwrap().get("value").unwrap(), 97 | ); 98 | } 99 | } 100 | } 101 | 102 | res.insert("ok", Bson::Double(1.0)); 103 | Ok(res) 104 | } 105 | } 106 | 107 | #[cfg(test)] 108 | mod tests { 109 | use super::*; 110 | 111 | #[test] 112 | fn get_params_asterisk_test() { 113 | let doc = doc! { 114 | "getParameter": "*" 115 | }; 116 | let (all, show_details) = get_params(doc); 117 | assert_eq!(all, true); 118 | assert_eq!(show_details, false); 119 | } 120 | 121 | #[test] 122 | fn get_params_all_only_test() { 123 | let doc = doc! { 124 | "getParameter": doc! { "allParameters": true } 125 | }; 126 | let (all, show_details) = get_params(doc); 127 | assert_eq!(all, true); 128 | assert_eq!(show_details, false); 129 | } 130 | 131 | #[test] 132 | fn get_params_show_details_only_test() { 133 | let doc = doc! { 134 | "getParameter": doc! { "showDetails": true } 135 | }; 136 | let (all, show_details) = get_params(doc); 137 | assert_eq!(all, false); 138 | assert_eq!(show_details, true); 139 | } 140 | 141 | #[test] 142 | fn get_params_none_test() { 143 | let doc = doc! {}; 144 | let (all, show_details) = get_params(doc); 145 | assert_eq!(all, false); 146 | assert_eq!(show_details, false); 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /src/ui.rs: -------------------------------------------------------------------------------- 1 | use crate::commands::build_sql; 2 | use crate::pg::{PgDb, SqlParam}; 3 | use bson::ser; 4 | use nickel::{HttpRouter, JsonBody, MediaType, Nickel, Options}; 5 | use rust_embed::RustEmbed; 6 | use serde_json::{json, Value}; 7 | use std::env; 8 | use std::path::Path; 9 | 10 | #[derive(RustEmbed)] 11 | #[folder = "public/"] 12 | struct Asset; 13 | 14 | pub fn start(listen_addr: &str, port: u16, postgres_url: Option) { 15 | let mut server = Nickel::new(); 16 | server.options = Options::default().output_on_listen(false); 17 | 18 | let mut pg_url = postgres_url; 19 | if pg_url.is_none() { 20 | pg_url = env::var("DATABASE_URL").ok(); 21 | } 22 | if pg_url.is_none() { 23 | log::error!(indoc::indoc! {" 24 | No PostgreSQL URL specified. 
25 | Use --postgres-url or env var DATABASE_URL to set the connection URL and try again. 26 | For more information use --help. 27 | "}); 28 | } 29 | let pg_url = pg_url.unwrap(); 30 | 31 | let index_html = Asset::get("index.html").unwrap(); 32 | let index_data = std::str::from_utf8(index_html.data.as_ref()); 33 | let str = format!("{}", index_data.unwrap()); 34 | 35 | server.get( 36 | "/", 37 | middleware! { |_req, _res| 38 | log::info!("GET /index.html (static)"); 39 | str.clone() 40 | }, 41 | ); 42 | 43 | server.post( 44 | "/api/convert", 45 | middleware! { |req, _res| 46 | let req_json = req.json_as::().unwrap(); 47 | log::info!("POST /api/convert\n{:?}", req_json); 48 | let doc = ser::to_document(&req_json).unwrap(); 49 | let sp = SqlParam::new(doc.get_str("database").unwrap(), doc.get_str("collection").unwrap()); 50 | let res = build_sql(&sp, doc.get_array("pipeline").unwrap()); 51 | if res.is_err() { 52 | let err = res.unwrap_err(); 53 | log::error!("{}", err); 54 | json!({ "error":err.to_string() }) 55 | } else { 56 | let sql = res.unwrap(); 57 | json!({ "sql": sql }) 58 | } 59 | 60 | }, 61 | ); 62 | 63 | let uri = pg_url.clone(); 64 | server.post( 65 | "/api/run", 66 | middleware! { |req, _res| 67 | let req_json = req.json_as::().unwrap(); 68 | let query = req_json["query"].as_str().unwrap(); 69 | log::info!("POST /api/query\n{}", query); 70 | let mut client = PgDb::new_with_uri(&uri); 71 | let mut rows = vec![]; 72 | let res = client.raw_query(query, &[]); 73 | if res.is_err() { 74 | let err = res.unwrap_err(); 75 | log::error!("{}", err); 76 | json!({ "error":err.to_string() }) 77 | } else { 78 | for row in res.unwrap() { 79 | let row: serde_json::Value = row.try_get::<&str, serde_json::Value>("_jsonb").unwrap(); 80 | rows.push(row); 81 | } 82 | json!({ "rows": rows }) 83 | } 84 | }, 85 | ); 86 | 87 | let uri = pg_url.clone(); 88 | server.get( 89 | "/api/databases", 90 | middleware! { |_req, _res| 91 | log::info!("GET /api/databases"); 92 | let mut client = PgDb::new_with_uri(&uri); 93 | let databases = client.get_schemas(); 94 | json!({ "databases": databases }) 95 | 96 | }, 97 | ); 98 | 99 | let uri = pg_url.clone(); 100 | server.get( 101 | "/api/databases/:database/collections", 102 | middleware! { |req, _res| 103 | let database = req.param("database").unwrap(); 104 | log::info!("GET /api/collections\ndatabase = {}", database); 105 | let mut client = PgDb::new_with_uri(&uri); 106 | let collections = client.get_tables(database); 107 | json!({ "collections": collections }) 108 | 109 | }, 110 | ); 111 | 112 | server.utilize(router! 
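// Catch-all static handler: any path the API routes above didn't match
// is served from the embedded `public/` assets (see the Asset struct).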
{ 113 | get "/**" => |req, mut res| { 114 | let uri = req.path_without_query().unwrap(); 115 | let file = uri.trim_start_matches("/"); 116 | log::info!("GET /{} (static)", file); 117 | 118 | let media_type = mime_from_filename(file).unwrap_or(MediaType::Html); 119 | res.set(media_type); 120 | 121 | let contents = Asset::get(file); 122 | contents.unwrap().data.as_ref() 123 | } 124 | }); 125 | 126 | log::info!("Web UI started at http://{}:{}...", listen_addr, port); 127 | server.listen(format!("{}:{}", listen_addr, port)).unwrap(); 128 | } 129 | 130 | fn mime_from_filename>(path: P) -> Option { 131 | path.as_ref() 132 | .extension() 133 | .and_then(|os| os.to_str()) 134 | // Lookup mime from file extension 135 | .and_then(|s| s.parse().ok()) 136 | } 137 | -------------------------------------------------------------------------------- /src/serializer.rs: -------------------------------------------------------------------------------- 1 | use bson::{Bson, JavaScriptCodeWithScope}; 2 | use serde_json::{json, Value}; 3 | 4 | // Intermediate representation of a document as it is stored on the database. 5 | // 6 | // It uses some JSON representations for the BSON types, and it follows the same 7 | // standards used by FerretDB: 8 | // 9 | // $f - for floating point numbers 10 | // $o - for Object ID 11 | // $d - for dates, stored as millis since epoch 12 | 13 | pub trait PostgresSerializer { 14 | fn into_psql_json(self) -> Value; 15 | } 16 | 17 | impl PostgresSerializer for Bson { 18 | fn into_psql_json(self) -> Value { 19 | match self { 20 | Bson::Int32(i) => json!(i), 21 | // Bson::Int64(i) => json!({ "$i": i.to_string() }), 22 | Bson::Int64(i) => json!(i), 23 | Bson::Double(f) => { 24 | json!({ "$f": f }) 25 | } 26 | Bson::DateTime(date) => { 27 | json!({ "$d": date.timestamp_millis() }) 28 | } 29 | Bson::Array(arr) => Value::Array(arr.into_iter().map(Bson::into_psql_json).collect()), 30 | Bson::Document(arr) => Value::Object( 31 | arr.into_iter() 32 | .map(|(k, v)| (k, v.into_psql_json())) 33 | .collect(), 34 | ), 35 | Bson::JavaScriptCode(code) => json!({ "$j": code }), 36 | Bson::JavaScriptCodeWithScope(JavaScriptCodeWithScope { code, scope }) => json!({ 37 | "$j": code, 38 | "s": serde_json::to_string(&scope).unwrap(), 39 | }), 40 | Bson::RegularExpression(bson::Regex { pattern, options }) => { 41 | let mut chars: Vec<_> = options.chars().collect(); 42 | chars.sort_unstable(); 43 | let options: String = chars.into_iter().collect(); 44 | json!({ 45 | "$regex": pattern, 46 | "$options": options, 47 | }) 48 | } 49 | Bson::ObjectId(v) => json!({"$o": v.to_hex()}), 50 | 51 | other => other.into_relaxed_extjson(), 52 | } 53 | } 54 | } 55 | 56 | #[cfg(test)] 57 | mod tests { 58 | use bson::{doc, Bson}; 59 | use std::time::{SystemTime, UNIX_EPOCH}; 60 | 61 | use super::*; 62 | 63 | #[test] 64 | fn test_parse_string() { 65 | let json = Bson::String("hello".into()).into_psql_json().to_string(); 66 | assert_eq!(r#""hello""#, json); 67 | } 68 | 69 | #[test] 70 | fn test_parse_int32() { 71 | let json = Bson::Int32(1).into_psql_json().to_string(); 72 | assert_eq!(r#"1"#, json); 73 | } 74 | 75 | #[test] 76 | fn test_parse_int64() { 77 | let json = Bson::Int64(1).into_psql_json().to_string(); 78 | // assert_eq!(r#"{"$i":"1"}"#, json); 79 | assert_eq!(r#"1"#, json); 80 | } 81 | 82 | #[test] 83 | fn test_parse_float() { 84 | let json = Bson::Double(1.0).into_psql_json().to_string(); 85 | assert_eq!(r#"{"$f":1.0}"#, json); 86 | } 87 | 88 | #[test] 89 | fn test_parse_datetime() { 90 | let date = 
chrono::DateTime::parse_from_rfc3339("1996-12-19T16:39:57-08:00").unwrap(); 91 | let time: u128 = SystemTime::from(date) 92 | .duration_since(UNIX_EPOCH) 93 | .unwrap() 94 | .as_millis(); 95 | let json = Bson::DateTime(bson::DateTime::from_millis(time.try_into().unwrap())) 96 | .into_psql_json() 97 | .to_string(); 98 | assert_eq!(r#"{"$d":851042397000}"#, json); 99 | } 100 | 101 | #[test] 102 | fn test_parse_object_id() { 103 | let json = 104 | Bson::ObjectId(bson::oid::ObjectId::parse_str("62c75f564f084cd855b6ac3f").unwrap()) 105 | .into_psql_json() 106 | .to_string(); 107 | assert_eq!(r#"{"$o":"62c75f564f084cd855b6ac3f"}"#, json); 108 | } 109 | 110 | #[test] 111 | fn test_parse_javascript() { 112 | let json = Bson::JavaScriptCode("function a() { return 'hey'; }".to_string()) 113 | .into_psql_json() 114 | .to_string(); 115 | assert_eq!(r#"{"$j":"function a() { return 'hey'; }"}"#, json); 116 | } 117 | 118 | #[test] 119 | fn test_parse_javascript_with_scope() { 120 | let json = Bson::JavaScriptCodeWithScope(bson::JavaScriptCodeWithScope { 121 | code: "function a() { return 'hey'; }".to_string(), 122 | scope: doc! { "a": 1, "b": 2 }, 123 | }) 124 | .into_psql_json() 125 | .to_string(); 126 | assert_eq!( 127 | r#"{"$j":"function a() { return 'hey'; }","s":"{\"a\":1,\"b\":2}"}"#, 128 | json 129 | ); 130 | } 131 | 132 | #[test] 133 | fn test_parse_regex() { 134 | let json = Bson::RegularExpression(bson::Regex { 135 | pattern: "^[a-z]+$".to_string(), 136 | options: "i".to_string(), 137 | }) 138 | .into_psql_json() 139 | .to_string(); 140 | assert_eq!(r#"{"$regex":"^[a-z]+$","$options":"i"}"#, json); 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::{AppSettings, Parser, Subcommand}; 2 | use indoc::indoc; 3 | use server::Server; 4 | use std::env; 5 | use std::thread; 6 | 7 | #[macro_use] 8 | extern crate nickel; 9 | 10 | pub mod commands; 11 | pub mod deserializer; 12 | pub mod handler; 13 | pub mod parser; 14 | pub mod pg; 15 | pub mod serializer; 16 | pub mod server; 17 | pub mod shell; 18 | pub mod threadpool; 19 | pub mod ui; 20 | pub mod utils; 21 | pub mod wire; 22 | 23 | #[derive(Subcommand, Debug)] 24 | enum Commands { 25 | /// Start web interface 26 | Web { 27 | /// Listening address, defaults to 127.0.0.1 28 | #[clap(short, long)] 29 | listen_addr: Option, 30 | 31 | /// Listening port, defaults to 8087 32 | #[clap(short, long)] 33 | port: Option, 34 | 35 | /// PostgreSQL connection URL 36 | #[clap(short = 'u', long)] 37 | postgres_url: Option, 38 | }, 39 | 40 | /// Start JavaScript shell 41 | Shell { 42 | /// Server address 43 | #[clap(short = 'l', long, default_value_t = String::from("127.0.0.1"))] 44 | server_addr: String, 45 | 46 | /// Server port 47 | #[clap(short = 'p', long, default_value_t = 27017)] 48 | server_port: u16, 49 | }, 50 | } 51 | 52 | #[derive(Parser, Debug)] 53 | #[clap(author, version, about)] 54 | #[clap(global_setting(AppSettings::ArgsNegateSubcommands))] 55 | struct Cli { 56 | #[clap(subcommand)] 57 | command: Option, 58 | 59 | /// Listening address, defaults to 127.0.0.1 60 | #[clap(short, long)] 61 | listen_addr: Option, 62 | 63 | /// Listening port, defaults to 27017 64 | #[clap(short, long)] 65 | port: Option, 66 | 67 | /// PostgreSQL connection URL 68 | #[clap(short = 'u', long)] 69 | postgres_url: Option, 70 | 71 | /// Starts web interface 72 | #[clap(short, long)] 73 | web: bool, 74 | 75 | /// 
Web binding address 76 | #[clap(long)] 77 | web_addr: Option, 78 | 79 | /// Show debugging information 80 | #[clap(short, long)] 81 | debug: bool, 82 | } 83 | 84 | fn main() { 85 | dotenv::dotenv().ok(); 86 | 87 | let cli = Cli::parse(); 88 | 89 | let log_level = if cli.debug { 90 | "oxide=debug" 91 | } else { 92 | "oxide=info" 93 | }; 94 | env_logger::init_from_env( 95 | env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, log_level), 96 | ); 97 | 98 | match cli.command { 99 | Some(Commands::Web { 100 | listen_addr, 101 | port, 102 | postgres_url, 103 | }) => { 104 | ui::start( 105 | &listen_addr.unwrap_or("localhost".to_string()), 106 | port.unwrap_or(8087), 107 | postgres_url, 108 | ); 109 | } 110 | Some(Commands::Shell { 111 | server_addr, 112 | server_port, 113 | }) => { 114 | let shell = shell::Shell::new(&server_addr, server_port); 115 | shell.start(); 116 | } 117 | None => { 118 | start( 119 | cli.listen_addr, 120 | cli.port, 121 | cli.postgres_url, 122 | cli.web, 123 | cli.web_addr, 124 | ); 125 | } 126 | } 127 | 128 | fn start( 129 | listen_addr: Option, 130 | port: Option, 131 | postgres_url: Option, 132 | web: bool, 133 | web_addr: Option, 134 | ) { 135 | let ip_addr = listen_addr 136 | .unwrap_or(env::var("OXIDE_LISTEN_ADDR").unwrap_or_else(|_| "0.0.0.0".to_string())); 137 | let port = port.unwrap_or( 138 | env::var("OXIDE_PORT") 139 | .unwrap_or("27017".to_string()) 140 | .parse() 141 | .unwrap(), 142 | ); 143 | let mut pg_url = postgres_url; 144 | if pg_url.is_none() { 145 | pg_url = env::var("DATABASE_URL").ok(); 146 | } 147 | if let Some(pg_url) = pg_url { 148 | if web || web_addr.is_some() { 149 | let pg_url_clone = pg_url.clone(); 150 | let parts = web_addr.unwrap_or( 151 | env::var("OXIDE_WEB_ADDR").unwrap_or_else(|_| "0.0.0.0:8087".to_string()), 152 | ); 153 | let parts_vec = parts.split(':').collect::>(); 154 | let web_addr = parts_vec[0].to_string(); 155 | let port = parts_vec[1].parse::().unwrap_or(8087); 156 | thread::spawn(move || { 157 | ui::start(&web_addr, port, Some(pg_url_clone)); 158 | }); 159 | } 160 | 161 | Server::new_with_pgurl(ip_addr, port, pg_url).start(); 162 | } else { 163 | log::error!(indoc! {" 164 | No PostgreSQL URL specified. 165 | Use --postgres-url or env var DATABASE_URL to set the connection URL and try again. 166 | For more information use --help. 
167 | "}); 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/wire/op_msg.rs: -------------------------------------------------------------------------------- 1 | use crate::handler::{Request, Response}; 2 | use crate::utils::to_cstring; 3 | use crate::wire::Replyable; 4 | use crate::wire::{OpCode, UnknownMessageKindError, CHECKSUM_PRESENT, HEADER_SIZE, OP_MSG}; 5 | use bson::{doc, ser, Bson, Document}; 6 | use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; 7 | use pretty_hex::pretty_hex; 8 | use std::ffi::CString; 9 | use std::io::{BufRead, Cursor, Read, Write}; 10 | 11 | use super::util::parse_section; 12 | use super::{MsgHeader, Serializable}; 13 | 14 | #[derive(Debug, Clone)] 15 | pub struct OpMsg { 16 | pub header: MsgHeader, 17 | pub flags: u32, 18 | pub sections: Vec, 19 | pub checksum: Option, 20 | } 21 | 22 | #[derive(Debug, Clone, PartialEq)] 23 | pub struct OpMsgSection { 24 | pub kind: u8, 25 | pub identifier: Option, 26 | pub documents: Vec, 27 | } 28 | 29 | impl OpMsgSection { 30 | pub fn from_bytes( 31 | mut bytes: Vec, 32 | ) -> Result<(OpMsgSection, Vec), UnknownMessageKindError> { 33 | parse_section(&mut bytes) 34 | } 35 | } 36 | 37 | impl OpMsg { 38 | pub fn new_with_body_kind( 39 | header: MsgHeader, 40 | flags: u32, 41 | checksum: Option, 42 | doc: &Document, 43 | ) -> OpMsg { 44 | OpMsg { 45 | header, 46 | flags, 47 | sections: vec![OpMsgSection { 48 | kind: 0, 49 | identifier: None, 50 | documents: vec![doc.to_owned()], 51 | }], 52 | checksum, 53 | } 54 | } 55 | 56 | pub fn from_bytes(bytes: &[u8]) -> Result { 57 | let mut cursor = Cursor::new(bytes); 58 | let mut header_buffer: Vec = vec![0u8; HEADER_SIZE as usize]; 59 | cursor.read_exact(&mut header_buffer).unwrap(); 60 | 61 | let header = MsgHeader::from_bytes(header_buffer).unwrap(); 62 | let flags = cursor.read_u32::().unwrap(); 63 | 64 | let mut bytes: Vec = vec![]; 65 | cursor.read_to_end(&mut bytes).unwrap(); 66 | 67 | let mut sections = vec![]; 68 | loop { 69 | let (section, remaining) = parse_section(&mut bytes).unwrap(); 70 | bytes = remaining; 71 | sections.push(section); 72 | if bytes.is_empty() { 73 | break; 74 | } 75 | if (bytes.len() as u64) <= 4 { 76 | break; 77 | } 78 | } 79 | 80 | let mut checksum = None; 81 | if flags & CHECKSUM_PRESENT != 0 { 82 | checksum = Some(cursor.read_u32::().unwrap()); 83 | } 84 | 85 | Ok(OpMsg { 86 | header, 87 | flags, 88 | sections, 89 | checksum, 90 | }) 91 | } 92 | } 93 | 94 | impl Replyable for OpMsg { 95 | fn reply(&self, res: Response) -> Result, UnknownMessageKindError> { 96 | // FIXME extract this serialization of a document to a helper 97 | let bson_vec = ser::to_vec(&res.get_doc()).unwrap(); 98 | let bson_data: &[u8] = &bson_vec; 99 | let message_length = HEADER_SIZE + 5 + bson_data.len() as u32; 100 | 101 | if let OpCode::OpMsg(op_msg) = res.get_op_code().to_owned() { 102 | let header = op_msg.header.get_response(res.get_id(), message_length); 103 | 104 | if self.sections.len() > 0 && self.sections[0].kind == 0 { 105 | return Ok(OpMsg::new_with_body_kind( 106 | header, 107 | self.flags, 108 | self.checksum, 109 | res.get_doc(), 110 | ) 111 | .to_vec()); 112 | } else if self.sections.len() > 0 && self.sections[0].kind == 1 { 113 | return Ok(OpMsg::new_with_body_kind( 114 | header, 115 | self.flags, 116 | self.checksum, 117 | res.get_doc(), 118 | ) 119 | .to_vec()); 120 | } 121 | } 122 | 123 | log::error!( 124 | "Received unsupported section for msg = {:?}", 125 | res.get_op_code() 126 | 
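// Reached when the response op code is not an OpMsg, or when its first
// section kind is neither 0 nor 1.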
); 127 | 128 | Err(UnknownMessageKindError) 129 | } 130 | } 131 | 132 | impl Serializable for OpMsg { 133 | fn to_vec(&self) -> Vec { 134 | let mut writer = Cursor::new(Vec::new()); 135 | writer.write_all(&self.header.to_vec()).unwrap(); 136 | writer.write_u32::(self.flags).unwrap(); 137 | for section in &self.sections { 138 | writer.write(&[section.kind]).unwrap(); 139 | for doc in §ion.documents { 140 | let bson_vec = ser::to_vec(&doc).unwrap(); 141 | let bson_data: &[u8] = &bson_vec; 142 | writer.write(bson_data).unwrap(); 143 | } 144 | } 145 | if (self.flags & CHECKSUM_PRESENT) != 0 { 146 | writer 147 | .write_u32::(self.checksum.unwrap()) 148 | .unwrap(); 149 | } 150 | writer.into_inner() 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/wire/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code, unused_imports)] 2 | use bson::{doc, ser, Bson, Document}; 3 | use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; 4 | use pretty_hex::*; 5 | use std::ffi::CString; 6 | use std::io::{BufRead, Cursor, Read, Write}; 7 | 8 | mod op_msg; 9 | mod op_query; 10 | mod op_reply; 11 | mod util; 12 | 13 | use crate::handler::{Request, Response}; 14 | 15 | pub use self::op_msg::OpMsg; 16 | pub use self::op_msg::OpMsgSection; 17 | pub use self::op_query::OpQuery; 18 | pub use self::op_reply::OpReply; 19 | 20 | // OpCodes 21 | pub const OP_MSG: u32 = 2013; 22 | pub const OP_REPLY: u32 = 1; 23 | pub const OP_QUERY: u32 = 2004; 24 | 25 | pub const MAX_DOCUMENT_LEN: u32 = 16777216; 26 | pub const MAX_MSG_LEN: u32 = 48000000; 27 | pub const HEADER_SIZE: u32 = 16; 28 | 29 | pub const CHECKSUM_PRESENT: u32 = 1 << 0; 30 | pub const MORE_TO_COME: u32 = 1 << 1; 31 | pub const EXHAUST_ALLOWED: u32 = 1 << 16; 32 | 33 | #[derive(Debug, Clone)] 34 | pub struct OpCodeNotImplementedError { 35 | op_code: u32, 36 | } 37 | 38 | #[derive(Debug, Clone)] 39 | pub struct UnknownMessageKindError; 40 | 41 | #[derive(Debug, Clone)] 42 | pub struct MsgHeader { 43 | pub message_length: u32, 44 | pub request_id: u32, 45 | pub response_to: u32, 46 | pub op_code: u32, 47 | } 48 | 49 | impl MsgHeader { 50 | pub fn from_bytes(bytes: Vec) -> Result { 51 | let mut cursor = Cursor::new(bytes); 52 | let message_length = cursor.read_u32::().unwrap(); 53 | let request_id = cursor.read_u32::().unwrap(); 54 | let response_to = cursor.read_u32::().unwrap(); 55 | let op_code = cursor.read_u32::().unwrap(); 56 | Ok(MsgHeader { 57 | message_length, 58 | request_id, 59 | response_to, 60 | op_code, 61 | }) 62 | } 63 | 64 | pub fn get_response(&self, request_id: u32, message_length: u32) -> MsgHeader { 65 | self.get_response_with_op_code(request_id, message_length, self.op_code) 66 | } 67 | 68 | pub fn get_response_with_op_code( 69 | &self, 70 | request_id: u32, 71 | message_length: u32, 72 | op_code: u32, 73 | ) -> MsgHeader { 74 | MsgHeader { 75 | message_length, 76 | request_id, 77 | response_to: self.request_id, 78 | op_code, 79 | } 80 | } 81 | } 82 | 83 | #[derive(Debug, Clone)] 84 | pub enum OpCode { 85 | OpMsg(OpMsg), 86 | OpQuery(OpQuery), 87 | OpReply(OpReply), 88 | } 89 | 90 | impl OpCode { 91 | pub fn reply(&self, response: Response) -> Result, UnknownMessageKindError> { 92 | match self { 93 | OpCode::OpMsg(op_msg) => Ok(op_msg.reply(response).unwrap()), 94 | OpCode::OpQuery(op_query) => Ok(op_query.reply(response).unwrap()), 95 | _ => { 96 | log::error!("Unknown message during reply - {:#?}", self); 97 | 
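// Only OpMsg and OpQuery can be replied to; OpReply is itself a
// server-to-client message, so it falls through to an error here.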
Err(UnknownMessageKindError) 98 | } 99 | } 100 | } 101 | } 102 | 103 | pub trait Replyable { 104 | fn reply(&self, response: Response) -> Result<Vec<u8>, UnknownMessageKindError> 105 | where 106 | Self: Sized; 107 | } 108 | 109 | pub trait Serializable { 110 | fn to_vec(&self) -> Vec<u8>; 111 | } 112 | 113 | pub fn parse(buffer: &[u8]) -> Result<OpCode, OpCodeNotImplementedError> { 114 | let mut cursor = Cursor::new(buffer); 115 | let header = MsgHeader::parse(&mut cursor); 116 | 117 | if header.op_code == OP_MSG { 118 | let mut msg_buffer: Vec<u8> = vec![0; header.message_length as usize]; 119 | cursor.set_position(0); 120 | cursor.read_exact(&mut msg_buffer).unwrap(); 121 | 122 | Ok(OpCode::OpMsg(OpMsg::from_bytes(&msg_buffer).unwrap())) 123 | } else if header.op_code == OP_QUERY { 124 | Ok(OpCode::OpQuery(OpQuery::parse(header, &mut cursor))) 125 | } else { 126 | Err(OpCodeNotImplementedError { 127 | op_code: header.op_code, 128 | }) 129 | } 130 | } 131 | 132 | impl MsgHeader { 133 | fn new(message_length: u32, request_id: u32, response_to: u32, op_code: u32) -> MsgHeader { 134 | MsgHeader { 135 | message_length, 136 | request_id, 137 | response_to, 138 | op_code, 139 | } 140 | } 141 | 142 | fn parse(cursor: &mut Cursor<&[u8]>) -> MsgHeader { 143 | let message_length = cursor.read_u32::<LittleEndian>().unwrap(); 144 | let request_id = cursor.read_u32::<LittleEndian>().unwrap(); 145 | let response_to = cursor.read_u32::<LittleEndian>().unwrap(); 146 | let op_code = cursor.read_u32::<LittleEndian>().unwrap(); 147 | MsgHeader { 148 | message_length, 149 | request_id, 150 | response_to, 151 | op_code, 152 | } 153 | } 154 | 155 | fn to_vec(&self) -> Vec<u8> { 156 | let mut cursor = Cursor::new(Vec::new()); 157 | cursor 158 | .write_u32::<LittleEndian>(self.message_length) 159 | .unwrap(); 160 | cursor.write_u32::<LittleEndian>(self.request_id).unwrap(); 161 | cursor.write_u32::<LittleEndian>(self.response_to).unwrap(); 162 | cursor.write_u32::<LittleEndian>(self.op_code).unwrap(); 163 | cursor.into_inner() 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /src/server.rs: -------------------------------------------------------------------------------- 1 | use crate::handler::{handle, Response}; 2 | use crate::threadpool::ThreadPool; 3 | use crate::wire::parse; 4 | use autoincrement::prelude::AsyncIncremental; 5 | use bson::{doc, Bson}; 6 | use byteorder::{ByteOrder, LittleEndian}; 7 | use r2d2_postgres::{postgres::NoTls, PostgresConnectionManager}; 8 | use std::env; 9 | use std::io::prelude::*; 10 | use std::net::{Shutdown, TcpListener, TcpStream}; 11 | 12 | #[derive(AsyncIncremental, PartialEq, Eq, Debug)] 13 | struct RequestId(u32); 14 | 15 | pub struct Server { 16 | listen_addr: String, 17 | port: u16, 18 | pg_url: String, 19 | } 20 | 21 | impl Server { 22 | pub fn new(listen_addr: String, port: u16) -> Self { 23 | Self::new_with_pgurl( 24 | listen_addr, 25 | port, 26 | env::var("DATABASE_URL").unwrap_or("postgres://localhost:5432/oxide".to_string()), 27 | ) 28 | } 29 | 30 | pub fn new_with_pgurl(listen_addr: String, port: u16, pg_url: String) -> Self { 31 | Server { 32 | listen_addr, 33 | port, 34 | pg_url, 35 | } 36 | } 37 | 38 | pub fn start(&self) { 39 | let uri = &self.pg_url; 40 | let sanitized_uri = format!( 41 | "postgres://*****:*****@{}", 42 | uri.split("@").collect::<Vec<&str>>()[1] 43 | ); 44 | log::info!("Connecting to {}...", sanitized_uri); 45 | let manager = PostgresConnectionManager::new(self.pg_url.parse().unwrap(), NoTls); 46 | if let Ok(pool) = r2d2::Pool::new(manager) { 47 | self.start_with_pool(pool); 48 | } else { 49 | log::error!("Failed to connect to PostgreSQL."); 50 | } 51 | } 52 |
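    /// Starts the server on an already-built r2d2 PostgreSQL pool instead of a
    /// `DATABASE_URL`; this is the entry point the test suite uses (see
    /// tests/common.rs). A minimal sketch, assuming PostgreSQL is reachable at
    /// the given URL:
    ///
    /// ```no_run
    /// let manager = PostgresConnectionManager::new(
    ///     "postgres://postgres:postgres@localhost:5432/oxide".parse().unwrap(),
    ///     NoTls,
    /// );
    /// let pool = r2d2::Pool::new(manager).unwrap();
    /// Server::new("127.0.0.1".to_string(), 27017).start_with_pool(pool);
    /// ```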
53 | pub fn start_with_pool(&self, pg_pool: r2d2::Pool<PostgresConnectionManager<NoTls>>) { 54 | let addr = format!("{}:{}", self.listen_addr, self.port); 55 | let listener = TcpListener::bind(&addr).unwrap(); 56 | let pool = ThreadPool::new(100); 57 | let generator = RequestId::init(); 58 | 59 | log::info!("OxideDB listening on {}...", addr); 60 | for stream in listener.incoming() { 61 | let stream = stream.unwrap(); 62 | let id = generator.pull(); 63 | let pg_pool = pg_pool.clone(); 64 | 65 | stream.set_nodelay(true).unwrap(); 66 | 67 | pool.execute(|| { 68 | handle_connection(stream, id, pg_pool); 69 | }); 70 | } 71 | 72 | log::info!("Shutting down."); 73 | } 74 | } 75 | 76 | fn handle_connection( 77 | mut stream: TcpStream, 78 | id: RequestId, 79 | pool: r2d2::Pool<PostgresConnectionManager<NoTls>>, 80 | ) { 81 | let addr = stream.peer_addr().unwrap(); 82 | log::debug!("Client connected: {}", addr); 83 | 84 | loop { 85 | let mut size_buffer = [0; 4]; 86 | let _read = stream.peek(&mut size_buffer).unwrap(); // peek the length prefix without consuming it 87 | let size = LittleEndian::read_u32(&size_buffer); 88 | if size == 0 { 89 | stream.flush().unwrap(); 90 | break; 91 | } 92 | let mut buffer = vec![0; size as usize]; 93 | 94 | match stream.read_exact(&mut buffer) { 95 | Ok(_read) => { 96 | use std::time::Instant; 97 | let now = Instant::now(); 98 | 99 | let op_code = parse(&buffer); 100 | log::trace!("{} {} bytes: {:?}", addr, size, op_code); 101 | if op_code.is_err() { 102 | log::error!( 103 | "Could not understand - {} {} bytes: {:?}", 104 | addr, 105 | size, 106 | op_code 107 | ); 108 | stream.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap(); // reply with a zeroed 16-byte header before giving up on the connection 109 | stream.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap(); 110 | stream.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap(); 111 | stream.write_all(&[0x00, 0x00, 0x00, 0x00]).unwrap(); 112 | return; 113 | } 114 | 115 | let op_code = op_code.unwrap(); 116 | let mut response = match handle(id.0, &pool, addr, &op_code) { 117 | Ok(reply) => reply, 118 | Err(e) => { 119 | log::error!("Error while handling: {}", e); 120 | let err = doc!
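// Handler errors are reported back to the client as a CommandNotFound-style
// error document (ok: 0, code 59) rather than dropping the connection.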
{ 121 | "ok": Bson::Double(0.0), 122 | "errmsg": Bson::String(format!("{}", e)), 123 | "code": Bson::Int32(59), 124 | "codeName": "CommandNotFound", 125 | }; 126 | let request = Response::new(id.0, &op_code, vec![err]); 127 | op_code.reply(request).unwrap() 128 | } 129 | }; 130 | 131 | response.flush().unwrap(); 132 | 133 | let elapsed = now.elapsed(); 134 | log::trace!("Processed {}bytes in {:.2?}\n", response.len(), elapsed); 135 | 136 | stream.write_all(&response).unwrap(); 137 | } 138 | Err(e) => { 139 | log::error!("Error on request id {}: {}", id.0, e); 140 | stream.shutdown(Shutdown::Both).unwrap(); 141 | return; 142 | } 143 | }; 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/parser/update_parser.rs: -------------------------------------------------------------------------------- 1 | use bson::Document; 2 | 3 | use crate::utils::expand_fields; 4 | 5 | #[derive(Debug, Clone, PartialEq)] 6 | pub enum UpdateOper { 7 | Update(Vec), 8 | Replace(Document), 9 | } 10 | 11 | #[derive(Debug, Clone, PartialEq)] 12 | pub enum UpdateDoc { 13 | Inc(Document), 14 | Set(Document), 15 | Unset(Document), 16 | AddToSet(Document), 17 | } 18 | 19 | #[derive(Debug, Clone, PartialEq)] 20 | pub struct InvalidUpdateError { 21 | reason: String, 22 | } 23 | 24 | impl InvalidUpdateError { 25 | pub fn new(reason: String) -> Self { 26 | InvalidUpdateError { reason } 27 | } 28 | } 29 | 30 | impl UpdateDoc { 31 | fn validate(&self) -> Result { 32 | match self { 33 | UpdateDoc::Set(doc) => match expand_fields(doc) { 34 | Ok(u) => Ok(UpdateDoc::Set(u)), 35 | Err(e) => { 36 | return Err(InvalidUpdateError::new(format!( 37 | "Cannot update '{}' and '{}' at the same time", 38 | e.target, e.source 39 | ))); 40 | } 41 | }, 42 | UpdateDoc::Unset(doc) => Ok(UpdateDoc::Unset(doc.clone())), 43 | UpdateDoc::Inc(u) => Ok(UpdateDoc::Inc(u.clone())), 44 | UpdateDoc::AddToSet(doc) => Ok(UpdateDoc::AddToSet(doc.clone())), 45 | // _ => { 46 | // return Err(InvalidUpdateError::new(format!( 47 | // "Unhandled update operation: {:?}", 48 | // self 49 | // ))); 50 | // } 51 | } 52 | } 53 | } 54 | 55 | pub fn parse_update(doc: &Document) -> Result { 56 | let mut res: Vec = vec![]; 57 | if !doc.keys().any(|k| k.starts_with("$")) { 58 | return Ok(UpdateOper::Replace(doc.clone())); 59 | } 60 | for (key, value) in doc.iter() { 61 | match key.as_str() { 62 | "$set" => { 63 | let expanded_doc = match expand_fields(value.as_document().unwrap()) { 64 | Ok(doc) => doc, 65 | Err(e) => { 66 | return Err(InvalidUpdateError::new(format!( 67 | "Cannot update '{}' and '{}' at the same time", 68 | e.target, e.source 69 | ))); 70 | } 71 | }; 72 | match UpdateDoc::Set(expanded_doc).validate() { 73 | Ok(update_doc) => res.push(update_doc), 74 | Err(e) => { 75 | return Err(InvalidUpdateError::new(format!("{:?}", e))); 76 | } 77 | } 78 | } 79 | "$unset" => { 80 | let expanded_doc = match expand_fields(value.as_document().unwrap()) { 81 | Ok(doc) => doc, 82 | Err(e) => { 83 | return Err(InvalidUpdateError::new(format!( 84 | "Cannot update '{}' and '{}' at the same time", 85 | e.target, e.source 86 | ))); 87 | } 88 | }; 89 | match UpdateDoc::Unset(expanded_doc).validate() { 90 | Ok(update_doc) => res.push(update_doc), 91 | Err(e) => { 92 | return Err(InvalidUpdateError::new(format!("{:?}", e))); 93 | } 94 | } 95 | } 96 | "$inc" => { 97 | let expanded_doc = match expand_fields(value.as_document().unwrap()) { 98 | Ok(doc) => doc, 99 | Err(e) => { 100 | return Err(InvalidUpdateError::new(format!( 101 | 
"Cannot update '{}' and '{}' at the same time", 102 | e.target, e.source 103 | ))); 104 | } 105 | }; 106 | match UpdateDoc::Inc(expanded_doc).validate() { 107 | Ok(update_doc) => res.push(update_doc), 108 | Err(e) => { 109 | return Err(InvalidUpdateError::new(format!("{:?}", e))); 110 | } 111 | } 112 | } 113 | "$addToSet" => { 114 | let doc = value.as_document().unwrap().clone(); 115 | match UpdateDoc::AddToSet(doc).validate() { 116 | Ok(update_doc) => res.push(update_doc), 117 | Err(e) => { 118 | return Err(InvalidUpdateError::new(format!("{:?}", e))); 119 | } 120 | } 121 | } 122 | _ => { 123 | if key.starts_with("$") || res.len() > 0 { 124 | log::error!("Unhandled update operator: {}\nDocument = {:#?}", key, doc); 125 | return Err(InvalidUpdateError::new(format!( 126 | "Unknown update operator: {}", 127 | key 128 | ))); 129 | } 130 | } 131 | } 132 | } 133 | Ok(UpdateOper::Update(res)) 134 | } 135 | 136 | #[cfg(test)] 137 | mod tests { 138 | use super::*; 139 | use bson::doc; 140 | 141 | #[test] 142 | fn test_parse_update() { 143 | let set_doc = doc! { "$set": { "a": 1 } }; 144 | let repl_doc = doc! { "b": 2, "c": 8, "d": 9 }; 145 | let unknown_doc = doc! { "$xyz": { "a": 1 } }; 146 | let mixed_doc = doc! { "$set": { "x": 1 }, "b": 2 }; 147 | 148 | assert_eq!( 149 | parse_update(&set_doc).unwrap(), 150 | UpdateOper::Update(vec![UpdateDoc::Set(doc! { "a": 1 })]) 151 | ); 152 | assert_eq!( 153 | parse_update(&repl_doc).unwrap(), 154 | UpdateOper::Replace(repl_doc) 155 | ); 156 | assert_eq!( 157 | parse_update(&unknown_doc).unwrap_err(), 158 | InvalidUpdateError::new("Unknown update operator: $xyz".to_string()) 159 | ); 160 | assert_eq!( 161 | parse_update(&mixed_doc).unwrap_err(), 162 | InvalidUpdateError::new("Unknown update operator: b".to_string()) 163 | ); 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /tests/common.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | use bson::Bson; 3 | use mongodb::bson::Document; 4 | use mongodb::sync::Cursor; 5 | use oxide::pg::PgDb; 6 | use oxide::server::Server; 7 | use r2d2::Pool; 8 | use r2d2_postgres::{postgres::NoTls, PostgresConnectionManager}; 9 | use std::fs::{self, File}; 10 | use std::io::{Read, Write}; 11 | use std::net::{Shutdown, TcpStream}; 12 | use std::sync::Once; 13 | use std::time::{SystemTime, UNIX_EPOCH}; 14 | use std::{env, thread}; 15 | 16 | static INIT: Once = Once::new(); 17 | 18 | #[derive(Debug, Clone)] 19 | pub struct TestContext { 20 | pub db: String, 21 | pub collection: String, 22 | mongodb: mongodb::sync::Client, 23 | port: u16, 24 | } 25 | 26 | impl TestContext { 27 | pub fn new(port: u16, db: String) -> Self { 28 | let id = uuid::Uuid::new_v4().to_string(); 29 | let collection = format!("test_{}", id).to_string(); 30 | let client_uri = format!("mongodb://localhost:{}/test", port); 31 | let mongodb = mongodb::sync::Client::with_uri_str(&client_uri).unwrap(); 32 | 33 | TestContext { 34 | db, 35 | collection, 36 | mongodb, 37 | port, 38 | } 39 | } 40 | 41 | pub fn mongodb(&self) -> &mongodb::sync::Client { 42 | &self.mongodb 43 | } 44 | 45 | pub fn db(&self) -> mongodb::sync::Database { 46 | self.mongodb().database(self.db.as_str()) 47 | } 48 | 49 | pub fn col(&self) -> mongodb::sync::Collection { 50 | self.db().collection(self.collection.as_str()) 51 | } 52 | 53 | pub fn send(&self, bytes: &[u8]) -> Vec { 54 | let mut stream = TcpStream::connect(&format!("localhost:{}", self.port)).unwrap(); 55 | 
stream.write_all(bytes).unwrap(); 56 | stream.flush().unwrap(); 57 | let mut buffer = [0u8; 1024]; 58 | stream.read(&mut buffer).unwrap(); 59 | stream.shutdown(Shutdown::Write).unwrap(); 60 | 61 | buffer[..].to_vec() 62 | } 63 | 64 | pub fn send_file(&self, filename: &str) -> Vec { 65 | let mut f = File::open(&filename).unwrap(); 66 | let metadata = fs::metadata(&filename).unwrap(); 67 | println!("file size = {}", metadata.len()); 68 | let mut buffer = vec![0; metadata.len() as usize]; 69 | f.read(&mut buffer).unwrap(); 70 | 71 | self.send(&buffer) 72 | } 73 | } 74 | 75 | pub fn initialize(pool: Pool>) { 76 | INIT.call_once(|| { 77 | let mut pg = PgDb::new_from_pool(pool); 78 | pg.exec( 79 | indoc::indoc! {" 80 | DO $$ DECLARE 81 | r RECORD; 82 | BEGIN 83 | FOR r IN (SELECT indexname FROM pg_indexes WHERE schemaname = 'db_test') LOOP 84 | EXECUTE 'DROP INDEX IF EXISTS db_test.' || quote_ident(r.indexname) || ' CASCADE'; 85 | END LOOP; 86 | 87 | FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'db_test') LOOP 88 | EXECUTE 'DROP TABLE IF EXISTS db_test.' || quote_ident(r.tablename) || ' CASCADE'; 89 | END LOOP; 90 | END $$; 91 | "}, 92 | &[], 93 | ) 94 | .unwrap(); 95 | }); 96 | } 97 | 98 | pub fn setup_with_pg_db(name: &str, drop: bool) -> TestContext { 99 | // static ID_COUNTER: AtomicU32 = AtomicU32::new(0); 100 | // let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); 101 | 102 | let _ = env_logger::builder().is_test(true).try_init(); 103 | dotenv::dotenv().ok(); 104 | 105 | if drop { 106 | PgDb::new().drop_db(name).unwrap(); 107 | } 108 | 109 | PgDb::new().create_db_if_not_exists(name).unwrap(); 110 | 111 | let pg_url = format!("{}/{}", env::var("TEST_DATABASE_URL").unwrap(), name); 112 | let port: u16 = portpicker::pick_unused_port().unwrap(); 113 | 114 | let manager = PostgresConnectionManager::new(pg_url.parse().unwrap(), NoTls); 115 | let pool = r2d2::Pool::builder().max_size(2).build(manager).unwrap(); 116 | initialize(pool.clone()); 117 | 118 | thread::spawn(move || { 119 | Server::new("localhost".to_string(), port).start_with_pool(pool); 120 | }); 121 | 122 | TestContext::new(port, "db_test".to_string()) 123 | } 124 | 125 | pub fn setup() -> TestContext { 126 | setup_with_pg_db("db_test", false) 127 | } 128 | 129 | pub fn setup_with_drop(drop: bool) -> TestContext { 130 | setup_with_pg_db("test", drop) 131 | } 132 | 133 | pub fn get_rows(cursor: Cursor) -> Vec { 134 | let rows: Vec> = cursor.collect(); 135 | let rows: Result, mongodb::error::Error> = rows.into_iter().collect(); 136 | rows.unwrap() 137 | } 138 | 139 | pub fn get_datetime(rfc3339: &str) -> Bson { 140 | let date = chrono::DateTime::parse_from_rfc3339(rfc3339).unwrap(); 141 | let time: u128 = SystemTime::from(date) 142 | .duration_since(UNIX_EPOCH) 143 | .unwrap() 144 | .as_millis(); 145 | Bson::DateTime(bson::DateTime::from_millis(time.try_into().unwrap())) 146 | } 147 | 148 | #[macro_export] 149 | macro_rules! insert { 150 | ( $( $x:expr ),+ $(,)? ) => { 151 | { 152 | let ctx = common::setup(); 153 | ctx.col() 154 | .insert_many( 155 | vec![ 156 | $( $x, )* 157 | ], 158 | None, 159 | ) 160 | .unwrap(); 161 | ctx.col() 162 | } 163 | }; 164 | } 165 | 166 | #[macro_export] 167 | macro_rules! assert_row_count { 168 | ( $col:expr, $query:expr, $exp:expr ) => {{ 169 | let cursor = $col.find($query, None).unwrap(); 170 | let rows: Vec> = cursor.collect(); 171 | assert_eq!($exp, rows.len()); 172 | }}; 173 | } 174 | 175 | #[macro_export] 176 | macro_rules! 
assert_unique_row_value { 177 | ( $cursor:expr, $field:expr, $value:expr ) => {{ 178 | use mongodb::bson::Document; 179 | use std::any::Any; 180 | 181 | let rows: Vec> = $cursor.collect(); 182 | if rows.len() < 1 { 183 | assert!(false, "No rows found: {:?}", rows); 184 | } 185 | if rows.len() > 1 { 186 | assert!(false, "More than one row found: {:?}", rows); 187 | } 188 | let rows: Result, mongodb::error::Error> = rows.into_iter().collect(); 189 | 190 | if let Err(r) = rows { 191 | return assert!(false, "Error: {:?}", r); 192 | } 193 | 194 | let row = &rows.unwrap()[0]; 195 | if let Some(f) = (&$value as &dyn Any).downcast_ref::() { 196 | let value = row.get_i32($field).unwrap(); 197 | assert_eq!(f, &value); 198 | } else { 199 | unimplemented!("can't handle type for {:?}", &$value); 200 | } 201 | }}; 202 | } 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![banner-small](https://user-images.githubusercontent.com/1371/184679510-4f455245-8580-4b00-809b-19f865dd0931.png) 2 | 3 |



11 | “If it looks like MongoDB, swims like MongoDB, and quacks like MongoDB, then it probably is PostgreSQL.” :upside_down_face: 12 |


15 | Discord | Online Demo | Intro Video | Quickstart 16 |

17 | 18 | # OxideDB 19 | 20 | OxideDB is a translation layer that works as a MongoDB database server while using PostgreSQL's JSON capabilities as the underlying data store. 21 | 22 | ## Audience 23 | 24 | This project might be something you could be interested in if: 25 | 26 | - You spend too much time managing, or too much money paying for, a MongoDB instance, while only using it as a simple 27 | document store, without any sharding features 28 | - You already have a running PostgreSQL deployment, or prefer to manage it over MongoDB 29 | 30 | On the other hand, if your use-case leverages MongoDB as a distributed database, then unfortunately this project might 31 | not be for you. At least right now, supporting sharding and scale-out deployments is not part of the roadmap. 32 | 33 | ## Current status 34 | 35 | The project was heavily inspired by [FerretDB](https://ferretdb.io) and is in its early days. The main difference is that 36 | there is no intention to support any database other than PostgreSQL (FerretDB also supports Tigris) and it's written 37 | in Rust, as opposed to Go. 38 | 39 | To translate the MongoDB query language, which is based on JSON, to SQL, I ported [the mongodb-language-model library](https://github.com/mongodb-js/mongodb-language-model), originally written in Node.js and PEG.js, to Rust and [pest.rs](https://pest.rs/). It was an excellent opportunity to learn in more depth how parsers work. 40 | 41 | You can check it out here: [mongodb-language-model-rust](https://github.com/fcoury/mongodb-language-model-rust). 42 | 43 | At the moment, it's being developed as a personal project, but contributors are highly welcome. If that's something you'd 44 | be interested in, please feel free to contact me. 45 | 46 | ## Online demo 47 | 48 | There's an online [demo of the web interface hosted at Railway.app](https://demo.oxidedb.com/). 49 | 50 | ## Quickstart 51 | 52 | Download the [latest binary](https://github.com/fcoury/oxide/releases/latest) and run it. You will need to point it at a running PostgreSQL instance for OxideDB to use as its backend. 53 | 54 | ``` 55 | > $ ./oxide --postgres-url "postgres://postgres:postgres@localhost:5432/test" 56 | [2022-07-13T02:57:46Z INFO oxide::server] Connecting to postgres://*****:*****@localhost:5432/test... 57 | [2022-07-13T02:57:46Z INFO oxide::server] OxideDB listening on 127.0.0.1:27017... 58 | ``` 59 | 60 | With the database configuration set, you can use any [MongoDB](https://www.mongodb.com) client to connect to OxideDB, such as [mongosh](https://www.mongodb.com/docs/mongodb-shell/): 61 | 62 | ``` 63 | > $ mongosh 64 | Current Mongosh Log ID: 62ce3531d10f489bc82520c4 65 | Connecting to: mongodb://127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+1.5.0 66 | Using MongoDB: 3.6.23 67 | Using Mongosh: 1.5.0 68 | 69 | For mongosh info see: https://docs.mongodb.com/mongodb-shell/ 70 | 71 | ------ 72 | The server generated these startup warnings when booting 73 | 2022-07-12T18:56:41.654-0300: 74 | 2022-07-12T18:56:41.654-0300: ** WARNING: Access control is not enabled for the database. 75 | 2022-07-12T18:56:41.654-0300: ** Read and write access to data and configuration is unrestricted. 76 | 2022-07-12T18:56:41.654-0300: 77 | ------ 78 | 79 | test> db.col.insertMany([{ name: "Felipe" }, { name: "Fernanda" }]); 80 | { 81 | acknowledged: true, 82 | insertedIds: { 83 | '0': ObjectId("62ce3536d10f489bc82520c5"), 84 | '1': ObjectId("62ce3536d10f489bc82520c6") 85 | } 86 | } 87 | test> db.col.find({ "name": "Fernanda" }) 88 | [ { _id: ObjectId("62ce3536d10f489bc82520c6"), name: 'Fernanda' } ] 89 | ```
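Under the hood, each MongoDB database maps to a PostgreSQL schema and each collection to a table with a single `_jsonb` column, and queries are compiled to SQL over that column. As a rough sketch of the mapping, here is the crate's internal `SqlStatement` builder (from `src/commands/aggregate/sql_statement.rs`) assembling the kind of SQL a simple equality filter turns into; the JSONB filter expression shown is illustrative, and the exact SQL emitted for `find` may differ:

```rust
// Sketch only: SqlStatement/SqlParam are internal to the crate, and the
// JSONB filter below is an assumption about how a simple match could look.
let sql = SqlStatement::builder()
    .field("_jsonb")
    .filter("_jsonb->>'name' = 'Fernanda'")
    .from_table(SqlParam::new("test", "col"))
    .build();
assert_eq!(
    sql.to_string(),
    r#"SELECT _jsonb FROM "test"."col" WHERE _jsonb->>'name' = 'Fernanda'"#
);
```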
90 | 91 | ## Quick intro 92 | 93 | [![Intro to OxideDB](docs/assets/video.png)](https://youtu.be/8TkcGV0TkgM) 94 | 95 | ## Web interface 96 | 97 | ![Web UI](docs/assets/screenshot.png) 98 | 99 | If you want to run quick queries against your database, you can use the built-in web interface; just start oxide with the `--web` flag: 100 | 101 | ``` 102 | > $ ./oxide --web --postgres-url postgres://postgres:postgres@localhost:5432/oxide 103 | [2022-07-29T00:20:11Z INFO oxide::server] Connecting to postgres://*****:*****@localhost:5432/oxide... 104 | [2022-07-29T00:20:11Z INFO oxide::server] OxideDB listening on 0.0.0.0:27017... 105 | ``` 106 | 107 | Now just navigate to http://localhost:8087 and you'll be able to run quick queries. 108 | 109 | ## Running options 110 | 111 | Instead of passing `--postgres-url`, you can also set the `DATABASE_URL` environment variable or use a `.env` file. 112 | 113 | By default oxide binds to 127.0.0.1 on port 27017. You can change those settings using the following parameters: 114 | 115 | ``` 116 | > $ ./oxide --help 117 | oxide 0.1.6 118 | A database compatible with MongoDB Wire Protocol that uses PostgreSQL for backend storage. 119 | 120 | USAGE: 121 | oxide [OPTIONS] 122 | oxide <SUBCOMMAND> 123 | 124 | OPTIONS: 125 | -d, --debug Show debugging information 126 | -h, --help Print help information 127 | -l, --listen-addr <LISTEN_ADDR> Listening address, defaults to 127.0.0.1 128 | -p, --port <PORT> Listening port, defaults to 27017 129 | -u, --postgres-url <POSTGRES_URL> PostgreSQL connection URL 130 | -V, --version Print version information 131 | -w, --web Starts web interface 132 | --web-addr <WEB_ADDR> Web binding address 133 | 134 | SUBCOMMANDS: 135 | help Print this message or the help of the given subcommand(s) 136 | web Start OxideDB web interface 137 | ``` 138 | 139 | ### Running with Docker 140 | 141 | Assuming you're running a local PostgreSQL instance, you can run OxideDB with Docker using the commands below. 142 | 143 | ``` 144 | docker build -t oxide .
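# Note: host.docker.internal resolves to the host machine on Docker Desktop (macOS/Windows).
# On Linux, add --add-host=host.docker.internal:host-gateway to the docker run command below.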
145 | docker run -d -p 27017:27017 -p 8087:8087 -e DATABASE_URL=postgres://postgres:postgres@host.docker.internal:5432/test --name oxide oxide 146 | ``` 147 | 148 | ### Running from source 149 | 150 | ```shell 151 | git clone https://github.com/fcoury/oxide.git 152 | cd oxide 153 | export DATABASE_URL="postgres://postgres:postgres@localhost:5432/test" 154 | make start 155 | ``` 156 | -------------------------------------------------------------------------------- /src/wire/util.rs: -------------------------------------------------------------------------------- 1 | use crate::utils::to_cstring; 2 | use crate::wire::{OpMsg, OpMsgSection}; 3 | use bson::{ser, Document}; 4 | use byteorder::{LittleEndian, ReadBytesExt}; 5 | use indoc::indoc; 6 | use std::io::{BufRead, Cursor, Read}; 7 | 8 | use super::UnknownMessageKindError; 9 | 10 | pub fn parse_section( 11 | bytes: &mut Vec, 12 | ) -> Result<(OpMsgSection, Vec), UnknownMessageKindError> { 13 | let kind = bytes[0]; 14 | if kind == 0 { 15 | return Ok(parse_kind0(bytes.clone())); 16 | } else if kind == 1 { 17 | return Ok(parse_kind1(bytes.clone())); 18 | } 19 | 20 | // FIXME add the kind to the error 21 | Err(UnknownMessageKindError {}) 22 | } 23 | 24 | fn parse_kind0(bytes: Vec) -> (OpMsgSection, Vec) { 25 | let mut cursor = Cursor::new(bytes); 26 | 27 | let kind = cursor.read_u8().unwrap(); 28 | 29 | let mut new_cursor = cursor.clone(); 30 | let document = Document::from_reader(cursor).unwrap(); 31 | let bson_vec = ser::to_vec(&document).unwrap(); 32 | new_cursor.set_position(new_cursor.position() + bson_vec.len() as u64); 33 | 34 | let mut tail: Vec = vec![]; 35 | new_cursor.read_to_end(&mut tail).unwrap(); 36 | 37 | ( 38 | OpMsgSection { 39 | kind, 40 | identifier: None, 41 | documents: vec![document], 42 | }, 43 | tail, 44 | ) 45 | } 46 | 47 | fn parse_kind1_documents(data: &[u8]) -> Vec { 48 | let size = data.len(); 49 | let mut cursor = Cursor::new(data); 50 | let mut documents = Vec::new(); 51 | 52 | let mut read_size = 0; 53 | while read_size < size as usize { 54 | let doc_cursor = cursor.clone(); 55 | let document = Document::from_reader(doc_cursor).unwrap(); 56 | let bson_vec = ser::to_vec(&document).unwrap(); 57 | let document_size = bson_vec.len(); 58 | cursor.set_position(cursor.position() + document_size as u64); 59 | 60 | documents.push(document); 61 | read_size += document_size; 62 | } 63 | 64 | return documents; 65 | } 66 | 67 | fn parse_kind1(bytes: Vec) -> (OpMsgSection, Vec) { 68 | let mut cursor = Cursor::new(bytes); 69 | 70 | // session kind 71 | let kind = cursor.read_u8().unwrap(); 72 | 73 | // session contents size 74 | let size = cursor.read_u32::().unwrap(); 75 | 76 | // identifier 77 | let mut identifier_buffer: Vec = vec![]; 78 | cursor.read_until(0, &mut identifier_buffer).unwrap(); 79 | let identifier_size: u32 = identifier_buffer.len() as u32; 80 | let identifier = to_cstring(identifier_buffer); 81 | 82 | // whole section = size - sizeof(size) - sizeof(identifier) 83 | // size - 4 - len(identifier_buffer) 84 | let remaining_size: u32 = size - 4 - identifier_size; 85 | let mut section_buffer: Vec = vec![0u8; remaining_size as usize]; 86 | cursor.read_exact(&mut section_buffer).unwrap(); 87 | 88 | let documents = parse_kind1_documents(§ion_buffer); 89 | let mut tail: Vec = vec![]; 90 | cursor.read_to_end(&mut tail).unwrap(); 91 | 92 | ( 93 | OpMsgSection { 94 | kind, 95 | identifier: Some(identifier), 96 | documents, 97 | }, 98 | tail, 99 | ) 100 | } 101 | 102 | #[cfg(test)] 103 | mod tests { 104 | use 
crate::utils::hexstring_to_bytes; 105 | 106 | use super::*; 107 | 108 | #[test] 109 | fn test_parse_sections() { 110 | let kind1kind0 = indoc! {" 111 | 01 2f 00 00 00 64 6f 63 75 6d 65 6e 112 | 74 73 00 21 00 00 00 07 5f 69 64 00 62 ce d6 9a 113 | 33 78 79 a1 ac c2 9d 40 01 78 00 00 00 00 00 00 114 | 00 f0 3f 00 00 51 00 00 00 02 69 6e 73 65 72 74 115 | 00 04 00 00 00 63 6f 6c 00 08 6f 72 64 65 72 65 116 | 64 00 01 03 6c 73 69 64 00 1e 00 00 00 05 69 64 117 | 00 10 00 00 00 04 e1 54 58 c6 4e 89 4c a3 81 0f 118 | 19 59 d3 a3 2c cf 00 02 24 64 62 00 05 00 00 00 119 | 74 65 73 74 00 00 120 | "}; 121 | let mut bytes = hexstring_to_bytes(kind1kind0); 122 | let (section1, mut bytes) = parse_section(&mut bytes).unwrap(); 123 | assert_eq!(section1.kind, 1); 124 | assert_eq!(section1.identifier.unwrap(), "documents\0"); 125 | assert_eq!(section1.documents.len(), 1); 126 | assert_eq!( 127 | section1.documents[0].get_object_id("_id").unwrap(), 128 | bson::oid::ObjectId::parse_str("62ced69a337879a1acc29d40").unwrap() 129 | ); 130 | assert_eq!(section1.documents[0].get_f64("x").unwrap(), 1.0); 131 | assert_eq!(bytes.len(), 82); 132 | 133 | let (section0, bytes) = parse_section(&mut bytes).unwrap(); 134 | assert_eq!(section0.kind, 0); 135 | assert_eq!(section0.identifier, None); 136 | assert_eq!(section0.documents.len(), 1); 137 | assert_eq!(section0.documents[0].get_str("insert").unwrap(), "col"); 138 | assert_eq!(section0.documents[0].get_bool("ordered").unwrap(), true); 139 | assert_eq!(section0.documents[0].get_str("$db").unwrap(), "test"); 140 | assert_eq!(bytes.len(), 0); 141 | } 142 | 143 | #[test] 144 | fn test_parse_kind0_section() { 145 | let kind0 = indoc! {" 146 | 00 51 00 00 00 02 69 6e 73 65 72 74 147 | 00 04 00 00 00 63 6f 6c 00 08 6f 72 64 65 72 65 148 | 64 00 01 03 6c 73 69 64 00 1e 00 00 00 05 69 64 149 | 00 10 00 00 00 04 e1 54 58 c6 4e 89 4c a3 81 0f 150 | 19 59 d3 a3 2c cf 00 02 24 64 62 00 05 00 00 00 151 | 74 65 73 74 00 00 152 | "}; 153 | 154 | let mut bytes = hexstring_to_bytes(kind0); 155 | let (section, _) = parse_section(&mut bytes).unwrap(); 156 | assert_eq!(section.kind, 0); 157 | } 158 | 159 | #[test] 160 | fn test_parse_kind1_section() { 161 | // 96 00 00 00 61 00 00 00 00 00 00 00 dd 07 00 00 -- heade 162 | // 00 00 00 00 -- flags 163 | // 01 -- kind 164 | // size -- 2f 00 00 00 - 0x0000002f = 47 165 | // 64 6f 63 75 6d 65 6e 166 | 167 | let kind1 = indoc! {" 168 | 01 2f 00 00 00 64 6f 63 75 6d 65 6e 74 73 00 21 00 169 | 00 00 07 5f 69 64 00 62 ce d6 9a 33 78 79 a1 ac 170 | c2 9d 40 01 78 00 00 00 00 00 00 00 f0 3f 00 00 171 | 51 00 00 00 02 69 6e 73 65 72 74 00 04 00 00 00 172 | 63 6f 6c 00 08 6f 72 64 65 72 65 64 00 01 03 6c 173 | 73 69 64 00 1e 00 00 00 05 69 64 00 10 00 00 00 174 | 04 e1 54 58 c6 4e 89 4c a3 81 0f 19 59 d3 a3 2c 175 | cf 00 02 24 64 62 00 05 00 00 00 74 65 73 74 00 176 | 00 177 | "}; 178 | 179 | let mut bytes = hexstring_to_bytes(kind1); 180 | let (section, _) = parse_section(&mut bytes).unwrap(); 181 | assert_eq!(section.kind, 1); 182 | } 183 | 184 | #[test] 185 | fn test_kind1_op_msg() { 186 | let op_msg_hexstr = indoc! 
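// Worked size check for the kind 1 section in the dump below: the section
// size field reads 0x2f = 47 bytes, which is 4 (the size field itself)
// + 10 (the C-string identifier "documents\0") + 33 (one BSON document,
// whose embedded length prefix is 0x21).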
{" 187 | 96 00 00 00 61 00 00 00 00 00 00 00 dd 07 00 00 188 | 00 00 00 00 01 2f 00 00 00 64 6f 63 75 6d 65 6e 189 | 74 73 00 21 00 00 00 07 5f 69 64 00 62 ce d6 9a 190 | 33 78 79 a1 ac c2 9d 40 01 78 00 00 00 00 00 00 191 | 00 f0 3f 00 00 51 00 00 00 02 69 6e 73 65 72 74 192 | 00 04 00 00 00 63 6f 6c 00 08 6f 72 64 65 72 65 193 | 64 00 01 03 6c 73 69 64 00 1e 00 00 00 05 69 64 194 | 00 10 00 00 00 04 e1 54 58 c6 4e 89 4c a3 81 0f 195 | 19 59 d3 a3 2c cf 00 02 24 64 62 00 05 00 00 00 196 | 74 65 73 74 00 00 197 | "}; 198 | 199 | let bytes = hexstring_to_bytes(op_msg_hexstr); 200 | let op_msg = OpMsg::from_bytes(&bytes).unwrap(); 201 | 202 | assert_eq!(op_msg.header.message_length, 150); 203 | assert_eq!(op_msg.header.request_id, 97); 204 | assert_eq!(op_msg.header.response_to, 0); 205 | assert_eq!(op_msg.header.op_code, 2013); 206 | 207 | assert_eq!(op_msg.flags, 0); 208 | assert_eq!(op_msg.sections.len(), 2); 209 | 210 | let section1 = &op_msg.sections[0]; 211 | let identifier = section1.identifier.clone().unwrap(); 212 | assert_eq!(section1.kind, 1); 213 | assert_eq!(section1.documents.len(), 1); 214 | assert_eq!(identifier, "documents\0"); 215 | assert_eq!( 216 | section1.documents[0].get_object_id("_id").unwrap(), 217 | bson::oid::ObjectId::parse_str("62ced69a337879a1acc29d40").unwrap() 218 | ); 219 | assert_eq!(section1.documents[0].get_f64("x").unwrap(), 1.0); 220 | 221 | let section0 = &op_msg.sections[1]; 222 | assert_eq!(section0.kind, 0); 223 | assert_eq!(section0.identifier, None); 224 | assert_eq!(section0.documents.len(), 1); 225 | assert_eq!(section0.documents[0].get_str("insert").unwrap(), "col"); 226 | assert_eq!(section0.documents[0].get_bool("ordered").unwrap(), true); 227 | assert_eq!(section0.documents[0].get_str("$db").unwrap(), "test"); 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /src/commands/aggregate/group_stage.rs: -------------------------------------------------------------------------------- 1 | use super::{group_id::process_id, sql_statement::SqlStatement}; 2 | use crate::utils::{collapse_fields, convert_if_numeric, field_to_jsonb}; 3 | use bson::{Bson, Document}; 4 | 5 | #[derive(Debug)] 6 | pub struct InvalidGroupError { 7 | pub message: String, 8 | } 9 | 10 | pub fn process_group(doc: &Document) -> eyre::Result { 11 | let mut doc = doc.clone(); 12 | let mut sql = SqlStatement::new(); 13 | 14 | if doc.contains_key("_id") { 15 | sql.append(&mut process_id(&mut doc)?); 16 | } 17 | 18 | let doc = collapse_fields(&doc); 19 | 20 | for (raw_key, value) in doc.iter() { 21 | let mut value: Bson = value.to_owned(); 22 | let keys = raw_key.split(".").collect::>(); 23 | for key in keys.iter().skip(1).rev() { 24 | match *key { 25 | "$sum" | "$avg" => { 26 | let oper = key.strip_prefix("$").unwrap(); 27 | let sql_func = oper.to_ascii_uppercase(); 28 | match value { 29 | Bson::String(str_val) => { 30 | value = Bson::String(process(&sql_func, &str_val)); 31 | } 32 | Bson::Int32(i32val) => { 33 | value = Bson::String(process(&sql_func, &i32val.to_string())); 34 | } 35 | Bson::Int64(i64val) => { 36 | value = Bson::String(process(&sql_func, &i64val.to_string())); 37 | } 38 | _ => { 39 | return Err(eyre::eyre!( 40 | "Cannot currently use {} on non-numeric field", 41 | oper 42 | )); 43 | } 44 | } 45 | } 46 | "$add" | "$multiply" | "$subtract" | "$divide" => { 47 | let oper = match *key { 48 | "$add" => "+", 49 | "$multiply" => "*", 50 | "$subtract" => "-", 51 | "$divide" => "/", 52 | _ => { 53 | return Err(eyre::eyre!( 
54 | "Operation invalid or not yet implemented: {}", 55 | key 56 | )); 57 | } 58 | }; 59 | if let Some(values) = value.as_array() { 60 | // checks if values all start with $ 61 | let items = parse_math_oper_params(values)?; 62 | value = Bson::String(format!("{}", items.join(&format!(" {} ", oper)))); 63 | } else { 64 | return Err(eyre::eyre!( 65 | "Cannot {} can only take an array, got {:?}", 66 | oper, 67 | value 68 | )); 69 | } 70 | } 71 | _ => { 72 | return Err(eyre::eyre!("Operation missing or not implemented: {}", key)); 73 | } 74 | } 75 | } 76 | match value { 77 | Bson::String(str_val) => { 78 | sql.add_field(&format!("{} AS {}", str_val, keys[0])); 79 | } 80 | _ => { 81 | return Err(eyre::eyre!( 82 | r#"The field '{}' must be an accumulator object. Try wrapping it on an object like {{ "field": {{ "{}": {} }} }}."#, 83 | raw_key, 84 | raw_key, 85 | value 86 | )); 87 | } 88 | } 89 | } 90 | 91 | Ok(sql) 92 | } 93 | 94 | fn parse_math_oper_params(attributes: &bson::Array) -> eyre::Result> { 95 | let mut items: Vec = vec![]; 96 | for attr in attributes.into_iter() { 97 | match attr { 98 | Bson::String(str_val) => { 99 | if !str_val.starts_with("$") { 100 | return Err(eyre::eyre!("Prefixing fields with $ is mandatory. Use ${} if you want to use a field as attribute.", str_val)); 101 | } 102 | items.push(convert_if_numeric(&field_to_jsonb( 103 | str_val.strip_prefix("$").unwrap(), 104 | ))); 105 | } 106 | _ => { 107 | return Err(eyre::eyre!("Cannot use {:?} as a parameter", attr)); 108 | } 109 | } 110 | } 111 | Ok(items) 112 | } 113 | 114 | fn process(sql_func: &str, value: &str) -> String { 115 | // if it's a string starting with $ we take it as a field name 116 | let value = if let Some(field_name) = value.strip_prefix("$") { 117 | convert_if_numeric(&field_to_jsonb(field_name)) 118 | } else { 119 | value.to_owned() 120 | }; 121 | 122 | format!("{}({})", sql_func, value) 123 | } 124 | 125 | #[cfg(test)] 126 | mod tests { 127 | use super::*; 128 | 129 | use bson::doc; 130 | 131 | #[test] 132 | fn test_process_group_with_sum_int() { 133 | let doc = doc! { "_id": "$field", "count": { "$sum": 1 } }; 134 | let sql = process_group(&doc).unwrap(); 135 | assert_eq!(sql.fields[0], "_jsonb->'field' AS _id"); 136 | assert_eq!(sql.fields[1], "SUM(1) AS count"); 137 | assert_eq!(sql.groups[0], "_id"); 138 | } 139 | 140 | #[test] 141 | fn test_process_group_with_sum_field() { 142 | let doc = doc! { "_id": "$other", "qty": { "$sum": "$qty" } }; 143 | let sql = process_group(&doc).unwrap(); 144 | assert_eq!(sql.fields[0], "_jsonb->'other' AS _id"); 145 | assert_eq!(sql.fields[1], "SUM(CASE WHEN (_jsonb->'qty' ? '$f') THEN (_jsonb->'qty'->>'$f')::numeric ELSE (_jsonb->'qty')::numeric END) AS qty"); 146 | assert_eq!(sql.groups[0], "_id"); 147 | } 148 | 149 | #[test] 150 | fn test_process_group_with_avg_int() { 151 | let doc = doc! { "_id": "$field", "count": { "$avg": 1 } }; 152 | let sql = process_group(&doc).unwrap(); 153 | assert_eq!(sql.fields[0], "_jsonb->'field' AS _id"); 154 | assert_eq!(sql.fields[1], "AVG(1) AS count"); 155 | assert_eq!(sql.groups[0], "_id"); 156 | } 157 | 158 | #[test] 159 | fn test_process_group_with_avg_field() { 160 | let doc = doc! { "_id": "$other", "qty": { "$avg": "$qty" } }; 161 | let sql = process_group(&doc).unwrap(); 162 | assert_eq!(sql.fields[0], "_jsonb->'other' AS _id"); 163 | assert_eq!(sql.fields[1], "AVG(CASE WHEN (_jsonb->'qty' ? 
'$f') THEN (_jsonb->'qty'->>'$f')::numeric ELSE (_jsonb->'qty')::numeric END) AS qty"); 164 | assert_eq!(sql.groups[0], "_id"); 165 | } 166 | 167 | #[test] 168 | fn test_process_group_with_date_to_str() { 169 | let doc = doc! { "_id": { 170 | "$dateToString": { 171 | "format": "%Y-%m-%d", 172 | "date": "$date", 173 | } 174 | }, "qty": { "$avg": "$qty" } }; 175 | let sql = process_group(&doc).unwrap(); 176 | assert_eq!( 177 | sql.fields[0], 178 | "TO_CHAR(TO_TIMESTAMP((_jsonb->'date'->>'$d')::numeric / 1000), 'YYYY-MM-DD') AS _id" 179 | ); 180 | assert_eq!(sql.fields[1], "AVG(CASE WHEN (_jsonb->'qty' ? '$f') THEN (_jsonb->'qty'->>'$f')::numeric ELSE (_jsonb->'qty')::numeric END) AS qty"); 181 | assert_eq!(sql.groups[0], "_id"); 182 | } 183 | 184 | #[test] 185 | fn test_process_group_with_sum_of_multiply() { 186 | let doc = doc! { "_id": "$field", "total": { "$sum": { "$multiply": ["$a", "$b"] } } }; 187 | let sql = process_group(&doc).unwrap(); 188 | assert_eq!(sql.fields[0], "_jsonb->'field' AS _id"); 189 | assert_eq!(sql.fields[1], "SUM(CASE WHEN (_jsonb->'a' ? '$f') THEN (_jsonb->'a'->>'$f')::numeric ELSE (_jsonb->'a')::numeric END * CASE WHEN (_jsonb->'b' ? '$f') THEN (_jsonb->'b'->>'$f')::numeric ELSE (_jsonb->'b')::numeric END) AS total"); 190 | assert_eq!(sql.groups[0], "_id"); 191 | } 192 | 193 | #[test] 194 | fn test_process_group_with_sum_of_add() { 195 | let doc = doc! { "_id": "$field", "total": { "$sum": { "$add": ["$a", "$b"] } } }; 196 | let sql = process_group(&doc).unwrap(); 197 | assert_eq!(sql.fields[0], "_jsonb->'field' AS _id"); 198 | assert_eq!(sql.fields[1], "SUM(CASE WHEN (_jsonb->'a' ? '$f') THEN (_jsonb->'a'->>'$f')::numeric ELSE (_jsonb->'a')::numeric END + CASE WHEN (_jsonb->'b' ? '$f') THEN (_jsonb->'b'->>'$f')::numeric ELSE (_jsonb->'b')::numeric END) AS total"); 199 | assert_eq!(sql.groups[0], "_id"); 200 | } 201 | 202 | #[test] 203 | fn test_process_group_with_sum_of_subtract() { 204 | let doc = doc! { "_id": "$field", "total": { "$sum": { "$subtract": ["$a", "$b"] } } }; 205 | let sql = process_group(&doc).unwrap(); 206 | assert_eq!(sql.fields[0], "_jsonb->'field' AS _id"); 207 | assert_eq!(sql.fields[1], "SUM(CASE WHEN (_jsonb->'a' ? '$f') THEN (_jsonb->'a'->>'$f')::numeric ELSE (_jsonb->'a')::numeric END - CASE WHEN (_jsonb->'b' ? '$f') THEN (_jsonb->'b'->>'$f')::numeric ELSE (_jsonb->'b')::numeric END) AS total"); 208 | assert_eq!(sql.groups[0], "_id"); 209 | } 210 | 211 | #[test] 212 | fn test_process_group_with_sum_of_divide() { 213 | let doc = doc! { "_id": "$field", "total": { "$sum": { "$divide": ["$a", "$b"] } } }; 214 | let sql = process_group(&doc).unwrap(); 215 | assert_eq!(sql.fields[0], "_jsonb->'field' AS _id"); 216 | assert_eq!(sql.fields[1], "SUM(CASE WHEN (_jsonb->'a' ? '$f') THEN (_jsonb->'a'->>'$f')::numeric ELSE (_jsonb->'a')::numeric END / CASE WHEN (_jsonb->'b' ? '$f') THEN (_jsonb->'b'->>'$f')::numeric ELSE (_jsonb->'b')::numeric END) AS total"); 217 | assert_eq!(sql.groups[0], "_id"); 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /tests/find_test.rs: -------------------------------------------------------------------------------- 1 | use bson::{Bson, Document}; 2 | use mongodb::bson::doc; 3 | 4 | mod common; 5 | 6 | #[test] 7 | fn test_basic_find() { 8 | let ctx = common::setup(); 9 | 10 | ctx.col() 11 | .insert_many( 12 | vec![doc! { "x": 1 }, doc! { "x": 2, 'a': 1 }, doc! { "x": 3 }], 13 | None, 14 | ) 15 | .unwrap(); 16 | 17 | let mut cursor = ctx.col().find(doc! 
{ 'a': 1 }, None).unwrap(); 18 | let row1 = cursor.next().unwrap().unwrap(); 19 | assert_eq!(row1.get_i32("x").unwrap(), 2); 20 | assert!(cursor.next().is_none()); 21 | } 22 | 23 | #[test] 24 | fn test_find_oid() { 25 | let ctx = common::setup(); 26 | 27 | ctx.col() 28 | .insert_many( 29 | vec![doc! { "x": 1 }, doc! { "x": 2, 'a': 1 }, doc! { "x": 3 }], 30 | None, 31 | ) 32 | .unwrap(); 33 | let oid = ctx 34 | .col() 35 | .find_one(doc! {}, None) 36 | .unwrap() 37 | .unwrap() 38 | .get_object_id("_id") 39 | .unwrap(); 40 | let mut cursor = ctx.col().find(doc! { "_id": oid }, None).unwrap(); 41 | let row1 = cursor.next().unwrap().unwrap(); 42 | assert_eq!(row1.get_i32("x").unwrap(), 1); 43 | assert!(cursor.next().is_none()); 44 | } 45 | 46 | #[test] 47 | fn test_find_string() { 48 | let ctx = common::setup(); 49 | 50 | ctx.col() 51 | .insert_many( 52 | vec![ 53 | doc! { "x": 1, "name": "Felipe" }, 54 | doc! { "x": 2, "name": "James" }, 55 | ], 56 | None, 57 | ) 58 | .unwrap(); 59 | 60 | let mut cursor = ctx.col().find(doc! { "name": "James" }, None).unwrap(); 61 | let row1 = cursor.next().unwrap().unwrap(); 62 | assert_eq!(row1.get_i32("x").unwrap(), 2); 63 | assert!(cursor.next().is_none()); 64 | } 65 | 66 | #[test] 67 | fn test_find_with_or() { 68 | let ctx = common::setup(); 69 | 70 | ctx.col() 71 | .insert_many( 72 | vec![ 73 | doc! { "x": 1, "name": "Peter" }, 74 | doc! { "x": 2, "name": "James" }, 75 | doc! { "x": 3, "name": "Mary" }, 76 | ], 77 | None, 78 | ) 79 | .unwrap(); 80 | 81 | let cursor = ctx 82 | .col() 83 | .find( 84 | doc! { "$or": vec![ doc!{ "name": "Peter" }, doc! { "x": 3 }] }, 85 | None, 86 | ) 87 | .unwrap(); 88 | let rows: Vec> = cursor.collect(); 89 | assert_eq!(2, rows.len()); 90 | assert_eq!( 91 | vec!["Peter", "Mary"], 92 | rows.into_iter() 93 | .filter(|r| r.is_ok()) 94 | .map(|r| r.unwrap().get_str("name").unwrap().to_string()) 95 | .collect::>() 96 | ); 97 | } 98 | 99 | #[test] 100 | fn test_find_with_float() { 101 | let ctx = common::setup(); 102 | 103 | ctx.col() 104 | .insert_many( 105 | vec![ 106 | doc! { "x": 1.2, "name": "Peter" }, 107 | doc! { "x": 2.3, "name": "James" }, 108 | doc! { "x": 3, "name": "Mary" }, 109 | ], 110 | None, 111 | ) 112 | .unwrap(); 113 | 114 | let cursor = ctx 115 | .col() 116 | .find(doc! { "x": Bson::Double(2.3) }, None) 117 | .unwrap(); 118 | let rows: Vec> = cursor.collect(); 119 | assert_eq!(1, rows.len()); 120 | assert_eq!( 121 | "James", 122 | rows.into_iter() 123 | .next() 124 | .unwrap() 125 | .unwrap() 126 | .get_str("name") 127 | .unwrap() 128 | ); 129 | } 130 | 131 | #[test] 132 | fn test_find_with_gt_float() { 133 | let ctx = common::setup(); 134 | 135 | ctx.col() 136 | .insert_many( 137 | vec![ 138 | doc! { "x": 1.2, "name": "Peter" }, 139 | doc! { "x": 2.3, "name": "James" }, 140 | doc! { "x": 3, "name": "Mary" }, 141 | ], 142 | None, 143 | ) 144 | .unwrap(); 145 | 146 | let cursor = ctx.col().find(doc! { "x": { "$lte": 2.3 } }, None).unwrap(); 147 | let rows: Vec> = cursor.collect(); 148 | assert_eq!(2, rows.len()); 149 | assert_eq!( 150 | "Peter", 151 | rows.into_iter() 152 | .next() 153 | .unwrap() 154 | .unwrap() 155 | .get_str("name") 156 | .unwrap() 157 | ); 158 | } 159 | 160 | #[test] 161 | fn test_find_type_bracketing() { 162 | let ctx = common::setup(); 163 | 164 | ctx.col() 165 | .insert_many( 166 | vec![ 167 | doc! { "counter": 1 }, 168 | doc! { "counter": "Str" }, 169 | doc! { "counter": 3 }, 170 | ], 171 | None, 172 | ) 173 | .unwrap(); 174 | 175 | let cursor = ctx 176 | .col() 177 | .find(doc! 
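// Type bracketing: a numeric $gt comparison only considers values of the
// same BSON type class, so the string "Str" is excluded and only
// { counter: 3 } matches.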
{ "counter": { "$gt": 1 } }, None) 178 | .unwrap(); 179 | let rows: Vec> = cursor.collect(); 180 | assert_eq!(1, rows.len()); 181 | } 182 | 183 | #[test] 184 | fn test_find_with_exists() { 185 | let ctx = common::setup(); 186 | 187 | ctx.col() 188 | .insert_many( 189 | vec![ 190 | doc! { "counter": 1, "a": 1 }, 191 | doc! { "counter": "Str", "a": { "b": false } }, 192 | doc! { "counter": 3, "d": 0 }, 193 | ], 194 | None, 195 | ) 196 | .unwrap(); 197 | 198 | let res = ctx 199 | .col() 200 | .find(doc! { "a": { "$exists": true } }, None) 201 | .unwrap() 202 | .map(|r| r.unwrap()) 203 | .collect::>(); 204 | assert_eq!(2, res.len()); 205 | 206 | let res = ctx 207 | .col() 208 | .find(doc! { "a.b": { "$exists": true } }, None) 209 | .unwrap() 210 | .map(|r| r.unwrap()) 211 | .collect::>(); 212 | assert_eq!(1, res.len()); 213 | assert_eq!("Str", res[0].get_str("counter").unwrap()); 214 | 215 | let res = ctx 216 | .col() 217 | .find(doc! { "a.b": { "$exists": false } }, None) 218 | .unwrap() 219 | .map(|r| r.unwrap().get("counter").unwrap().to_owned()) 220 | .collect::>(); 221 | assert_eq!(2, res.len()); 222 | assert_eq!(res, [Bson::Int32(1), Bson::Int32(3)]); 223 | 224 | let res = ctx 225 | .col() 226 | .find(doc! { "a": { "b": { "$exists": false } } }, None) 227 | .unwrap() 228 | .map(|r| r.unwrap().get("counter").unwrap().to_owned()) 229 | .collect::>(); 230 | assert_eq!(2, res.len()); 231 | assert_eq!(res, [Bson::Int32(1), Bson::Int32(3)]); 232 | } 233 | 234 | #[test] 235 | fn find_with_in() { 236 | let ctx = common::setup(); 237 | 238 | ctx.col() 239 | .insert_many( 240 | vec![ 241 | doc! { "counter": 1, "a": 1 }, 242 | doc! { "counter": "Str", "a": { "b": false } }, 243 | doc! { "counter": 3, "a": 2 }, 244 | ], 245 | None, 246 | ) 247 | .unwrap(); 248 | 249 | let res = ctx 250 | .col() 251 | .find(doc! { "a": { "$in": [1, 2] } }, None) 252 | .unwrap() 253 | .map(|r| r.unwrap()) 254 | .collect::>(); 255 | assert_eq!(2, res.len()); 256 | 257 | let res = ctx 258 | .col() 259 | .find(doc! { "a": { "$nin": [1, 2] } }, None) 260 | .unwrap() 261 | .map(|r| r.unwrap()) 262 | .collect::>(); 263 | assert_eq!(1, res.len()); 264 | } 265 | 266 | #[test] 267 | fn test_with_nested() { 268 | let col = insert! { 269 | doc! { "a": { "b": { "c": 1 } } } 270 | }; 271 | 272 | assert_row_count!(col, doc! { "a.b.c": 1 }, 1); 273 | } 274 | 275 | #[test] 276 | fn test_with_multiple_fields() { 277 | let col = insert!( 278 | doc! { "counter": 1, "a": 1 }, 279 | doc! { "counter": "Str", "a": { "b": false } }, 280 | doc! { "counter": 3, "d": 0 }, 281 | ); 282 | 283 | let cursor = col.find(doc! { "counter": 1, "a": 1 }, None).unwrap(); 284 | let rows: Vec> = cursor.collect(); 285 | let rows: Result, mongodb::error::Error> = rows.into_iter().collect(); 286 | let rows: Vec = rows.unwrap(); 287 | assert_eq!(1, rows[0].get_i32("counter").unwrap()); 288 | assert_eq!(1, rows[0].get_i32("a").unwrap()); 289 | } 290 | 291 | #[test] 292 | fn test_with_array() { 293 | let col = insert!(doc! { "loginTokens": [{"when": "now", "token": "TOKEN_VALUE"}] }); 294 | let rows = common::get_rows( 295 | col.find(doc! { "loginTokens.token": "TOKEN_VALUE" }, None) 296 | .unwrap(), 297 | ); 298 | assert_eq!(1, rows.len()); 299 | } 300 | 301 | #[test] 302 | fn test_with_nested_arrays() { 303 | let col = insert!( 304 | doc! { "_id": 1, "loginTokens": {"when": "now", "tokens": {"name": "TOKEN_VALUE2"}} }, 305 | doc! { "_id": 2, "loginTokens": {"when": "now", "tokens": [{"name": "TOKEN_VALUE1"}, {"name": "TOKEN_VALUE2"}]} }, 306 | doc! 
{ "_id": 3, "loginTokens": [{"when": "now", "tokens": [{"name": "TOKEN_VALUE1"}, {"name": "TOKEN_VALUE2"}]}] }, 307 | doc! { "_id": 4, "loginTokens": [{"when": "now", "tokens": [{"name": ["TOKEN_VALUE1", "TOKEN_VALUE2"]}]}] }, 308 | doc! { "_id": 5, "loginTokens": [{"when": "now", "tokens": {"name": ["TOKEN_VALUE1", "TOKEN_VALUE2"]}}] }, 309 | doc! { "_id": 6, "a": { "loginTokens": [{"when": "now", "tokens": [{"name": "TOKEN_VALUE1"}, {"name": "TOKEN_VALUE2"}]}] } } 310 | ); 311 | let rows = common::get_rows( 312 | col.find(doc! { "loginTokens.tokens.name": "TOKEN_VALUE2" }, None) 313 | .unwrap(), 314 | ); 315 | let ids = rows 316 | .iter() 317 | .map(|r| r.get_i32("_id").unwrap()) 318 | .collect::>(); 319 | assert_eq!(5, rows.len()); 320 | assert_eq!(ids, [1, 2, 3, 4, 5]); 321 | } 322 | -------------------------------------------------------------------------------- /src/handler.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | use crate::commands::{ 3 | Aggregate, BuildInfo, CollStats, ConnectionStatus, Count, Create, CreateIndexes, DbStats, 4 | Delete, Drop, DropDatabase, Find, FindAndModify, GetCmdLineOpts, GetParameter, Handler, Hello, 5 | Insert, IsMaster, ListCollections, ListDatabases, ListIndexes, Ping, Update, WhatsMyUri, 6 | }; 7 | use crate::pg::PgDb; 8 | use crate::wire::{OpCode, OpMsg}; 9 | use bson::{doc, Bson, Document}; 10 | use postgres::NoTls; 11 | use r2d2_postgres::PostgresConnectionManager; 12 | use std::net::SocketAddr; 13 | 14 | pub struct Request<'a> { 15 | pool: &'a r2d2::Pool>, 16 | peer_addr: SocketAddr, 17 | op_code: &'a OpCode, 18 | } 19 | 20 | impl<'a> Request<'a> { 21 | pub fn new( 22 | pool: &'a r2d2::Pool>, 23 | peer_addr: SocketAddr, 24 | op_code: &'a OpCode, 25 | ) -> Self { 26 | Request { 27 | pool, 28 | peer_addr, 29 | op_code, 30 | } 31 | } 32 | 33 | pub fn peer_addr(&self) -> SocketAddr { 34 | self.peer_addr 35 | } 36 | 37 | pub fn get_op_code(&self) -> &OpCode { 38 | self.op_code 39 | } 40 | 41 | pub fn get_client(&self) -> PgDb { 42 | PgDb::new_from_pool(self.pool.clone()) 43 | } 44 | } 45 | 46 | #[derive(Debug, Clone)] 47 | pub struct Response<'a> { 48 | id: u32, 49 | op_code: &'a OpCode, 50 | docs: Vec, 51 | } 52 | 53 | impl<'a> Response<'a> { 54 | pub fn new(id: u32, op_code: &'a OpCode, docs: Vec) -> Self { 55 | Response { id, op_code, docs } 56 | } 57 | 58 | pub fn get_doc(&self) -> &Document { 59 | &self.docs[0] 60 | } 61 | 62 | pub fn get_id(&self) -> u32 { 63 | self.id 64 | } 65 | 66 | pub fn get_op_code(&self) -> &OpCode { 67 | self.op_code 68 | } 69 | } 70 | 71 | #[derive(Debug, Clone)] 72 | pub struct CommandExecutionError { 73 | pub message: String, 74 | } 75 | 76 | impl std::error::Error for CommandExecutionError {} 77 | 78 | impl std::fmt::Display for CommandExecutionError { 79 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 80 | write!(f, "{}", self.message) 81 | } 82 | } 83 | 84 | impl CommandExecutionError { 85 | pub fn new(message: String) -> Self { 86 | CommandExecutionError { message } 87 | } 88 | } 89 | 90 | pub fn handle( 91 | id: u32, 92 | pool: &r2d2::Pool>, 93 | peer_addr: SocketAddr, 94 | op_code: &OpCode, 95 | ) -> Result, CommandExecutionError> { 96 | let request = Request { 97 | pool, 98 | peer_addr, 99 | op_code: &op_code, 100 | }; 101 | match route(&request) { 102 | Ok(doc) => { 103 | log::trace!("Sending response: {:#?}", doc); 104 | let response = Response { 105 | id, 106 | op_code: &op_code, 107 | docs: vec![doc], 108 | }; 109 | 
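// Serialize the reply with the same opcode family as the request:
// OP_MSG requests get an OP_MSG reply, OP_QUERY requests get an OP_REPLY.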
Ok(op_code.reply(response).unwrap()) 110 | } 111 | Err(e) => Err(e), 112 | } 113 | } 114 | 115 | fn run(request: &Request, docs: &Vec) -> Result { 116 | let command = docs[0].keys().next().unwrap(); 117 | 118 | log::debug!("OP_MSG command: {}", command); 119 | log::trace!("Received document: {:#?}", docs); 120 | 121 | if command == "find" { 122 | Find::new().handle(request, docs) 123 | } else if command == "findAndModify" { 124 | FindAndModify::new().handle(request, docs) 125 | } else if command == "count" { 126 | Count::new().handle(request, docs) 127 | } else if command == "aggregate" { 128 | Aggregate::new().handle(request, docs) 129 | } else if command == "insert" { 130 | Insert::new().handle(request, docs) 131 | } else if command == "update" { 132 | Update::new().handle(request, docs) 133 | } else if command == "delete" { 134 | Delete::new().handle(request, docs) 135 | } else if command == "create" { 136 | Create::new().handle(request, docs) 137 | } else if command == "createIndexes" { 138 | CreateIndexes::new().handle(request, docs) 139 | } else if command == "drop" { 140 | Drop::new().handle(request, docs) 141 | } else if command == "dropDatabase" { 142 | DropDatabase::new().handle(request, docs) 143 | } else if command == "isMaster" || command == "ismaster" { 144 | IsMaster::new().handle(request, docs) 145 | } else if command == "buildInfo" || command == "buildinfo" { 146 | BuildInfo::new().handle(request, docs) 147 | } else if command == "whatsmyuri" { 148 | WhatsMyUri::new().handle(request, docs) 149 | } else if command == "dbStats" { 150 | DbStats::new().handle(request, docs) 151 | } else if command == "collStats" { 152 | CollStats::new().handle(request, docs) 153 | } else if command == "listDatabases" { 154 | ListDatabases::new().handle(request, docs) 155 | } else if command == "listCollections" { 156 | ListCollections::new().handle(request, docs) 157 | } else if command == "listIndexes" { 158 | ListIndexes::new().handle(request, docs) 159 | } else if command == "ping" { 160 | Ping::new().handle(request, docs) 161 | } else if command == "hello" { 162 | Hello::new().handle(request, docs) 163 | } else if command == "getCmdLineOpts" { 164 | GetCmdLineOpts::new().handle(request, docs) 165 | } else if command == "getParameter" { 166 | GetParameter::new().handle(request, docs) 167 | } else if command == "connectionStatus" { 168 | ConnectionStatus::new().handle(request, docs) 169 | } else { 170 | log::error!("Got unknown OP_MSG command: {}\n{:?}", command, docs); 171 | Ok(doc! { 172 | "ok": Bson::Double(0.0), 173 | "errmsg": Bson::String(format!("no such command: '{}'", command).to_string()), 174 | "code": Bson::Int32(59), 175 | "codeName": "CommandNotFound", 176 | }) 177 | } 178 | } 179 | 180 | fn run_op_query( 181 | request: &Request, 182 | docs: &Vec, 183 | ) -> Result { 184 | let empty = "".to_string(); 185 | let command = docs[0].keys().next().unwrap_or(&empty); 186 | 187 | log::debug!("OP_QUERY Command: {}", command); 188 | 189 | if command == "" || command == "isMaster" || command == "ismaster" { 190 | IsMaster::new().handle(request, docs) 191 | } else { 192 | log::error!("Got unknown OP_QUERY command: {}", command); 193 | Ok(doc! 
{ 194 | "ok": Bson::Double(0.0), 195 | "errmsg": Bson::String(format!("no such command: '{}'", command).to_string()), 196 | "code": Bson::Int32(59), 197 | "codeName": "CommandNotFound", 198 | }) 199 | } 200 | } 201 | 202 | fn handle_op_msg(request: &Request, msg: OpMsg) -> Result { 203 | if msg.sections.len() < 1 { 204 | log::error!("Received OP_MSG with no sections:\n{:#?}", msg); 205 | return Err(CommandExecutionError::new( 206 | "OP_MSG must have at least one section, received none".to_string(), 207 | )); 208 | } 209 | 210 | let section = msg.sections[0].clone(); 211 | if section.kind == 0 { 212 | let mut documents = section.documents.clone(); 213 | if msg.sections.len() > 1 { 214 | for section in msg.sections[1..].iter() { 215 | if let Some(identifier) = section.identifier.clone() { 216 | if identifier == "documents\0" { 217 | let new_doc = section.documents[0].clone(); 218 | documents[0].insert("documents", Bson::Array(vec![new_doc.into()])); 219 | } 220 | } 221 | } 222 | } 223 | return run(request, &documents); 224 | } 225 | 226 | if section.kind == 1 { 227 | if section.identifier.is_none() { 228 | log::error!( 229 | "Received a kind 1 section from OP_MSG with no identifier:\n{:#?}", 230 | msg 231 | ); 232 | return Err(CommandExecutionError::new( 233 | "all kind 1 sections on OP_MSG must have an identifier, received none".to_string(), 234 | )); 235 | } 236 | 237 | let mut identifier = section.identifier.unwrap(); 238 | identifier.pop(); 239 | 240 | if identifier == "documents" { 241 | if msg.sections.len() < 2 { 242 | log::error!( 243 | "Received a document kind 1 section with no matching kind 0:\n{:#?}", 244 | msg 245 | ); 246 | return Err(CommandExecutionError::new( 247 | "OP_MSG with a kind 1 documents section must also have at least one kind 0 section, received none".to_string(), 248 | )); 249 | } 250 | 251 | let mut doc = msg.sections[1].documents[0].clone(); 252 | doc.insert(identifier, section.documents.clone()); 253 | return run(request, &vec![doc]); 254 | } 255 | 256 | log::error!( 257 | "Received unknown kind 1 section identifier from OP_MSG:\n{:#?}", 258 | msg 259 | ); 260 | return Err(CommandExecutionError::new( 261 | format!( 262 | "received unknown kind 1 section identifier from OP_MSG: {}", 263 | identifier 264 | ) 265 | .to_string(), 266 | )); 267 | } 268 | 269 | log::error!( 270 | "Received unknown section from OP_MSG: {}\n{:#?}", 271 | section.kind, 272 | msg 273 | ); 274 | Err(CommandExecutionError::new( 275 | format!( 276 | "received unknown section kind from OP_MSG: {}", 277 | section.kind 278 | ) 279 | .to_string(), 280 | )) 281 | } 282 | 283 | fn route(request: &Request) -> Result { 284 | match request.op_code { 285 | OpCode::OpMsg(msg) => handle_op_msg(request, msg.clone()), 286 | OpCode::OpQuery(query) => run_op_query(request, &vec![query.query.clone()]), 287 | _ => { 288 | log::error!("Unroutable opcode received: {:?}", request.op_code); 289 | Err(CommandExecutionError::new(format!( 290 | "can't handle opcode: {:?}", 291 | request.op_code 292 | ))) 293 | } 294 | } 295 | } 296 | -------------------------------------------------------------------------------- /src/commands/aggregate/sql_statement.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | 3 | use crate::pg::SqlParam; 4 | 5 | #[derive(Debug, Clone)] 6 | pub enum FromTypes { 7 | Table(SqlParam), 8 | Subquery(Box, Option), 9 | } 10 | 11 | impl Display for FromTypes { 12 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 
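// Renders either a quoted table reference or a parenthesized subquery
// (optionally aliased), so either form can be dropped into a FROM clause.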
/src/commands/aggregate/sql_statement.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Display;
2 | 
3 | use crate::pg::SqlParam;
4 | 
5 | #[derive(Debug, Clone)]
6 | pub enum FromTypes {
7 |     Table(SqlParam),
8 |     Subquery(Box<SqlStatement>, Option<String>),
9 | }
10 | 
11 | impl Display for FromTypes {
12 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
13 |         match self {
14 |             FromTypes::Table(table) => write!(f, "{}", table),
15 |             FromTypes::Subquery(subquery, alias) => {
16 |                 if let Some(alias) = alias {
17 |                     write!(f, "({}) AS {}", subquery, alias)
18 |                 } else {
19 |                     write!(f, "({})", subquery)
20 |                 }
21 |             }
22 |         }
23 |     }
24 | }
25 | 
26 | #[derive(Default, Debug, Clone)]
27 | pub struct SqlStatement {
28 |     pub fields: Vec<String>,
29 |     pub groups: Vec<String>,
30 |     pub filters: Vec<String>,
31 |     pub from: Option<FromTypes>,
32 |     pub orders: Vec<String>,
33 |     pub limit: Option<i64>,
34 |     pub offset: Option<i64>,
35 | }
36 | 
37 | impl Display for SqlStatement {
38 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
39 |         write!(f, "{}", self.to_string())
40 |     }
41 | }
42 | 
43 | impl SqlStatement {
44 |     pub fn new() -> Self {
45 |         SqlStatement::default()
46 |     }
47 | 
48 |     pub fn builder() -> SqlStatementBuilder {
49 |         SqlStatementBuilder::default()
50 |     }
51 | 
52 |     pub fn append(&mut self, other: &mut SqlStatement) {
53 |         self.fields.append(&mut other.fields);
54 |         self.groups.append(&mut other.groups);
55 |         self.filters.append(&mut other.filters);
56 |         self.orders.append(&mut other.orders);
57 |     }
58 | 
59 |     pub fn add_field(&mut self, field: &str) {
60 |         self.fields.push(field.to_string());
61 |     }
62 | 
63 |     pub fn add_fields(&mut self, fields: Vec<String>) {
64 |         for field in fields {
65 |             self.add_field(&field);
66 |         }
67 |     }
68 | 
69 |     pub fn add_filter(&mut self, filter: &str) {
70 |         self.filters.push(filter.to_string());
71 |     }
72 | 
73 |     pub fn add_order(&mut self, order: &str, asc: bool) {
74 |         let order = format!("{} {}", order, if asc { "ASC" } else { "DESC" });
75 |         self.orders.push(order.to_string());
76 |     }
77 | 
78 |     pub fn fields_as_str(&self) -> String {
79 |         if self.fields.is_empty() {
80 |             return "*".to_string();
81 |         }
82 |         self.fields.join(", ")
83 |     }
84 | 
85 |     pub fn groups_as_str(&self) -> String {
86 |         if self.groups.is_empty() {
87 |             return "".to_string();
88 |         }
89 |         format!(" GROUP BY {}", self.groups.join(", "))
90 |     }
91 | 
92 |     pub fn order_as_str(&self) -> String {
93 |         if self.orders.is_empty() {
94 |             return "".to_string();
95 |         }
96 |         format!(" ORDER BY {}", self.orders.join(", "))
97 |     }
98 | 
99 |     pub fn offset_as_str(&self) -> String {
100 |         if self.offset.is_none() {
101 |             return "".to_string();
102 |         }
103 |         format!(" OFFSET {}", self.offset.unwrap())
104 |     }
105 | 
106 |     pub fn limit_as_str(&self) -> String {
107 |         if self.limit.is_none() {
108 |             return "".to_string();
109 |         }
110 |         format!(" LIMIT {}", self.limit.unwrap())
111 |     }
112 | 
113 |     pub fn set_table(&mut self, table: SqlParam) {
114 |         self.from = Some(FromTypes::Table(table));
115 |     }
116 | 
117 |     pub fn to_string(&self) -> String {
118 |         let from = match &self.from {
119 |             Some(from) => format!("FROM {}", from),
120 |             // None => todo!("table missing"),
121 |             None => "".to_string(),
122 |         };
123 | 
124 |         let where_str = if self.filters.len() > 0 {
125 |             format!(" WHERE {}", self.filters.join(" AND "))
126 |         } else {
127 |             "".to_string()
128 |         };
129 | 
130 |         format!(
131 |             "SELECT {} {}{}{}{}{}{}",
132 |             self.fields_as_str(),
133 |             from,
134 |             where_str,
135 |             self.groups_as_str(),
136 |             self.order_as_str(),
137 |             self.limit_as_str(),
138 |             self.offset_as_str(),
139 |         )
140 |     }
141 | 
142 |     pub fn add_subquery(&mut self, subquery: &mut SqlStatement) {
143 |         self.from = Some(FromTypes::Subquery(Box::new(subquery.clone()), None));
144 |     }
145 | 
146 |     pub fn add_subquery_with_alias(&mut self, subquery: &mut SqlStatement, alias: &str) {
147 |         self.from = Some(FromTypes::Subquery(
148 |             Box::new(subquery.clone()),
149 |             Some(alias.to_string()),
150 |         ));
151 |     }
152 | }
153 | 
154 | #[derive(Default, Debug, Clone)]
155 | pub struct SqlStatementBuilder {
156 |     fields: Vec<String>,
157 |     groups: Vec<String>,
158 |     filters: Vec<String>,
159 |     from: Option<FromTypes>,
160 |     orders: Vec<String>,
161 |     limit: Option<i64>,
162 |     offset: Option<i64>,
163 | }
164 | 
165 | impl SqlStatementBuilder {
166 |     pub fn new() -> Self {
167 |         SqlStatementBuilder::default()
168 |     }
169 | 
170 |     pub fn field(mut self, field: &str) -> Self {
171 |         self.fields.push(field.to_string());
172 |         self
173 |     }
174 | 
175 |     pub fn group(mut self, group: &str) -> Self {
176 |         self.groups.push(group.to_string());
177 |         self
178 |     }
179 | 
180 |     pub fn from(mut self, from: FromTypes) -> Self {
181 |         self.from = Some(from);
182 |         self
183 |     }
184 | 
185 |     pub fn limit(mut self, limit: i64) -> Self {
186 |         self.limit = Some(limit);
187 |         self
188 |     }
189 | 
190 |     pub fn offset(mut self, offset: i64) -> Self {
191 |         self.offset = Some(offset);
192 |         self
193 |     }
194 | 
195 |     pub fn order(mut self, order: &str, asc: bool) -> Self {
196 |         let order = format!("{} {}", order, if asc { "ASC" } else { "DESC" });
197 |         self.orders.push(order.to_string());
198 |         self
199 |     }
200 | 
201 |     pub fn from_table(mut self, table: SqlParam) -> Self {
202 |         self.from = Some(FromTypes::Table(table));
203 |         self
204 |     }
205 | 
206 |     pub fn from_subquery(mut self, subquery: SqlStatement) -> Self {
207 |         self.from = Some(FromTypes::Subquery(Box::new(subquery), None));
208 |         self
209 |     }
210 | 
211 |     pub fn from_subquery_with_alias(mut self, subquery: SqlStatement, alias: &str) -> Self {
212 |         self.from = Some(FromTypes::Subquery(
213 |             Box::new(subquery),
214 |             Some(alias.to_string()),
215 |         ));
216 |         self
217 |     }
218 | 
219 |     pub fn filter(mut self, filter: &str) -> Self {
220 |         self.filters.push(filter.to_string());
221 |         self
222 |     }
223 | 
224 |     pub fn build(self) -> SqlStatement {
225 |         SqlStatement {
226 |             fields: self.fields,
227 |             groups: self.groups,
228 |             filters: self.filters,
229 |             from: self.from,
230 |             orders: self.orders,
231 |             limit: self.limit,
232 |             offset: self.offset,
233 |         }
234 |     }
235 | }
236 | 
237 | #[cfg(test)]
238 | mod tests {
239 |     use crate::pg::SqlParam;
240 | 
241 |     use super::*;
242 | 
243 |     #[test]
244 |     fn test_from_table() {
245 |         let sql = SqlStatement::builder()
246 |             .field("_jsonb")
247 |             .filter("_jsonb->'count' = 1")
248 |             .from(FromTypes::Table(SqlParam::new("schema", "table")))
249 |             .build();
250 |         assert_eq!(
251 |             sql.to_string(),
252 |             r#"SELECT _jsonb FROM "schema"."table" WHERE _jsonb->'count' = 1"#
253 |         );
254 |     }
255 | 
256 |     #[test]
257 |     fn test_from_subquery() {
258 |         let subquery = SqlStatement::builder()
259 |             .field("b")
260 |             .from_table(SqlParam::new("schema", "table"))
261 |             .build();
262 |         let sql = SqlStatement::builder()
263 |             .field("alias.b")
264 |             .from_subquery(subquery)
265 |             .build();
266 |         assert_eq!(
267 |             sql.to_string(),
268 |             r#"SELECT alias.b FROM (SELECT b FROM "schema"."table")"#
269 |         );
270 |     }
271 | 
272 |     #[test]
273 |     fn test_from_subquery_with_alias() {
274 |         let subquery = SqlStatement::builder()
275 |             .field("b")
276 |             .from_table(SqlParam::new("schema", "table"))
277 |             .build();
278 |         let sql = SqlStatement::builder()
279 |             .field("alias.b")
280 |             .from_subquery_with_alias(subquery, "alias")
281 |             .build();
282 |         assert_eq!(
283 |             sql.to_string(),
284 |             r#"SELECT alias.b FROM (SELECT b FROM "schema"."table") AS alias"#
285 |         );
286 |     }
287 | 
288 |     #[test]
289 |     fn test_from_nested_subquery() {
290 |         let subquery1 = SqlStatement::builder()
291 |             .field("c")
292 |             .from_table(SqlParam::new("schema", "table"))
293 |             .build();
294 |         let subquery2 = SqlStatement::builder()
295 |             .field("b")
296 |             .from_subquery(subquery1)
297 |             .build();
298 |         let sql = SqlStatement::builder()
299 |             .field("alias.b")
300 |             .from_subquery(subquery2)
301 |             .build();
302 |         assert_eq!(
303 |             sql.to_string(),
304 |             r#"SELECT alias.b FROM (SELECT b FROM (SELECT c FROM "schema"."table"))"#
305 |         );
306 |     }
307 | 
308 |     #[test]
309 |     fn test_groups() {
310 |         let sql = SqlStatement::builder()
311 |             .field("state")
312 |             .field("sum(1) AS count")
313 |             .group("state")
314 |             .from(FromTypes::Table(SqlParam::new("schema", "table")))
315 |             .build();
316 |         assert_eq!(
317 |             sql.to_string(),
318 |             r#"SELECT state, sum(1) AS count FROM "schema"."table" GROUP BY state"#
319 |         );
320 |     }
321 | 
322 |     #[test]
323 |     fn test_limit_and_offset() {
324 |         let sql = SqlStatement::builder()
325 |             .from(FromTypes::Table(SqlParam::new("schema", "table")))
326 |             .limit(2)
327 |             .offset(10)
328 |             .build();
329 |         assert_eq!(
330 |             sql.to_string(),
331 |             r#"SELECT * FROM "schema"."table" LIMIT 2 OFFSET 10"#
332 |         );
333 |     }
334 | 
335 |     #[test]
336 |     fn test_offset() {
337 |         let sql = SqlStatement::builder()
338 |             .from(FromTypes::Table(SqlParam::new("schema", "table")))
339 |             .offset(10)
340 |             .build();
341 |         assert_eq!(
342 |             sql.to_string(),
343 |             r#"SELECT * FROM "schema"."table" OFFSET 10"#
344 |         );
345 |     }
346 | }
347 | 
--------------------------------------------------------------------------------
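One subtlety worth calling out in sql_statement.rs above: `SqlStatement` both implements `Display` and defines an inherent `to_string` method. Because inherent methods win over trait methods during Rust's method resolution, the `self.to_string()` inside `fmt` calls the inherent renderer rather than the blanket `ToString` impl, so it does not recurse back into `fmt` (clippy flags this pattern as `inherent_to_string_shadow_display` precisely because it is easy to misread). A minimal, self-contained illustration of that resolution order, independent of this crate:

use std::fmt::{self, Display};

struct Stmt;

impl Stmt {
    // Inherent method: takes precedence over ToString::to_string.
    fn to_string(&self) -> String {
        "SELECT 1".to_string()
    }
}

impl Display for Stmt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Calls the inherent to_string above, NOT the blanket ToString impl,
        // so this does not recurse.
        write!(f, "{}", self.to_string())
    }
}

fn main() {
    let s = Stmt;
    assert_eq!(format!("{}", s), "SELECT 1"); // via Display -> inherent to_string
    assert_eq!(s.to_string(), "SELECT 1");    // inherent method resolved directly
}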
/src/commands/aggregate/mod.rs:
--------------------------------------------------------------------------------
1 | #![allow(dead_code)]
2 | use crate::commands::Handler;
3 | use crate::handler::{CommandExecutionError, Request};
4 | use crate::pg::SqlParam;
5 | use crate::utils::{field_to_jsonb, pg_rows_to_bson};
6 | use bson::{doc, Bson, Document};
7 | use group_stage::process_group;
8 | use match_stage::process_match;
9 | use project_stage::process_project;
10 | use sql_statement::SqlStatement;
11 | 
12 | use self::count_stage::process_count;
13 | 
14 | mod count_stage;
15 | mod group_id;
16 | mod group_stage;
17 | mod match_stage;
18 | mod project_stage;
19 | mod sql_statement;
20 | 
21 | pub struct Aggregate {}
22 | 
23 | impl Handler for Aggregate {
24 |     fn new() -> Self {
25 |         Aggregate {}
26 |     }
27 | 
28 |     fn handle(
29 |         &self,
30 |         request: &Request,
31 |         docs: &Vec<Document>,
32 |     ) -> Result<Document, CommandExecutionError> {
33 |         let doc = &docs[0];
34 |         let db = doc.get_str("$db").unwrap();
35 |         let collection = doc.get_str("aggregate").unwrap();
36 |         let pipeline = doc.get_array("pipeline").unwrap();
37 |         let sp = SqlParam::new(db, collection);
38 | 
39 |         let mut client = request.get_client();
40 | 
41 |         let sql = build_sql(&sp, pipeline);
42 |         match sql {
43 |             Ok(sql) => {
44 |                 log::debug!("SQL: {}", sql);
45 | 
46 |                 match client.raw_query(&sql, &[]) {
47 |                     Ok(rows) => {
48 |                         let res_doc = doc![
49 |                             "cursor": doc! {
50 |                                 "firstBatch": pg_rows_to_bson(rows),
51 |                                 "id": Bson::Int64(0),
52 |                                 "ns": format!("{}.{}", db, collection),
53 |                             },
54 |                             "ok": Bson::Double(1.0),
55 |                         ];
56 | 
57 |                         return Ok(res_doc);
58 |                     }
59 |                     Err(e) => Err(CommandExecutionError::new(e.to_string())),
60 |                 }
61 |             }
62 |             Err(e) => Err(e),
63 |         }
64 |     }
65 | }
66 | 
67 | pub fn build_sql(sp: &SqlParam, pipeline: &Vec<Bson>) -> Result<String, CommandExecutionError> {
68 |     let mut stages: Vec<(String, SqlStatement)> = vec![];
69 |     for stage in pipeline {
70 |         let stage_doc = stage.as_document().unwrap();
71 |         let name = stage_doc.keys().next().unwrap();
72 |         match name.as_str() {
73 |             "$match" => {
74 |                 // adds the result of the match
75 |                 match process_match(stage_doc.get_document("$match").unwrap()) {
76 |                     Ok(sql) => stages.push((name.to_string(), sql)),
77 |                     Err(err) => return Err(CommandExecutionError::new(err.to_string())),
78 |                 }
79 |             }
80 |             "$group" => {
81 |                 // adds the group stage
82 |                 match process_group(stage_doc.get_document("$group").unwrap()) {
83 |                     Ok(sql) => stages.push((name.to_string(), sql)),
84 |                     Err(err) => return Err(CommandExecutionError::new(err.to_string())),
85 |                 }
86 | 
87 |                 // and wraps it into a jsonb object
88 |                 let wrap_sql = SqlStatement::builder()
89 |                     .field("row_to_json(s_wrap)::jsonb AS _jsonb")
90 |                     .build();
91 |                 stages.push(("$wrap".to_string(), wrap_sql));
92 |             }
93 |             "$sort" => {
94 |                 // if there are no stages, add one
95 |                 if stages.len() < 1 {
96 |                     stages.push((name.to_string(), SqlStatement::new()));
97 |                 }
98 | 
99 |                 // adds ORDER BY to the last stage so far
100 |                 if let Some(last_stage) = stages.last_mut() {
101 |                     for (field, value) in stage_doc.get_document("$sort").unwrap() {
102 |                         let field = if last_stage.0 == "$wrap" {
103 |                             format!("row_to_json(s_wrap)::jsonb->'{}'", field)
104 |                         } else {
105 |                             field_to_jsonb(field)
106 |                         };
107 |                         let asc = match value {
108 |                             Bson::Int32(i) => *i > 0,
109 |                             Bson::Int64(i) => *i > 0,
110 |                             t => unimplemented!("Missing $sort handling for {:?}", t),
111 |                         };
112 |                         last_stage.1.add_order(&field, asc);
113 |                     }
114 |                 }
115 |             }
116 |             "$project" => match process_project(stage_doc.get_document("$project").unwrap()) {
117 |                 Ok(sql) => {
118 |                     stages.push((name.to_string(), sql));
119 |                 }
120 |                 Err(e) => {
121 |                     return Err(CommandExecutionError::new(e.message));
122 |                 }
123 |             },
124 |             "$count" => match process_count(stage_doc.get_str("$count").unwrap()) {
125 |                 Ok(sql) => {
126 |                     stages.push((name.to_string(), sql));
127 |                 }
128 |                 Err(e) => {
129 |                     return Err(CommandExecutionError::new(e.to_string()));
130 |                 }
131 |             },
132 |             "$skip" => {
133 |                 // if there are no stages, add one
134 |                 if stages.len() < 1 {
135 |                     stages.push((name.to_string(), SqlStatement::new()));
136 |                 }
137 | 
138 |                 // adds offset to the last stage so far
139 |                 if let Some(last_stage) = stages.last_mut() {
140 |                     // FIXME: the documentation states i64 but we're using i32 here
141 |                     // https://www.mongodb.com/docs/manual/reference/operator/aggregation/skip/
142 |                     last_stage.1.offset =
143 |                         Some(stage_doc.get_i32("$skip").unwrap().try_into().unwrap());
144 |                 }
145 |             }
146 |             "$limit" => {
147 |                 // if there are no stages, add one
148 |                 if stages.len() < 1 {
149 |                     stages.push((name.to_string(), SqlStatement::new()));
150 |                 }
151 | 
152 |                 // adds limit to the last stage so far
153 |                 if let Some(last_stage) = stages.last_mut() {
154 |                     // FIXME: the documentation states i64 but we're using i32 here
155 |                     // https://www.mongodb.com/docs/manual/reference/operator/aggregation/limit/
156 |                     last_stage.1.limit =
157 |                         Some(stage_doc.get_i32("$limit").unwrap().try_into().unwrap());
158 |                 }
159 |             }
160 |             _ => {
161 |                 return Err(CommandExecutionError::new(format!(
162 |                     "Unrecognized pipeline stage name: '{}'",
163 |                     name
164 |                 )))
165 |             }
166 |         };
167 |     }
168 | 
169 |     let mut sql: Option<SqlStatement> = None;
170 |     for (name, mut stage_sql) in stages {
171 |         if stage_sql.from.is_none() {
172 |             if let Some(mut sql) = sql {
173 |                 let alias = format!("s_{}", name.strip_prefix("$").unwrap());
174 |                 stage_sql.add_subquery_with_alias(&mut sql, &alias);
175 |             } else {
176 |                 stage_sql.set_table(sp.clone());
177 |             }
178 |         }
179 |         sql = Some(stage_sql);
180 |     }
181 | 
182 |     Ok(sql.unwrap().to_string())
183 | }
184 | 
185 | #[cfg(test)]
186 | mod tests {
187 |     use super::*;
188 | 
189 |     #[test]
190 |     fn test_build_sql() {
191 |         let doc = doc! {
192 |             "pipeline": [
193 |                 doc! {
194 |                     "$match": doc! {
195 |                         "name": "Alice"
196 |                     }
197 |                 },
198 |                 doc! {
199 |                     "$group": doc! {
200 |                         "_id": "$name",
201 |                         "count": doc! {
202 |                             "$sum": 1
203 |                         }
204 |                     }
205 |                 }
206 |             ]
207 |         };
208 | 
209 |         let sp = SqlParam::new("schema", "table");
210 |         let sql = build_sql(&sp, doc.get_array("pipeline").unwrap()).unwrap();
211 |         assert_eq!(
212 |             sql,
213 |             r#"SELECT row_to_json(s_wrap)::jsonb AS _jsonb FROM (SELECT _jsonb->'name' AS _id, SUM(1) AS count FROM (SELECT * FROM "schema"."table" WHERE _jsonb->'name' = '"Alice"') AS s_group GROUP BY _id) AS s_wrap"#
214 |         );
215 |     }
216 | 
217 |     #[test]
218 |     fn test_build_sql_with_date() {
219 |         let doc = doc! {
220 |             "pipeline": [
221 |                 doc! {
222 |                     "$group": {
223 |                         "_id": {
224 |                             "$dateToString": {
225 |                                 "format": "%Y",
226 |                                 "date": "$date"
227 |                             }
228 |                         },
229 |                         "count": {
230 |                             "$sum": 1
231 |                         }
232 |                     }
233 |                 }
234 |             ]
235 |         };
236 | 
237 |         let sp = SqlParam::new("schema", "table");
238 |         let sql = build_sql(&sp, doc.get_array("pipeline").unwrap()).unwrap();
239 |         assert_eq!(
240 |             sql,
241 |             r#"SELECT row_to_json(s_wrap)::jsonb AS _jsonb FROM (SELECT TO_CHAR(TO_TIMESTAMP((_jsonb->'date'->>'$d')::numeric / 1000), 'YYYY-MM-DD') AS _id, SUM(1) AS count FROM "schema"."table" GROUP BY _id) AS s_wrap"#
242 |         );
243 |     }
244 | 
245 |     #[test]
246 |     fn test_build_sql_with_multiply() {
247 |         let doc = doc! {
248 |             "pipeline": [
249 |                 doc! {
250 |                     "$group": {
251 |                         "_id": "$item",
252 |                         "total_sum": {
253 |                             "$sum": {
254 |                                 "$multiply": [
255 |                                     "$quantity",
256 |                                     "$price"
257 |                                 ]
258 |                             }
259 |                         }
260 |                     }
261 |                 }
262 |             ]
263 |         };
264 | 
265 |         let sp = SqlParam::new("schema", "table");
266 |         let sql = build_sql(&sp, doc.get_array("pipeline").unwrap()).unwrap();
267 |         assert_eq!(
268 |             sql,
269 |             r#"SELECT row_to_json(s_wrap)::jsonb AS _jsonb FROM (SELECT _jsonb->'item' AS _id, SUM(CASE WHEN (_jsonb->'quantity' ? '$f') THEN (_jsonb->'quantity'->>'$f')::numeric ELSE (_jsonb->'quantity')::numeric END * CASE WHEN (_jsonb->'price' ? '$f') THEN (_jsonb->'price'->>'$f')::numeric ELSE (_jsonb->'price')::numeric END) AS total_sum FROM "schema"."table" GROUP BY _id) AS s_wrap"#
270 |         );
271 |     }
272 | }
273 | 
--------------------------------------------------------------------------------
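The two FIXME comments in build_sql note that $skip and $limit are specified as 64-bit values while the code reads them with get_i32, so a value a driver encodes as Int64 would currently panic on the unwrap. A tolerant accessor could accept any numeric BSON encoding; the helper below is a hypothetical sketch of that idea, not part of the crate:

use bson::{doc, Bson, Document};

// Hypothetical helper: accept any numeric BSON type for $skip/$limit,
// since drivers may encode the value as Int32, Int64 or a whole Double.
fn get_stage_i64(stage_doc: &Document, key: &str) -> Option<i64> {
    match stage_doc.get(key) {
        Some(Bson::Int32(n)) => Some(*n as i64),
        Some(Bson::Int64(n)) => Some(*n),
        Some(Bson::Double(n)) if n.fract() == 0.0 => Some(*n as i64),
        _ => None,
    }
}

fn main() {
    // An Int64 $skip would panic through get_i32, but is handled here.
    let stage = doc! { "$skip": 10_i64 };
    assert_eq!(get_stage_i64(&stage, "$skip"), Some(10));
}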