├── .DS_Store ├── .env.example ├── .gitignore ├── .nvmrc ├── datasets-examples ├── 1m-generated-example.json ├── 50m+-steam-reviews-dataset.json └── 7m-yelp-reviews-example.json ├── index.js ├── insert ├── 1m │ └── index.js ├── 50m+ │ ├── 200k-5x20k-6processes │ │ └── index.js │ └── read-size-of-dataset │ │ └── index.js └── 7m │ ├── 7m-m30-per100k.js │ └── 7m-m30-per200k-5X20k.js ├── package.json ├── readme.md ├── update ├── 10m │ ├── parralel │ │ └── update-100k-parralel.js │ └── sequential │ │ └── update.js ├── 1mil │ ├── bulk-write.js │ └── update.js ├── 50m+ │ ├── cursor │ │ └── index.js │ └── objectid │ │ ├── run.bash │ │ └── update.js ├── readme.md ├── reviews_by_count.json └── script.js └── yarn.lock /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/harazdovskiy/mongo-performance/b4e48b6a850117c0723e4fa22e64d1419135b94f/.DS_Store -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | MONGO_CLUSTER_SHARED="mongodb+srv://:@cluster.xxxxxx.mongodb.net/?retryWrites=true&w=majority" 2 | MONGO_CLUSTER_M30="mongodb+srv://:@cluster.xxxxxx.mongodb.net/?retryWrites=true&w=majority" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .env 3 | node_modules 4 | package-lock.json 5 | results 6 | **/*.log 7 | **/*.png 8 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | v18.2.0 2 | -------------------------------------------------------------------------------- /datasets-examples/1m-generated-example.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "Dominic King", 4 | "text": "elit sed consequat auctor, nunc nulla vulputate dui, nec tempus", 5 | "numberrange": 3, 6 | "alphanumeric": "FAK07RDY6GL", 7 | "list": 3, 8 | "country": "Germany", 9 | "region": "Southwestern Tagalog Region", 10 | "postalZip": "88127", 11 | "email": "adipiscing@hotmail.couk" 12 | }, 13 | { 14 | "name": "Geoffrey Meyers", 15 | "text": "mi lorem, vehicula et, rutrum eu, ultrices sit amet, risus.", 16 | "numberrange": 5, 17 | "alphanumeric": "DAS80XJR3HC", 18 | "list": 3, 19 | "country": "Spain", 20 | "region": "Ilocos Region", 21 | "postalZip": "934568", 22 | "email": "ante.blandit@yahoo.ca" 23 | }, 24 | { 25 | "name": "Jacob Payne", 26 | "text": "Duis at lacus. 
Quisque purus sapien, gravida non, sollicitudin a,", 27 | "numberrange": 9, 28 | "alphanumeric": "OXC56RHV1MO", 29 | "list": 1, 30 | "country": "South Africa", 31 | "region": "Alsace", 32 | "postalZip": "31751", 33 | "email": "morbi@yahoo.ca" 34 | } 35 | ] -------------------------------------------------------------------------------- /datasets-examples/50m+-steam-reviews-dataset.json: -------------------------------------------------------------------------------- 1 | { 2 | "success": 1, 3 | "query_summary": { 4 | "num_reviews": 100 5 | }, 6 | "reviews": [ 7 | { 8 | "recommendationid": "96150189", 9 | "author": { 10 | "steamid": "76561198838108725", 11 | "num_games_owned": 62, 12 | "num_reviews": 16, 13 | "playtime_forever": 364, 14 | "playtime_last_two_weeks": 0, 15 | "playtime_at_review": 68, 16 | "last_played": 1627589277 17 | }, 18 | "language": "spanish", 19 | "review": "oui shephard mon amour", 20 | "timestamp_created": 1626898525, 21 | "timestamp_updated": 1633023708, 22 | "voted_up": true, 23 | "votes_up": 1, 24 | "votes_funny": 0, 25 | "weighted_vote_score": "0.497925341129302979", 26 | "comment_count": 0, 27 | "steam_purchase": true, 28 | "received_for_free": false, 29 | "written_during_early_access": false 30 | } 31 | ], 32 | "cursor": "AoJ4oqvF0PoCcu397AI=" 33 | } -------------------------------------------------------------------------------- /datasets-examples/7m-yelp-reviews-example.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "review_id": "KU_O5udG6zpxOg-VcAEodg", 4 | "user_id": "mh_-eMZ6K5RLWhZyISBhwA", 5 | "business_id": "XQfwVwDr-v0ZS3_CbbE5Xw", 6 | "stars": 3.0, 7 | "useful": 0, 8 | "funny": 0, 9 | "cool": 0, 10 | "text": "If you decide to eat here, just be aware it is going to take about 2 hours from beginning to end. We have tried it multiple times, because I want to like it! I have been to it's other locations in NJ and never had a bad experience. \n\nThe food is good, but it takes a very long time to come out. The waitstaff is very young, but usually pleasant. We have just had too many experiences where we spent way too long waiting. We usually opt for another diner or restaurant on the weekends, in order to be done quicker.", 11 | "date": "2018-07-07 22:09:11" 12 | }, 13 | { 14 | "review_id": "BiTunyQ73aT9WBnpR9DZGw", 15 | "user_id": "OyoGAe7OKpv6SyGZT5g77Q", 16 | "business_id": "7ATYjTIgM3jUlt4UM3IypQ", 17 | "stars": 5.0, 18 | "useful": 1, 19 | "funny": 0, 20 | "cool": 1, 21 | "text": "I've taken a lot of spin classes over the years, and nothing compares to the classes at Body Cycle. From the nice, clean space and amazing bikes, to the welcoming and motivating instructors, every class is a top notch work out.\n\nFor anyone who struggles to fit workouts in, the online scheduling system makes it easy to plan ahead (and there's no need to line up way in advanced like many gyms make you do).\n\nThere is no way I can write this review without giving Russell, the owner of Body Cycle, a shout out. Russell's passion for fitness and cycling is so evident, as is his desire for all of his clients to succeed. He is always dropping in to classes to check in\/provide encouragement, and is open to ideas and recommendations from anyone. 
Russell always wears a smile on his face, even when he's kicking your butt in class!", 22 | "date": "2012-01-03 15:28:18" 23 | }, 24 | { 25 | "review_id": "saUsX_uimxRlCVr67Z4Jig", 26 | "user_id": "8g_iMtfSiwikVnbP2etR0A", 27 | "business_id": "YjUWPpI6HXG530lwP-fb2A", 28 | "stars": 3.0, 29 | "useful": 0, 30 | "funny": 0, 31 | "cool": 0, 32 | "text": "Family diner. Had the buffet. Eclectic assortment: a large chicken leg, fried jalapeño, tamale, two rolled grape leaves, fresh melon. All good. Lots of Mexican choices there. Also has a menu with breakfast served all day long. Friendly, attentive staff. Good place for a casual relaxed meal with no expectations. Next to the Clarion Hotel.", 33 | "date": "2014-02-05 20:30:30" 34 | } 35 | ] -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/harazdovskiy/mongo-performance/b4e48b6a850117c0723e4fa22e64d1419135b94f/index.js -------------------------------------------------------------------------------- /insert/1m/index.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | const {promises: fs} = require('fs'); 4 | const path = require('path'); 5 | const {MongoClient, ServerApiVersion} = require('mongodb'); 6 | 7 | const DB_NAME = 'performance-1m'; 8 | const COLLECTION_NAME = '1m-collection'; 9 | 10 | // const PATH_1MIL = path.resolve('../dataset/1m-generated.json'); 11 | const PATH_1MIL = path.resolve('../dataset/7m-yelp-reviews.json'); 12 | 13 | (async () => { 14 | try { 15 | console.time('Reading json') 16 | const records = JSON.parse(await fs.readFile(PATH_1MIL)); 17 | console.timeEnd('Reading json') 18 | 19 | // const col = await getCollection(process.env.MONGO_CLUSTER_SHARED); 20 | const col = await getCollection(process.env.MONGO_CLUSTER_M30); 21 | 22 | console.log('Started insertion process successfully to server'); 23 | console.time('Inserting records') 24 | await col.insertMany(records); 25 | console.timeEnd('Inserting records') 26 | process.exit(); 27 | } catch (e) { 28 | console.error(e); 29 | } 30 | })() 31 | 32 | async function getCollection(url) { 33 | const client = new MongoClient(url, { 34 | useNewUrlParser: true, 35 | useUnifiedTopology: true, 36 | serverApi: ServerApiVersion.v1 37 | }); 38 | await client.connect(); 39 | 40 | console.log('Connected successfully to server'); 41 | const db = client.db(DB_NAME); 42 | return db.collection(COLLECTION_NAME) 43 | } -------------------------------------------------------------------------------- /insert/50m+/200k-5x20k-6processes/index.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | 4 | const fs = require('fs'); 5 | const {from = 1, to = 20} = require('minimist')(process.argv.slice(2)); 6 | const radash = require('radash'); 7 | const {MongoClient, ServerApiVersion} = require("mongodb"); 8 | const lodash = require("lodash"); 9 | const objectSize = require("object-sizeof"); 10 | 11 | const COLLECTION_NAME = '63mil-collection'; 12 | const DB_NAME = 'performance63m'; 13 | 14 | const BYTE_IN_MB = 0.00000095367432; 15 | const BASE_PATH = '../dataset/reviews'; 16 | const READING_CHUNK = 200_000; 17 | const PARALLEL_INSERTIONS = 5; 18 | const PARALLEL_FILES_READ = 5; 19 | const PARALLEL_EXECUTION_CHUNK = 20_000; 20 | const LOG_PATH_EVERY = 1_000_000; 21 | 
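// How this script works (summary of the constants above): the top-level review
// directories selected by the --from/--to CLI args are read PARALLEL_FILES_READ (5)
// files at a time; once roughly READING_CHUNK (200k) reviews are buffered, they are
// split into PARALLEL_EXECUTION_CHUNK (20k) document chunks and written with
// PARALLEL_INSERTIONS (5) concurrent insertMany() calls. Several such processes can
// run side by side over different directory ranges (hence "6processes" in the folder name).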
22 | (async () => { 23 | const col = await getCollection() 24 | let started = Date.now() 25 | const dirsPaths = await fs.promises.readdir(BASE_PATH, {}); 26 | 27 | const workingPaths = dirsPaths 28 | .slice(+from, +to) 29 | .filter(path => !path.startsWith('.')) 30 | 31 | let doneReadingReviews = 0; 32 | let reviews = []; 33 | const workingPathsLength = workingPaths.length; 34 | console.log('\n All paths: ', workingPathsLength) 35 | let donePaths = 0; 36 | let moreThan = LOG_PATH_EVERY; 37 | for (const path of workingPaths) { 38 | 39 | const paginationDirPath = `${BASE_PATH}/${path}`; 40 | const innerPaths = await fs.promises.readdir(paginationDirPath); 41 | 42 | await radash.parallel(PARALLEL_FILES_READ, innerPaths, async (path) => { 43 | const filePath = `${paginationDirPath}/${path}`; 44 | const reviewsBuffer = await fs.promises.readFile(filePath); 45 | const reviewsRead = JSON.parse(Buffer.from(reviewsBuffer).toString()).reviews || []; 46 | reviews.push(...reviewsRead); 47 | doneReadingReviews += reviewsRead.length; 48 | 49 | if (doneReadingReviews > moreThan) { 50 | console.log(`done paths - ${donePaths} done reviews ${doneReadingReviews}`) 51 | moreThan += LOG_PATH_EVERY 52 | } 53 | }) 54 | 55 | if (reviews.length >= READING_CHUNK) { 56 | await insertReviews(col, reviews); 57 | reviews = [] 58 | } 59 | 60 | donePaths++ 61 | 62 | if (donePaths === workingPathsLength) { 63 | console.log('Last insert!') 64 | await insertReviews(col, reviews); 65 | reviews = [] 66 | } 67 | } 68 | 69 | console.log('Done reading reviews: ', doneReadingReviews) 70 | console.log('Script: took - ', (Date.now() - started) * 0.001, ' seconds\n'); 71 | process.exit() 72 | })() 73 | 74 | async function getCollection() { 75 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 76 | useNewUrlParser: true, 77 | useUnifiedTopology: true, 78 | serverApi: ServerApiVersion.v1 79 | }); 80 | await client.connect(); 81 | 82 | console.log('Connected successfully to server'); 83 | const db = client.db(DB_NAME); 84 | return db.collection(COLLECTION_NAME) 85 | } 86 | 87 | async function insertReviews(col, reviews) { 88 | console.log(`Started insertReview() count: ${reviews.length}`) 89 | console.time('Insert took: ') 90 | const chunks = lodash.chunk(reviews, PARALLEL_EXECUTION_CHUNK); 91 | await radash.parallel(PARALLEL_INSERTIONS, chunks, async (chunk) => { 92 | const now = Date.now() 93 | const stats = `size ${(objectSize(chunk) * BYTE_IN_MB).toFixed(3)} mb, records: ${chunk.length}` 94 | console.time(`id: ${now} - stats: ${stats} - took: `); 95 | await col.insertMany(chunk); 96 | console.timeEnd(`id: ${now} - stats: ${stats} - took: `); 97 | }) 98 | console.timeEnd('Insert took: ') 99 | console.log('\n'); 100 | } -------------------------------------------------------------------------------- /insert/50m+/read-size-of-dataset/index.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs'); 2 | const {from = 0, to = 1} = require('minimist')(process.argv.slice(2)); 3 | const radash = require('radash'); 4 | 5 | const BASE_PATH = '../dataset/reviews'; 6 | const PARALLEL_EXECUTIONS = 5; 7 | const LOG_PATH_EVERY = 1_000_000; 8 | 9 | (async () => { 10 | let started = Date.now() 11 | const dirsPaths = await fs.promises.readdir(BASE_PATH, {}); 12 | 13 | const workingPaths = dirsPaths.slice(+from, +to).filter(path => !path.startsWith('.')) 14 | let doneReadingReviews = 0; 15 | console.log('\n All paths', workingPaths.length) 16 | let donePaths = 0; 17 | let moreThan 
= LOG_PATH_EVERY; 18 | for (const path of workingPaths) { 19 | 20 | const paginationDirPath = `${BASE_PATH}/${path}`; 21 | const innerPaths = await fs.promises.readdir(paginationDirPath); 22 | 23 | await radash.parallel(PARALLEL_EXECUTIONS, innerPaths, async (path) => { 24 | const filePath = `${paginationDirPath}/${path}`; 25 | 26 | const chunk = await fs.promises.readFile(filePath); 27 | const reviews = JSON.parse(Buffer.from(chunk).toString()).reviews || []; 28 | doneReadingReviews += reviews.length; 29 | 30 | if (doneReadingReviews > moreThan) { 31 | console.log(`done paths - ${donePaths} done reviews ${doneReadingReviews}`) 32 | moreThan += LOG_PATH_EVERY 33 | } 34 | }) 35 | 36 | donePaths++; 37 | } 38 | console.log('Reviews', doneReadingReviews) 39 | console.log('Operation took - ', (Date.now() - started) * 0.001, ' seconds\n'); 40 | })() -------------------------------------------------------------------------------- /insert/7m/7m-m30-per100k.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | 4 | const fs = require('fs'); 5 | const objectSize = require('object-sizeof'); 6 | const radash = require('radash'); 7 | const JsonlParser = require('stream-json/jsonl/Parser'); 8 | const {MongoClient, ServerApiVersion} = require("mongodb"); 9 | const parser = new JsonlParser(); 10 | 11 | const PATH_7MIL = '../dataset/7m-yelp-reviews.json'; 12 | const COLLECTION_NAME = '7mil-collection'; 13 | const DB_NAME = 'performance7m'; 14 | 15 | const BYTE_IN_MB = 0.00000095367432; 16 | 17 | (async () => { 18 | let arrayToInsert = []; 19 | let objectCounter = 0; 20 | let started = Date.now() 21 | const col = await getCollection(); 22 | 23 | const pipeline = fs.createReadStream(PATH_7MIL).pipe(parser); 24 | 25 | pipeline.on('data', async data => { 26 | objectCounter++; 27 | arrayToInsert.push(data.value) 28 | 29 | if (objectCounter % 100_000 === 0) { 30 | pipeline.pause() 31 | console.log('arrayToInsert size -', objectSize(arrayToInsert) * BYTE_IN_MB, 'mb'); 32 | console.time(`Inserting time - ${objectCounter}`); 33 | await col.insertMany(arrayToInsert); 34 | console.timeEnd(`Inserting time - ${objectCounter}`); 35 | arrayToInsert = [] 36 | 37 | console.log('--------------\n'); 38 | await radash.sleep(100); 39 | pipeline.resume() 40 | } 41 | }); 42 | 43 | pipeline.on('end', async () => { 44 | console.log('Operation took - ', (Date.now() - started) * 0.001, ' seconds\n'); 45 | process.exit() 46 | }); 47 | })() 48 | 49 | 50 | async function getCollection() { 51 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 52 | useNewUrlParser: true, 53 | useUnifiedTopology: true, 54 | serverApi: ServerApiVersion.v1 55 | }); 56 | await client.connect(); 57 | 58 | console.log('Connected successfully to server'); 59 | const db = client.db(DB_NAME); 60 | return db.collection(COLLECTION_NAME) 61 | } -------------------------------------------------------------------------------- /insert/7m/7m-m30-per200k-5X20k.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | 4 | const fs = require('fs'); 5 | const {MongoClient, ServerApiVersion} = require("mongodb"); 6 | const JsonlParser = require('stream-json/jsonl/Parser'); 7 | const parser = new JsonlParser(); 8 | const objectSize = require('object-sizeof'); 9 | const radash = require('radash'); 10 | const lodash = require('lodash'); 11 | 12 | const PATH_7MIL = 
'../dataset/7m-yelp-reviews.json'; 13 | const COLLECTION_NAME = '7mil-collection-parallel'; 14 | const DB_NAME = 'performance7m'; 15 | 16 | const CHUNK = 200_000; 17 | const PARALLEL_EXECUTIONS = 5; 18 | const PARALLEL_EXECUTION_CHUNK = 20_000; 19 | 20 | const BYTE_IN_MB = 0.00000095367432; 21 | 22 | (async () => { 23 | let arrayToInsert = []; 24 | let objectCounter = 0; 25 | let started = Date.now() 26 | const col = await getCollection(); 27 | 28 | const pipeline = fs.createReadStream(PATH_7MIL).pipe(parser); 29 | pipeline.on('data', async data => { 30 | objectCounter++; 31 | arrayToInsert.push(data.value) 32 | 33 | if (arrayToInsert.length % CHUNK === 0) { 34 | console.log('arrayToInsert size -', objectSize(arrayToInsert) * BYTE_IN_MB, 'mb\n'); 35 | pipeline.pause() 36 | const chunks = lodash.chunk(arrayToInsert, PARALLEL_EXECUTION_CHUNK); 37 | await radash.parallel(PARALLEL_EXECUTIONS, chunks, async (chunk) => { 38 | const now = Date.now() 39 | const stats = `size ${(objectSize(chunk) * BYTE_IN_MB).toFixed(3)} mb, records: ${chunk.length}` 40 | console.time(`id: ${now} - stats: ${stats} - took: `); 41 | await col.insertMany(chunk); 42 | console.timeEnd(`id: ${now} - stats: ${stats} - took: `); 43 | }) 44 | console.log('--------------\n'); 45 | arrayToInsert = [] 46 | await radash.sleep(100); 47 | pipeline.resume() 48 | } 49 | }); 50 | 51 | pipeline.on('end', async () => { 52 | console.log('Operation took - ', (Date.now() - started) * 0.001, ' seconds\n'); 53 | process.exit() 54 | }); 55 | })() 56 | 57 | 58 | async function getCollection() { 59 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 60 | useNewUrlParser: true, 61 | useUnifiedTopology: true, 62 | serverApi: ServerApiVersion.v1 63 | }); 64 | await client.connect(); 65 | 66 | console.log('Connected successfully to server'); 67 | const db = client.db(DB_NAME); 68 | return db.collection(COLLECTION_NAME) 69 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mongo-performance", 3 | "version": "1.0.0", 4 | "description": "Testing MongoDB performance when working with millions of records", 5 | "main": "index.js", 6 | "scripts": {}, 7 | "keywords": [ 8 | "mongodb", 9 | "performance" 10 | ], 11 | "author": "Dmytro Harazdovskiy", 12 | "license": "ISC", 13 | "dependencies": { 14 | "dotenv": "16.0.2", 15 | "lodash": "^4.17.21", 16 | "minimist": "^1.2.6", 17 | "mongodb": "4.9.1", 18 | "object-sizeof": "^1.6.3", 19 | "radash": "^7.1.0", 20 | "stream-json": "^1.7.4" 21 | }, 22 | "engines": { 23 | "node": ">=18" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Mongo Performance Scripts 2 | This repository contains example scripts for working with small (1m) and medium-sized (50m+) record datasets in MongoDB using Node.js. 3 | 4 | Currently there are examples for inserting and updating 1m, 10m, and 50m+ records. 5 | 6 | All the explanations can be found here: 7 | - [Optimizing massive MongoDB inserts, load 50 million records faster by 33%!](https://harazdovskiy.medium.com/50-million-records-insert-in-mongodb-using-node-js-5c62b7d7af5a) 8 | - [How to update 63 million records in MongoDB 50% faster?](https://harazdovskiy.medium.com) 9 | 10 | Feel free to reuse these best practices in your own projects!
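11 | 
12 | ## Quick start
13 | 
14 | A rough sketch of how to get set up. The large source datasets are not committed; judging by the `../dataset/...` paths in the scripts, they are expected in a local `dataset` folder, so adjust the paths to wherever you keep them:
15 | 
16 | ```bash
17 | cp .env.example .env     # fill in the MONGO_CLUSTER_SHARED / MONGO_CLUSTER_M30 connection strings
18 | nvm use                  # Node v18.2.0, see .nvmrc
19 | yarn                     # install dependencies
20 | node insert/1m/index.js  # or any other script, e.g. the commands listed in update/50m+/objectid/run.bash
21 | ```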
-------------------------------------------------------------------------------- /update/10m/parralel/update-100k-parralel.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | const {MongoClient, ServerApiVersion} = require('mongodb'); 4 | const {chunk} = require("lodash"); 5 | 6 | const DB_NAME = 'performance63m'; 7 | const COLLECTION_NAME = '63mil-collection'; 8 | const radash = require('radash'); 9 | const CHUNK = 100_000; 10 | const BULK_CHUNK_SIZE = 20_000; 11 | 12 | (async () => { 13 | try { 14 | console.time('Script took'); 15 | 16 | const col = await getCollection(); 17 | const query = {language: 'schinese'}; 18 | console.time('Cursor'); 19 | const cursor = col 20 | .find( 21 | query, 22 | {votes_up: 1, votes_funny: 1, comment_count: 1} 23 | ) 24 | console.timeEnd('Cursor'); 25 | 26 | let objectCounter = 0; 27 | let operations = []; 28 | const cursorStream = cursor.stream(); 29 | console.log('Started reading....') 30 | cursorStream.on("data", async doc => { 31 | objectCounter++; 32 | operations.push(getBulkOperations(doc)) 33 | if (objectCounter % CHUNK === 0) { 34 | cursorStream.pause(); 35 | const bulkChunks = chunk(operations, BULK_CHUNK_SIZE) 36 | console.time('Parallel sequential took'); 37 | await radash.parallel(5, bulkChunks, (operations) => col.bulkWrite(operations)) 38 | console.timeEnd('Parallel sequential took'); 39 | operations = [] 40 | console.log('Done with ', objectCounter, 'records') 41 | cursorStream.resume() 42 | } 43 | }).on('end', () => { 44 | console.timeEnd('Script took'); 45 | process.exit(); 46 | }).on('error', (e) => { 47 | console.timeEnd('Script took'); 48 | console.error(e) 49 | process.exit(1); 50 | }); 51 | } catch (e) { 52 | console.timeEnd('Script took'); 53 | console.error(e); 54 | } 55 | })(); 56 | 57 | function getBulkOperations(record) { 58 | return { 59 | updateOne: { 60 | filter: {_id: record._id}, 61 | update: { 62 | $set: { 63 | 'parallel_popularity': record.votes_up + record.votes_funny + record.comment_count 64 | } 65 | } 66 | } 67 | } 68 | } 69 | 70 | async function getCollection() { 71 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 72 | useNewUrlParser: true, 73 | useUnifiedTopology: true, 74 | serverApi: ServerApiVersion.v1, 75 | readPreference: 'secondary', 76 | }); 77 | await client.connect(); 78 | 79 | console.log('Connected successfully to server'); 80 | const db = client.db(DB_NAME); 81 | return db.collection(COLLECTION_NAME) 82 | } -------------------------------------------------------------------------------- /update/10m/sequential/update.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | const {MongoClient, ServerApiVersion} = require('mongodb'); 4 | 5 | const DB_NAME = 'performance63m'; 6 | const COLLECTION_NAME = '63mil-collection'; 7 | const CHUNK = 20_000; 8 | 9 | (async () => { 10 | try { 11 | console.time('Script took'); 12 | 13 | const col = await getCollection(); 14 | const query = {language: 'schinese'}; 15 | console.time('Cursor'); 16 | const cursor = col 17 | .find( 18 | query, 19 | {votes_up: 1, votes_funny: 1, comment_count: 1} 20 | ) 21 | console.timeEnd('Cursor'); 22 | 23 | console.log('Calculating cursor size'); 24 | console.time('docs count'); 25 | const totalDocs = await col.countDocuments(query); 26 | console.timeEnd('docs count'); 27 | 28 | console.log({totalDocs}) 29 | let objectCounter = 0; 30 | let 
operations = []; 31 | const cursorStream = cursor.stream(); 32 | cursorStream.on("data", async doc => { 33 | objectCounter++; 34 | operations.push(getBulkOperations(doc)) 35 | if (objectCounter % CHUNK === 0) { 36 | cursorStream.pause(); 37 | await col.bulkWrite(operations); 38 | operations = [] 39 | console.log('objectCounter: ', objectCounter) 40 | cursorStream.resume() 41 | } 42 | }).on('end', () => { 43 | console.timeEnd('Script took'); 44 | process.exit(); 45 | }).on('error', (e) => { 46 | console.timeEnd('Script took'); 47 | console.error(e) 48 | process.exit(1); 49 | }); 50 | } catch (e) { 51 | console.timeEnd('Script took'); 52 | console.error(e); 53 | } 54 | })(); 55 | 56 | function getBulkOperations(record) { 57 | return { 58 | updateOne: { 59 | filter: {_id: record._id}, 60 | update: { 61 | $set: { 62 | 'popularity': record.votes_up + record.votes_funny + record.comment_count 63 | } 64 | } 65 | } 66 | } 67 | } 68 | 69 | async function getCollection() { 70 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 71 | useNewUrlParser: true, 72 | useUnifiedTopology: true, 73 | serverApi: ServerApiVersion.v1 74 | }); 75 | await client.connect(); 76 | 77 | console.log('Connected successfully to server'); 78 | const db = client.db(DB_NAME); 79 | return db.collection(COLLECTION_NAME) 80 | } -------------------------------------------------------------------------------- /update/1mil/bulk-write.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | const {MongoClient, ServerApiVersion} = require('mongodb'); 4 | 5 | const DB_NAME = 'performance1m'; 6 | const COLLECTION_NAME = 'one-million'; 7 | 8 | (async () => { 9 | try { 10 | 11 | const col = await getCollection(); 12 | 13 | console.log('Requesting 1m data....'); 14 | 15 | console.time('Requesting 1m data'); 16 | const toBeUpdated = await col.find({}).toArray(); 17 | console.timeEnd('Requesting 1m data'); 18 | 19 | console.log('Processing 1m data....'); 20 | console.time('Processing 1m data'); 21 | const operations = toBeUpdated.map((record) => { 22 | return { 23 | updateOne: { 24 | filter: {_id: record._id}, 25 | update: { 26 | $set: { 27 | 'newVal': record.name + record.number 28 | } 29 | } 30 | } 31 | } 32 | }) 33 | console.timeEnd('Processing 1m data'); 34 | 35 | console.log('Updating 1m data....'); 36 | console.time('Updating 1m data'); 37 | // https://www.mongodb.com/docs/manual/reference/method/db.collection.bulkWrite/#updateone-and-updatemany 38 | const res = await col.bulkWrite(operations); 39 | console.timeEnd('Updating 1m data'); 40 | 41 | console.log(res) 42 | 43 | process.exit(); 44 | } catch (e) { 45 | console.error(e); 46 | process.exit(1); 47 | } 48 | })(); 49 | 50 | async function getCollection() { 51 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 52 | useNewUrlParser: true, 53 | useUnifiedTopology: true, 54 | serverApi: ServerApiVersion.v1 55 | }); 56 | await client.connect(); 57 | 58 | console.log('Connected successfully to server'); 59 | const db = client.db(DB_NAME); 60 | return db.collection(COLLECTION_NAME) 61 | } -------------------------------------------------------------------------------- /update/1mil/update.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | const {MongoClient, ServerApiVersion} = require('mongodb'); 4 | 5 | const DB_NAME = 'performance63m'; 6 | const COLLECTION_NAME = 
'63mil-collection'; 7 | 8 | (async () => { 9 | try { 10 | 11 | const col = await getCollection(); 12 | 13 | console.time('Updating 1,3mil data'); 14 | const res = await col.updateMany({language: 'polish'}, {$unset: {isPolish: true}}); 15 | console.timeEnd('Updating 1,3mil data'); 16 | 17 | console.log(res) 18 | 19 | process.exit(); 20 | } catch (e) { 21 | console.error(e); 22 | process.exit(1); 23 | } 24 | })(); 25 | 26 | async function getCollection() { 27 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 28 | useNewUrlParser: true, 29 | useUnifiedTopology: true, 30 | serverApi: ServerApiVersion.v1 31 | }); 32 | await client.connect(); 33 | 34 | console.log('Connected successfully to server'); 35 | const db = client.db(DB_NAME); 36 | return db.collection(COLLECTION_NAME) 37 | } -------------------------------------------------------------------------------- /update/50m+/cursor/index.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | const {MongoClient, ServerApiVersion} = require('mongodb'); 4 | const {chunk} = require("lodash"); 5 | const {from = 0, to = 500_000} = require('minimist')(process.argv.slice(2)); 6 | 7 | const DB_NAME = 'performance63m'; 8 | const COLLECTION_NAME = '63mil-collection'; 9 | const radash = require('radash'); 10 | const CHUNK = 100_000; 11 | const BULK_CHUNK_SIZE = 20_000; 12 | 13 | (async () => { 14 | try { 15 | console.time('Script took'); 16 | 17 | const col = await getCollection(); 18 | 19 | console.time('Cursor'); 20 | const cursor = col.find({}, { 21 | votes_up: 1, 22 | votes_funny: 1, 23 | comment_count: 1 24 | }).batchSize(1000).skip(from).limit(to); 25 | console.timeEnd('Cursor'); 26 | 27 | let objectCounter = 0; 28 | let operations = []; 29 | const cursorStream = cursor.stream(); 30 | console.log('Started reading....') 31 | cursorStream.on("data", async doc => { 32 | objectCounter++; 33 | operations.push(getBulkOperations(doc)) 34 | if (objectCounter % CHUNK === 0) { 35 | cursorStream.pause(); 36 | const bulkChunks = chunk(operations, BULK_CHUNK_SIZE) 37 | console.time('Parallel sequential took'); 38 | await radash.parallel(5, bulkChunks, (operations) => col.bulkWrite(operations)) 39 | console.timeEnd('Parallel sequential took'); 40 | operations = [] 41 | console.log('Done with ', objectCounter, 'records') 42 | cursorStream.resume() 43 | } 44 | }).on('end', () => { 45 | console.timeEnd('Script took'); 46 | process.exit(); 47 | }).on('error', (e) => { 48 | console.timeEnd('Script took'); 49 | console.error(e) 50 | process.exit(1); 51 | }); 52 | } catch (e) { 53 | console.timeEnd('Script took'); 54 | console.error(e); 55 | } 56 | })(); 57 | 58 | function getBulkOperations(record) { 59 | return { 60 | updateOne: { 61 | filter: {_id: record._id}, 62 | update: { 63 | $set: { 64 | 'super_popularity': record.votes_up + record.votes_funny + record.comment_count 65 | } 66 | } 67 | } 68 | } 69 | } 70 | 71 | async function getCollection() { 72 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 73 | useNewUrlParser: true, 74 | useUnifiedTopology: true, 75 | serverApi: ServerApiVersion.v1, 76 | readPreference: 'secondary' 77 | }); 78 | await client.connect(); 79 | 80 | console.log('Connected successfully to server'); 81 | const db = client.db(DB_NAME); 82 | return db.collection(COLLECTION_NAME) 83 | } -------------------------------------------------------------------------------- /update/50m+/objectid/run.bash: 
-------------------------------------------------------------------------------- 1 | node update/50m+/objectid/update.js --from=0 --to=5250000 2 | node update/50m+/objectid/update.js --from=5250000 --to=10500000 3 | node update/50m+/objectid/update.js --from=10500000 --to=15750000 4 | 5 | node update/50m+/objectid/update.js --from=15750000 --to=21000000 6 | node update/50m+/objectid/update.js --from=21000000 --to=26250000 7 | node update/50m+/objectid/update.js --from=26250000 --to=31500000 8 | 9 | node update/50m+/objectid/update.js --from=31500000 --to=36750000 10 | node update/50m+/objectid/update.js --from=36750000 --to=42000000 11 | node update/50m+/objectid/update.js --from=42000000 --to=47250000 12 | 13 | node update/50m+/objectid/update.js --from=47250000 --to=52500000 14 | node update/50m+/objectid/update.js --from=52500000 --to=57750000 15 | node update/50m+/objectid/update.js --from=57750000 --to=63000000 -------------------------------------------------------------------------------- /update/50m+/objectid/update.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | dotenv.config() 3 | const {MongoClient, ServerApiVersion} = require('mongodb'); 4 | const {chunk} = require("lodash"); 5 | const {from = 0, to = 100_000} = require('minimist')(process.argv.slice(2)); 6 | 7 | const DB_NAME = 'performance63m'; 8 | const COLLECTION_NAME = '63mil-collection'; 9 | const radash = require('radash'); 10 | const READ_CHUNK = 10_000; 11 | const UPDATE_CHUNK_SIZE = 2000; 12 | 13 | (async () => { 14 | try { 15 | console.time('Script took'); 16 | 17 | const col = await getCollection(); 18 | const projection = { 19 | votes_up: 1, 20 | votes_funny: 1, 21 | comment_count: 1 22 | }; 23 | console.time('First docs'); 24 | 25 | const firstChunk = await col.find({}, projection).skip(from).limit(READ_CHUNK).toArray(); 26 | console.timeEnd('First docs'); 27 | 28 | let toProcess = to - from; 29 | let totalOperations = firstChunk.length; 30 | 31 | console.log('Started reading....') 32 | const operations = firstChunk.map(getBulkOperations); 33 | await write(col, operations); 34 | 35 | const lastElementsId = getNextId(firstChunk); 36 | await processBatch(lastElementsId) 37 | 38 | async function processBatch(nextId) { 39 | console.log('------------------------------------') 40 | console.log({totalOperations}, nextId.toString()) 41 | console.time("Get took") 42 | const docs = await col.find({_id: {$gt: nextId}}, projection).limit(READ_CHUNK).toArray(); 43 | console.timeEnd("Get took") 44 | const newNextId = getNextId(docs); 45 | totalOperations += docs.length; 46 | console.log(`Done ${(totalOperations * 100) / toProcess} %`) 47 | 48 | if (!newNextId || (totalOperations + from) >= to) { 49 | console.log('last Object Id', newNextId, totalOperations >= to) 50 | await write(col, docs.map(getBulkOperations)); 51 | return Promise.resolve() 52 | } 53 | await write(col, docs.map(getBulkOperations)); 54 | 55 | return processBatch(newNextId) 56 | } 57 | 58 | console.timeEnd('Script took'); 59 | process.exit() 60 | } catch (e) { 61 | console.timeEnd('Script took'); 62 | console.error(e); 63 | process.exit(1) 64 | } 65 | })(); 66 | 67 | function getNextId(docs) { 68 | if (!docs.length) { 69 | return null 70 | } 71 | const nextId = docs[docs.length - 1]._id; 72 | return nextId || null; 73 | } 74 | 75 | async function write(col, newOperations) { 76 | console.log('writing: ', newOperations.length) 77 | const bulkChunks = chunk(newOperations, 
UPDATE_CHUNK_SIZE) 78 | console.time('Parallel update took'); 79 | await radash.parallel(5, bulkChunks, (operations) => col.bulkWrite(operations)) 80 | console.timeEnd('Parallel update took'); 81 | return Promise.resolve(); 82 | } 83 | 84 | function getBulkOperations(record) { 85 | return { 86 | updateOne: { 87 | filter: {_id: record._id}, 88 | update: { 89 | $set: { 90 | 'calc_popularity_id': record.votes_up + record.votes_funny + record.comment_count 91 | } 92 | } 93 | } 94 | } 95 | } 96 | 97 | async function getCollection() { 98 | const client = new MongoClient(process.env.MONGO_CLUSTER_M30, { 99 | useNewUrlParser: true, 100 | useUnifiedTopology: true, 101 | serverApi: ServerApiVersion.v1, 102 | readPreference: 'secondary' 103 | }); 104 | await client.connect(); 105 | 106 | console.log('Connected successfully to server'); 107 | const db = client.db(DB_NAME); 108 | return db.collection(COLLECTION_NAME) 109 | } -------------------------------------------------------------------------------- /update/readme.md: -------------------------------------------------------------------------------- 1 | This section is comming soon! -------------------------------------------------------------------------------- /update/reviews_by_count.json: -------------------------------------------------------------------------------- 1 | [{ 2 | "_id" : "english", 3 | "count" : 28247901.0 4 | }, 5 | { 6 | "_id" : "schinese", 7 | "count" : 10589453.0 8 | }, 9 | { 10 | "_id" : "russian", 11 | "count" : 7208355.0 12 | }, 13 | { 14 | "_id" : "brazilian", 15 | "count" : 2644543.0 16 | }, 17 | { 18 | "_id" : "spanish", 19 | "count" : 2463678.0 20 | }, 21 | { 22 | "_id" : "german", 23 | "count" : 2052290.0 24 | }, 25 | { 26 | "_id" : "turkish", 27 | "count" : 2036677.0 28 | }, 29 | { 30 | "_id" : "koreana", 31 | "count" : 1445616.0 32 | }, 33 | { 34 | "_id" : "french", 35 | "count" : 1399185.0 36 | }, 37 | { 38 | "_id" : "polish", 39 | "count" : 1341264.0 40 | }, 41 | { 42 | "_id" : "tchinese", 43 | "count" : 643961.0 44 | }, 45 | { 46 | "_id" : "czech", 47 | "count" : 370956.0 48 | }, 49 | { 50 | "_id" : "italian", 51 | "count" : 348743.0 52 | }, 53 | { 54 | "_id" : "japanese", 55 | "count" : 309402.0 56 | }, 57 | { 58 | "_id" : "latam", 59 | "count" : 278111.0 60 | }, 61 | { 62 | "_id" : "thai", 63 | "count" : 277266.0 64 | }, 65 | { 66 | "_id" : "swedish", 67 | "count" : 246512.0 68 | }, 69 | { 70 | "_id" : "portuguese", 71 | "count" : 241709.0 72 | }, 73 | { 74 | "_id" : "dutch", 75 | "count" : 202342.0 76 | }, 77 | { 78 | "_id" : "finnish", 79 | "count" : 166827.0 80 | }, 81 | { 82 | "_id" : "danish", 83 | "count" : 158252.0 84 | }, 85 | { 86 | "_id" : "hungarian", 87 | "count" : 157412.0 88 | }, 89 | { 90 | "_id" : "norwegian", 91 | "count" : 107774.0 92 | }, 93 | { 94 | "_id" : "romanian", 95 | "count" : 86428.0 96 | }, 97 | { 98 | "_id" : "ukrainian", 99 | "count" : 85895.0 100 | }, 101 | { 102 | "_id" : "greek", 103 | "count" : 39893.0 104 | }, 105 | { 106 | "_id" : "bulgarian", 107 | "count" : 28014.0 108 | }, 109 | { 110 | "_id" : "vietnamese", 111 | "count" : 21145.0 112 | }] -------------------------------------------------------------------------------- /update/script.js: -------------------------------------------------------------------------------- 1 | db.getCollection("63mil-collection").aggregate([ 2 | { $group: { _id: '$language', count: { $sum: 1 } } }, 3 | { $sort: { 'count': -1 } } 4 | ]) 5 | -------------------------------------------------------------------------------- /yarn.lock: 
-------------------------------------------------------------------------------- 1 | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 2 | # yarn lockfile v1 3 | 4 | 5 | "@types/node@*": 6 | version "18.7.18" 7 | resolved "https://registry.yarnpkg.com/@types/node/-/node-18.7.18.tgz#633184f55c322e4fb08612307c274ee6d5ed3154" 8 | integrity sha512-m+6nTEOadJZuTPkKR/SYK3A2d7FZrgElol9UP1Kae90VVU4a6mxnPuLiIW1m4Cq4gZ/nWb9GrdVXJCoCazDAbg== 9 | 10 | "@types/webidl-conversions@*": 11 | version "7.0.0" 12 | resolved "https://registry.yarnpkg.com/@types/webidl-conversions/-/webidl-conversions-7.0.0.tgz#2b8e60e33906459219aa587e9d1a612ae994cfe7" 13 | integrity sha512-xTE1E+YF4aWPJJeUzaZI5DRntlkY3+BCVJi0axFptnjGmAoWxkyREIh/XMrfxVLejwQxMCfDXdICo0VLxThrog== 14 | 15 | "@types/whatwg-url@^8.2.1": 16 | version "8.2.2" 17 | resolved "https://registry.yarnpkg.com/@types/whatwg-url/-/whatwg-url-8.2.2.tgz#749d5b3873e845897ada99be4448041d4cc39e63" 18 | integrity sha512-FtQu10RWgn3D9U4aazdwIE2yzphmTJREDqNdODHrbrZmmMqI0vMheC/6NE/J1Yveaj8H+ela+YwWTjq5PGmuhA== 19 | dependencies: 20 | "@types/node" "*" 21 | "@types/webidl-conversions" "*" 22 | 23 | base64-js@^1.3.1: 24 | version "1.5.1" 25 | resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" 26 | integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== 27 | 28 | bson@^4.7.0: 29 | version "4.7.0" 30 | resolved "https://registry.yarnpkg.com/bson/-/bson-4.7.0.tgz#7874a60091ffc7a45c5dd2973b5cad7cded9718a" 31 | integrity sha512-VrlEE4vuiO1WTpfof4VmaVolCVYkYTgB9iWgYNOrVlnifpME/06fhFRmONgBhClD5pFC1t9ZWqFUQEQAzY43bA== 32 | dependencies: 33 | buffer "^5.6.0" 34 | 35 | buffer@^5.6.0: 36 | version "5.7.1" 37 | resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" 38 | integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== 39 | dependencies: 40 | base64-js "^1.3.1" 41 | ieee754 "^1.1.13" 42 | 43 | denque@^2.1.0: 44 | version "2.1.0" 45 | resolved "https://registry.yarnpkg.com/denque/-/denque-2.1.0.tgz#e93e1a6569fb5e66f16a3c2a2964617d349d6ab1" 46 | integrity sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw== 47 | 48 | dotenv@16.0.2: 49 | version "16.0.2" 50 | resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.0.2.tgz#0b0f8652c016a3858ef795024508cddc4bffc5bf" 51 | integrity sha512-JvpYKUmzQhYoIFgK2MOnF3bciIZoItIIoryihy0rIA+H4Jy0FmgyKYAHCTN98P5ybGSJcIFbh6QKeJdtZd1qhA== 52 | 53 | ieee754@^1.1.13: 54 | version "1.2.1" 55 | resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" 56 | integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== 57 | 58 | ip@^2.0.0: 59 | version "2.0.0" 60 | resolved "https://registry.yarnpkg.com/ip/-/ip-2.0.0.tgz#4cf4ab182fee2314c75ede1276f8c80b479936da" 61 | integrity sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ== 62 | 63 | lodash@^4.17.21: 64 | version "4.17.21" 65 | resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" 66 | integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== 67 | 68 | memory-pager@^1.0.2: 69 | version "1.5.0" 70 | resolved 
"https://registry.yarnpkg.com/memory-pager/-/memory-pager-1.5.0.tgz#d8751655d22d384682741c972f2c3d6dfa3e66b5" 71 | integrity sha512-ZS4Bp4r/Zoeq6+NLJpP+0Zzm0pR8whtGPf1XExKLJBAczGMnSi3It14OiNCStjQjM6NU1okjQGSxgEZN8eBYKg== 72 | 73 | minimist@^1.2.6: 74 | version "1.2.7" 75 | resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18" 76 | integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g== 77 | 78 | mongodb-connection-string-url@^2.5.3: 79 | version "2.5.3" 80 | resolved "https://registry.yarnpkg.com/mongodb-connection-string-url/-/mongodb-connection-string-url-2.5.3.tgz#c0c572b71570e58be2bd52b33dffd1330cfb6990" 81 | integrity sha512-f+/WsED+xF4B74l3k9V/XkTVj5/fxFH2o5ToKXd8Iyi5UhM+sO9u0Ape17Mvl/GkZaFtM0HQnzAG5OTmhKw+tQ== 82 | dependencies: 83 | "@types/whatwg-url" "^8.2.1" 84 | whatwg-url "^11.0.0" 85 | 86 | mongodb@4.9.1: 87 | version "4.9.1" 88 | resolved "https://registry.yarnpkg.com/mongodb/-/mongodb-4.9.1.tgz#0c769448228bcf9a6aa7d16daa3625b48312479e" 89 | integrity sha512-ZhgI/qBf84fD7sI4waZBoLBNJYPQN5IOC++SBCiPiyhzpNKOxN/fi0tBHvH2dEC42HXtNEbFB0zmNz4+oVtorQ== 90 | dependencies: 91 | bson "^4.7.0" 92 | denque "^2.1.0" 93 | mongodb-connection-string-url "^2.5.3" 94 | socks "^2.7.0" 95 | optionalDependencies: 96 | saslprep "^1.0.3" 97 | 98 | object-sizeof@^1.6.3: 99 | version "1.6.3" 100 | resolved "https://registry.yarnpkg.com/object-sizeof/-/object-sizeof-1.6.3.tgz#6edbbf26825b971fd7a32125a800ed2a9895af95" 101 | integrity sha512-LGtilAKuDGKCcvu1Xg3UvAhAeJJlFmblo3faltmOQ80xrGwAHxnauIXucalKdTEksHp/Pq9tZGz1hfyEmjFJPQ== 102 | dependencies: 103 | buffer "^5.6.0" 104 | 105 | punycode@^2.1.1: 106 | version "2.1.1" 107 | resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" 108 | integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== 109 | 110 | radash@^7.1.0: 111 | version "7.1.0" 112 | resolved "https://registry.yarnpkg.com/radash/-/radash-7.1.0.tgz#b6da541c678368ab3a1f3702057342e944599e92" 113 | integrity sha512-NOWTaF5YMY3mCgrNq9Fw9fK8yvJv92uqRb2StcVq5W8kcJ6949EbV2vk6nLJKAocYs29rzJafZZP1lFkEkoVGw== 114 | 115 | saslprep@^1.0.3: 116 | version "1.0.3" 117 | resolved "https://registry.yarnpkg.com/saslprep/-/saslprep-1.0.3.tgz#4c02f946b56cf54297e347ba1093e7acac4cf226" 118 | integrity sha512-/MY/PEMbk2SuY5sScONwhUDsV2p77Znkb/q3nSVstq/yQzYJOH/Azh29p9oJLsl3LnQwSvZDKagDGBsBwSooag== 119 | dependencies: 120 | sparse-bitfield "^3.0.3" 121 | 122 | smart-buffer@^4.2.0: 123 | version "4.2.0" 124 | resolved "https://registry.yarnpkg.com/smart-buffer/-/smart-buffer-4.2.0.tgz#6e1d71fa4f18c05f7d0ff216dd16a481d0e8d9ae" 125 | integrity sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg== 126 | 127 | socks@^2.7.0: 128 | version "2.7.0" 129 | resolved "https://registry.yarnpkg.com/socks/-/socks-2.7.0.tgz#f9225acdb841e874dca25f870e9130990f3913d0" 130 | integrity sha512-scnOe9y4VuiNUULJN72GrM26BNOjVsfPXI+j+98PkyEfsIXroa5ofyjT+FzGvn/xHs73U2JtoBYAVx9Hl4quSA== 131 | dependencies: 132 | ip "^2.0.0" 133 | smart-buffer "^4.2.0" 134 | 135 | sparse-bitfield@^3.0.3: 136 | version "3.0.3" 137 | resolved "https://registry.yarnpkg.com/sparse-bitfield/-/sparse-bitfield-3.0.3.tgz#ff4ae6e68656056ba4b3e792ab3334d38273ca11" 138 | integrity sha512-kvzhi7vqKTfkh0PZU+2D2PIllw2ymqJKujUcyPMd9Y75Nv4nPbGJZXNhxsgdQab2BmlDct1YnfQCguEvHr7VsQ== 139 | dependencies: 140 | memory-pager "^1.0.2" 
141 | 142 | stream-chain@^2.2.5: 143 | version "2.2.5" 144 | resolved "https://registry.yarnpkg.com/stream-chain/-/stream-chain-2.2.5.tgz#b30967e8f14ee033c5b9a19bbe8a2cba90ba0d09" 145 | integrity sha512-1TJmBx6aSWqZ4tx7aTpBDXK0/e2hhcNSTV8+CbFJtDjbb+I1mZ8lHit0Grw9GRT+6JbIrrDd8esncgBi8aBXGA== 146 | 147 | stream-json@^1.7.4: 148 | version "1.7.4" 149 | resolved "https://registry.yarnpkg.com/stream-json/-/stream-json-1.7.4.tgz#e41637f93c5aca7267009ca8a3f6751e62331e69" 150 | integrity sha512-ja2dde1v7dOlx5/vmavn8kLrxvNfs7r2oNc5DYmNJzayDDdudyCSuTB1gFjH4XBVTIwxiMxL4i059HX+ZiouXg== 151 | dependencies: 152 | stream-chain "^2.2.5" 153 | 154 | tr46@^3.0.0: 155 | version "3.0.0" 156 | resolved "https://registry.yarnpkg.com/tr46/-/tr46-3.0.0.tgz#555c4e297a950617e8eeddef633c87d4d9d6cbf9" 157 | integrity sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA== 158 | dependencies: 159 | punycode "^2.1.1" 160 | 161 | webidl-conversions@^7.0.0: 162 | version "7.0.0" 163 | resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-7.0.0.tgz#256b4e1882be7debbf01d05f0aa2039778ea080a" 164 | integrity sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g== 165 | 166 | whatwg-url@^11.0.0: 167 | version "11.0.0" 168 | resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-11.0.0.tgz#0a849eebb5faf2119b901bb76fd795c2848d4018" 169 | integrity sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ== 170 | dependencies: 171 | tr46 "^3.0.0" 172 | webidl-conversions "^7.0.0" 173 | --------------------------------------------------------------------------------