├── .gitignore ├── MongoDB ├── Aggregation_Framework.md ├── Aggregation_Framework │ ├── Additional_Stages.md │ ├── Array.md │ ├── Bucket.md │ ├── GeoNear.md │ ├── Group.md │ ├── Project.md │ ├── Project │ │ ├── Turning_the_Location_Into_a_geoJSON_Object.md │ │ └── Understanding_the_ISO_Week_Year_Operator.md │ └── Writing_Pipeline_Results_Into_a_New_Collection.md ├── Basic_CRUD,_Projection,_Embedded_Nested_Documents.md ├── Basic_CRUD,_Projection,_Embedded_Nested_Documents │ ├── Array.md │ ├── Create_Insert.md │ ├── Delete.md │ ├── Embedded_Nested_Documents.md │ ├── Projection.md │ ├── Read__find.md │ └── Update.md ├── Create_Insert,_Write_concern,_Atomicity.md ├── Delete.md ├── Exploring_The_Shell_and_The_Server.md ├── Geospatial_Data.md ├── Index_and_Others.md ├── Index_and_Others │ ├── Compound_Index_with_Text.md │ ├── Configuring_Indexes.md │ ├── How_MongoDB_rejects_a_plan.md │ ├── Multi_Key_Indexes.md │ ├── Query_Diagnosis_and_Query_Planning.md │ ├── Text_Indexes(Special_Type_of_Multi-Key_index),_Sor.md │ ├── Time-To-Live(TTL)_Index.md │ └── Using_Indexes_for_Sorting.md ├── Initial_Concepts.md ├── Numeric_Data.md ├── Performance.md ├── Performance_Fault_Tolerancy_Deployment.md ├── Read.md ├── Read │ ├── Array.md │ ├── Comparison.md │ ├── Element.md │ ├── Evaluation_Operators.md │ ├── Logical.md │ └── Projection.md ├── Schema_&_Relations.md ├── Security.md ├── Testing.md ├── Transactions.md └── Update.md └── readme.md /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | -------------------------------------------------------------------------------- /MongoDB/Aggregation_Framework/Additional_Stages.md: -------------------------------------------------------------------------------- 1 | # Additional Stages 2 | 3 | ### Want to find the 10 users, the 10 persons with the oldest birth date, so the lowest birth date 4 | 5 |
6 | See the data format 7 | 8 | ```schema 9 | > db.persons.aggregate([ 10 | { 11 | $project: { 12 | _id: 0, 13 | name: 1, 14 | birthDate: { 15 | $toDate: '$dob.date' 16 | } 17 | } 18 | } 19 | ]).pretty() 20 | 21 | **Output** 22 | [ 23 | { 24 | name: { title: 'mr', first: 'carl', last: 'jacobs' }, 25 | birthDate: ISODate("1984-09-30T01:20:26.000Z") 26 | }, 27 | { 28 | name: { title: 'mr', first: 'harvey', last: 'chambers' }, 29 | birthDate: ISODate("1988-05-27T00:14:03.000Z") 30 | }, 31 | { 32 | name: { title: 'mr', first: 'gideon', last: 'van drongelen' }, 33 | birthDate: ISODate("1971-03-28T04:47:21.000Z") 34 | }, 35 | { 36 | name: { title: 'mr', first: 'zachary', last: 'lo' }, 37 | birthDate: ISODate("1988-10-17T03:45:04.000Z") 38 | }, 39 | { 40 | name: { title: 'miss', first: 'maeva', last: 'wilson' }, 41 | birthDate: ISODate("1962-08-11T20:51:07.000Z") 42 | }, 43 | { 44 | name: { title: 'mrs', first: 'پریا', last: 'پارسا' }, 45 | birthDate: ISODate("1962-01-10T05:26:30.000Z") 46 | }, 47 | { 48 | name: { title: 'mrs', first: 'olav', last: 'oehme' }, 49 | birthDate: ISODate("1960-11-28T23:07:18.000Z") 50 | }, 51 | { 52 | name: { title: 'mr', first: 'elijah', last: 'lewis' }, 53 | birthDate: ISODate("1986-03-29T06:40:18.000Z") 54 | }, 55 | { 56 | name: { title: 'mr', first: 'victor', last: 'pedersen' }, 57 | birthDate: ISODate("1959-02-19T23:56:23.000Z") 58 | }, 59 | { 60 | name: { title: 'ms', first: 'louise', last: 'graham' }, 61 | birthDate: ISODate("1971-01-21T20:36:16.000Z") 62 | }, 63 | { 64 | name: { title: 'mrs', first: 'madeleine', last: 'till' }, 65 | birthDate: ISODate("1954-05-01T02:34:40.000Z") 66 | }, 67 | { 68 | name: { title: 'mr', first: 'isolino', last: 'viana' }, 69 | birthDate: ISODate("1959-03-22T14:53:41.000Z") 70 | }, 71 | { 72 | name: { title: 'miss', first: 'mestan', last: 'kaplangı' }, 73 | birthDate: ISODate("1951-12-17T20:03:33.000Z") 74 | }, 75 | { 76 | name: { title: 'mrs', first: 'katie', last: 'welch' }, 77 | birthDate: 
ISODate("1990-10-14T05:02:12.000Z") 78 | }, 79 | { 80 | name: { title: 'mr', first: 'بنیامین', last: 'سالاری' }, 81 | birthDate: ISODate("1984-03-10T22:12:43.000Z") 82 | }, 83 | { 84 | name: { title: 'miss', first: 'sandra', last: 'lorenzo' }, 85 | birthDate: ISODate("1975-03-23T17:01:45.000Z") 86 | }, 87 | { 88 | name: { title: 'miss', first: 'shona', last: 'kemperman' }, 89 | birthDate: ISODate("1948-04-23T03:40:22.000Z") 90 | }, 91 | { 92 | name: { title: 'madame', first: 'andreia', last: 'arnaud' }, 93 | birthDate: ISODate("1960-01-31T05:16:10.000Z") 94 | }, 95 | { 96 | name: { title: 'mademoiselle', first: 'delia', last: 'durand' }, 97 | birthDate: ISODate("1966-08-03T09:22:41.000Z") 98 | }, 99 | { 100 | name: { title: 'miss', first: 'anne', last: 'ruiz' }, 101 | birthDate: ISODate("1982-10-09T12:10:42.000Z") 102 | } 103 | ] 104 | Type "it" for more 105 | ``` 106 | 107 |
108 | 109 | 110 | ```cpp 111 | > db.persons.aggregate([ 112 | { $project: { _id: 0, name: 1, birthDate: { $toDate: '$dob.date' } } }, 113 | { $sort: { birthDate: 1 } }, 114 | { $limit: 10 } 115 | ]).pretty() 116 | 117 | **Output** 118 | [ 119 | { 120 | name: { title: 'mrs', first: 'victoria', last: 'hale' }, 121 | birthDate: ISODate("1944-09-07T15:52:50.000Z") 122 | }, 123 | { 124 | name: { title: 'mr', first: 'عباس', last: 'یاسمی' }, 125 | birthDate: ISODate("1944-09-12T07:49:20.000Z") 126 | }, 127 | { 128 | name: { title: 'miss', first: 'erundina', last: 'porto' }, 129 | birthDate: ISODate("1944-09-13T14:58:41.000Z") 130 | }, 131 | { 132 | name: { title: 'mr', first: 'پرهام', last: 'جعفری' }, 133 | birthDate: ISODate("1944-09-16T16:03:28.000Z") 134 | }, 135 | { 136 | name: { title: 'mr', first: 'eli', last: 'henry' }, 137 | birthDate: ISODate("1944-09-17T15:04:13.000Z") 138 | }, 139 | { 140 | name: { title: 'mr', first: 'kirk', last: 'brown' }, 141 | birthDate: ISODate("1944-09-18T11:03:05.000Z") 142 | }, 143 | { 144 | name: { title: 'miss', first: 'alexis', last: 'bélanger' }, 145 | birthDate: ISODate("1944-10-02T22:56:32.000Z") 146 | }, 147 | { 148 | name: { title: 'miss', first: 'gina', last: 'beck' }, 149 | birthDate: ISODate("1944-10-04T07:41:31.000Z") 150 | }, 151 | { 152 | name: { title: 'mr', first: 'sebastian', last: 'olsen' }, 153 | birthDate: ISODate("1944-10-13T15:29:05.000Z") 154 | }, 155 | { 156 | name: { title: 'miss', first: 'lucy', last: 'wilson' }, 157 | birthDate: ISODate("1944-10-25T16:27:56.000Z") 158 | } 159 | ] 160 | ``` 161 | 162 | Concatenating first and last name and creating new field name. 
163 | 164 | ```bash 165 | > db.persons.aggregate([ 166 | { $project: { _id: 0, name: {$concat:['$name.first', ' ', '$name.last']}, birthDate: { $toDate: '$dob.date' } } }, 167 | { $sort: { birthDate: 1 } }, 168 | { $limit: 10 } 169 | ]).pretty() 170 | 171 | **Output** 172 | [ 173 | { 174 | name: 'victoria hale', 175 | birthDate: ISODate("1944-09-07T15:52:50.000Z") 176 | }, 177 | { 178 | name: 'عباس یاسمی', 179 | birthDate: ISODate("1944-09-12T07:49:20.000Z") 180 | }, 181 | { 182 | name: 'erundina porto', 183 | birthDate: ISODate("1944-09-13T14:58:41.000Z") 184 | }, 185 | { 186 | name: 'پرهام جعفری', 187 | birthDate: ISODate("1944-09-16T16:03:28.000Z") 188 | }, 189 | { name: 'eli henry', birthDate: ISODate("1944-09-17T15:04:13.000Z") }, 190 | { 191 | name: 'kirk brown', 192 | birthDate: ISODate("1944-09-18T11:03:05.000Z") 193 | }, 194 | { 195 | name: 'alexis bélanger', 196 | birthDate: ISODate("1944-10-02T22:56:32.000Z") 197 | }, 198 | { name: 'gina beck', birthDate: ISODate("1944-10-04T07:41:31.000Z") }, 199 | { 200 | name: 'sebastian olsen', 201 | birthDate: ISODate("1944-10-13T15:29:05.000Z") 202 | }, 203 | { 204 | name: 'lucy wilson', 205 | birthDate: ISODate("1944-10-25T16:27:56.000Z") 206 | } 207 | ] 208 | ``` 209 | 210 | Skip first 10 211 | 212 | ```cpp 213 | > db.persons.aggregate([ 214 | { $project: { _id: 0, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 215 | { $sort: { birthDate: 1 } }, 216 | { $skip: 10}, 217 | { $limit: 10 } 218 | ]).pretty() 219 | 220 | **Output** 221 | [ 222 | { 223 | name: 'eva murray', 224 | birthDate: ISODate("1944-10-29T02:05:56.000Z") 225 | }, 226 | { 227 | name: 'elena chevalier', 228 | birthDate: ISODate("1944-10-31T02:56:40.000Z") 229 | }, 230 | { 231 | name: 'gretchen schmidtke', 232 | birthDate: ISODate("1944-11-01T20:49:03.000Z") 233 | }, 234 | { 235 | name: 'joseph thomas', 236 | birthDate: ISODate("1944-11-06T11:08:45.000Z") 237 | }, 238 | { name: 'sarah lee', birthDate: 
ISODate("1944-11-07T07:53:47.000Z") }, 239 | { 240 | name: 'conrad scheepbouwer', 241 | birthDate: ISODate("1944-11-08T02:15:17.000Z") 242 | }, 243 | { 244 | name: 'martina charles', 245 | birthDate: ISODate("1944-11-08T07:38:49.000Z") 246 | }, 247 | { 248 | name: 'olga blanco', 249 | birthDate: ISODate("1944-11-17T09:16:50.000Z") 250 | }, 251 | { 252 | name: 'elisa morales', 253 | birthDate: ISODate("1944-11-22T22:51:47.000Z") 254 | }, 255 | { 256 | name: 'rafael velasco', 257 | birthDate: ISODate("1944-11-27T07:12:20.000Z") 258 | } 259 | ] 260 | ``` 261 | 262 | But after `$skip` after `$limit` it does not work as first we take 10 persons in `{ $limit: 10 },` and we skip 10 persons `{ $skip: 10 }` so we do not have any person left to show. 263 | 264 | ```cpp 265 | > db.persons.aggregate([ 266 | { $project: { _id: 0, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 267 | { $sort: { birthDate: 1 } }, 268 | { $limit: 10 }, 269 | { $skip: 10 } 270 | ]).pretty() 271 | 272 | **Output** 273 | ``` 274 | 275 | So if we `$limit` 20 first and then `$skip` 10 we will get 10 output as by `$limit` 20 we are getting 20 persons documents and after `$skip` 10 we left with 10 documents. 
276 | 277 | ```bash 278 | > db.persons.aggregate([ 279 | { $project: { _id: 0, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 280 | { $sort: { birthDate: 1 } }, 281 | { $limit: 20 }, 282 | { $skip: 10 } 283 | ]).pretty() 284 | 285 | **Output** 286 | [ 287 | { 288 | name: 'eva murray', 289 | birthDate: ISODate("1944-10-29T02:05:56.000Z") 290 | }, 291 | { 292 | name: 'elena chevalier', 293 | birthDate: ISODate("1944-10-31T02:56:40.000Z") 294 | }, 295 | { 296 | name: 'gretchen schmidtke', 297 | birthDate: ISODate("1944-11-01T20:49:03.000Z") 298 | }, 299 | { 300 | name: 'joseph thomas', 301 | birthDate: ISODate("1944-11-06T11:08:45.000Z") 302 | }, 303 | { name: 'sarah lee', birthDate: ISODate("1944-11-07T07:53:47.000Z") }, 304 | { 305 | name: 'conrad scheepbouwer', 306 | birthDate: ISODate("1944-11-08T02:15:17.000Z") 307 | }, 308 | { 309 | name: 'martina charles', 310 | birthDate: ISODate("1944-11-08T07:38:49.000Z") 311 | }, 312 | { 313 | name: 'olga blanco', 314 | birthDate: ISODate("1944-11-17T09:16:50.000Z") 315 | }, 316 | { 317 | name: 'elisa morales', 318 | birthDate: ISODate("1944-11-22T22:51:47.000Z") 319 | }, 320 | { 321 | name: 'rafael velasco', 322 | birthDate: ISODate("1944-11-27T07:12:20.000Z") 323 | } 324 | ] 325 | ``` 326 | 327 | If add sort into last can see the different result. As we first `$skip` 10 documents and take 10 documents from collection and then `$sort` those 10 documents so this will not give us the wanted answer. 
328 | 329 | ```cpp 330 | > db.persons.aggregate([ 331 | { $project: { _id: 0, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 332 | { $skip: 10 }, 333 | { $limit: 10 }, 334 | { $sort: { birthDate: 1 } }, 335 | ]).pretty() 336 | 337 | **Output** 338 | [ 339 | { 340 | name: 'shona kemperman', 341 | birthDate: ISODate("1948-04-23T03:40:22.000Z") 342 | }, 343 | { 344 | name: 'mestan kaplangı', 345 | birthDate: ISODate("1951-12-17T20:03:33.000Z") 346 | }, 347 | { 348 | name: 'madeleine till', 349 | birthDate: ISODate("1954-05-01T02:34:40.000Z") 350 | }, 351 | { 352 | name: 'isolino viana', 353 | birthDate: ISODate("1959-03-22T14:53:41.000Z") 354 | }, 355 | { 356 | name: 'andreia arnaud', 357 | birthDate: ISODate("1960-01-31T05:16:10.000Z") 358 | }, 359 | { 360 | name: 'delia durand', 361 | birthDate: ISODate("1966-08-03T09:22:41.000Z") 362 | }, 363 | { 364 | name: 'sandra lorenzo', 365 | birthDate: ISODate("1975-03-23T17:01:45.000Z") 366 | }, 367 | { name: 'anne ruiz', birthDate: ISODate("1982-10-09T12:10:42.000Z") }, 368 | { 369 | name: 'بنیامین سالاری', 370 | birthDate: ISODate("1984-03-10T22:12:43.000Z") 371 | }, 372 | { 373 | name: 'katie welch', 374 | birthDate: ISODate("1990-10-14T05:02:12.000Z") 375 | } 376 | ] 377 | ``` 378 | 379 | Same also for `$match` . We want 10 oldest mans documents but as discussed previously it will not give right answer. As pipeline order is not correct. 380 | 381 | ```cpp 382 | > db.persons.aggregate([ 383 | { $match: { gender: 'male' } }, 384 | { $project: { _id: 0, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 385 | { $skip: 10 }, 386 | { $limit: 10 }, 387 | { $sort: { birthDate: 1 } } 388 | ]).pretty() 389 | ``` 390 | 391 | If `$match` added after the project without projection, we do not get any result. As we do not add gender in `$project` in first line so all stage below this do not get the gender field. 
392 | 393 | ```cpp 394 | > db.persons.aggregate([ 395 | { $project: { _id: 0, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 396 | { $sort: { birthDate: 1 } }, 397 | { $match: { gender: 'male' } }, 398 | { $skip: 10 }, 399 | { $limit: 10 }, 400 | ]).pretty() 401 | 402 | **Output** 403 | ``` 404 | 405 | If gender add into projection phase then will get results 406 | 407 | ```cpp 408 | > db.persons.aggregate([ 409 | { $project: { _id: 0, gender: 1, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 410 | { $sort: { birthDate: 1 } }, 411 | { $match: { gender: 'male' } }, 412 | { $skip: 10 }, 413 | { $limit: 10 }, 414 | ]).pretty() 415 | 416 | **Output** 417 | [ 418 | { 419 | gender: 'male', 420 | name: 'pierre boyer', 421 | birthDate: ISODate("1945-01-01T22:35:55.000Z") 422 | }, 423 | { 424 | gender: 'male', 425 | name: 'emile noel', 426 | birthDate: ISODate("1945-01-10T03:05:21.000Z") 427 | }, 428 | { 429 | gender: 'male', 430 | name: 'torgeir apeland', 431 | birthDate: ISODate("1945-01-13T17:04:33.000Z") 432 | }, 433 | { 434 | gender: 'male', 435 | name: 'igor kvistad', 436 | birthDate: ISODate("1945-01-17T22:13:14.000Z") 437 | }, 438 | { 439 | gender: 'male', 440 | name: 'mariusz gabler', 441 | birthDate: ISODate("1945-01-22T06:16:30.000Z") 442 | }, 443 | { 444 | gender: 'male', 445 | name: 'lewis freeman', 446 | birthDate: ISODate("1945-01-28T20:15:28.000Z") 447 | }, 448 | { 449 | gender: 'male', 450 | name: 'theodore moore', 451 | birthDate: ISODate("1945-02-10T03:34:29.000Z") 452 | }, 453 | { 454 | gender: 'male', 455 | name: 'florian mercier', 456 | birthDate: ISODate("1945-02-22T04:18:31.000Z") 457 | }, 458 | { 459 | gender: 'male', 460 | name: 'dursun schellekens', 461 | birthDate: ISODate("1945-02-22T07:28:00.000Z") 462 | }, 463 | { 464 | gender: 'male', 465 | name: 'marcel rey', 466 | birthDate: ISODate("1945-02-28T02:18:01.000Z") 467 | } 468 | ] 469 | ``` 470 | 
471 | But best is, use `$match` before `$project` . As we do not documents which gender is female so if we filtered before projection or any other pipeline then those pipeline have to run operation on fewer data as we already we reduced data by match. 472 | 473 | ```cpp 474 | > db.persons.aggregate([ 475 | { $match: { gender: 'male' } }, 476 | { $project: { _id: 0, gender: 1, name: { $concat: ['$name.first', ' ', '$name.last'] }, birthDate: { $toDate: '$dob.date' } } }, 477 | { $sort: { birthDate: 1 } }, 478 | { $skip: 10 }, 479 | { $limit: 10 }, 480 | ]).pretty() 481 | ``` -------------------------------------------------------------------------------- /MongoDB/Aggregation_Framework/Bucket.md: -------------------------------------------------------------------------------- 1 | # Bucket 2 | 3 | 4 | >💡 Categorizes incoming documents into groups, called buckets, based on a specified expression and bucket boundaries and outputs a document per each bucket. Also can perform some statistics. 5 | 6 | 7 | Let's prepare a bucket stage, using bucket can create a different categories and filter 8 | 9 | Boundaries means range/levels like 0-18, 18-30,30-50, 50-80, 80-120, in every range includes first value execute not last value, 18-30 ⇒ means 18 includes in the range but not includes 30. There are noting in 0 - 18 and 80 to 120 range so the following query give as only three bucket. 10 | 11 | > `{ "_id" : 18, "numPersons" : 868, "average" : 25.101382488479263 } => 18 to less than 30`
12 | > `{ "_id" : 30, "numPersons" : 1828, "average" : 39.4917943107221 } => 30 to less than 50`
13 | > `{ "_id" : 50, "numPersons" : 2304, "average" : 61.46440972222222 } => 50 to all upperbound` 14 | 15 | ```cpp 16 | > db.persons.aggregate([ 17 | { 18 | $bucket: { 19 | groupBy: '$dob.age', 20 | boundaries: [0, 18, 30, 50, 80, 120], 21 | output: { 22 | numPersons: { $sum: 1 }, 23 | average: { $avg: '$dob.age' }, 24 | } 25 | } 26 | } 27 | ]).pretty() 28 | 29 | **Output** 30 | { "_id" : 18, "numPersons" : 868, "average" : 25.101382488479263 } 31 | { "_id" : 30, "numPersons" : 1828, "average" : 39.4917943107221 } 32 | { "_id" : 50, "numPersons" : 2304, "average" : 61.46440972222222 } 33 | ``` 34 | 35 | There are no people less than 18 and greater than 80 and equals to 80. Output noting as no data satisfy those conditions. 36 | 37 | ```cpp 38 | > db.persons.find({'dob.age': {$lt: 18}}) 39 | **Output** 40 | 41 | > db.persons.find({'dob.age': {$gt: 80}}) 42 | **Output** 43 | 44 | > db.persons.find({'dob.age': 80}) 45 | **Ouput** 46 | ``` 47 | 48 | ```cpp 49 | > db.persons.find({'dob.age': {$gt: 17, $lt: 30}}).count() 50 | 868 51 | 52 | > db.persons.find({'dob.age': {$gt: 29, $lt: 50}}).count() 53 | 1828 54 | 55 | > db.persons.find({'dob.age': {$gt: 49, $lt: 80}}).count() 56 | 2304 57 | ``` 58 | 59 | Adding more levels 60 | 61 | > `{ _id: 18, numPersons: 868, average: 25.101382488479263 }, => 18 to less than 30`
62 | > `{ _id: 30, numPersons: 910, average: 34.51758241758242 }, => 30 to less than 40`
63 | > `{ _id: 40, numPersons: 918, average: 44.42265795206972 }, => 40 to less than 50`
64 | > `{ _id: 50, numPersons: 976, average: 54.533811475409834 }, => 50 to less than 60`
65 | > `{ _id: 60, numPersons: 1328, average: 66.55798192771084 } ⇒ 60 to all upper-bound` 66 | 67 | ```cpp 68 | > db.persons.aggregate([ 69 | { 70 | $bucket: { 71 | groupBy: '$dob.age', 72 | boundaries: [18, 30, 40, 50, 60, 120], 73 | output: { 74 | numPersons: { $sum: 1 }, 75 | average: { $avg: '$dob.age' }, 76 | } 77 | } 78 | } 79 | ]).pretty() 80 | 81 | **Output** 82 | [ 83 | { _id: 18, numPersons: 868, average: 25.101382488479263 }, 84 | { _id: 30, numPersons: 910, average: 34.51758241758242 }, 85 | { _id: 40, numPersons: 918, average: 44.42265795206972 }, 86 | { _id: 50, numPersons: 976, average: 54.533811475409834 }, 87 | { _id: 60, numPersons: 1328, average: 66.55798192771084 } 88 | ] 89 | ``` 90 | 91 | Can also create a auto bucket by defining how many buckets want, almost have equal distributions in each buckets. 92 | 93 | ```cpp 94 | > db.persons.aggregate([ 95 | { 96 | $bucketAuto: { 97 | groupBy: '$dob.age', 98 | buckets: 5, 99 | output: { 100 | numPersons: { $sum: 1 }, 101 | average: { $avg: '$dob.age' }, 102 | } 103 | } 104 | } 105 | ]).pretty() 106 | 107 | { 108 | "_id" : { 109 | "min" : 21, 110 | "max" : 32 111 | }, 112 | "numPersons" : 1042, 113 | "average" : 25.99616122840691 114 | } 115 | { 116 | "_id" : { 117 | "min" : 32, 118 | "max" : 43 119 | }, 120 | "numPersons" : 1010, 121 | "average" : 36.97722772277228 122 | } 123 | { 124 | "_id" : { 125 | "min" : 43, 126 | "max" : 54 127 | }, 128 | "numPersons" : 1033, 129 | "average" : 47.98838334946757 130 | } 131 | { 132 | "_id" : { 133 | "min" : 54, 134 | "max" : 65 135 | }, 136 | "numPersons" : 1064, 137 | "average" : 58.99342105263158 138 | } 139 | { 140 | "_id" : { 141 | "min" : 65, 142 | "max" : 74 143 | }, 144 | "numPersons" : 851, 145 | "average" : 69.11515863689776 146 | } 147 | ``` -------------------------------------------------------------------------------- /MongoDB/Aggregation_Framework/GeoNear.md: -------------------------------------------------------------------------------- 1 | # 
GeoNear 2 | 3 | # Working with the geoNear Stage 4 | 5 | First create an index into the transformedPersons Collection 6 | 7 | ```cpp 8 | > db.transformedPersons.createIndex({location: '2dsphere'}) 9 | 10 | **Output** 11 | location_2dsphere 12 | ``` 13 | 14 | Create geo location aggregation pipeline stages 15 | 16 | have to specify and that is the distance field, because `geoNear` will actually also give us back the distance that is calculated between our point and the document 17 | 18 | `geoNear`, it has to be the first element in the pipeline because it needs to use that index and the **first pipeline** element is the **only element** with **direct access** to the **collection**, other pipeline stages just get the output of the previous pipeline stage, this is the only element with **direct access to the collection**. Also can add query. `maxDistance` in meter. 19 | 20 | ```cpp 21 | > db.transformedPersons.aggregate([ 22 | { 23 | $geoNear: { 24 | near: { 25 | type: 'Point', 26 | coordinates: [-18.4, -42.8] 27 | }, 28 | maxDistance: 1000000, 29 | $limit: 10, 30 | query: { age: { $gt: 30 } }, 31 | distanceField: 'distance' 32 | } 33 | } 34 | ]).pretty() 35 | 36 | **Output** 37 | [ 38 | { 39 | _id: ObjectId("634bb5cf1f059ab964bafc0b"), 40 | location: { type: 'Point', coordinates: [ -18.5996, -42.6128 ] }, 41 | email: 'elijah.lewis@example.com', 42 | birthdate: ISODate("1986-03-29T06:40:18.000Z"), 43 | age: 32, 44 | fullName: 'Elijah Lewis', 45 | distance: 26473.52536319881 46 | }, 47 | { 48 | _id: ObjectId("634bb5cf1f059ab964bb0786"), 49 | location: { type: 'Point', coordinates: [ -16.8251, -41.9369 ] }, 50 | email: 'delores.thompson@example.com', 51 | birthdate: ISODate("1984-04-11T07:34:45.000Z"), 52 | age: 34, 53 | fullName: 'Delores Thompson', 54 | distance: 161267.42830913173 55 | }, 56 | { 57 | _id: ObjectId("634bb5cf1f059ab964bb0ac6"), 58 | location: { type: 'Point', coordinates: [ -19.5492, -44.8346 ] }, 59 | email: 'kajus.moldskred@example.com', 60 | 
birthdate: ISODate("1978-09-12T00:25:23.000Z"), 61 | age: 39, 62 | fullName: 'Kajus Moldskred', 63 | distance: 244569.7553327739 64 | }, 65 | { 66 | _id: ObjectId("634bb5cf1f059ab964bb07a5"), 67 | location: { type: 'Point', coordinates: [ -20.6738, -40.2524 ] }, 68 | email: 'christian.møller@example.com', 69 | birthdate: ISODate("1967-07-18T04:08:25.000Z"), 70 | age: 51, 71 | fullName: 'Christian Møller', 72 | distance: 341047.8914183129 73 | }, 74 | { 75 | _id: ObjectId("634bb5cf1f059ab964baff3a"), 76 | location: { type: 'Point', coordinates: [ -12.8517, -44.2241 ] }, 77 | email: 'kübra.oraloğlu@example.com', 78 | birthdate: ISODate("1981-03-12T02:46:43.000Z"), 79 | age: 37, 80 | fullName: 'Kübra Oraloğlu', 81 | distance: 475031.1813780212 82 | }, 83 | { 84 | _id: ObjectId("634bb5cf1f059ab964bb08e6"), 85 | location: { type: 'Point', coordinates: [ -24.1976, -42.2063 ] }, 86 | email: 'gökhan.topaloğlu@example.com', 87 | birthdate: ISODate("1954-04-17T19:24:48.000Z"), 88 | age: 64, 89 | fullName: 'Gökhan Topaloğlu', 90 | distance: 480270.5071752364 91 | }, 92 | { 93 | _id: ObjectId("634bb5cf1f059ab964bb07e6"), 94 | location: { type: 'Point', coordinates: [ -15.6018, -38.2254 ] }, 95 | email: 'ayşe.eliçin@example.com', 96 | birthdate: ISODate("1959-02-26T17:16:38.000Z"), 97 | age: 59, 98 | fullName: 'Ayşe Eliçin', 99 | distance: 561521.5914865345 100 | }, 101 | { 102 | _id: ObjectId("634bb5cf1f059ab964bb0380"), 103 | location: { type: 'Point', coordinates: [ -23.0621, -47.0624 ] }, 104 | email: 'chloe.ennis@example.com', 105 | birthdate: ISODate("1956-07-16T00:28:06.000Z"), 106 | age: 62, 107 | fullName: 'Chloe Ennis', 108 | distance: 599870.3100224738 109 | }, 110 | { 111 | _id: ObjectId("634bb5cf1f059ab964bb0ea2"), 112 | location: { type: 'Point', coordinates: [ -26.0729, -42.8626 ] }, 113 | email: 'kuzey.berberoğlu@example.com', 114 | birthdate: ISODate("1984-08-27T19:20:20.000Z"), 115 | age: 34, 116 | fullName: 'Kuzey Berberoğlu', 117 | distance: 626211.846542541 
118 | }, 119 | { 120 | _id: ObjectId("634bb5cf1f059ab964bb06ae"), 121 | location: { type: 'Point', coordinates: [ -10.6398, -41.7477 ] }, 122 | email: 'nellie.chapman@example.com', 123 | birthdate: ISODate("1982-12-27T07:32:35.000Z"), 124 | age: 35, 125 | fullName: 'Nellie Chapman', 126 | distance: 649597.0679432369 127 | }, 128 | { 129 | _id: ObjectId("634bb5cf1f059ab964bb0870"), 130 | location: { type: 'Point', coordinates: [ -14.6511, -36.9876 ] }, 131 | email: 'natão.nascimento@example.com', 132 | birthdate: ISODate("1955-10-23T04:21:38.000Z"), 133 | age: 62, 134 | fullName: 'Natão Nascimento', 135 | distance: 721718.9326148127 136 | }, 137 | { 138 | _id: ObjectId("634bb5cf1f059ab964bb09f4"), 139 | location: { type: 'Point', coordinates: [ -27.2914, -43.7096 ] }, 140 | email: 'silje.christensen@example.com', 141 | birthdate: ISODate("1946-09-18T13:46:00.000Z"), 142 | age: 71, 143 | fullName: 'Silje Christensen', 144 | distance: 727582.9314351011 145 | }, 146 | { 147 | _id: ObjectId("634bb5cf1f059ab964bb0621"), 148 | location: { type: 'Point', coordinates: [ -15.4943, -36.3461 ] }, 149 | email: 'پرنیا.سهيليراد@example.com', 150 | birthdate: ISODate("1947-03-06T12:26:25.000Z"), 151 | age: 71, 152 | fullName: 'پرنیا سهيلي راد', 153 | distance: 760338.5829500874 154 | }, 155 | { 156 | _id: ObjectId("634bb5cf1f059ab964bb0cc0"), 157 | location: { type: 'Point', coordinates: [ -24.7805, -48.0386 ] }, 158 | email: 'naomi.castillo@example.com', 159 | birthdate: ISODate("1975-04-29T06:55:56.000Z"), 160 | age: 43, 161 | fullName: 'Naomi Castillo', 162 | distance: 766668.6621958854 163 | }, 164 | { 165 | _id: ObjectId("634bb5cf1f059ab964bb07c8"), 166 | location: { type: 'Point', coordinates: [ -10.0214, -39.3665 ] }, 167 | email: 'lara.dehooge@example.com', 168 | birthdate: ISODate("1979-03-19T21:16:26.000Z"), 169 | age: 39, 170 | fullName: 'Lara De hooge', 171 | distance: 799665.4886170039 172 | }, 173 | { 174 | _id: ObjectId("634bb5cf1f059ab964bb0757"), 175 | location: { 
type: 'Point', coordinates: [ -17.255, -50.3131 ] }, 176 | email: 'lumi.wirta@example.com', 177 | birthdate: ISODate("1962-12-06T13:22:54.000Z"), 178 | age: 55, 179 | fullName: 'Lumi Wirta', 180 | distance: 840901.0420234643 181 | }, 182 | { 183 | _id: ObjectId("634bb5cf1f059ab964bafcc5"), 184 | location: { type: 'Point', coordinates: [ -28.6565, -44.6573 ] }, 185 | email: 'رضا.سالاری@example.com', 186 | birthdate: ISODate("1972-02-27T22:08:54.000Z"), 187 | age: 46, 188 | fullName: 'رضا سالاری', 189 | distance: 849915.7332153537 190 | }, 191 | { 192 | _id: ObjectId("634bb5cf1f059ab964bb08ae"), 193 | location: { type: 'Point', coordinates: [ -7.5113, -43.1898 ] }, 194 | email: 'zoe.hall@example.com', 195 | birthdate: ISODate("1945-06-30T22:42:58.000Z"), 196 | age: 73, 197 | fullName: 'Zoe Hall', 198 | distance: 886994.5551556123 199 | }, 200 | { 201 | _id: ObjectId("634bb5cf1f059ab964bb0c2e"), 202 | location: { type: 'Point', coordinates: [ -16.5148, -34.6911 ] }, 203 | email: 'justin.moulin@example.com', 204 | birthdate: ISODate("1961-06-01T15:51:52.000Z"), 205 | age: 57, 206 | fullName: 'Justin Moulin', 207 | distance: 917320.3765606781 208 | } 209 | ] 210 | ``` 211 | 212 | Can also add multiple pipeline stages 213 | 214 | ```bash 215 | db.transformedPersons.aggregate([ 216 | { 217 | $geoNear: { 218 | near: { 219 | type: 'Point', 220 | coordinates: [-18.4, -42.8] 221 | }, 222 | maxDistance: 1000000, 223 | $limit: 10, 224 | query: { age: { $gt: 30 } }, 225 | distanceField: 'distance' 226 | } 227 | }, 228 | { $project: { _id: 0, email: 0, birthdate: 0 } }, 229 | { $sort: { distanceField: 1 } }, 230 | ]).pretty() 231 | 232 | **Output** 233 | [ 234 | { 235 | location: { type: 'Point', coordinates: [ -18.5996, -42.6128 ] }, 236 | age: 32, 237 | fullName: 'Elijah Lewis', 238 | distance: 26473.52536319881 239 | }, 240 | { 241 | location: { type: 'Point', coordinates: [ -16.8251, -41.9369 ] }, 242 | age: 34, 243 | fullName: 'Delores Thompson', 244 | distance: 
161267.42830913173 245 | }, 246 | { 247 | location: { type: 'Point', coordinates: [ -19.5492, -44.8346 ] }, 248 | age: 39, 249 | fullName: 'Kajus Moldskred', 250 | distance: 244569.7553327739 251 | }, 252 | { 253 | location: { type: 'Point', coordinates: [ -20.6738, -40.2524 ] }, 254 | age: 51, 255 | fullName: 'Christian Møller', 256 | distance: 341047.8914183129 257 | }, 258 | { 259 | location: { type: 'Point', coordinates: [ -12.8517, -44.2241 ] }, 260 | age: 37, 261 | fullName: 'Kübra Oraloğlu', 262 | distance: 475031.1813780212 263 | }, 264 | { 265 | location: { type: 'Point', coordinates: [ -24.1976, -42.2063 ] }, 266 | age: 64, 267 | fullName: 'Gökhan Topaloğlu', 268 | distance: 480270.5071752364 269 | }, 270 | { 271 | location: { type: 'Point', coordinates: [ -15.6018, -38.2254 ] }, 272 | age: 59, 273 | fullName: 'Ayşe Eliçin', 274 | distance: 561521.5914865345 275 | }, 276 | { 277 | location: { type: 'Point', coordinates: [ -23.0621, -47.0624 ] }, 278 | age: 62, 279 | fullName: 'Chloe Ennis', 280 | distance: 599870.3100224738 281 | }, 282 | { 283 | location: { type: 'Point', coordinates: [ -26.0729, -42.8626 ] }, 284 | age: 34, 285 | fullName: 'Kuzey Berberoğlu', 286 | distance: 626211.846542541 287 | }, 288 | { 289 | location: { type: 'Point', coordinates: [ -10.6398, -41.7477 ] }, 290 | age: 35, 291 | fullName: 'Nellie Chapman', 292 | distance: 649597.0679432369 293 | }, 294 | { 295 | location: { type: 'Point', coordinates: [ -14.6511, -36.9876 ] }, 296 | age: 62, 297 | fullName: 'Natão Nascimento', 298 | distance: 721718.9326148127 299 | }, 300 | { 301 | location: { type: 'Point', coordinates: [ -27.2914, -43.7096 ] }, 302 | age: 71, 303 | fullName: 'Silje Christensen', 304 | distance: 727582.9314351011 305 | }, 306 | { 307 | location: { type: 'Point', coordinates: [ -15.4943, -36.3461 ] }, 308 | age: 71, 309 | fullName: 'پرنیا سهيلي راد', 310 | distance: 760338.5829500874 311 | }, 312 | { 313 | location: { type: 'Point', coordinates: [ -24.7805, 
-48.0386 ] }, 314 | age: 43, 315 | fullName: 'Naomi Castillo', 316 | distance: 766668.6621958854 317 | }, 318 | { 319 | location: { type: 'Point', coordinates: [ -10.0214, -39.3665 ] }, 320 | age: 39, 321 | fullName: 'Lara De hooge', 322 | distance: 799665.4886170039 323 | }, 324 | { 325 | location: { type: 'Point', coordinates: [ -17.255, -50.3131 ] }, 326 | age: 55, 327 | fullName: 'Lumi Wirta', 328 | distance: 840901.0420234643 329 | }, 330 | { 331 | location: { type: 'Point', coordinates: [ -28.6565, -44.6573 ] }, 332 | age: 46, 333 | fullName: 'رضا سالاری', 334 | distance: 849915.7332153537 335 | }, 336 | { 337 | location: { type: 'Point', coordinates: [ -7.5113, -43.1898 ] }, 338 | age: 73, 339 | fullName: 'Zoe Hall', 340 | distance: 886994.5551556123 341 | }, 342 | { 343 | location: { type: 'Point', coordinates: [ -16.5148, -34.6911 ] }, 344 | age: 57, 345 | fullName: 'Justin Moulin', 346 | distance: 917320.3765606781 347 | } 348 | ] 349 | ``` -------------------------------------------------------------------------------- /MongoDB/Aggregation_Framework/Group.md: -------------------------------------------------------------------------------- 1 | # Group 2 | 3 | # Understanding the Group Stage 4 | 5 | **The group stage allows a group of data by certain fields or by multiple fields.** Have to add `$` sign before selected query document, here accumulate by 6 | 1 --> increasing value 7 | -1 -> decreasing value 8 | 9 | `totalPersons` is the value that how many people are in the same state 10 | `_id` is a unique value ⇒ take which field want to group will be a document. 
11 | can not use group into `find()` method 12 | `$location.state` means iterating every element 13 | 14 | ```cpp 15 | db.persons.aggregate([ 16 | { $match: { gender: 'female' } }, 17 | { $group: 18 | { _id: 19 | { state: "$location.state" }, 20 | totalPersons: { $sum: 1} 21 | } 22 | } 23 | ]).pretty() 24 | ``` 25 | 26 | This is the group stage in action 27 | Here we get the data with unsorted order 28 | Can also be sorted 29 | 30 | ```cpp 31 | { "_id" : { "state" : "berkshire" }, "totalPersons" : 1 } 32 | { "_id" : { "state" : "michigan" }, "totalPersons" : 1 } 33 | { "_id" : { "state" : "county down" }, "totalPersons" : 1 } 34 | { "_id" : { "state" : "loiret" }, "totalPersons" : 1 } 35 | { "_id" : { "state" : "cornwall" }, "totalPersons" : 2 } 36 | { "_id" : { "state" : "sivas" }, "totalPersons" : 1 } 37 | { "_id" : { "state" : "uşak" }, "totalPersons" : 1 } 38 | { "_id" : { "state" : "sinop" }, "totalPersons" : 3 } 39 | { "_id" : { "state" : "marne" }, "totalPersons" : 1 } 40 | { "_id" : { "state" : "northumberland" }, "totalPersons" : 1 } 41 | { "_id" : { "state" : "leicestershire" }, "totalPersons" : 1 } 42 | { "_id" : { "state" : "puy-de-dôme" }, "totalPersons" : 1 } 43 | { "_id" : { "state" : "maryland" }, "totalPersons" : 1 } 44 | { "_id" : { "state" : "ardèche" }, "totalPersons" : 1 } 45 | { "_id" : { "state" : "ankara" }, "totalPersons" : 3 } 46 | { "_id" : { "state" : "dordogne" }, "totalPersons" : 1 } 47 | { "_id" : { "state" : "antalya" }, "totalPersons" : 1 } 48 | { "_id" : { "state" : "corrèze" }, "totalPersons" : 1 } 49 | { "_id" : { "state" : "ardennes" }, "totalPersons" : 1 } 50 | { "_id" : { "state" : "bas-rhin" }, "totalPersons" : 2 } 51 | Type "it" for more 52 | ``` 53 | 54 | To check aggregation function work correctly 55 | 56 | ```cpp 57 | db.persons.find({'location.state': 'sinop', gender: 'female'}).count() 58 | 3 59 | ``` 60 | 61 | Let also sort the group stage values according to `totalPersons` when executing the query 62 | Sorting is 
done from the previous stage. `$sort` has the data of `$group` stage and `$group` state has data of `$match` state. In normal `find`, we can not perform operations on the previous data stage. We have to sort in the client side for the `find`. But using aggregate we are doing this in MongoDB shell. 63 | 64 | ```cpp 65 | db.persons.aggregate([ 66 | { $match: { gender: 'female' } }, 67 | { $group: { 68 | _id: { state: "$location.state" }, 69 | totalPersons: { $sum: 1 } } 70 | }, 71 | { $sort: { totalPersons: -1 } } 72 | ]).pretty() 73 | **Output** 74 | { "_id" : { "state" : "midtjylland" }, "totalPersons" : 33 } 75 | { "_id" : { "state" : "nordjylland" }, "totalPersons" : 27 } 76 | { "_id" : { "state" : "new south wales" }, "totalPersons" : 24 } 77 | { "_id" : { "state" : "australian capital territory" }, "totalPersons" : 24 } 78 | { "_id" : { "state" : "syddanmark" }, "totalPersons" : 24 } 79 | { "_id" : { "state" : "south australia" }, "totalPersons" : 22 } 80 | { "_id" : { "state" : "hovedstaden" }, "totalPersons" : 21 } 81 | { "_id" : { "state" : "danmark" }, "totalPersons" : 21 } 82 | { "_id" : { "state" : "queensland" }, "totalPersons" : 20 } 83 | { "_id" : { "state" : "overijssel" }, "totalPersons" : 20 } 84 | { "_id" : { "state" : "sjælland" }, "totalPersons" : 19 } 85 | { "_id" : { "state" : "nova scotia" }, "totalPersons" : 17 } 86 | { "_id" : { "state" : "canterbury" }, "totalPersons" : 16 } 87 | { "_id" : { "state" : "northwest territories" }, "totalPersons" : 16 } 88 | { "_id" : { "state" : "gelderland" }, "totalPersons" : 16 } 89 | { "_id" : { "state" : "yukon" }, "totalPersons" : 16 } 90 | { "_id" : { "state" : "bayern" }, "totalPersons" : 15 } 91 | { "_id" : { "state" : "northern territory" }, "totalPersons" : 15 } 92 | { "_id" : { "state" : "tasmania" }, "totalPersons" : 15 } 93 | { "_id" : { "state" : "noord-brabant" }, "totalPersons" : 14 } 94 | Type "it" for more 95 | ``` 96 | 97 | Check if the answer is correctly 98 | 99 | ```cpp 100 | 
db.persons.find({'location.state': 'midtjylland', gender: 'female'}).count() 101 | 33 102 | ``` -------------------------------------------------------------------------------- /MongoDB/Aggregation_Framework/Project.md: -------------------------------------------------------------------------------- 1 | # Project 2 | 3 | # Working with Project Stage 4 | 5 | The project works in the same way as the projection works in the `find()` method 6 | 7 | ```cpp 8 | "gender" : "male", 9 | "name" : { 10 | "title" : "mr", 11 | "first" : "harvey", 12 | "last" : "chambers" 13 | }, 14 | ``` 15 | 16 | Full list to all want to convert the name into one document project does not group multiple documents together, it's just transforms every single document 17 | 18 | ```cpp 19 | > db.persons.aggregate([ 20 | { 21 | $project: { 22 | _id: 0, 23 | gender: 1, 24 | fullName: { 25 | $concat: ['$name.first', ' ','$name.last'] 26 | } 27 | } 28 | } 29 | ]).pretty() 30 | **Output** 31 | { "gender" : "male", "fullName" : "victor pedersen" } 32 | { "gender" : "male", "fullName" : "gideon van drongelen" } 33 | { "gender" : "male", "fullName" : "harvey chambers" } 34 | { "gender" : "female", "fullName" : "پریا پارسا" } 35 | { "gender" : "female", "fullName" : "maeva wilson" } 36 | { "gender" : "male", "fullName" : "elijah lewis" } 37 | { "gender" : "female", "fullName" : "olav oehme" } 38 | { "gender" : "female", "fullName" : "madeleine till" } 39 | { "gender" : "male", "fullName" : "carl jacobs" } 40 | { "gender" : "male", "fullName" : "isolino viana" } 41 | { "gender" : "female", "fullName" : "louise graham" } 42 | { "gender" : "female", "fullName" : "mestan kaplangı" } 43 | { "gender" : "female", "fullName" : "katie welch" } 44 | { "gender" : "female", "fullName" : "sandra lorenzo" } 45 | { "gender" : "male", "fullName" : "بنیامین سالاری" } 46 | { "gender" : "female", "fullName" : "andreia arnaud" } 47 | { "gender" : "female", "fullName" : "shona kemperman" } 48 | { "gender" : "male", "fullName" 
: "zachary lo" } 49 | { "gender" : "female", "fullName" : "anne ruiz" } 50 | { "gender" : "female", "fullName" : "gonca alnıaçık" } 51 | Type "it" for more 52 | ``` 53 | 54 | Now want to first and last name start with the uppercase letter 55 | 56 | ```cpp 57 | db.persons.aggregate([ 58 | { 59 | $project: { 60 | _id: 0, 61 | gender: 1, 62 | fullName: { 63 | $concat: [{ $toUpper: '$name.first'}, ' ', { $toUpper: '$name.last'}] 64 | } 65 | } 66 | } 67 | ]).pretty() 68 | **Output** 69 | { "gender" : "male", "fullName" : "VICTOR PEDERSEN" } 70 | { "gender" : "male", "fullName" : "GIDEON VAN DRONGELEN" } 71 | { "gender" : "male", "fullName" : "HARVEY CHAMBERS" } 72 | { "gender" : "female", "fullName" : "پریا پارسا" } 73 | { "gender" : "female", "fullName" : "MAEVA WILSON" } 74 | { "gender" : "male", "fullName" : "ELIJAH LEWIS" } 75 | { "gender" : "female", "fullName" : "OLAV OEHME" } 76 | { "gender" : "female", "fullName" : "MADELEINE TILL" } 77 | { "gender" : "male", "fullName" : "CARL JACOBS" } 78 | { "gender" : "male", "fullName" : "ISOLINO VIANA" } 79 | { "gender" : "female", "fullName" : "LOUISE GRAHAM" } 80 | { "gender" : "female", "fullName" : "MESTAN KAPLANGı" } 81 | { "gender" : "female", "fullName" : "KATIE WELCH" } 82 | { "gender" : "female", "fullName" : "SANDRA LORENZO" } 83 | { "gender" : "male", "fullName" : "بنیامین سالاری" } 84 | { "gender" : "female", "fullName" : "ANDREIA ARNAUD" } 85 | { "gender" : "female", "fullName" : "SHONA KEMPERMAN" } 86 | { "gender" : "male", "fullName" : "ZACHARY LO" } 87 | { "gender" : "female", "fullName" : "ANNE RUIZ" } 88 | { "gender" : "female", "fullName" : "GONCA ALNıAçıK" } 89 | Type "it" for more 90 | ``` 91 | 92 | `$substrCP` ⇒ Substring part 93 | 0 ⇒ means starting index 94 | 1 ⇒ means how much character(length) 95 | 96 | ```cpp 97 | > db.persons.aggregate([ 98 | { 99 | $project: { 100 | _id: 0, 101 | gender: 1, 102 | fullName: { 103 | $concat: [ 104 | { $toUpper: { $substrCP: ['$name.first', 0, 1] } }, 105 | ' ', 
106 | { $toUpper: { $substrCP: ['$name.last', 0, 1] } } 107 | ] 108 | } 109 | } 110 | } 111 | ]).pretty() 112 | **Output** 113 | { "gender" : "male", "fullName" : "V P" } 114 | { "gender" : "male", "fullName" : "G V" } 115 | { "gender" : "male", "fullName" : "H C" } 116 | { "gender" : "female", "fullName" : "پ پ" } 117 | { "gender" : "female", "fullName" : "M W" } 118 | { "gender" : "male", "fullName" : "E L" } 119 | { "gender" : "female", "fullName" : "O O" } 120 | { "gender" : "female", "fullName" : "M T" } 121 | { "gender" : "male", "fullName" : "C J" } 122 | { "gender" : "male", "fullName" : "I V" } 123 | { "gender" : "female", "fullName" : "L G" } 124 | { "gender" : "female", "fullName" : "M K" } 125 | { "gender" : "female", "fullName" : "K W" } 126 | { "gender" : "female", "fullName" : "S L" } 127 | { "gender" : "male", "fullName" : "ب س" } 128 | { "gender" : "female", "fullName" : "A A" } 129 | { "gender" : "female", "fullName" : "S K" } 130 | { "gender" : "male", "fullName" : "Z L" } 131 | { "gender" : "female", "fullName" : "A R" } 132 | { "gender" : "female", "fullName" : "G A" } 133 | Type "it" for more 134 | ``` 135 | 136 | The final output 137 | 138 | ```cpp 139 | > db.persons.aggregate([ 140 | { 141 | $project: { 142 | _id: 0, 143 | gender: 1, 144 | fullName: { 145 | $concat: [ 146 | { $toUpper: { $substrCP: ['$name.first', 0, 1] } }, 147 | { 148 | $substrCP: [ 149 | '$name.first', 150 | 1, 151 | { $subtract: [{ $strLenCP: '$name.first' }, 1] } 152 | ] 153 | }, 154 | ' ', 155 | { $toUpper: { $substrCP: ['$name.last', 0, 1] } }, 156 | { 157 | $substrCP: [ 158 | '$name.last', 159 | 1, 160 | { $subtract: [{ $strLenCP: '$name.last' }, 1] } 161 | ] 162 | }, 163 | ] 164 | } 165 | } 166 | } 167 | ]).pretty() 168 | **Output** 169 | { "gender" : "male", "fullName" : "Victor Pedersen" } 170 | { "gender" : "male", "fullName" : "Gideon Van drongelen" } 171 | { "gender" : "male", "fullName" : "Harvey Chambers" } 172 | { "gender" : "female", "fullName" : "پریا 
پارسا" } 173 | { "gender" : "female", "fullName" : "Maeva Wilson" } 174 | { "gender" : "male", "fullName" : "Elijah Lewis" } 175 | { "gender" : "female", "fullName" : "Olav Oehme" } 176 | { "gender" : "female", "fullName" : "Madeleine Till" } 177 | { "gender" : "male", "fullName" : "Carl Jacobs" } 178 | { "gender" : "male", "fullName" : "Isolino Viana" } 179 | { "gender" : "female", "fullName" : "Louise Graham" } 180 | { "gender" : "female", "fullName" : "Mestan Kaplangı" } 181 | { "gender" : "female", "fullName" : "Katie Welch" } 182 | { "gender" : "female", "fullName" : "Sandra Lorenzo" } 183 | { "gender" : "male", "fullName" : "بنیامین سالاری" } 184 | { "gender" : "female", "fullName" : "Andreia Arnaud" } 185 | { "gender" : "female", "fullName" : "Shona Kemperman" } 186 | { "gender" : "male", "fullName" : "Zachary Lo" } 187 | { "gender" : "female", "fullName" : "Anne Ruiz" } 188 | { "gender" : "female", "fullName" : "Gonca Alnıaçık" } 189 | Type "it" for more 190 | ``` 191 | 192 | [Turning the Location Into a geoJSON Object ](Project/Turning_the_Location_Into_a_geoJSON_Object.md) 193 | 194 | [Understanding the ISO Week Year Operator](Project/Understanding_the_ISO_Week_Year_Operator.md) -------------------------------------------------------------------------------- /MongoDB/Aggregation_Framework/Project/Understanding_the_ISO_Week_Year_Operator.md: -------------------------------------------------------------------------------- 1 | # Understanding the ISO Week Year Operator 2 | 3 | `$isoWeekYear` retries the year out of date 4 | 5 | `_id: { birthYear: { $isoWeekYear: '$birthdate' } },` ⇒ Group using `birthdate` field get the first project, convert it in `isoWeekYear`, and returned key/field name will be `birthYear` 6 | `numPersons: { $sum: 1 }` ⇒ Return the total number/count of person in a year. 
7 | 8 | ```cpp 9 | > db.persons.aggregate([ 10 | { 11 | $project: { 12 | _id: 0, 13 | name: 1, 14 | email: 1, 15 | birthdate: { $toDate: '$dob.date' }, 16 | age: '$dob.age', 17 | location: { 18 | type: 'Point', 19 | coordinates: [ 20 | { 21 | $convert: { 22 | input: '$location.coordinates.longitude', 23 | to: 'double', 24 | onError: 0.0, 25 | onNull: 0.0 26 | } 27 | }, 28 | { 29 | $convert: { 30 | input: '$location.coordinates.latitude', 31 | to: 'double', 32 | onError: 0.0, 33 | onNull: 0.0 34 | } 35 | } 36 | ] 37 | } 38 | } 39 | }, 40 | { 41 | $project: { 42 | email: 1, 43 | location: 1, 44 | gender: 1, 45 | birthdate: 1, 46 | age: 1, 47 | fullName: { 48 | $concat: [ 49 | { 50 | $toUpper: { 51 | $substrCP: ['$name.first', 0, 1] 52 | } 53 | }, { 54 | $substrCP: [ 55 | '$name.first', 1, { 56 | $subtract: [ 57 | { $strLenCP: '$name.first' }, 1 58 | ] 59 | }] 60 | }, 61 | ' ', 62 | { 63 | $toUpper: { 64 | $substrCP: ['$name.last', 0, 1] 65 | } 66 | }, 67 | { 68 | $substrCP: [ 69 | '$name.last', 1, { 70 | $subtract: [ 71 | { $strLenCP: '$name.last' }, 1 72 | ] 73 | }] 74 | } 75 | ] 76 | } 77 | } 78 | }, 79 | { 80 | $group: { 81 | _id: { birthYear: { $isoWeekYear: '$birthdate' } }, 82 | numPersons: { $sum: 1 } 83 | } 84 | } 85 | ]).pretty() 86 | 87 | **Output** 88 | [ 89 | { _id: { birthYear: Long("1988") }, numPersons: 89 }, 90 | { _id: { birthYear: Long("1950") }, numPersons: 99 }, 91 | { _id: { birthYear: Long("1954") }, numPersons: 95 }, 92 | { _id: { birthYear: Long("1953") }, numPersons: 97 }, 93 | { _id: { birthYear: Long("1977") }, numPersons: 86 }, 94 | { _id: { birthYear: Long("1959") }, numPersons: 93 }, 95 | { _id: { birthYear: Long("1971") }, numPersons: 88 }, 96 | { _id: { birthYear: Long("1962") }, numPersons: 90 }, 97 | { _id: { birthYear: Long("1992") }, numPersons: 96 }, 98 | { _id: { birthYear: Long("1966") }, numPersons: 92 }, 99 | { _id: { birthYear: Long("1946") }, numPersons: 100 }, 100 | { _id: { birthYear: Long("1947") }, numPersons: 93 }, 101 
| { _id: { birthYear: Long("1993") }, numPersons: 110 }, 102 | { _id: { birthYear: Long("1968") }, numPersons: 96 }, 103 | { _id: { birthYear: Long("1980") }, numPersons: 86 }, 104 | { _id: { birthYear: Long("1982") }, numPersons: 96 }, 105 | { _id: { birthYear: Long("1963") }, numPersons: 98 }, 106 | { _id: { birthYear: Long("1979") }, numPersons: 84 }, 107 | { _id: { birthYear: Long("1975") }, numPersons: 107 }, 108 | { _id: { birthYear: Long("1951") }, numPersons: 79 } 109 | ] 110 | Type "it" for more 111 | ``` 112 | 113 | Adding sort 114 | 115 | ```cpp 116 | > db.persons.aggregate([ 117 | { 118 | $project: { 119 | _id: 0, 120 | name: 1, 121 | email: 1, 122 | birthdate: { $toDate: '$dob.date' }, 123 | age: '$dob.age', 124 | location: { 125 | type: 'Point', 126 | coordinates: [ 127 | { 128 | $convert: { 129 | input: '$location.coordinates.longitude', 130 | to: 'double', 131 | onError: 0.0, 132 | onNull: 0.0 133 | } 134 | }, 135 | { 136 | $convert: { 137 | input: '$location.coordinates.latitude', 138 | to: 'double', 139 | onError: 0.0, 140 | onNull: 0.0 141 | } 142 | } 143 | ] 144 | } 145 | } 146 | }, 147 | { 148 | $project: { 149 | email: 1, 150 | location: 1, 151 | gender: 1, 152 | birthdate: 1, 153 | age: 1, 154 | fullName: { 155 | $concat: [ 156 | { 157 | $toUpper: { 158 | $substrCP: ['$name.first', 0, 1] 159 | } 160 | }, { 161 | $substrCP: [ 162 | '$name.first', 1, { 163 | $subtract: [ 164 | { $strLenCP: '$name.first' }, 1 165 | ] 166 | }] 167 | }, 168 | ' ', 169 | { 170 | $toUpper: { 171 | $substrCP: ['$name.last', 0, 1] 172 | } 173 | }, 174 | { 175 | $substrCP: [ 176 | '$name.last', 1, { 177 | $subtract: [ 178 | { $strLenCP: '$name.last' }, 1 179 | ] 180 | }] 181 | } 182 | ] 183 | } 184 | } 185 | }, 186 | { 187 | $group: { 188 | _id: { birthYear: { $isoWeekYear: '$birthdate' } }, 189 | numPersons: { $sum: 1 } 190 | } 191 | }, 192 | { $sort: { numPersons: -1}} 193 | ]).pretty() 194 | 195 | **Output** 196 | [ 197 | { _id: { birthYear: Long("1955") }, 
numPersons: 113 }, 198 | { _id: { birthYear: Long("1961") }, numPersons: 111 }, 199 | { _id: { birthYear: Long("1993") }, numPersons: 110 }, 200 | { _id: { birthYear: Long("1960") }, numPersons: 110 }, 201 | { _id: { birthYear: Long("1975") }, numPersons: 107 }, 202 | { _id: { birthYear: Long("1945") }, numPersons: 106 }, 203 | { _id: { birthYear: Long("1976") }, numPersons: 105 }, 204 | { _id: { birthYear: Long("1967") }, numPersons: 104 }, 205 | { _id: { birthYear: Long("1990") }, numPersons: 103 }, 206 | { _id: { birthYear: Long("1994") }, numPersons: 102 }, 207 | { _id: { birthYear: Long("1981") }, numPersons: 102 }, 208 | { _id: { birthYear: Long("1958") }, numPersons: 101 }, 209 | { _id: { birthYear: Long("1995") }, numPersons: 101 }, 210 | { _id: { birthYear: Long("1946") }, numPersons: 100 }, 211 | { _id: { birthYear: Long("1948") }, numPersons: 100 }, 212 | { _id: { birthYear: Long("1950") }, numPersons: 99 }, 213 | { _id: { birthYear: Long("1983") }, numPersons: 99 }, 214 | { _id: { birthYear: Long("1970") }, numPersons: 99 }, 215 | { _id: { birthYear: Long("1963") }, numPersons: 98 }, 216 | { _id: { birthYear: Long("1965") }, numPersons: 98 } 217 | ] 218 | Type "it" for more 219 | ``` 220 | 221 | # -------------------------------------------------------------------------------- /MongoDB/Aggregation_Framework/Writing_Pipeline_Results_Into_a_New_Collection.md: -------------------------------------------------------------------------------- 1 | # Writing Pipeline Results Into a New Collection 2 | 3 | By getting the output we can store into the another Collection with `$out`, can do work with the out stage. 
4 | 5 | ```bash 6 | db.persons.aggregate([ 7 | { 8 | $project: { 9 | _id: 0, 10 | name: 1, 11 | email: 1, 12 | birthdate: { $toDate: '$dob.date' }, 13 | age: "$dob.age", 14 | location: { 15 | type: 'Point', 16 | coordinates: [ 17 | { 18 | $convert: { 19 | input: '$location.coordinates.longitude', 20 | to: 'double', 21 | onError: 0.0, 22 | onNull: 0.0 23 | } 24 | }, 25 | { 26 | $convert: { 27 | input: '$location.coordinates.latitude', 28 | to: 'double', 29 | onError: 0.0, 30 | onNull: 0.0 31 | } 32 | } 33 | ] 34 | } 35 | } 36 | }, 37 | { 38 | $project: { 39 | gender: 1, 40 | email: 1, 41 | location: 1, 42 | birthdate: 1, 43 | age: 1, 44 | fullName: { 45 | $concat: [ 46 | { $toUpper: { $substrCP: ['$name.first', 0, 1] } }, 47 | { 48 | $substrCP: [ 49 | '$name.first', 50 | 1, 51 | { $subtract: [{ $strLenCP: '$name.first' }, 1] } 52 | ] 53 | }, 54 | ' ', 55 | { $toUpper: { $substrCP: ['$name.last', 0, 1] } }, 56 | { 57 | $substrCP: [ 58 | '$name.last', 59 | 1, 60 | { $subtract: [{ $strLenCP: '$name.last' }, 1] } 61 | ] 62 | } 63 | ] 64 | } 65 | } 66 | }, 67 | { $out: "transformedPersons" } 68 | ]).pretty(); 69 | 70 | **Output** 71 | ``` 72 | 73 | Now check all collections. We get new collection transformedPersons which we defined in previous command. 
74 | 75 | ```bash 76 | show collections 77 | 78 | **Output** 79 | friends 80 | persons 81 | transformedPersons 82 | ``` 83 | 84 | Checking the data in transformedPersons collection 85 | 86 | ```bash 87 | db.transformedPersons.find().pretty() 88 | 89 | **Output** 90 | [ 91 | { 92 | _id: ObjectId("634bb5cf1f059ab964bafc04"), 93 | location: { type: 'Point', coordinates: [ -154.6037, -29.6721 ] }, 94 | email: 'carl.jacobs@example.com', 95 | birthdate: ISODate("1984-09-30T01:20:26.000Z"), 96 | age: 33, 97 | fullName: 'Carl Jacobs' 98 | }, 99 | { 100 | _id: ObjectId("634bb5cf1f059ab964bafc05"), 101 | location: { type: 'Point', coordinates: [ 168.9462, -22.5329 ] }, 102 | email: 'harvey.chambers@example.com', 103 | birthdate: ISODate("1988-05-27T00:14:03.000Z"), 104 | age: 30, 105 | fullName: 'Harvey Chambers' 106 | }, 107 | { 108 | _id: ObjectId("634bb5cf1f059ab964bafc06"), 109 | location: { type: 'Point', coordinates: [ -54.1364, -86.1268 ] }, 110 | email: 'gideon.vandrongelen@example.com', 111 | birthdate: ISODate("1971-03-28T04:47:21.000Z"), 112 | age: 47, 113 | fullName: 'Gideon Van drongelen' 114 | }, 115 | { 116 | _id: ObjectId("634bb5cf1f059ab964bafc07"), 117 | location: { type: 'Point', coordinates: [ -70.2264, 76.4507 ] }, 118 | email: 'zachary.lo@example.com', 119 | birthdate: ISODate("1988-10-17T03:45:04.000Z"), 120 | age: 29, 121 | fullName: 'Zachary Lo' 122 | }, 123 | { 124 | _id: ObjectId("634bb5cf1f059ab964bafc08"), 125 | location: { type: 'Point', coordinates: [ 111.3806, -31.6359 ] }, 126 | email: 'maeva.wilson@example.com', 127 | birthdate: ISODate("1962-08-11T20:51:07.000Z"), 128 | age: 56, 129 | fullName: 'Maeva Wilson' 130 | }, 131 | { 132 | _id: ObjectId("634bb5cf1f059ab964bafc09"), 133 | location: { type: 'Point', coordinates: [ 34.1689, 4.6625 ] }, 134 | email: 'پریا.پارسا@example.com', 135 | birthdate: ISODate("1962-01-10T05:26:30.000Z"), 136 | age: 56, 137 | fullName: 'پریا پارسا' 138 | }, 139 | { 140 | _id: ObjectId("634bb5cf1f059ab964bafc0a"), 
141 | location: { type: 'Point', coordinates: [ -67.5738, -52.8348 ] }, 142 | email: 'olav.oehme@example.com', 143 | birthdate: ISODate("1960-11-28T23:07:18.000Z"), 144 | age: 57, 145 | fullName: 'Olav Oehme' 146 | }, 147 | { 148 | _id: ObjectId("634bb5cf1f059ab964bafc0b"), 149 | location: { type: 'Point', coordinates: [ -18.5996, -42.6128 ] }, 150 | email: 'elijah.lewis@example.com', 151 | birthdate: ISODate("1986-03-29T06:40:18.000Z"), 152 | age: 32, 153 | fullName: 'Elijah Lewis' 154 | }, 155 | { 156 | _id: ObjectId("634bb5cf1f059ab964bafc0c"), 157 | location: { type: 'Point', coordinates: [ -31.0208, -29.8113 ] }, 158 | email: 'victor.pedersen@example.com', 159 | birthdate: ISODate("1959-02-19T23:56:23.000Z"), 160 | age: 59, 161 | fullName: 'Victor Pedersen' 162 | }, 163 | { 164 | _id: ObjectId("634bb5cf1f059ab964bafc0d"), 165 | location: { type: 'Point', coordinates: [ 148.0944, 35.5726 ] }, 166 | email: 'louise.graham@example.com', 167 | birthdate: ISODate("1971-01-21T20:36:16.000Z"), 168 | age: 47, 169 | fullName: 'Louise Graham' 170 | }, 171 | { 172 | _id: ObjectId("634bb5cf1f059ab964bafc0e"), 173 | location: { type: 'Point', coordinates: [ -172.3753, 83.3998 ] }, 174 | email: 'madeleine.till@example.com', 175 | birthdate: ISODate("1954-05-01T02:34:40.000Z"), 176 | age: 64, 177 | fullName: 'Madeleine Till' 178 | }, 179 | { 180 | _id: ObjectId("634bb5cf1f059ab964bafc0f"), 181 | location: { type: 'Point', coordinates: [ 101.5995, 78.8545 ] }, 182 | email: 'isolino.viana@example.com', 183 | birthdate: ISODate("1959-03-22T14:53:41.000Z"), 184 | age: 59, 185 | fullName: 'Isolino Viana' 186 | }, 187 | { 188 | _id: ObjectId("634bb5cf1f059ab964bafc10"), 189 | location: { type: 'Point', coordinates: [ 43.9085, 25.1614 ] }, 190 | email: 'mestan.kaplangı@example.com', 191 | birthdate: ISODate("1951-12-17T20:03:33.000Z"), 192 | age: 66, 193 | fullName: 'Mestan Kaplangı' 194 | }, 195 | { 196 | _id: ObjectId("634bb5cf1f059ab964bafc11"), 197 | location: { type: 'Point', 
coordinates: [ 135.9359, 71.9851 ] }, 198 | email: 'katie.welch@example.com', 199 | birthdate: ISODate("1990-10-14T05:02:12.000Z"), 200 | age: 27, 201 | fullName: 'Katie Welch' 202 | }, 203 | { 204 | _id: ObjectId("634bb5cf1f059ab964bafc12"), 205 | location: { type: 'Point', coordinates: [ -90.9499, 21.3388 ] }, 206 | email: 'بنیامین.سالاری@example.com', 207 | birthdate: ISODate("1984-03-10T22:12:43.000Z"), 208 | age: 34, 209 | fullName: 'بنیامین سالاری' 210 | }, 211 | { 212 | _id: ObjectId("634bb5cf1f059ab964bafc13"), 213 | location: { type: 'Point', coordinates: [ -83.3326, -88.6846 ] }, 214 | email: 'sandra.lorenzo@example.com', 215 | birthdate: ISODate("1975-03-23T17:01:45.000Z"), 216 | age: 43, 217 | fullName: 'Sandra Lorenzo' 218 | }, 219 | { 220 | _id: ObjectId("634bb5cf1f059ab964bafc14"), 221 | location: { type: 'Point', coordinates: [ -8.557, -14.4912 ] }, 222 | email: 'shona.kemperman@example.com', 223 | birthdate: ISODate("1948-04-23T03:40:22.000Z"), 224 | age: 70, 225 | fullName: 'Shona Kemperman' 226 | }, 227 | { 228 | _id: ObjectId("634bb5cf1f059ab964bafc15"), 229 | location: { type: 'Point', coordinates: [ 59.5703, -67.6434 ] }, 230 | email: 'andreia.arnaud@example.com', 231 | birthdate: ISODate("1960-01-31T05:16:10.000Z"), 232 | age: 58, 233 | fullName: 'Andreia Arnaud' 234 | }, 235 | { 236 | _id: ObjectId("634bb5cf1f059ab964bafc16"), 237 | location: { type: 'Point', coordinates: [ -90.4049, -65.0877 ] }, 238 | email: 'delia.durand@example.com', 239 | birthdate: ISODate("1966-08-03T09:22:41.000Z"), 240 | age: 52, 241 | fullName: 'Delia Durand' 242 | }, 243 | { 244 | _id: ObjectId("634bb5cf1f059ab964bafc17"), 245 | location: { type: 'Point', coordinates: [ 78.0207, -84.1572 ] }, 246 | email: 'anne.ruiz@example.com', 247 | birthdate: ISODate("1982-10-09T12:10:42.000Z"), 248 | age: 35, 249 | fullName: 'Anne Ruiz' 250 | } 251 | ] 252 | Type "it" for more 253 | ``` -------------------------------------------------------------------------------- 
/MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents.md: -------------------------------------------------------------------------------- 1 | # Basic CRUD, Projection, Embedded/Nested Documents, Array 2 | 3 | 12 | 13 | # CRUD 14 | 15 | [Create/Insert](Basic_CRUD,_Projection,_Embedded_Nested_Documents/Create_Insert.md) 16 | 17 | [Read ⇒ find](Basic_CRUD,_Projection,_Embedded_Nested_Documents/Read__find.md) 18 | 19 | [Update](Basic_CRUD,_Projection,_Embedded_Nested_Documents/Update.md) 20 | 21 | [Delete](Basic_CRUD,_Projection,_Embedded_Nested_Documents/Delete.md) 22 | 23 | # Others 24 | 25 | [Projection](Basic_CRUD,_Projection,_Embedded_Nested_Documents/Projection.md) 26 | 27 | [Embedded/Nested Documents](Basic_CRUD,_Projection,_Embedded_Nested_Documents/Embedded_Nested_Documents.md) 28 | 29 | [Array](Basic_CRUD,_Projection,_Embedded_Nested_Documents/Array.md) -------------------------------------------------------------------------------- /MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents/Array.md: -------------------------------------------------------------------------------- 1 | # Array 2 | 3 | - An array of embedded documents. 4 | - An array can hold any data. 5 | Inserting array 6 | 7 | ```scheme 8 | > db.passengers.updateOne({name: "Albert Twostone"}, {$set:{hobbies:["sports", "cooking"]}}) 9 | **Output** 10 | { "acknowledged" : true, "matchedCount" : 1, "modifiedCount" : 0 } 11 | 12 | > db.passengers.find({name: "Albert Twostone"}).pretty() 13 | { 14 | "_id" : ObjectId("5f1339f7d022deabe244f282"), 15 | "name" : "Albert Twostone", 16 | "age" : 68, 17 | "hobbies" : [ 18 | "sports", 19 | "cooking" 20 | ] 21 | } 22 | ``` 23 | 24 |
25 | Output to test(last document where the array was added) 26 | 27 | ```scheme 28 | db.passengers.find().pretty() 29 | ``` 30 | **Output** 31 | ```scheme 32 | { 33 | "_id" : ObjectId("5f1339f7d022deabe244f26f"), 34 | "name" : "Max Schwarzmueller", 35 | "age" : 29 36 | } 37 | { 38 | "_id" : ObjectId("5f1339f7d022deabe244f270"), 39 | "name" : "Manu Lorenz", 40 | "age" : 30 41 | } 42 | { 43 | "_id" : ObjectId("5f1339f7d022deabe244f271"), 44 | "name" : "Chris Hayton", 45 | "age" : 35 46 | } 47 | { 48 | "_id" : ObjectId("5f1339f7d022deabe244f272"), 49 | "name" : "Sandeep Kumar", 50 | "age" : 28 51 | } 52 | { 53 | "_id" : ObjectId("5f1339f7d022deabe244f273"), 54 | "name" : "Maria Jones", 55 | "age" : 30 56 | } 57 | { 58 | "_id" : ObjectId("5f1339f7d022deabe244f274"), 59 | "name" : "Alexandra Maier", 60 | "age" : 27 61 | } 62 | { 63 | "_id" : ObjectId("5f1339f7d022deabe244f275"), 64 | "name" : "Dr. Phil Evans", 65 | "age" : 47 66 | } 67 | { 68 | "_id" : ObjectId("5f1339f7d022deabe244f276"), 69 | "name" : "Sandra Brugge", 70 | "age" : 33 71 | } 72 | { 73 | "_id" : ObjectId("5f1339f7d022deabe244f277"), 74 | "name" : "Elisabeth Mayr", 75 | "age" : 29 76 | } 77 | { 78 | "_id" : ObjectId("5f1339f7d022deabe244f278"), 79 | "name" : "Frank Cube", 80 | "age" : 41 81 | } 82 | { 83 | "_id" : ObjectId("5f1339f7d022deabe244f279"), 84 | "name" : "Karandeep Alun", 85 | "age" : 48 86 | } 87 | { 88 | "_id" : ObjectId("5f1339f7d022deabe244f27a"), 89 | "name" : "Michaela Drayer", 90 | "age" : 39 91 | } 92 | { 93 | "_id" : ObjectId("5f1339f7d022deabe244f27b"), 94 | "name" : "Bernd Hoftstadt", 95 | "age" : 22 96 | } 97 | { 98 | "_id" : ObjectId("5f1339f7d022deabe244f27c"), 99 | "name" : "Scott Tolib", 100 | "age" : 44 101 | } 102 | { 103 | "_id" : ObjectId("5f1339f7d022deabe244f27d"), 104 | "name" : "Freddy Melver", 105 | "age" : 41 106 | } 107 | { 108 | "_id" : ObjectId("5f1339f7d022deabe244f27e"), 109 | "name" : "Alexis Bohed", 110 | "age" : 35 111 | } 112 | { 113 | "_id" : 
ObjectId("5f1339f7d022deabe244f27f"), 114 | "name" : "Melanie Palace", 115 | "age" : 27 116 | } 117 | { 118 | "_id" : ObjectId("5f1339f7d022deabe244f280"), 119 | "name" : "Armin Glutch", 120 | "age" : 35 121 | } 122 | { 123 | "_id" : ObjectId("5f1339f7d022deabe244f281"), 124 | "name" : "Klaus Arber", 125 | "age" : 53 126 | } 127 | { 128 | "_id" : ObjectId("5f1339f7d022deabe244f282"), 129 | "name" : "Albert Twostone", 130 | "age" : 68, 131 | "hobbies" : [ 132 | "sports", 133 | "cooking" 134 | ] 135 | } 136 | ``` 137 |
138 | 139 | Will get only the `hobbies` array. `findOne()` returns a single document, so its fields can be accessed directly — `find()` would not work here because it returns a cursor, not a document. 140 | 141 | ```scheme 142 | > db.passengers.findOne({name: "Albert Twostone"}).hobbies 143 | **Output** 144 | [ "sports", "cooking" ] 145 | ``` 146 | 147 | ### Query an array 148 | 149 | ```scheme 150 | > db.passengers.find({hobbies: "sports"}).pretty() 151 | **Output** 152 | { 153 | "_id" : ObjectId("5f1339f7d022deabe244f282"), 154 | "name" : "Albert Twostone", 155 | "age" : 68, 156 | "hobbies" : [ 157 | "sports", 158 | "cooking" 159 | ] 160 | } 161 | ``` -------------------------------------------------------------------------------- /MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents/Create_Insert.md: -------------------------------------------------------------------------------- 1 | # Create/Insert 2 | 3 | # Create(Insertion) 4 | 5 | ```scheme 6 | insertOne(data, options) 7 | insertMany(data, options) 8 | ``` 9 | 10 | ## insertOne 11 | 12 | Will insert a JSON document into the `products` collection of the currently connected database (in this case `shop`). If the collection does not exist, it will be created automatically. 13 | 14 | ```scheme 15 | db.products.insertOne({ "name": "Max", price: 12.99}) 16 | db.products.insertOne({ name: "A Computer", price:34829.99, description: "This is high quality computer", details:{cpu: "Intel i7 8770",memory: 32}}) 17 | ``` 18 | 19 | In a command like `db.products.insertOne({ name: "Max" })`, MongoDB generates the unique `_id` automatically. The insert will give back a response like: 20 | 21 | ```powershell 22 | { 23 | "acknowledged": true, 24 | "insertedId": ObjectId("61691b464aed55381c7714d3") 25 | } 26 | ``` 27 | 28 | That means the data was inserted successfully; MongoDB automatically created a unique id for the document, which is returned as `insertedId`. 29 | 30 | > We can insert using `db.products.insertOne({ name: "Max", price: 12.99})`. MongoDB will add “ ” around keys (like “name”) automatically, but string values must be wrapped in “ ” (like “Max”).
31 | > 32 | 33 | ### insertMany 34 | 35 | Will insert more than one document. Have to insert it as an **array**. Will return all ids of the inserted document. 36 | 37 | ```scheme 38 | > db.flightData.insertMany([ 39 | ... { 40 | ... "departureAirport": "MUC", 41 | ... "arrivalAirport": "SFO", 42 | ... "aircraft": "Airbus A380", 43 | ... "distance": 12000, 44 | ... "intercontinental": true 45 | ... }, 46 | ... { 47 | ... "departureAirport": "LHR", 48 | ... "arrivalAirport": "TXL", 49 | ... "aircraft": "Airbus A320", 50 | ... "distance": 950, 51 | ... "intercontinental": false 52 | ... } 53 | ... ]) 54 | ``` 55 | 56 | Output 57 | 58 | ```scheme 59 | { 60 | "acknowledged" : true, 61 | "insertedIds" : [ 62 | ObjectId("5f132aebd022deabe244f26d"), 63 | ObjectId("5f132aebd022deabe244f26e") 64 | ] 65 | } 66 | ``` 67 |
68 | Checking using find 69 | 70 | Code 71 | ```scheme 72 | db.flightData.find().pretty() 73 | ``` 74 | Output 75 | 76 | { 77 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 78 | "departureAirport" : "MUC", 79 | "arrivalAirport" : "SFO", 80 | "aircraft" : "Airbus A380", 81 | "distance" : 12000, 82 | "intercontinental" : true 83 | } 84 | { 85 | "_id" : ObjectId("5f132aebd022deabe244f26e"), 86 | "departureAirport" : "LHR", 87 | "arrivalAirport" : "TXL", 88 | "aircraft" : "Airbus A320", 89 | "distance" : 950, 90 | "intercontinental" : false 91 | } 92 |
93 | 94 | ### Inserting number(double/float), Timestamp, Date 95 | 96 | ```scheme 97 | > db.companies.insertOne( 98 | { 99 | name: "Freash Apples Inc", 100 | isStartup: true, 101 | employees: 33, 102 | funding: 123456789876543219, 103 | details: { 104 | cea: "Mark Super" 105 | }, 106 | tags: [ 107 | {title: "super"}, 108 | {title: "perfect"} 109 | ], 110 | foundingData: new Date(), 111 | insertedAt: new Timestamp() 112 | } 113 | ) 114 | ``` 115 | **Output** 116 | ```scheme 117 | { 118 | "acknowledged" : true, 119 | "insertedId" : ObjectId("5f13ec400249b11a6aa5e37f") 120 | } 121 | ``` 122 |
123 | Checking result 124 | 125 | ```scheme 126 | db.companies.find().pretty() 127 | ``` 128 | **Output** 129 | ```scheme 130 | { 131 | "_id" : ObjectId("5f13ec400249b11a6aa5e37f"), 132 | "name" : "Freash Apples Inc", 133 | "isStartup" : true, 134 | "employees" : 33, 135 | "funding" : 123456789876543220, 136 | "details" : { 137 | "cea" : "Mark Super" 138 | }, 139 | "tags" : [ 140 | { 141 | "title" : "super" 142 | }, 143 | { 144 | "title" : "perfect" 145 | } 146 | ], 147 | "foundingData" : ISODate("2020-07-19T06:46:24.175Z"), 148 | "insertedAt" : Timestamp(1595141184, 1) 149 | } 150 | ``` 151 |
152 | 153 | Create an int **32-bit** value instead of the **default 64-bit floating**-point value. (This default also explains why `funding` above was stored as `123456789876543220` rather than the inserted `123456789876543219` — a 64-bit double cannot represent integers that large exactly.) 154 | 155 | ```scheme 156 | db.numbers.insertOne({a: NumberInt(1)}) 157 | ``` 158 | **Output** 159 | ```scheme 160 | { 161 | "acknowledged" : true, 162 | "insertedId" : ObjectId("5f13ef440249b11a6aa5e382") 163 | } 164 | ``` -------------------------------------------------------------------------------- /MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents/Delete.md: -------------------------------------------------------------------------------- 1 | # Delete 2 | 3 | ```scheme 4 | deleteOne(filter, options) 5 | deleteMany(filter, options) 6 | ``` 7 | 8 | Will delete the first document where departureAirport is MUC. 9 | 10 | ```scheme 11 | db.flightData.deleteOne({departureAirport :"MUC"}) 12 | { "acknowledged": true, "deletedCount" : 1 } => Output 13 | ``` 14 | 15 | Will delete all documents which **have** a **key** `marker` with the value `toDelete` in the flightData collection. 16 | 17 | ```scheme 18 | > db.flightData.deleteMany({marker: "toDelete"}) 19 | ``` 20 | 21 | Will delete all documents in the flightData collection, as there is no filter. 22 | 23 | ```scheme 24 | > db.flightData.deleteMany( {} ) 25 | ``` 26 | 27 | **Delete a collection from the database** 28 | 29 | ```scheme 30 | > db.numbers.drop() => here numbers is the collection name 31 | ``` -------------------------------------------------------------------------------- /MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents/Embedded_Nested_Documents.md: -------------------------------------------------------------------------------- 1 | # Embedded/Nested Documents 2 | 3 | - Up to 100 levels of nesting. 4 | - A single document (including everything embedded in it) has a maximum size of 16MB.
5 | 6 | Adding nested document `status: {description: “on-time”, lastupdated: “i Hour ago”}` 7 | 8 | ```scheme 9 | > db.flightData.updateMany({},{$set:{status:{description: "on-time", lastupdated: "i Hour ago"}}}) 10 | **Output** 11 | { "acknowledged" : true, "matchedCount" : 2, "modifiedCount" : 0 } 12 | 13 | > db.flightData.find().pretty() 14 | **Output** 15 | { 16 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 17 | "departureAirport" : "MUC", 18 | "arrivalAirport" : "SFO", 19 | "aircraft" : "Airbus A380", 20 | "distance" : 12000, 21 | "intercontinental" : true, 22 | "status" : { 23 | "description" : "on-time", 24 | "lastupdated" : "i Hour ago" 25 | } 26 | } 27 | { 28 | "_id" : ObjectId("5f132aebd022deabe244f26e"), 29 | "departureAirport" : "LHR", 30 | "arrivalAirport" : "TXL", 31 | "aircraft" : "Airbus A320", 32 | "distance" : 950, 33 | "intercontinental" : false, 34 | "status" : { 35 | "description" : "on-time", 36 | "lastupdated" : "i Hour ago" 37 | } 38 | } 39 | ``` 40 | 41 | Adding one more document `details: {responsible: “Raju”}` 42 | 43 | ```scheme 44 | > db.flightData.updateMany({},{$set:{status:{description: "on-time", lastupdated: "i Hour ago",details:{responsible: "RAJU"}}}}) 45 | **Output** 46 | { "acknowledged" : true, "matchedCount" : 2, "modifiedCount" : 2 } 47 | 48 | > db.flightData.find().pretty() 49 | { 50 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 51 | "departureAirport" : "MUC", 52 | "arrivalAirport" : "SFO", 53 | "aircraft" : "Airbus A380", 54 | "distance" : 12000, 55 | "intercontinental" : true, 56 | "status" : { 57 | "description" : "on-time", 58 | "lastupdated" : "i Hour ago", 59 | "details" : { 60 | "responsible" : "RAJU" 61 | } 62 | } 63 | } 64 | { 65 | "_id" : ObjectId("5f132aebd022deabe244f26e"), 66 | "departureAirport" : "LHR", 67 | "arrivalAirport" : "TXL", 68 | "aircraft" : "Airbus A320", 69 | "distance" : 950, 70 | "intercontinental" : false, 71 | "status" : { 72 | "description" : "on-time", 73 | "lastupdated" : "i Hour 
ago", 74 | "details" : { 75 | "responsible" : "RAJU" 76 | } 77 | } 78 | } 79 | ``` 80 | 81 | **Query a document** 82 | 83 | Embedded document flightData > status > description. Have to give double quotation in key "`status.description`" as these are nested. 84 | 85 | ```scheme 86 | > db.flightData.find({"status.description": "on-time"}).pretty() 87 | **Output** 88 | { 89 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 90 | "departureAirport" : "MUC", 91 | "arrivalAirport" : "SFO", 92 | "aircraft" : "Airbus A380", 93 | "distance" : 12000, 94 | "intercontinental" : true, 95 | "status" : { 96 | "description" : "on-time", 97 | "lastupdated" : "i Hour ago", 98 | "details" : { 99 | "responsible" : "RAJU" 100 | } 101 | } 102 | } 103 | { 104 | "_id" : ObjectId("5f132aebd022deabe244f26e"), 105 | "departureAirport" : "LHR", 106 | "arrivalAirport" : "TXL", 107 | "aircraft" : "Airbus A320", 108 | "distance" : 950, 109 | "intercontinental" : false, 110 | "status" : { 111 | "description" : "on-time", 112 | "lastupdated" : "i Hour ago", 113 | "details" : { 114 | "responsible" : "RAJU" 115 | } 116 | } 117 | } 118 | ``` 119 | 120 | We can do further query flightData > status > details > responsible and also have to wrap key with double quotation `“status.details.responsible”` as they are nested. 
121 | 122 | 123 | 124 | ```bash 125 | > db.flightData.find({"status.details.responsible": "RAJU"}).pretty() 126 | **Output** 127 | { 128 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 129 | "departureAirport" : "MUC", 130 | "arrivalAirport" : "SFO", 131 | "aircraft" : "Airbus A380", 132 | "distance" : 12000, 133 | "intercontinental" : true, 134 | "status" : { 135 | "description" : "on-time", 136 | "lastupdated" : "i Hour ago", 137 | "details" : { 138 | "responsible" : "RAJU" 139 | } 140 | } 141 | } 142 | { 143 | "_id" : ObjectId("5f132aebd022deabe244f26e"), 144 | "departureAirport" : "LHR", 145 | "arrivalAirport" : "TXL", 146 | "aircraft" : "Airbus A320", 147 | "distance" : 950, 148 | "intercontinental" : false, 149 | "status" : { 150 | "description" : "on-time", 151 | "lastupdated" : "i Hour ago", 152 | "details" : { 153 | "responsible" : "RAJU" 154 | } 155 | } 156 | } 157 | ``` -------------------------------------------------------------------------------- /MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents/Projection.md: -------------------------------------------------------------------------------- 1 | # Projection 2 | 3 | ### Get only selected key value 4 | 5 | Returns only _id(automatically returns this) and name(like filtering) 6 | 7 | ```scheme 8 | > db.passengers.find({},{name: 1}).pretty() 9 | 10 | **Output** 11 | { 12 | "_id" : ObjectId("5f1339f7d022deabe244f26f"), 13 | "name" : "Max Schwarzmueller" 14 | } 15 | { "_id" : ObjectId("5f1339f7d022deabe244f270"), "name" : "Manu Lorenz" } 16 | { "_id" : ObjectId("5f1339f7d022deabe244f271"), "name" : "Chris Hayton" } 17 | { "_id" : ObjectId("5f1339f7d022deabe244f272"), "name" : "Sandeep Kumar" } 18 | { "_id" : ObjectId("5f1339f7d022deabe244f273"), "name" : "Maria Jones" } 19 | { "_id" : ObjectId("5f1339f7d022deabe244f274"), "name" : "Alexandra Maier" } 20 | { "_id" : ObjectId("5f1339f7d022deabe244f275"), "name" : "Dr. 
Phil Evans" } 21 | { "_id" : ObjectId("5f1339f7d022deabe244f276"), "name" : "Sandra Brugge" } 22 | { "_id" : ObjectId("5f1339f7d022deabe244f277"), "name" : "Elisabeth Mayr" } 23 | { "_id" : ObjectId("5f1339f7d022deabe244f278"), "name" : "Frank Cube" } 24 | { "_id" : ObjectId("5f1339f7d022deabe244f279"), "name" : "Karandeep Alun" } 25 | { "_id" : ObjectId("5f1339f7d022deabe244f27a"), "name" : "Michaela Drayer" } 26 | { "_id" : ObjectId("5f1339f7d022deabe244f27b"), "name" : "Bernd Hoftstadt" } 27 | { "_id" : ObjectId("5f1339f7d022deabe244f27c"), "name" : "Scott Tolib" } 28 | { "_id" : ObjectId("5f1339f7d022deabe244f27d"), "name" : "Freddy Melver" } 29 | { "_id" : ObjectId("5f1339f7d022deabe244f27e"), "name" : "Alexis Bohed" } 30 | { "_id" : ObjectId("5f1339f7d022deabe244f27f"), "name" : "Melanie Palace" } 31 | { "_id" : ObjectId("5f1339f7d022deabe244f280"), "name" : "Armin Glutch" } 32 | { "_id" : ObjectId("5f1339f7d022deabe244f281"), "name" : "Klaus Arber" } 33 | { "_id" : ObjectId("5f1339f7d022deabe244f282"), "name" : "Albert Twostone" } 34 | Type "it" for more 35 | ``` 36 | 37 | ### Select and remove key and value while querying 38 | 39 | Returns only name also filtering _id which automatically added by MongoDB. 40 | 41 | ```scheme 42 | > db.passengers.find({},{name: 1,_id: 0}).pretty() 43 | 44 | **Output** 45 | { "name" : "Max Schwarzmueller" } 46 | { "name" : "Manu Lorenz" } 47 | { "name" : "Chris Hayton" } 48 | { "name" : "Sandeep Kumar" } 49 | { "name" : "Maria Jones" } 50 | { "name" : "Alexandra Maier" } 51 | { "name" : "Dr. 
Phil Evans" } 52 | { "name" : "Sandra Brugge" } 53 | { "name" : "Elisabeth Mayr" } 54 | { "name" : "Frank Cube" } 55 | { "name" : "Karandeep Alun" } 56 | { "name" : "Michaela Drayer" } 57 | { "name" : "Bernd Hoftstadt" } 58 | { "name" : "Scott Tolib" } 59 | { "name" : "Freddy Melver" } 60 | { "name" : "Alexis Bohed" } 61 | { "name" : "Melanie Palace" } 62 | { "name" : "Armin Glutch" } 63 | { "name" : "Klaus Arber" } 64 | { "name" : "Albert Twostone" } 65 | ``` -------------------------------------------------------------------------------- /MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents/Read__find.md: -------------------------------------------------------------------------------- 1 | # Read ⇒ find 2 | 3 | - `find()` return cursor that’s why can use pretty(). But when there is a big array then `find()` gives the first 20 data and gives the "it" cursor for getting other data. 4 | - `find()` ****returns all as no argument in `find()`. Only find will return ****the **first 20** and a cursor "it" to iterate further but `find().toArray()` will return **all data** as an array. 5 | - `findOne(filter)` return object that's why can not use `pretty()` it exits on the cursor. 6 | 7 | ```scheme 8 | find(filter, options) 9 | findOne(filter, options) 10 | ``` 11 | 12 |
13 | Initial data in db 14 | 15 | ```scheme 16 | { 17 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 18 | "departureAirport" : "MUC", 19 | "arrivalAirport" : "SFO", 20 | "aircraft" : "Airbus A380", 21 | "distance" : 12000, 22 | "intercontinental" : true 23 | } 24 | { 25 | "_id" : ObjectId("5f132aebd022deabe244f26e"), 26 | "departureAirport" : "LHR", 27 | "arrivalAirport" : "TXL", 28 | "aircraft" : "Airbus A320", 29 | "distance" : 950, 30 | "intercontinental" : false 31 | } 32 | ``` 33 |
34 | 35 | ### `find()` with different scenarios 36 | 37 | ```scheme 38 | //**Find all flight that has intercontinental: true property.** 39 | > db.flightData.find({intercontinental: true}).pretty() 40 | **Output** 41 | { 42 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 43 | "departureAirport" : "MUC", 44 | "arrivalAirport" : "SFO", 45 | "aircraft" : "Airbus A380", 46 | "distance" : 12000, 47 | "intercontinental" : true 48 | } 49 | 50 | //**Will return the first one which fulfills the condition.** 51 | > db.flightData.findOne({distance: { $gt: 900 }}) 52 | **Output** 53 | { 54 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 55 | "departureAirport" : "MUC", 56 | "arrivalAirport" : "SFO", 57 | "aircraft" : "Airbus A380", 58 | "distance" : 12000, 59 | "intercontinental" : true 60 | } 61 | ``` 62 | 63 | **Can do some process at each data.** 64 |
65 | Code 66 | 67 | ```scheme 68 | db.passengers.find().forEach((passengerData) => {printjson(passengerData)}) 69 | ``` 70 | **Output** 71 | 72 | ```scheme 73 | { 74 | "_id" : ObjectId("5f1339f7d022deabe244f26f"), 75 | "name" : "Max Schwarzmueller", 76 | "age" : 29 77 | } 78 | { 79 | "_id" : ObjectId("5f1339f7d022deabe244f270"), 80 | "name" : "Manu Lorenz", 81 | "age" : 30 82 | } 83 | { 84 | "_id" : ObjectId("5f1339f7d022deabe244f271"), 85 | "name" : "Chris Hayton", 86 | "age" : 35 87 | } 88 | { 89 | "_id" : ObjectId("5f1339f7d022deabe244f272"), 90 | "name" : "Sandeep Kumar", 91 | "age" : 28 92 | } 93 | { 94 | "_id" : ObjectId("5f1339f7d022deabe244f273"), 95 | "name" : "Maria Jones", 96 | "age" : 30 97 | } 98 | { 99 | "_id" : ObjectId("5f1339f7d022deabe244f274"), 100 | "name" : "Alexandra Maier", 101 | "age" : 27 102 | } 103 | { 104 | "_id" : ObjectId("5f1339f7d022deabe244f275"), 105 | "name" : "Dr. Phil Evans", 106 | "age" : 47 107 | } 108 | { 109 | "_id" : ObjectId("5f1339f7d022deabe244f276"), 110 | "name" : "Sandra Brugge", 111 | "age" : 33 112 | } 113 | { 114 | "_id" : ObjectId("5f1339f7d022deabe244f277"), 115 | "name" : "Elisabeth Mayr", 116 | "age" : 29 117 | } 118 | { 119 | "_id" : ObjectId("5f1339f7d022deabe244f278"), 120 | "name" : "Frank Cube", 121 | "age" : 41 122 | } 123 | { 124 | "_id" : ObjectId("5f1339f7d022deabe244f279"), 125 | "name" : "Karandeep Alun", 126 | "age" : 48 127 | } 128 | { 129 | "_id" : ObjectId("5f1339f7d022deabe244f27a"), 130 | "name" : "Michaela Drayer", 131 | "age" : 39 132 | } 133 | { 134 | "_id" : ObjectId("5f1339f7d022deabe244f27b"), 135 | "name" : "Bernd Hoftstadt", 136 | "age" : 22 137 | } 138 | { 139 | "_id" : ObjectId("5f1339f7d022deabe244f27c"), 140 | "name" : "Scott Tolib", 141 | "age" : 44 142 | } 143 | { 144 | "_id" : ObjectId("5f1339f7d022deabe244f27d"), 145 | "name" : "Freddy Melver", 146 | "age" : 41 147 | } 148 | { 149 | "_id" : ObjectId("5f1339f7d022deabe244f27e"), 150 | "name" : "Alexis Bohed", 151 | "age" : 35 152 
| } 153 | { 154 | "_id" : ObjectId("5f1339f7d022deabe244f27f"), 155 | "name" : "Melanie Palace", 156 | "age" : 27 157 | } 158 | { 159 | "_id" : ObjectId("5f1339f7d022deabe244f280"), 160 | "name" : "Armin Glutch", 161 | "age" : 35 162 | } 163 | { 164 | "_id" : ObjectId("5f1339f7d022deabe244f281"), 165 | "name" : "Klaus Arber", 166 | "age" : 53 167 | } 168 | { 169 | "_id" : ObjectId("5f1339f7d022deabe244f282"), 170 | "name" : "Albert Twostone", 171 | "age" : 68 172 | } 173 | { 174 | "_id" : ObjectId("5f1339f7d022deabe244f283"), 175 | "name" : "Gordon Black", 176 | "age" : 38 177 | } 178 | ``` 179 |
180 | 181 | 182 | **Get the statistics of database** 183 | 184 |
185 | Inserting data for the below command 186 | 187 | ```scheme 188 | db.numbers.insertOne({a: 1}) 189 | ``` 190 | **Output** 191 | ```scheme 192 | { 193 | "acknowledged" : true, 194 | "insertedId" : ObjectId("5f13ed5a0249b11a6aa5e380") 195 | } 196 | ``` 197 |
198 | 199 | ```scheme 200 | db.numbers.findOne() 201 | ``` 202 | **Output** 203 | ```scheme 204 | { "_id" : ObjectId("5f13ed5a0249b11a6aa5e380"), "a" : 1 } 205 | ``` 206 |
207 | 208 | ```scheme 209 | > db.stats 210 | ``` 211 | **Output** 212 | ```scheme 213 | function (scale) { 214 | return this.runCommand({dbstats: 1, scale: scale}); 215 | } 216 | ``` 217 |
218 | 219 | ### Statistics of db 220 | 221 | ```scheme 222 | > db.stats() 223 | **Output** 224 | { 225 | "db" : "companyData", 226 | "collections" : 2, 227 | "views" : 0, 228 | "objects" : 2, 229 | "avgObjSize" : 135, 230 | "dataSize" : 270, 231 | "storageSize" : 20480, 232 | "numExtents" : 0, 233 | "indexes" : 2, 234 | "indexSize" : 20480, 235 | "fsUsedSize" : 54183743488, 236 | "fsTotalSize" : 61754699776, 237 | "ok" : 1 238 | } 239 | ``` 240 | 241 | **Getting the datatype** 242 |
243 | Inserting the document for the command below 244 | 245 | ```scheme 246 | db.numbers.insertOne({a: 1.5,b: "r"}) 247 | ``` 248 | **Output** 249 | ```scheme 250 | { 251 | "acknowledged" : true, 252 | "insertedId" : ObjectId("5f13f19e0249b11a6aa5e386") 253 | } 254 | ``` 255 |
256 | 257 | 258 | ```scheme 259 | > typeof db.numbers.findOne({b: "r"}).a 260 | **Output** 261 | number 262 | ``` 263 | 264 | ### Getting all data from an collection 265 | 266 | By default `find()` returns the first 20 documents and a cursor `it` for further iteration. We can use `.toArray()` to get whole collection. 267 | 268 | ```bash 269 | > db.passengers.find().toArray() 270 | ``` -------------------------------------------------------------------------------- /MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents/Update.md: -------------------------------------------------------------------------------- 1 | # Update 2 | 3 | ```scheme 4 | updateOne(filter, data, options) 5 | updateMany(filter, data, options) 6 | replaceOne(filter, data, options) 7 | ``` 8 | 9 | ### updateOne 10 | 11 | Will **update** **first** **matched** document(with key distance and value 1200 and will set marker key to delete **if exits** the key **otherwise** will **create** that key and value **inside** the document). 12 | 13 | ```scheme 14 | > db.flightData.updateOne({distance: 1200},{$set:{marker: "delete"}}) 15 | **Output** 16 | { "acknowledged" : true, "matchedCount" : 1, "modifiedCount" : 1 } //if successfull 17 | 18 | > db.flightData.updateOne({_id : ObjectId("5f132aebd022deabe244f26d")},{$set:{delayed: true}}) 19 | **Output** 20 | { "acknowledged" : true, "matchedCount" : 1, "modifiedCount" : 0 } 21 | ``` 22 | 23 | ### updateMany 24 | 25 | Will **update all documents** which matched the **filter**, in this case, all as there is no filter condition, and will set the value of the key `marker` if **exits** otherwise **will add** a key marker with the value `toDelete` at the document. 
26 | 27 | ```scheme 28 | > db.flightData.updateMany({},{$set:{marker: "toDelete"}}) 29 | ``` 30 | 31 | ### update 32 | 33 | > ****Deprecated this method**** 34 | > 35 | 36 | If the targeted document already has a delayed key with the value false then the update command will not update that but updateOne comment will update this means overwriting the same value. 37 | 38 | ```scheme 39 | > db.flightData.update({_id : ObjectId("5f132aebd022deabe244f26d")},{$set:{delayed: false}}) 40 | **Output** 41 | WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 }) 42 | ``` 43 | 44 | Will **delete** all key-value except `delayed: false`. 45 | 46 | ```scheme 47 | > db.flightData.update({_id : ObjectId("5f132aebd022deabe244f26d")},{delayed: false}) 48 | 49 | **Output** 50 | WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 }) 51 | 52 | > db.flightData.find().pretty() 53 | **Output** 54 | { "_id" : ObjectId("5f132aebd022deabe244f26d"), "delayed" : false } =>modified document 55 | { 56 | "_id" : ObjectId("5f132aebd022deabe244f26e"), 57 | "departureAirport" : "LHR", 58 | "arrivalAirport" : "TXL", 59 | "aircraft" : "Airbus A320", 60 | "distance" : 950, 61 | "intercontinental" : false 62 | } 63 | ``` 64 | 65 | ### replaceOne 66 | 67 | The same can be done by replaceOne 68 | 69 | ```scheme 70 | > db.flightData.replaceOne({_id : ObjectId("5f132aebd022deabe244f26d")},{ 71 | ... "departureAirport": "MUC", 72 | ... "arrivalAirport": "SFO", 73 | ... "aircraft": "Airbus A380", 74 | ... "distance": 12000, 75 | ... "intercontinental": true 76 | ... 
}) 77 | **Output** 78 | { "acknowledged" : true, "matchedCount" : 1, "modifiedCount" : 1 } 79 | 80 | Before running command replaceOne 81 | { "_id" : ObjectId("5f132aebd022deabe244f26d"), "delayed" : false } 82 | After running command replaceOne document will be like 83 | { 84 | "_id" : ObjectId("5f132aebd022deabe244f26d"), 85 | "departureAirport" : "MUC", 86 | "arrivalAirport" : "SFO", 87 | "aircraft" : "Airbus A380", 88 | "distance" : 12000, 89 | "intercontinental" : true 90 | } 91 | ``` 92 | 93 | | update | updateOne & updateMany | 94 | | --- | --- | 95 | | Does not need $set | Need $set attribute | 96 | | Does not overwrite the value of a key if the command and database have the same value. (If the document has only one key) | Will overwrite if have the same value in command and database. | 97 | | Keep only keys and values which are given in command others will be deleted automatically. The same can be done by replaceOne | Will not delete any key value. | -------------------------------------------------------------------------------- /MongoDB/Delete.md: -------------------------------------------------------------------------------- 1 | # Delete 2 |
3 | Initialization 4 | 5 | ```schema 6 | use user 7 | switched to db user 8 | ``` 9 |
10 | 11 | ## Deleting one document 12 | 13 | ```bash 14 | db.users.deleteOne({name: 'Chris'}) 15 | { "acknowledged": true, "deletedCount": 1 } 16 | ``` 17 | 18 | Delete with matched query 19 | 20 | ```bash 21 | > db.users.deleteOne({totalAge: {$gt: 30}, isSporty: true}) 22 | > db.users.deleteOne({totalAge: {$exists: false}, isSporty: true}) 23 | ``` 24 | 25 | ## Delete many 26 | 27 | Delete all documents which `totalAge` greater than 30 and `isSporty` is `true` 28 | 29 | ```bash 30 | > db.users.deleteMany({totalAge: {$gt: 30}, isSporty: true}) 31 | ``` 32 | 33 | Delete all document which does not have `totalAge` element and `isSporty` is `true`(for `null` `$exists` will be true) 34 | 35 | ```bash 36 | > db.users.deleteMany({totalAge: {$exists: false}, isSporty: true}) 37 | ``` 38 | 39 | ## Deleting all entries in a collection 40 | 41 | `{}` ⇒ this is simply a filter that matches every document in the collection. 42 | 43 | ```bash 44 | db.users.deleteMany({}) 45 | ``` 46 | 47 | ## To delete the entire collection 48 | 49 | ```bash 50 | db.users.drop() 51 | ``` 52 | 53 | ## To delete the entire database 54 | 55 | ```bash 56 | db.dropDataBase() 57 | ``` -------------------------------------------------------------------------------- /MongoDB/Index_and_Others/Compound_Index_with_Text.md: -------------------------------------------------------------------------------- 1 | # Compound index with text 2 | 3 | ```cpp 4 | > db.contacts.createIndex({gender: 1}) 5 | // get explain after creating index 6 | > db.contacts.explain("executionStats").find({gender: "male"}) 7 | ``` 8 | 9 | ### Want to find all persons who are older than 30 and male or older than 40 and male. 10 | 11 | When using multiple fields for queries in the index, basically one combined index is created from multiple fields. Here created one combined index from two fields. 
12 | **Every time have to drop the index if the new index(which we want to create) uses the same field** 13 | 14 | Such as 15 | 16 | ```cpp 17 | > db.contacts.dropIndex({'gender': 1}) 18 | > db.contacts.createIndex({'dob.age': 1,'gender': 1}) 19 | **Output** 20 | { 21 | "createdCollectionAutomatically" : false, 22 | "numIndexesBefore" : 1, 23 | "numIndexesAfter" : 2, 24 | "ok" : 1 25 | } 26 | ``` 27 | 28 | Getting info from the query 29 | 30 | ```cpp 31 | > db.contacts.explain().find({'dob.age': 35,'gender': 'male'}) 32 | ``` 33 | 34 | ```bash 35 | > db.contacts.getIndexes() 36 | **Output** 37 | [ 38 | { 39 | "v" : 2, 40 | "key" : { 41 | "_id" : 1 42 | }, 43 | "name" : "_id_", 44 | "ns" : "contactData.contacts" 45 | }, 46 | { 47 | "v" : 2, 48 | "key" : { 49 | "dob.age" : 1, 50 | "gender" : 1 51 | }, 52 | "name" : "dob.age_1_gender_1", 53 | "ns" : "contactData.contacts" 54 | } 55 | ] 56 | ``` 57 | 58 | If want to execute single index from multiple combined index its work left to right `"indexName" : "dob.age_1_gender_1"` 59 | 60 | `db.contacts.explain().find({'dob.age': 35})` It works fine, which means it works with an index scan. But `db.contacts.explain().find({'gender': 'male'})` it will do normal scan. As the index will be like `33_male`, `33_male`, and `33_female` So it will be sorted for the first key, not for the second. So if it is multiple keys in an index such as 4(first_second_third_fourth) then if we combine left to right in find condition such as first / first_second / first_second_third / first_second_third_fourth it will use index search. Any discontinue left to right will not perform index search rather will use normal collection search such as second_third, first_third, etc. 
-------------------------------------------------------------------------------- /MongoDB/Index_and_Others/Configuring_Indexes.md: -------------------------------------------------------------------------------- 1 | # Configuring Indexes 2 | 3 | ### Make all key values unique 4 | 5 | Every collection _id is unique by default. Also can make any key not to add the same value to the document if we make `{unique: true}`. This will ensure all values of the given key will be unique. 6 | 7 | ```cpp 8 | > db.contacts.createIndex({email: 1}, {unique: true}) 9 | **Output** 10 | { 11 | "ok" : 0, 12 | "errmsg" : "E11000 duplicate key error collection: contactData.contacts index: email_1 dup key: { : \"abigail.clark@example.com\" }", 13 | "code" : 11000, 14 | "codeName" : "DuplicateKey" 15 | } 16 | ``` 17 | 18 | ### Understanding Partial Filters 19 | 20 | Creating an index on age, not on gender but age but only for elements where the underlying document is for a male. This means documents that have gender female will not include in the index. 21 | 22 | ```cpp 23 | db.contacts.createIndex({'dob.age': 1}, {partialFilterExpression:{gender: 'male'}}) 24 | { 25 | "createdCollectionAutomatically" : false, 26 | "numIndexesBefore" : 1, 27 | "numIndexesAfter" : 2, 28 | "ok" : 1 29 | } 30 | ``` 31 | 32 | Also can also use the age query 33 | 34 | ```cpp 35 | > db.contacts.createIndex({'dob.age': 1}, {partialFilterExpression:{'dob.age': {$gt: 60}}}) 36 | ``` 37 | 38 | This does not work. because as a partial index we have to also use gender and do not specify it in query so it will also return the document with the gender female also. 
39 | 40 | ```cpp 41 | > db.contacts.explain().find({'dob.age': {$gt: 60}}) 42 | ``` 43 | 44 | This works with an index scan 45 | 46 | ```cpp 47 | db.contacts.explain().find({'dob.age': {$gt: 60}, gender: 'male'}) 48 | ``` 49 | 50 | ### Applying the Partial Index 51 | 52 | ```cpp 53 | > db.users.insertMany([{name: 'Max', email: 'max@test.com'},{name: 'Manu'}]) 54 | ``` 55 | 56 | Implementing a unique key with email 57 | 58 | ```cpp 59 | > db.users.createIndex({email: 1}, {unique: true}) 60 | ``` 61 | 62 | If now want to add a new user without email, it says duplicate index error, because no values store twice. (If there is a collection without email) 63 | 64 | ```cpp 65 | > db.users.insertOne({name: 'Anna'}) 66 | ``` 67 | 68 | But a person could not have an email 69 | 70 | ```cpp 71 | > db.users.dropIndex({email: 1}) 72 | ``` 73 | 74 | Now we create indexes a bit differently. 75 | 76 | ```cpp 77 | > db.users.createIndex({email: 1},{unique: true, partialFilterExpression: {email: {$exists: true}}}) 78 | ``` 79 | 80 | Now add users without email, it works as exists true for null also 81 | 82 | ```cpp 83 | db.users.insertOne({name: 'Anna'}) 84 | ``` 85 | 86 | Now we have three users one has email and the other two do without email 87 | 88 | ```cpp 89 | db.users.find().pretty() 90 | ``` 91 | 92 | As a partial index has already been created we can not add a new user with the same email 93 | 94 | ```cpp 95 | db.users.insertOne({name: 'Anna', email: 'max@test.com'}) 96 | ``` -------------------------------------------------------------------------------- /MongoDB/Index_and_Others/How_MongoDB_rejects_a_plan.md: -------------------------------------------------------------------------------- 1 | # How MongoDB rejects a plan 2 | 3 | Creating a compound index 4 | Order is important for compound index 5 | Creating an index putting name first wouldn't make much sense(As we already have an index of names then it will be the same as that). 
6 | If age comes first, we can also filter just for age and take advantage of this index. 7 | If filtered for just a name and didn't have that index, the name could not be supported by the index. 8 | 9 | Here we can use just age or a combination of age and name. 10 | 11 | ```cpp 12 | db.customers.createIndex({age: 1, name: 1}) 13 | 14 | > db.customers.getIndexes() 15 | [ 16 | { 17 | "v" : 2, 18 | "key" : { 19 | "_id" : 1 20 | }, 21 | "name" : "_id_", 22 | "ns" : "test.customers" 23 | }, 24 | { 25 | "v" : 2, 26 | "key" : { 27 | "name" : 1 28 | }, 29 | "name" : "name_1", 30 | "ns" : "test.customers" 31 | }, 32 | { 33 | "v" : 2, 34 | "key" : { 35 | "age" : 1, 36 | "name" : 1 37 | }, 38 | "name" : "age_1_name_1", 39 | "ns" : "test.customers" 40 | } 41 | ] 42 | ``` 43 | 44 | Let execute query when executing query order does not matter in a compound index. Will reject this index if there are names only as it already has an index see rejectionPlan and will work for if find on basic of name and age both field see winningPlan. 
45 | 46 | ```cpp 47 | db.customers.explain().find({age: 30, name: 'Max'}) 48 | db.customers.explain().find({name: 'Max', age: 30}) 49 | 50 | **Output** 51 | { 52 | "queryPlanner" : { 53 | "plannerVersion" : 1, 54 | "namespace" : "test.customers", 55 | "indexFilterSet" : false, 56 | "parsedQuery" : { 57 | "$and" : [ 58 | { 59 | "age" : { 60 | "$eq" : 30 61 | } 62 | }, 63 | { 64 | "name" : { 65 | "$eq" : "Max" 66 | } 67 | } 68 | ] 69 | }, 70 | "**winningPlan**" : { 71 | "stage" : "FETCH", 72 | "inputStage" : { 73 | "stage" : "IXSCAN", 74 | "keyPattern" : { 75 | "age" : 1, 76 | "name" : 1 77 | }, 78 | "indexName" : "age_1_name_1", 79 | "isMultiKey" : false, 80 | "multiKeyPaths" : { 81 | "age" : [ ], 82 | "name" : [ ] 83 | }, 84 | "isUnique" : false, 85 | "isSparse" : false, 86 | "isPartial" : false, 87 | "indexVersion" : 2, 88 | "direction" : "forward", 89 | "indexBounds" : { 90 | "age" : [ 91 | "[30.0, 30.0]" 92 | ], 93 | "name" : [ 94 | "[\"Max\", \"Max\"]" 95 | ] 96 | } 97 | } 98 | }, 99 | "**rejectedPlans**" : [ 100 | { 101 | "stage" : "FETCH", 102 | "filter" : { 103 | "age" : { 104 | "$eq" : 30 105 | } 106 | }, 107 | "inputStage" : { 108 | "stage" : "IXSCAN", 109 | "keyPattern" : { 110 | "name" : 1 111 | }, 112 | "indexName" : "name_1", 113 | "isMultiKey" : false, 114 | "multiKeyPaths" : { 115 | "name" : [ ] 116 | }, 117 | "isUnique" : false, 118 | "isSparse" : false, 119 | "isPartial" : false, 120 | "indexVersion" : 2, 121 | "direction" : "forward", 122 | "indexBounds" : { 123 | "name" : [ 124 | "[\"Max\", \"Max\"]" 125 | ] 126 | } 127 | } 128 | } 129 | ] 130 | }, 131 | "serverInfo" : { 132 | "host" : "jahid-HP-ProBook-6470b", 133 | "port" : 27017, 134 | "version" : "3.6.8", 135 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 136 | }, 137 | "ok" : 1 138 | } 139 | ``` 140 | 141 | Wining plan 142 | 143 | 1. Approach 1 144 | 2. Approach 2 145 | 3. 
Approach 3 ⇒ winning Plan ⇒ Cached ⇒ Cache ⇒ but the cache is not there forever 146 | 147 | Here approach 3 is the best match compared to 1 and 2. 148 | 149 | When the winning plan gets removed. 150 | 151 | 1. Write Threshold (currently 1,000) 152 | 2. Index is Rebuilt 153 | 3. Other Indexes are Added or Removed 154 | 4. MongoDB Server is Restarted 155 | 156 | ```cpp 157 | db.customers.insertOne({name:'Raju', age:22, salary: 1000}) 158 | ``` 159 | 160 | Get details of all plan 161 | Here we get the all details of the plan which is good and the execution time 162 | 163 | ```cpp 164 | db.customers.explain('allPlansExecution').find({age: 30, name: 'Max'}) 165 | ``` 166 | 167 |
168 | Output 169 | 170 | ```bash 171 | { 172 | "queryPlanner" : { 173 | "plannerVersion" : 1, 174 | "namespace" : "test.customers", 175 | "indexFilterSet" : false, 176 | "parsedQuery" : { 177 | "$and" : [ 178 | { 179 | "age" : { 180 | "$eq" : 30 181 | } 182 | }, 183 | { 184 | "name" : { 185 | "$eq" : "Max" 186 | } 187 | } 188 | ] 189 | }, 190 | "winningPlan" : { 191 | "stage" : "FETCH", 192 | "inputStage" : { 193 | "stage" : "IXSCAN", 194 | "keyPattern" : { 195 | "age" : 1, 196 | "name" : 1 197 | }, 198 | "indexName" : "age_1_name_1", 199 | "isMultiKey" : false, 200 | "multiKeyPaths" : { 201 | "age" : [ ], 202 | "name" : [ ] 203 | }, 204 | "isUnique" : false, 205 | "isSparse" : false, 206 | "isPartial" : false, 207 | "indexVersion" : 2, 208 | "direction" : "forward", 209 | "indexBounds" : { 210 | "age" : [ 211 | "[30.0, 30.0]" 212 | ], 213 | "name" : [ 214 | "[\"Max\", \"Max\"]" 215 | ] 216 | } 217 | } 218 | }, 219 | "rejectedPlans" : [ 220 | { 221 | "stage" : "FETCH", 222 | "filter" : { 223 | "age" : { 224 | "$eq" : 30 225 | } 226 | }, 227 | "inputStage" : { 228 | "stage" : "IXSCAN", 229 | "keyPattern" : { 230 | "name" : 1 231 | }, 232 | "indexName" : "name_1", 233 | "isMultiKey" : false, 234 | "multiKeyPaths" : { 235 | "name" : [ ] 236 | }, 237 | "isUnique" : false, 238 | "isSparse" : false, 239 | "isPartial" : false, 240 | "indexVersion" : 2, 241 | "direction" : "forward", 242 | "indexBounds" : { 243 | "name" : [ 244 | "[\"Max\", \"Max\"]" 245 | ] 246 | } 247 | } 248 | } 249 | ] 250 | }, 251 | "executionStats" : { 252 | "executionSuccess" : true, 253 | "nReturned" : 0, 254 | "executionTimeMillis" : 0, 255 | "totalKeysExamined" : 0, 256 | "totalDocsExamined" : 0, 257 | "executionStages" : { 258 | "stage" : "FETCH", 259 | "nReturned" : 0, 260 | "executionTimeMillisEstimate" : 0, 261 | "works" : 2, 262 | "advanced" : 0, 263 | "needTime" : 0, 264 | "needYield" : 0, 265 | "saveState" : 0, 266 | "restoreState" : 0, 267 | "isEOF" : 1, 268 | "invalidates" : 0, 269 | 
"docsExamined" : 0, 270 | "alreadyHasObj" : 0, 271 | "inputStage" : { 272 | "stage" : "IXSCAN", 273 | "nReturned" : 0, 274 | "executionTimeMillisEstimate" : 0, 275 | "works" : 1, 276 | "advanced" : 0, 277 | "needTime" : 0, 278 | "needYield" : 0, 279 | "saveState" : 0, 280 | "restoreState" : 0, 281 | "isEOF" : 1, 282 | "invalidates" : 0, 283 | "keyPattern" : { 284 | "age" : 1, 285 | "name" : 1 286 | }, 287 | "indexName" : "age_1_name_1", 288 | "isMultiKey" : false, 289 | "multiKeyPaths" : { 290 | "age" : [ ], 291 | "name" : [ ] 292 | }, 293 | "isUnique" : false, 294 | "isSparse" : false, 295 | "isPartial" : false, 296 | "indexVersion" : 2, 297 | "direction" : "forward", 298 | "indexBounds" : { 299 | "age" : [ 300 | "[30.0, 30.0]" 301 | ], 302 | "name" : [ 303 | "[\"Max\", \"Max\"]" 304 | ] 305 | }, 306 | "keysExamined" : 0, 307 | "seeks" : 1, 308 | "dupsTested" : 0, 309 | "dupsDropped" : 0, 310 | "seenInvalidated" : 0 311 | } 312 | }, 313 | "allPlansExecution" : [ 314 | { 315 | "nReturned" : 0, 316 | "executionTimeMillisEstimate" : 0, 317 | "totalKeysExamined" : 1, 318 | "totalDocsExamined" : 1, 319 | "executionStages" : { 320 | "stage" : "FETCH", 321 | "filter" : { 322 | "age" : { 323 | "$eq" : 30 324 | } 325 | }, 326 | "nReturned" : 0, 327 | "executionTimeMillisEstimate" : 0, 328 | "works" : 1, 329 | "advanced" : 0, 330 | "needTime" : 1, 331 | "needYield" : 0, 332 | "saveState" : 0, 333 | "restoreState" : 0, 334 | "isEOF" : 0, 335 | "invalidates" : 0, 336 | "docsExamined" : 1, 337 | "alreadyHasObj" : 0, 338 | "inputStage" : { 339 | "stage" : "IXSCAN", 340 | "nReturned" : 1, 341 | "executionTimeMillisEstimate" : 0, 342 | "works" : 1, 343 | "advanced" : 1, 344 | "needTime" : 0, 345 | "needYield" : 0, 346 | "saveState" : 0, 347 | "restoreState" : 0, 348 | "isEOF" : 0, 349 | "invalidates" : 0, 350 | "keyPattern" : { 351 | "name" : 1 352 | }, 353 | "indexName" : "name_1", 354 | "isMultiKey" : false, 355 | "multiKeyPaths" : { 356 | "name" : [ ] 357 | }, 358 | "isUnique" 
: false, 359 | "isSparse" : false, 360 | "isPartial" : false, 361 | "indexVersion" : 2, 362 | "direction" : "forward", 363 | "indexBounds" : { 364 | "name" : [ 365 | "[\"Max\", \"Max\"]" 366 | ] 367 | }, 368 | "keysExamined" : 1, 369 | "seeks" : 1, 370 | "dupsTested" : 0, 371 | "dupsDropped" : 0, 372 | "seenInvalidated" : 0 373 | } 374 | } 375 | }, 376 | { 377 | "nReturned" : 0, 378 | "executionTimeMillisEstimate" : 0, 379 | "totalKeysExamined" : 0, 380 | "totalDocsExamined" : 0, 381 | "executionStages" : { 382 | "stage" : "FETCH", 383 | "nReturned" : 0, 384 | "executionTimeMillisEstimate" : 0, 385 | "works" : 1, 386 | "advanced" : 0, 387 | "needTime" : 0, 388 | "needYield" : 0, 389 | "saveState" : 0, 390 | "restoreState" : 0, 391 | "isEOF" : 1, 392 | "invalidates" : 0, 393 | "docsExamined" : 0, 394 | "alreadyHasObj" : 0, 395 | "inputStage" : { 396 | "stage" : "IXSCAN", 397 | "nReturned" : 0, 398 | "executionTimeMillisEstimate" : 0, 399 | "works" : 1, 400 | "advanced" : 0, 401 | "needTime" : 0, 402 | "needYield" : 0, 403 | "saveState" : 0, 404 | "restoreState" : 0, 405 | "isEOF" : 1, 406 | "invalidates" : 0, 407 | "keyPattern" : { 408 | "age" : 1, 409 | "name" : 1 410 | }, 411 | "indexName" : "age_1_name_1", 412 | "isMultiKey" : false, 413 | "multiKeyPaths" : { 414 | "age" : [ ], 415 | "name" : [ ] 416 | }, 417 | "isUnique" : false, 418 | "isSparse" : false, 419 | "isPartial" : false, 420 | "indexVersion" : 2, 421 | "direction" : "forward", 422 | "indexBounds" : { 423 | "age" : [ 424 | "[30.0, 30.0]" 425 | ], 426 | "name" : [ 427 | "[\"Max\", \"Max\"]" 428 | ] 429 | }, 430 | "keysExamined" : 0, 431 | "seeks" : 1, 432 | "dupsTested" : 0, 433 | "dupsDropped" : 0, 434 | "seenInvalidated" : 0 435 | } 436 | } 437 | } 438 | ] 439 | }, 440 | "serverInfo" : { 441 | "host" : "jahid-HP-ProBook-6470b", 442 | "port" : 27017, 443 | "version" : "3.6.8", 444 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 445 | }, 446 | "ok" : 1 447 | } 448 | ``` 449 | 450 |
-------------------------------------------------------------------------------- /MongoDB/Index_and_Others/Multi_Key_Indexes.md: -------------------------------------------------------------------------------- 1 | # Multi Key Indexes 2 | 3 | Insert new data into a new table 4 | 5 | ```cpp 6 | db.contacts.insertOne( 7 | { 8 | name:'Max', hobbies:['Cooking', 'Sports'], 9 | address: [{street: 'Main Street'}, {street: 'Second Street'}] 10 | } 11 | ) 12 | **Output** 13 | { 14 | "acknowledged" : true, 15 | "insertedId" : ObjectId("633024183bb3e298033546d4") 16 | } 17 | 18 | db.contacts.findOne() 19 | **Output** 20 | { 21 | "_id" : ObjectId("633024183bb3e298033546d4"), 22 | "name" : "Max", 23 | "hobbies" : [ 24 | "Cooking", 25 | "Sports" 26 | ], 27 | "address" : [ 28 | { 29 | "street" : "Main Street" 30 | }, 31 | { 32 | "street" : "Second Street" 33 | } 34 | ] 35 | } 36 | ``` 37 | 38 | Create an index 39 | 40 | ```cpp 41 | db.contacts.createIndex({hobbies: 1}) 42 | **Output** 43 | { 44 | "createdCollectionAutomatically" : false, 45 | "numIndexesBefore" : 1, 46 | "numIndexesAfter" : 2, 47 | "ok" : 1 48 | } 49 | 50 | db.contacts.find({hobbies: 'Sports'}).pretty() 51 | **Output** 52 | { 53 | "_id" : ObjectId("633024183bb3e298033546d4"), 54 | "name" : "Max", 55 | "hobbies" : [ 56 | "Cooking", 57 | "Sports" 58 | ], 59 | "address" : [ 60 | { 61 | "street" : "Main Street" 62 | }, 63 | { 64 | "street" : "Second Street" 65 | } 66 | ] 67 | } 68 | ``` 69 | 70 | Execute explain 71 | Here multi-key is true, it is created when documents into an array. MongoDB takes all values of the key of all documents in this case array and stores them separately for indexing. 72 | 73 | Such as if all keys have 4 values in the array key and have a total of 1000 documents then it will store 4000 elements for indexing. 74 | 75 | ```cpp 76 | db.contacts.explain('executionStats').find({hobbies: 'Sports'}) 77 | ``` 78 | 79 |
80 | Output `"stage" : "IXSCAN"` `"isMultiKey" : true` 81 | 82 | ```schema 83 | { 84 | "queryPlanner" : { 85 | "plannerVersion" : 1, 86 | "namespace" : "test.contacts", 87 | "indexFilterSet" : false, 88 | "parsedQuery" : { 89 | "hobbies" : { 90 | "$eq" : "Sports" 91 | } 92 | }, 93 | "winningPlan" : { 94 | "stage" : "FETCH", 95 | "inputStage" : { 96 | "stage" : "IXSCAN", 97 | "keyPattern" : { 98 | "hobbies" : 1 99 | }, 100 | "indexName" : "hobbies_1", 101 | "isMultiKey" : true, 102 | "multiKeyPaths" : { 103 | "hobbies" : [ 104 | "hobbies" 105 | ] 106 | }, 107 | "isUnique" : false, 108 | "isSparse" : false, 109 | "isPartial" : false, 110 | "indexVersion" : 2, 111 | "direction" : "forward", 112 | "indexBounds" : { 113 | "hobbies" : [ 114 | "[\"Sports\", \"Sports\"]" 115 | ] 116 | } 117 | } 118 | }, 119 | "rejectedPlans" : [ ] 120 | }, 121 | "executionStats" : { 122 | "executionSuccess" : true, 123 | "nReturned" : 1, 124 | "executionTimeMillis" : 0, 125 | "totalKeysExamined" : 1, 126 | "totalDocsExamined" : 1, 127 | "executionStages" : { 128 | "stage" : "FETCH", 129 | "nReturned" : 1, 130 | "executionTimeMillisEstimate" : 0, 131 | "works" : 2, 132 | "advanced" : 1, 133 | "needTime" : 0, 134 | "needYield" : 0, 135 | "saveState" : 0, 136 | "restoreState" : 0, 137 | "isEOF" : 1, 138 | "invalidates" : 0, 139 | "docsExamined" : 1, 140 | "alreadyHasObj" : 0, 141 | "inputStage" : { 142 | **"stage" : "IXSCAN",** 143 | "nReturned" : 1, 144 | "executionTimeMillisEstimate" : 0, 145 | "works" : 2, 146 | "advanced" : 1, 147 | "needTime" : 0, 148 | "needYield" : 0, 149 | "saveState" : 0, 150 | "restoreState" : 0, 151 | "isEOF" : 1, 152 | "invalidates" : 0, 153 | "keyPattern" : { 154 | "hobbies" : 1 155 | }, 156 | "indexName" : "hobbies_1", 157 | **"isMultiKey" : true,** 158 | "multiKeyPaths" : { 159 | "hobbies" : [ 160 | "hobbies" 161 | ] 162 | }, 163 | "isUnique" : false, 164 | "isSparse" : false, 165 | "isPartial" : false, 166 | "indexVersion" : 2, 167 | "direction" : "forward", 
168 | "indexBounds" : { 169 | "hobbies" : [ 170 | "[\"Sports\", \"Sports\"]" 171 | ] 172 | }, 173 | "keysExamined" : 1, 174 | "seeks" : 1, 175 | "dupsTested" : 1, 176 | "dupsDropped" : 0, 177 | "seenInvalidated" : 0 178 | } 179 | } 180 | }, 181 | "serverInfo" : { 182 | "host" : "jahid-HP-ProBook-6470b", 183 | "port" : 27017, 184 | "version" : "3.6.8", 185 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 186 | }, 187 | "ok" : 1 188 | } 189 | ``` 190 | 191 |
192 | 193 | 194 | Let's create another index 195 | 196 | ```cpp 197 | db.contacts.createIndex({addresses: 1}) 198 | **Output** 199 | { 200 | "createdCollectionAutomatically" : false, 201 | "numIndexesBefore" : 2, 202 | "numIndexesAfter" : 3, 203 | "ok" : 1 204 | } 205 | ``` 206 | 207 | Here the index is not used, because the multi-key index stores each whole embedded document of the array as a single index key, not the individual nested fields (such as `street`) inside those documents. (NOTE(review): the document was inserted with the field `address`, but the index and queries here use `addresses` — verify the field name.) 208 | 209 | ```cpp 210 | db.contacts.explain('executionStats').find({'addresses.street': 'Main Street'}) 211 | ``` 212 | 213 | <details>
214 | Output `"stage" : "COLLSCAN"` 215 | 216 | ```schema 217 | { 218 | "queryPlanner" : { 219 | "plannerVersion" : 1, 220 | "namespace" : "test.contacts", 221 | "indexFilterSet" : false, 222 | "parsedQuery" : { 223 | "addresses.street" : { 224 | "$eq" : "Main Street" 225 | } 226 | }, 227 | "winningPlan" : { 228 | "stage" : "COLLSCAN", 229 | "filter" : { 230 | "addresses.street" : { 231 | "$eq" : "Main Street" 232 | } 233 | }, 234 | "direction" : "forward" 235 | }, 236 | "rejectedPlans" : [ ] 237 | }, 238 | "executionStats" : { 239 | "executionSuccess" : true, 240 | "nReturned" : 0, 241 | "executionTimeMillis" : 0, 242 | "totalKeysExamined" : 0, 243 | "totalDocsExamined" : 2, 244 | "executionStages" : { 245 | **"stage" : "COLLSCAN",** 246 | "filter" : { 247 | "addresses.street" : { 248 | "$eq" : "Main Street" 249 | } 250 | }, 251 | "nReturned" : 0, 252 | "executionTimeMillisEstimate" : 0, 253 | "works" : 4, 254 | "advanced" : 0, 255 | "needTime" : 3, 256 | "needYield" : 0, 257 | "saveState" : 0, 258 | "restoreState" : 0, 259 | "isEOF" : 1, 260 | "invalidates" : 0, 261 | "direction" : "forward", 262 | "docsExamined" : 2 263 | } 264 | }, 265 | "serverInfo" : { 266 | "host" : "jahid-HP-ProBook-6470b", 267 | "port" : 27017, 268 | "version" : "3.6.8", 269 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 270 | }, 271 | "ok" : 1 272 | } 273 | ``` 274 | 275 |
276 | 277 | 278 | It works when querying as we are searching for the whole address document and we create an index of whole documents not for values in the nested document. 279 | 280 | ```cpp 281 | db.contacts.explain('executionStats').find({addresses: {street: 'Main Street'}}) 282 | ``` 283 | 284 |
285 | Output `"stage" : "IXSCAN"` 286 | 287 | ```schema 288 | { 289 | "queryPlanner" : { 290 | "plannerVersion" : 1, 291 | "namespace" : "test.contacts", 292 | "indexFilterSet" : false, 293 | "parsedQuery" : { 294 | "addresses" : { 295 | "$eq" : { 296 | "street" : "Main Street" 297 | } 298 | } 299 | }, 300 | "winningPlan" : { 301 | "stage" : "FETCH", 302 | "inputStage" : { 303 | "stage" : "IXSCAN", 304 | "keyPattern" : { 305 | "addresses" : 1 306 | }, 307 | "indexName" : "addresses_1", 308 | "isMultiKey" : false, 309 | "multiKeyPaths" : { 310 | "addresses" : [ ] 311 | }, 312 | "isUnique" : false, 313 | "isSparse" : false, 314 | "isPartial" : false, 315 | "indexVersion" : 2, 316 | "direction" : "forward", 317 | "indexBounds" : { 318 | "addresses" : [ 319 | "[{ street: \"Main Street\" }, { street: \"Main Street\" }]" 320 | ] 321 | } 322 | } 323 | }, 324 | "rejectedPlans" : [ ] 325 | }, 326 | "executionStats" : { 327 | "executionSuccess" : true, 328 | "nReturned" : 0, 329 | "executionTimeMillis" : 1, 330 | "totalKeysExamined" : 0, 331 | "totalDocsExamined" : 0, 332 | "executionStages" : { 333 | "stage" : "FETCH", 334 | "nReturned" : 0, 335 | "executionTimeMillisEstimate" : 0, 336 | "works" : 1, 337 | "advanced" : 0, 338 | "needTime" : 0, 339 | "needYield" : 0, 340 | "saveState" : 0, 341 | "restoreState" : 0, 342 | "isEOF" : 1, 343 | "invalidates" : 0, 344 | "docsExamined" : 0, 345 | "alreadyHasObj" : 0, 346 | "inputStage" : { 347 | **"stage" : "IXSCAN",** 348 | "nReturned" : 0, 349 | "executionTimeMillisEstimate" : 0, 350 | "works" : 1, 351 | "advanced" : 0, 352 | "needTime" : 0, 353 | "needYield" : 0, 354 | "saveState" : 0, 355 | "restoreState" : 0, 356 | "isEOF" : 1, 357 | "invalidates" : 0, 358 | "keyPattern" : { 359 | "addresses" : 1 360 | }, 361 | "indexName" : "addresses_1", 362 | "isMultiKey" : false, 363 | "multiKeyPaths" : { 364 | "addresses" : [ ] 365 | }, 366 | "isUnique" : false, 367 | "isSparse" : false, 368 | "isPartial" : false, 369 | "indexVersion" : 
2, 370 | "direction" : "forward", 371 | "indexBounds" : { 372 | "addresses" : [ 373 | "[{ street: \"Main Street\" }, { street: \"Main Street\" }]" 374 | ] 375 | }, 376 | "keysExamined" : 0, 377 | "seeks" : 1, 378 | "dupsTested" : 0, 379 | "dupsDropped" : 0, 380 | "seenInvalidated" : 0 381 | } 382 | } 383 | }, 384 | "serverInfo" : { 385 | "host" : "jahid-HP-ProBook-6470b", 386 | "port" : 27017, 387 | "version" : "3.6.8", 388 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 389 | }, 390 | "ok" : 1 391 | } 392 | ``` 393 |
394 | 395 | 396 | If the index is created on the nested field like this, it works — and because the indexed field lives inside array elements, this also creates a multi-key index 397 | 398 | ```cpp 399 | db.contacts.createIndex({'addresses.street': 1}) 400 | **Output** 401 | { 402 | "createdCollectionAutomatically" : false, 403 | "numIndexesBefore" : 3, 404 | "numIndexesAfter" : 4, 405 | "ok" : 1 406 | } 407 | ``` 408 | 409 | This query now uses an index scan 410 | 411 | ```bash 412 | db.contacts.explain('executionStats').find({'addresses.street': 'Main Street'}) 413 | ``` 414 | 415 | <details>
416 | Output `"stage" : "IXSCAN"` 417 | 418 | ```schema 419 | { 420 | "queryPlanner" : { 421 | "plannerVersion" : 1, 422 | "namespace" : "test.contacts", 423 | "indexFilterSet" : false, 424 | "parsedQuery" : { 425 | "addresses.street" : { 426 | "$eq" : "Main Street" 427 | } 428 | }, 429 | "winningPlan" : { 430 | "stage" : "FETCH", 431 | "inputStage" : { 432 | "stage" : "IXSCAN", 433 | "keyPattern" : { 434 | "addresses.street" : 1 435 | }, 436 | "indexName" : "addresses.street_1", 437 | "isMultiKey" : false, 438 | "multiKeyPaths" : { 439 | "addresses.street" : [ ] 440 | }, 441 | "isUnique" : false, 442 | "isSparse" : false, 443 | "isPartial" : false, 444 | "indexVersion" : 2, 445 | "direction" : "forward", 446 | "indexBounds" : { 447 | "addresses.street" : [ 448 | "[\"Main Street\", \"Main Street\"]" 449 | ] 450 | } 451 | } 452 | }, 453 | "rejectedPlans" : [ ] 454 | }, 455 | "executionStats" : { 456 | "executionSuccess" : true, 457 | "nReturned" : 0, 458 | "executionTimeMillis" : 3, 459 | "totalKeysExamined" : 0, 460 | "totalDocsExamined" : 0, 461 | "executionStages" : { 462 | "stage" : "FETCH", 463 | "nReturned" : 0, 464 | "executionTimeMillisEstimate" : 0, 465 | "works" : 1, 466 | "advanced" : 0, 467 | "needTime" : 0, 468 | "needYield" : 0, 469 | "saveState" : 0, 470 | "restoreState" : 0, 471 | "isEOF" : 1, 472 | "invalidates" : 0, 473 | "docsExamined" : 0, 474 | "alreadyHasObj" : 0, 475 | "inputStage" : { 476 | **"stage" : "IXSCAN",** 477 | "nReturned" : 0, 478 | "executionTimeMillisEstimate" : 0, 479 | "works" : 1, 480 | "advanced" : 0, 481 | "needTime" : 0, 482 | "needYield" : 0, 483 | "saveState" : 0, 484 | "restoreState" : 0, 485 | "isEOF" : 1, 486 | "invalidates" : 0, 487 | "keyPattern" : { 488 | "addresses.street" : 1 489 | }, 490 | "indexName" : "addresses.street_1", 491 | "isMultiKey" : false, 492 | "multiKeyPaths" : { 493 | "addresses.street" : [ ] 494 | }, 495 | "isUnique" : false, 496 | "isSparse" : false, 497 | "isPartial" : false, 498 | 
"indexVersion" : 2, 499 | "direction" : "forward", 500 | "indexBounds" : { 501 | "addresses.street" : [ 502 | "[\"Main Street\", \"Main Street\"]" 503 | ] 504 | }, 505 | "keysExamined" : 0, 506 | "seeks" : 1, 507 | "dupsTested" : 0, 508 | "dupsDropped" : 0, 509 | "seenInvalidated" : 0 510 | } 511 | } 512 | }, 513 | "serverInfo" : { 514 | "host" : "jahid-HP-ProBook-6470b", 515 | "port" : 27017, 516 | "version" : "3.6.8", 517 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 518 | }, 519 | "ok" : 1 520 | } 521 | ``` 522 | 523 |
524 | 525 | 526 | Still **multi-key index** is super **helpful** if have **queries** that regularly target **array values** or even **nested values** in an **embedded document** in **arrays**. 527 | There are a couple of restrictions or one important restriction to be precise when using multi-key indexes 528 | 529 | Create a multi-key compound index, it is also possible, when having one multi-key 530 | 531 | ```cpp 532 | db.contacts.createIndex({name: 1, hobbies: 1}) 533 | **Output** 534 | { 535 | "createdCollectionAutomatically" : false, 536 | "numIndexesBefore" : 5, 537 | "numIndexesAfter" : 6, 538 | "ok" : 1 539 | } 540 | ``` 541 | 542 | But can not add more than one array in a single index 543 | 544 | ```cpp 545 | db.contacts.createIndex({addresses: 1, hobbies: 1}) 546 | ``` -------------------------------------------------------------------------------- /MongoDB/Index_and_Others/Query_Diagnosis_and_Query_Planning.md: -------------------------------------------------------------------------------- 1 | # Query Diagnosis and Query Planning 2 | 3 | 1. 'queryPlanner' ⇒ Show Summary for Executed Query + Winning Plan 4 | 2. 'executionsStats' ⇒ Show Detailed Summary for Executed Query + Winning Plan + Possibly Rejected Plans 5 | 3. 'allPlanExecution' ⇒ Show Detailed Summary for Executed Query + Winning Plan + Winning Plan Decision Process 6 | 7 | ### Efficient Queries and Covered Queries 8 | 9 | 1. Milliseconds Process Time 10 | 2. IXSCAN typically beats COLLSCAN 11 | 3. Number of keys(in index) Examined 12 | 4. Number of Documents Examined 13 | 5. Number of Documents Returns 14 | 15 | The number of keys(in index) Examined and the Number of Documents Examined should be as close as possible or zero. 16 | 17 | The number of Documents Examined and the Number of Documents Returned should be as close as possible or zero. 
18 | 19 | **Covered Query** ⇒ a query that is fully satisfied ("covered") by an index: only index keys are examined, the Number of Documents Examined is 0, and all returned fields come directly from the index 20 | 21 | Understanding Covered Queries 22 | 23 | ```bash 24 | > db.customers.insertMany([{name: 'Max', age: 29, salary: 3000}, {name: 'Manu', age: 30, salary: 4000}]) 25 | **Output** 26 | { 27 | "acknowledged" : true, 28 | "insertedIds" : [ 29 | ObjectId("632f30533bb3e298033546d1"), 30 | ObjectId("632f30533bb3e298033546d2") 31 | ] 32 | } 33 | ``` 34 | 35 | Creating index 36 | 37 | ```cpp 38 | db.customers.createIndex({name: 1}) 39 | db.customers.getIndexes() 40 | ``` 41 | 42 | Get info 43 | 44 | ```cpp 45 | db.customers.explain('executionStats').find({name: 'Max'}) 46 | ``` 47 | 48 | <details>
49 | Output `"totalKeysExamined" : 1,` `"totalDocsExamined" : 1` 50 | 51 | ```schema 52 | { 53 | "queryPlanner" : { 54 | "plannerVersion" : 1, 55 | "namespace" : "test.customers", 56 | "indexFilterSet" : false, 57 | "parsedQuery" : { 58 | "name" : { 59 | "$eq" : "Max" 60 | } 61 | }, 62 | "winningPlan" : { 63 | "stage" : "FETCH", 64 | "inputStage" : { 65 | "stage" : "IXSCAN", 66 | "keyPattern" : { 67 | "name" : 1 68 | }, 69 | "indexName" : "name_1", 70 | "isMultiKey" : false, 71 | "multiKeyPaths" : { 72 | "name" : [ ] 73 | }, 74 | "isUnique" : false, 75 | "isSparse" : false, 76 | "isPartial" : false, 77 | "indexVersion" : 2, 78 | "direction" : "forward", 79 | "indexBounds" : { 80 | "name" : [ 81 | "[\"Max\", \"Max\"]" 82 | ] 83 | } 84 | } 85 | }, 86 | "rejectedPlans" : [ ] 87 | }, 88 | "executionStats" : { 89 | "executionSuccess" : true, 90 | "nReturned" : 1, 91 | "executionTimeMillis" : 0, 92 | "totalKeysExamined" : 1, 93 | "totalDocsExamined" : 1, 94 | "executionStages" : { 95 | "stage" : "FETCH", 96 | "nReturned" : 1, 97 | "executionTimeMillisEstimate" : 0, 98 | "works" : 2, 99 | "advanced" : 1, 100 | "needTime" : 0, 101 | "needYield" : 0, 102 | "saveState" : 0, 103 | "restoreState" : 0, 104 | "isEOF" : 1, 105 | "invalidates" : 0, 106 | "docsExamined" : 1, 107 | "alreadyHasObj" : 0, 108 | "inputStage" : { 109 | "stage" : "IXSCAN", 110 | "nReturned" : 1, 111 | "executionTimeMillisEstimate" : 0, 112 | "works" : 2, 113 | "advanced" : 1, 114 | "needTime" : 0, 115 | "needYield" : 0, 116 | "saveState" : 0, 117 | "restoreState" : 0, 118 | "isEOF" : 1, 119 | "invalidates" : 0, 120 | "keyPattern" : { 121 | "name" : 1 122 | }, 123 | "indexName" : "name_1", 124 | "isMultiKey" : false, 125 | "multiKeyPaths" : { 126 | "name" : [ ] 127 | }, 128 | "isUnique" : false, 129 | "isSparse" : false, 130 | "isPartial" : false, 131 | "indexVersion" : 2, 132 | "direction" : "forward", 133 | "indexBounds" : { 134 | "name" : [ 135 | "[\"Max\", \"Max\"]" 136 | ] 137 | }, 138 | 
"keysExamined" : 1, 139 | "seeks" : 1, 140 | "dupsTested" : 0, 141 | "dupsDropped" : 0, 142 | "seenInvalidated" : 0 143 | } 144 | } 145 | }, 146 | "serverInfo" : { 147 | "host" : "jahid-HP-ProBook-6470b", 148 | "port" : 27017, 149 | "version" : "3.6.8", 150 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 151 | }, 152 | "ok" : 1 153 | } 154 | ``` 155 |
156 | 157 | 158 | Let's implement **covered queries.** When we can optimize a query, we should try to reach the covered-query state, where the result is served entirely from the index. This is useful when a query typically returns only specific (indexed) fields 159 | 160 | ```cpp 161 | db.customers.explain('executionStats').find({name: 'Max'},{_id: 0, name: 1}) 162 | ``` 163 | 164 | <details>
165 | Output `"totalKeysExamined" : 1`, `"totalDocsExamined" : 0` 166 | 167 | ```schema 168 | { 169 | "queryPlanner" : { 170 | "plannerVersion" : 1, 171 | "namespace" : "test.customers", 172 | "indexFilterSet" : false, 173 | "parsedQuery" : { 174 | "name" : { 175 | "$eq" : "Max" 176 | } 177 | }, 178 | "winningPlan" : { 179 | "stage" : "PROJECTION", 180 | "transformBy" : { 181 | "_id" : 0, 182 | "name" : 1 183 | }, 184 | "inputStage" : { 185 | "stage" : "IXSCAN", 186 | "keyPattern" : { 187 | "name" : 1 188 | }, 189 | "indexName" : "name_1", 190 | "isMultiKey" : false, 191 | "multiKeyPaths" : { 192 | "name" : [ ] 193 | }, 194 | "isUnique" : false, 195 | "isSparse" : false, 196 | "isPartial" : false, 197 | "indexVersion" : 2, 198 | "direction" : "forward", 199 | "indexBounds" : { 200 | "name" : [ 201 | "[\"Max\", \"Max\"]" 202 | ] 203 | } 204 | } 205 | }, 206 | "rejectedPlans" : [ ] 207 | }, 208 | "executionStats" : { 209 | "executionSuccess" : true, 210 | "nReturned" : 1, 211 | "executionTimeMillis" : 0, 212 | "totalKeysExamined" : 1, 213 | "totalDocsExamined" : 0, 214 | "executionStages" : { 215 | "stage" : "PROJECTION", 216 | "nReturned" : 1, 217 | "executionTimeMillisEstimate" : 0, 218 | "works" : 2, 219 | "advanced" : 1, 220 | "needTime" : 0, 221 | "needYield" : 0, 222 | "saveState" : 0, 223 | "restoreState" : 0, 224 | "isEOF" : 1, 225 | "invalidates" : 0, 226 | "transformBy" : { 227 | "_id" : 0, 228 | "name" : 1 229 | }, 230 | "inputStage" : { 231 | "stage" : "IXSCAN", 232 | "nReturned" : 1, 233 | "executionTimeMillisEstimate" : 0, 234 | "works" : 2, 235 | "advanced" : 1, 236 | "needTime" : 0, 237 | "needYield" : 0, 238 | "saveState" : 0, 239 | "restoreState" : 0, 240 | "isEOF" : 1, 241 | "invalidates" : 0, 242 | "keyPattern" : { 243 | "name" : 1 244 | }, 245 | "indexName" : "name_1", 246 | "isMultiKey" : false, 247 | "multiKeyPaths" : { 248 | "name" : [ ] 249 | }, 250 | "isUnique" : false, 251 | "isSparse" : false, 252 | "isPartial" : false, 253 | 
"indexVersion" : 2, 254 | "direction" : "forward", 255 | "indexBounds" : { 256 | "name" : [ 257 | "[\"Max\", \"Max\"]" 258 | ] 259 | }, 260 | "keysExamined" : 1, 261 | "seeks" : 1, 262 | "dupsTested" : 0, 263 | "dupsDropped" : 0, 264 | "seenInvalidated" : 0 265 | } 266 | } 267 | }, 268 | "serverInfo" : { 269 | "host" : "jahid-HP-ProBook-6470b", 270 | "port" : 27017, 271 | "version" : "3.6.8", 272 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 273 | }, 274 | "ok" : 1 275 | } 276 | ``` 277 |
278 | 279 | **This cover query does not understand fully.** -------------------------------------------------------------------------------- /MongoDB/Index_and_Others/Text_Indexes(Special_Type_of_Multi-Key_index),_Sor.md: -------------------------------------------------------------------------------- 1 | # Text indexes(Special Type of Multi-Key index), Sorting using Text indexes, Combine Text index, Exclude while find in Text-Index 2 | 3 | This is a special type of multi-key index 4 | 5 | This product is a must-buy for all fans of modern fiction! Using the text index all words of this sentence will be stored as an array of words, the text index will be like: this, product, must buy, fans, modern, fiction. If an array of single words or an array of keywords is essential to search text. 6 | 7 | Create a new collections 8 | 9 | ```cpp 10 | db.products.insertMany( 11 | [ 12 | { 13 | title: 'A book', 14 | description: 'This is an awesome book about a young artist!' 15 | }, 16 | { 17 | title: 'Red T-Shirt', description: 'This T-Shirt is red and it is pretty awesome' 18 | } 19 | ] 20 | ) 21 | **Output** 22 | { 23 | "acknowledged" : true, 24 | "insertedIds" : [ 25 | ObjectId("6330504b8fea738a87154400"), 26 | ObjectId("6330504b8fea738a87154401") 27 | ] 28 | } 29 | ``` 30 | 31 | Create an index. This is a single field index and can search with exact text such as `'This is an awesome book about a young artist!'`. Can not index individual words of description key’s value. 32 | 33 | ```cpp 34 | db.products.createIndex({description: 1}) 35 | ``` 36 | 37 | To create a text index to split the sentence, so drop the previous index 38 | 39 | ```cpp 40 | db.products.dropIndex({description: 1}) 41 | ``` 42 | 43 | ### Create text index ⇒ special kind of index 44 | 45 | In the text, the index removes all the stop words(such as space) and stores all the keywords into an array essentially. 
46 | 47 | ```cpp 48 | db.products.createIndex({description: 'text'}) 49 | **Output** 50 | { 51 | "createdCollectionAutomatically" : false, 52 | "numIndexesBefore" : 1, 53 | "numIndexesAfter" : 2, 54 | "ok" : 1 55 | } 56 | ``` 57 | 58 | Now might be wondering why do not need to specify the field in which want to search pretty expensive as can imagine. 59 | If have is a lot of long text that has to be split up, don't want to do this like 10 times per collection and therefore, only have one text index where this could look into. 60 | 61 | Can merge multiple fields into one text index. Everything is stored in lowercase. 62 | 63 | ```cpp 64 | db.products.find({$text: {$search: 'awesome'}}).pretty() 65 | { 66 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a4"), 67 | "title" : "Red T-Shirt", 68 | "description" : "This T-Shirt is red and it is pretty awesome" 69 | } 70 | { 71 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 72 | "title" : "A book", 73 | "description" : "This is an awesome book about a young artist!" 74 | } 75 | ``` 76 | 77 | ```cpp 78 | > db.products.find({$text: {$search: 'book'}}).pretty() 79 | { 80 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 81 | "title" : "A book", 82 | "description" : "This is an awesome book about a young artist!" 83 | } 84 | ``` 85 | 86 | Here second document has red word and first document has book word. Not searching the words in a single document. 87 | 88 | ```cpp 89 | db.products.find({$text: {$search: 'red book'}}).pretty() 90 | { 91 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 92 | "title" : "A book", 93 | "description" : "This is an awesome book about a young artist!" 94 | } 95 | { 96 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a4"), 97 | "title" : "Red T-Shirt", 98 | "description" : "This T-Shirt is red and it is pretty awesome" 99 | } 100 | ``` 101 | 102 | Can search with exactly phrase. 
**This faster than regular expression.** 103 | 104 | ```cpp 105 | db.products.find({$text: {$search: "\"red book\""}}).pretty() 106 | db.products.find({$text: {$search: "\"awesome book\""}}).pretty() 107 | { 108 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 109 | "title" : "A book", 110 | "description" : "This is an awesome book about a young artist!" 111 | } 112 | ``` 113 | 114 | # Text Indexes Sorting 115 | 116 | It works in new version automatically 117 | 118 | ```cpp 119 | db.products.find({$text: {$search: "awesome t-shirt"}}).pretty() 120 | **Output** 121 | { 122 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a4"), 123 | "title" : "Red T-Shirt", 124 | "description" : "This T-Shirt is red and it is pretty awesome" 125 | } 126 | { 127 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 128 | "title" : "A book", 129 | "description" : "This is an awesome book about a young artist!" 130 | } 131 | ``` 132 | 133 | // but in the previous version 134 | 135 | ```cpp 136 | db.products.find({$text: {$search: "awesome t-shirt"}}).pretty() 137 | **Output** 138 | { 139 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 140 | "title" : "A book", 141 | "description" : "This is an awesome book about a young artist!" 142 | } 143 | { 144 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a4"), 145 | "title" : "Red T-Shirt", 146 | "description" : "This T-Shirt is red and it is pretty awesome" 147 | } 148 | ``` 149 | 150 | Let's add a sorting query. in this query check how many words match each document. Score increase with the number of matching words. 
Which score is higher comes into first position 151 | 152 | ```cpp 153 | db.products.find({$text: {$search: "awesome t-shirt"}}, {score: {$meta: 'textScore'}}).pretty() 154 | **Output** 155 | { 156 | "_id" : ObjectId("6330504b8fea738a87154401"), 157 | "title" : "Red T-Shirt", 158 | "description" : "This T-Shirt is red and it is pretty awesome", 159 | "score" : 1.7999999999999998 160 | } 161 | { 162 | "_id" : ObjectId("6330504b8fea738a87154400"), 163 | "title" : "A book", 164 | "description" : "This is an awesome book about a young artist!", 165 | "score" : 0.625 166 | } 167 | ``` 168 | 169 | If sort does not work automatically add the sort function and sort by score. 170 | 171 | ```cpp 172 | db.products.find( 173 | {$text: {$search: "awesome t-shirt"}}, 174 | {score: {$meta: 'textScore'}} 175 | ).sort( 176 | {score: {$meta: 'textScore'}}).pretty() 177 | **Output** 178 | { 179 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a4"), 180 | "title" : "Red T-Shirt", 181 | "description" : "This T-Shirt is red and it is pretty awesome", 182 | "score" : 1.7999999999999998 183 | } 184 | { 185 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 186 | "title" : "A book", 187 | "description" : "This is an awesome book about a young artist!", 188 | "score" : 0.6 189 | } 190 | ``` 191 | 192 | # Combining Text Indexes 193 | 194 | ```cpp 195 | db.products.getIndexes() 196 | db.products.findOne() 197 | { 198 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 199 | "title" : "A book", 200 | "description" : "This is an awesome book about a young artist!" 201 | } 202 | ``` 203 | 204 | If we now add text indexes with a title like that it would be an error. Already text index is added with description in the document. Index option conflict. In every document we can must add only one text index 205 | 206 | ```cpp 207 | db.products.createIndex({title: 'text'}) 208 | ``` 209 | 210 | Can merge the text of multiple fields together into one text index. 
Now drop the previous description text index, dropping the text index is a little bit different. Have included the text index name 211 | 212 | ```cpp 213 | db.products.dropIndex('description_text') 214 | { "nIndexesWas" : 2, "ok" : 1 } 215 | ``` 216 | 217 | Now add two fields like title and description to create a **combined text index** 218 | 219 | ```cpp 220 | db.products.createIndex({title:'text',description: 'text'}) 221 | **Output** 222 | { 223 | "createdCollectionAutomatically" : false, 224 | "numIndexesBefore" : 1, 225 | "numIndexesAfter" : 2, 226 | "ok" : 1 227 | } 228 | 229 | // insert a new element 230 | db.products.insertOne({title: 'A Ship', description: 'Floats perfectly!'}) 231 | **Output** 232 | { 233 | "acknowledged" : true, 234 | "insertedId" : ObjectId("63308f298fea738a87154402") 235 | } 236 | 237 | ``` 238 | 239 | As we make combined text index with title and description if we search for ship then we will get matched docuemnt. 240 | 241 | ```bash 242 | > db.products.find({$text: {$search: 'ship'}}).pretty() 243 | **Output** 244 | { 245 | "_id" : ObjectId("63308f298fea738a87154402"), 246 | "title" : "A Ship", 247 | "description" : "Floats perfectly!" 248 | } 249 | ``` 250 | 251 | Search with multiple text 252 | 253 | ```cpp 254 | db.products.find({$text: {$search: 'ship t-shirt'}}).pretty() 255 | { 256 | "_id" : ObjectId("63308f298fea738a87154402"), 257 | "title" : "A Ship", 258 | "description" : "Floats perfectly!" 259 | } 260 | { 261 | "_id" : ObjectId("6330504b8fea738a87154401"), 262 | "title" : "Red T-Shirt", 263 | "description" : "This T-Shirt is red and it is pretty awesome" 264 | } 265 | 266 | > db.products.find({$text: {$search: 'awesome'}}).pretty() 267 | { 268 | "_id" : ObjectId("6330504b8fea738a87154400"), 269 | "title" : "A book", 270 | "description" : "This is an awesome book about a young artist!" 
271 | } 272 | { 273 | "_id" : ObjectId("6330504b8fea738a87154401"), 274 | "title" : "Red T-Shirt", 275 | "description" : "This T-Shirt is red and it is pretty awesome" 276 | } 277 | ``` 278 | 279 | # Using Text Index To Exclude Words 280 | 281 | To exclude words in search just add '-' before word. Here want to search awesome but in the sentence. If get awesome then exclude t-shirt 282 | 283 | ```cpp 284 | db.products.find({$text: {$search: 'awesome -t-shirt'}}).pretty() 285 | { 286 | "_id" : ObjectId("5f2adb2fbcaaeedce48e55a3"), 287 | "title" : "A book", 288 | "description" : "This is an awesome book abou a young artist!" 289 | } 290 | ``` -------------------------------------------------------------------------------- /MongoDB/Index_and_Others/Time-To-Live(TTL)_Index.md: -------------------------------------------------------------------------------- 1 | # Time-To-Live(TTL) index 2 | 3 | This works like a session. Clear data after some duration. Self-destroying data 4 | 5 | ```cpp 6 | > db.sessions.insertOne({data: 'Sample data', createdAt: new Date()}) 7 | **Output** 8 | { 9 | "acknowledged" : true, 10 | "insertedId" : ObjectId("632f15e43bb3e298033546d0") 11 | } 12 | 13 | > db.sessions.find().pretty() 14 | **Output** 15 | { 16 | "_id" : ObjectId("632f15e43bb3e298033546d0"), 17 | "data" : "Sample data", 18 | "createdAt" : ISODate("2022-09-24T14:36:20.317Z") 19 | } 20 | ``` 21 | 22 | Now add time to live index, can create with normal ascending text 23 | 24 | ```cpp 25 | > db.sessions.createdIndex({createdAt: 1}) 26 | > db.sessions.dropIndex({createdAt: 1}) 27 | ``` 28 | 29 | Add indexes differently. After 10 seconds inserted the document will be destroyed. 
30 | 31 | ```cpp 32 | > db.sessions.createIndex({createdAt: 1}, {expireAfterSeconds: 10}) 33 | ``` -------------------------------------------------------------------------------- /MongoDB/Index_and_Others/Using_Indexes_for_Sorting.md: -------------------------------------------------------------------------------- 1 | # Using indexes for sorting 2 | 3 | This query also works like indexes. This also uses index scan `"indexName": "dob.age_1_gender_1"` as MongoDB already has index it will be easy to sort by gender using the already created index and also have to do less work. 4 | 5 | ```bash 6 | > db.contacts.explain().find({'dob.age': 35}).sort({gender: 1}) 7 | **Output** 8 | { 9 | "queryPlanner" : { 10 | "plannerVersion" : 1, 11 | "namespace" : "contactData.contacts", 12 | "indexFilterSet" : false, 13 | "parsedQuery" : { 14 | "dob.age" : { 15 | "$eq" : 35 16 | } 17 | }, 18 | "winningPlan" : { 19 | "stage" : "FETCH", 20 | "inputStage" : { 21 | "stage" : "IXSCAN", 22 | "keyPattern" : { 23 | "dob.age" : 1, 24 | "gender" : 1 25 | }, 26 | "indexName" : "dob.age_1_gender_1", 27 | "isMultiKey" : false, 28 | "multiKeyPaths" : { 29 | "dob.age" : [ ], 30 | "gender" : [ ] 31 | }, 32 | "isUnique" : false, 33 | "isSparse" : false, 34 | "isPartial" : false, 35 | "indexVersion" : 2, 36 | "direction" : "forward", 37 | "indexBounds" : { 38 | "dob.age" : [ 39 | "[35.0, 35.0]" 40 | ], 41 | "gender" : [ => Means MongoDB uses already exits index to sort by gender 42 | "[MinKey, MaxKey]" 43 | ] 44 | } 45 | } 46 | }, 47 | "rejectedPlans" : [ ] 48 | }, 49 | "serverInfo" : { 50 | "host" : "jahid-HP-ProBook-6470b", 51 | "port" : 27017, 52 | "version" : "3.6.8", 53 | "gitVersion" : "8e540c0b6db93ce994cc548f000900bdc740f80a" 54 | }, 55 | "ok" : 1 56 | } 57 | ``` 58 | 59 | MongoDB db reserves 32MB for fetched documents when using sort it could be cross it’s this limit but if the key we want to sort already being used in any index that will need less memory to sort and also will be helpful if 
can not sort by any key for memory limitation. 60 | 61 | Understanding the default index and finding how many indexes in documents 62 | 63 | ```cpp 64 | > db.contacts.getIndexes() 65 | [ 66 | { 67 | "v" : 2, 68 | "key" : { 69 | "_id" : 1 70 | }, 71 | "name" : "_id_", 72 | "ns" : "contactData.contacts" 73 | }, 74 | { 75 | "v" : 2, 76 | "key" : { 77 | "dob.age" : 1, 78 | "gender" : 1 79 | }, 80 | "name" : "dob.age_1_gender_1", 81 | "ns" : "contactData.contacts" 82 | } 83 | ] 84 | ``` -------------------------------------------------------------------------------- /MongoDB/Initial_Concepts.md: -------------------------------------------------------------------------------- 1 | # Initial Concepts 2 | 3 | 17 | 18 | > **Normalizing**: Storing and distributing data in multiple tables where every table has a clear schema and use lots of relations(in SQL). 19 | > 20 | 21 | As MongoDB saves all in one single document so for fetching it does not need any complex operation like join(SQL) so MongoDB can fetch query is more simple, more flexible, and more efficient(as no need to merge collection/table) than SQL. 22 | 23 | MongoDB is popular for **read and write heavy** applications (like sensors which send data every second/online shop/blog). 24 | 25 | # Some very basic commands 26 | 27 | ### Start MongoDB on the shell 28 | 29 | ```powershell 30 | mongo 31 | ``` 32 | 33 | ### Show all databases(admin, config, and local are predefined there) 34 | 35 | ```powershell 36 | show dbs 37 | ``` 38 | 39 | ### Use existing database or create a new one 40 | 41 | Will connected with database name 'shop' if already exits or if the database is not exited yet MongoDB will create that 42 | 43 | ```powershell 44 | use shop 45 | ``` 46 | 47 | Output will be 48 | 49 | > switched to db shop 50 | > 51 | 52 | **Note:** If the database is not created previously this command will create that database but the shop database will not create instantly(as a result `show dbs` will not list shop db). 
When we will first add data to the shops database than MongoDB implicitly will create that database and also `show dbs` commands that will list the database. 53 | 54 | ### Get all documents of a collection. 55 | 56 | Will give all data as we add not add any argument. 57 | 58 | ```powershell 59 | db.products.find() 60 | ``` 61 | 62 | ### Get JSON like format 63 | 64 | Will return exactly the same data as JSON like format which is easy to read. 65 | 66 | ```powershell 67 | db.products.find().pretty 68 | ``` 69 | 70 | ### Clear the screen 71 | 72 | ```powershell 73 | cls 74 | ``` 75 | 76 | ## MongoDB Ecosystem 77 | 78 | ### MongoDB Database 79 | 80 | 1. Self - Managed / Enterprise 81 | 1. CloudManager / OpsManager 82 | 2. Atlas(Cloud) 83 | 3. Mobile 84 | 85 | Others options 86 | 87 | 1. Compass 88 | 2. BI Connectors 89 | 3. MongoDB Charts 90 | 91 | ### Stitch 92 | 93 | 97 | 98 | That gives 99 | 100 | 104 | 105 | 109 | 110 | 114 | 115 | 119 | 120 | ## How MongoDB works 121 | 122 | ### Application 123 | 124 | - Frontend(UI) 125 | - Backend(Server) ⇒ Which interacts/communicates with MongoDB server using their corresponding drivers 126 | - Node.js 127 | - Java 128 | - Python 129 | - MongoDB Shell(does not depends on any specific programming language). 130 | 131 | ### Data 132 | 133 | - MongoDB server 134 | - MongoDB server use a storage engine default for MongoDB is **Wired Tiger**. Which access file/data where the data kept(database information collection, table, etc) 135 | - Storage Engine 136 | - Read + Write Data to Files(slow) 137 | - Read + Write Data in Memory(fast) ⇒ Memory(RAM) 138 | 139 | ### Documents Created Implicity 140 | 141 | JSON data converts into BSON data 142 | 143 | `{ 144 | "name" : "MAX", 145 | "age" : 29 146 | }` 147 | 148 | it converts into BSON 149 | 150 | BSON 151 | 152 | 1. Binary data 153 | 2. Extends JSON Types(e.g more detailed Number Types) 154 | 3. 
Efficient Storage 155 | 156 | ### MongoD service 157 | 158 | 164 | 165 | ### Start mongod process(default port is 27017) 166 | 167 | ```bash 168 | sudo mongod => To start in default port 169 | sudo mongod --port 27018 => can use if in any case default port 27017 not available 170 | ``` 171 | 172 | ### Start MongoDB on the server 173 | 174 | ```bash 175 | mongo => to start in default port 27017 176 | mongo --port 27018 => to start at 27018 port 177 | ``` -------------------------------------------------------------------------------- /MongoDB/Numeric_Data.md: -------------------------------------------------------------------------------- 1 | # Numeric Data 2 | 3 | # Working With Numeric Data 4 | 5 | Numeric data is most important in scientific calculation 6 | Number more complex than any other 7 | 3 types of number in mongoDB.(Integers,Longs,Doubles) 8 | 9 | ### Integers(int32) 10 | 11 | 1. Only full Numbers(+- 2^32). **-2,147,483,648 to +2,147,483,647** 12 | 2. Use for 'normal' Integers 13 | 14 | ### Longs(int64) 15 | 16 | 1. Only full Numbers(+- 2^64). **-9,223,372,036,854,775,808 to +9,223,372,036,854,775,807** 17 | 2. Use for large Integers 18 | 19 | ### Doubles(64bit) 20 | 21 | 1. Numbers with Decimal Places(Decimal values are approximated). 22 | 2. Use for floats where high precision is not required 23 | 24 | ### High Precision Doubles(128bit) 25 | 26 | 1. Numbers with Decimal Places(Decimal values are stored with high precision(**34 decimal digits**)). 27 | 2. Use for floats where high precision is required 28 | 29 | In mongoDB driver is a javaScript based driver. All numeric values stored as a double. Such as 74 is stored as double and also 74.00 stored as double in MongoDB shell. But in Python 55 is int and 55.0 is float. 30 | 31 | In this case age of Max maybe store as 29.0000002 and will be some imprecision at some point. But we do not see this but if we use MongoDB shell this will save number like this. 
[Proof](Numeric_Data.md) 32 | 33 | ```bash 34 | > use numeric 35 | switched to db numeric 36 | > db.persons.insertOne({name: 'Max', age: 29}) 37 | 38 | **Output** 39 | { 40 | acknowledged: true, 41 | insertedId: ObjectId("634c030a4ff2758a4da7409c") 42 | } 43 | 44 | > db.persons.find() 45 | 46 | Ouput 47 | [ { _id: ObjectId("634c030a4ff2758a4da7409c"), name: 'Max', age: 29 } ] 48 | ``` 49 | 50 | # Working with Int32 51 | 52 | > Hided all others fields of stats() function as we do not interested on those. 53 | > 54 | 55 | Here can see the size 56 | 57 | ```cpp 58 | > db.persons.stats() 59 | 60 | **Output** 61 | "size" : 49, 62 | "count" : 1, 63 | 64 | > db.persons.deleteMany({}) 65 | **Output** 66 | { acknowledged: true, deletedCount: 1 } 67 | ``` 68 | 69 | Here can see that size is decrease 70 | 71 | ```bash 72 | > db.persons.insertOne({ age: 29}) 73 | 74 | **Output** 75 | { 76 | acknowledged: true, 77 | insertedId: ObjectId("634c03b14ff2758a4da7409d") 78 | } 79 | 80 | > db.persons.stats() 81 | 82 | **Output** 83 | "size" : 35, 84 | "count" : 1, 85 | 86 | > db.persons.deleteMany({}) 87 | 88 | **Output** 89 | { acknowledged: true, deletedCount: 1 } 90 | ``` 91 | 92 | Here also can see that size now also more decrease. We can store number with and without quotation. Using `NumberInt` we are saving as integer and saving some space. 93 | 94 | > `NumberInt` is only available for MongoDB shell it will different for other programming language driver. 95 | > 96 | 97 | ```cpp 98 | > db.persons.insertOne({ age: NumberInt(29)}) 99 | > db.persons.insertOne({ age: NumberInt("29")}) 100 | 101 | **Output** 102 | "size" : 31, 103 | "count" : 1, 104 | ``` 105 | 106 | # Working with Int64 107 | 108 | Here can the output is a wrong value as enter very big value which Int32 can not store. 
109 | 110 | ```cpp 111 | > db.companies.insertOne({valuation: NumberInt('50000000000')}) 112 | 113 | **Output** 114 | { 115 | acknowledged: true, 116 | insertedId: ObjectId("634c06f14ff2758a4da7409e") 117 | } 118 | 119 | > db.companies.findOne() 120 | 121 | **Output** 122 | { _id: ObjectId("634c06f14ff2758a4da7409e"), valuation: -1539607552 } 123 | ``` 124 | 125 | If decrease value then it works 126 | 127 | ```cpp 128 | > db.companies.insertOne({valuation: NumberInt('500000000')}) 129 | 130 | **Output** 131 | { 132 | acknowledged: true, 133 | insertedId: ObjectId("634c087d4ff2758a4da740a0") 134 | } 135 | ``` 136 | 137 | But if the data stored as a 64 bit double then it works as 64 bit double can store more than 32 bit int. 138 | 139 | ```cpp 140 | > db.companies.insertOne({valuation: 50000000000'}) 141 | ``` 142 | 143 | ```cpp 144 | > db.companies.find() 145 | { "_id" : ObjectId("5f3e81e7d0209e4d3a0ec072"), "valuation" : -1539607552 } 146 | { "_id" : ObjectId("5f3e8635d0209e4d3a0ec073"), "valuation" : 50000000000 } 147 | { "_id" : ObjectId("5f3e866cd0209e4d3a0ec074"), "valuation" : 705032704 } 148 | ``` 149 | 150 | To store biggest possible number as Integers, always have to use quotation marks, basically it take number as double and than convert into given type(in this case NumberLong). So if given number is not fit in Double than in older version of MongoDB will throw error and in updated/newer version will give warning. 151 | 152 | > For my case it give warning and store number currectly but in course (as it was older version) give error. 
153 | > 154 | 155 | ```cpp 156 | > db.companies.insertOne({valuation: NumberLong(9223372036854775807)}) 157 | 158 | **Output** 159 | Warning: NumberLong: specifying a number as argument is deprecated and may lead to loss of precision, pass a string instead 160 | { 161 | acknowledged: true, 162 | insertedId: ObjectId("634c0a114ff2758a4da740a3") 163 | } 164 | 165 | > db.companies.find() 166 | { "_id" : ObjectId("5f3e81e7d0209e4d3a0ec072"), "valuation" : -1539607552 } 167 | { "_id" : ObjectId("5f3e8635d0209e4d3a0ec073"), "valuation" : 50000000000 } 168 | { "_id" : ObjectId("5f3e866cd0209e4d3a0ec074"), "valuation" : 705032704 } 169 | { "_id" : ObjectId("5f3e86ecd0209e4d3a0ec075"), "valuation" : NumberLong("50000000000") } 170 | ``` 171 | 172 | Doing Maths with Floats Int32 Int64 173 | 174 | > We can store integer value as string but problem is we can not perform any number related operation if we store number as string. 175 | > 176 | 177 | ```cpp 178 | > db.accounts.insertOne( {name: 'Max', amount: '34234253458373534574524524'} ) 179 | // add a small number 180 | > db.accounts.insertOne( {name: 'Max', amount: '10'} ) 181 | ``` 182 | 183 | `$inc` or any math calculation does not work with string value 184 | 185 | ```cpp 186 | > db.accounts.updateOne({}, {$inc: {amount: 10}}) 187 | 188 | **Output** 189 | MongoServerError: Cannot apply $inc to a value of non-numeric type. {_id: ObjectId('634c0e1f4ff2758a4da740a4')} has the field 'amount' of non-numeric type string 190 | ``` 191 | 192 | Have to insert a Integers value 193 | 194 | ```cpp 195 | > db.accounts.deleteMany() 196 | > db.accounts.insertOne( { name: 'Max', amount: NumberInt('10') } ) 197 | 198 | Ouput 199 | { 200 | acknowledged: true, 201 | insertedId: ObjectId("634c120e4ff2758a4da740a5") 202 | } 203 | 204 | > db.accounts.find().pretty() 205 | ``` 206 | 207 | Here 10 as a double value mongoDB convert the sum as double. 
208 | 209 | ```cpp 210 | db.accounts.updateOne({}, {$inc: {amount: 10}}) 211 | ``` 212 | 213 | If update the number with wrapping with NumberInt then the final output be a int. 214 | 215 | > But MongoDB print integer and double same way so by printing those in shell we can not see any difference. 216 | > 217 | 218 | ```cpp 219 | db.accounts.updateOne({}, {$inc: {amount: NumberInt('10')}}) 220 | ``` 221 | 222 | Let delete the document 223 | 224 | ```cpp 225 | db.companies.deleteMany({}) 226 | ``` 227 | 228 | Insert a large number 229 | 230 | ```cpp 231 | db.companies.insertOne({valuation: NumberLong('34234253458373534574524524')}) 232 | ``` 233 | 234 | To calculate math operation with the large number NumberLong should be include in that number, this is incorrect 235 | 236 | ```cpp 237 | db.companies.updateOne({}, {$inc: {valuation: 1}}) 238 | // this is correct 239 | db.companies.updateOne({}, {$inc: {valuation: NumberLong('1')}}) 240 | ``` 241 | 242 | # What's Wrong With Normal Doubles 243 | 244 | ```bash 245 | > db.science.insertOne({a: 0.3, b: 0.1}) 246 | 247 | **Output** 248 | { 249 | acknowledged: true, 250 | insertedId: ObjectId("634c1ee44ff2758a4da740a8") 251 | } 252 | 253 | > db.science.findOne() 254 | 255 | **Output** 256 | { _id: ObjectId("634c1ee44ff2758a4da740a8"), a: 0.3, b: 0.1 } 257 | ``` 258 | 259 | Let execute maths calculation 260 | 261 | ```bash 262 | > db.science.aggregate([ 263 | { $project: {result: {$subtract: ['$a', '$b'] } } } 264 | ]) 265 | 266 | **Output** 267 | [ 268 | { 269 | _id: ObjectId("634c1ee44ff2758a4da740a8"), 270 | result: 0.19999999999999998 271 | } 272 | ] 273 | ``` 274 | 275 | Here should be the subtract value is 0.2. But it's come 276 | 277 | ```cpp 278 | { "_id" : ObjectId("5f3e9ec9d0209e4d3a0ec079"), "result" : 0.19999999999999998 } 279 | ``` 280 | 281 | So have to fix the issue 282 | 283 | # Working With Decimal 128bit 284 | 285 | To get the exact subtract value, have to use `NumberDecimal` constructor. 
This will different for other language. 286 | 287 | ```bash 288 | > db.companies.deleteMany({}) 289 | 290 | **Output** 291 | { acknowledged: true, deletedCount: 6 } 292 | 293 | > db.science.insertOne({a: NumberDecimal("0.3"), b: NumberDecimal("0.1")}) 294 | 295 | **Output** 296 | { 297 | acknowledged: true, 298 | insertedId: ObjectId("634c209d4ff2758a4da740aa") 299 | } 300 | 301 | > db.science.find().pretty() 302 | 303 | **Output** 304 | [ 305 | { 306 | _id: ObjectId("634c209d4ff2758a4da740aa"), 307 | a: Decimal128("0.3"), 308 | b: Decimal128("0.1") 309 | } 310 | ] 311 | 312 | ``` 313 | 314 | Now getting the exact value 315 | 316 | ```bash 317 | > db.science.aggregate([ 318 | { $project: { result: { $subtract: ['$a', '$b'] } } } 319 | ]) 320 | 321 | **Output** 322 | [ 323 | { 324 | _id: ObjectId("634c209d4ff2758a4da740aa"), 325 | result: Decimal128("0.2") 326 | } 327 | ] 328 | ``` 329 | 330 | Let execute another query 331 | 332 | ```bash 333 | > db.science.updateOne({}, {$inc: {a: 0.1}}) 334 | 335 | **Output** 336 | { 337 | acknowledged: true, 338 | insertedId: null, 339 | matchedCount: 1, 340 | modifiedCount: 1, 341 | upsertedCount: 0 342 | } 343 | 344 | > db.science.find().pretty() 345 | 346 | **Output** 347 | [ 348 | { 349 | _id: ObjectId("634c209d4ff2758a4da740aa"), 350 | a: Decimal128("0.400000000000000"), 351 | b: Decimal128("0.1") 352 | } 353 | ] 354 | ``` 355 | 356 | So to get the right value 357 | 358 | ```cpp 359 | > db.science.updateOne({}, {$inc: {a: NumberDecimal("0.1")}}) 360 | 361 | Output 362 | { 363 | acknowledged: true, 364 | insertedId: null, 365 | matchedCount: 1, 366 | modifiedCount: 1, 367 | upsertedCount: 0 368 | } 369 | 370 | > db.science.find().pretty() 371 | 372 | **Output** 373 | [ 374 | { 375 | _id: ObjectId("634c209d4ff2758a4da740aa"), 376 | a: Decimal128("0.500000000000000"), 377 | b: Decimal128("0.1") 378 | } 379 | ] 380 | ``` 381 | 382 | ## High precision 383 | 384 | Normal precision use less space. 
385 | 386 | ```bash 387 | > db.number.insertOne({num: 0.1}) 388 | 389 | **Output** 390 | { 391 | acknowledged: true, 392 | insertedId: ObjectId("634c231e4ff2758a4da740ab") 393 | } 394 | 395 | > db.number.stats() 396 | "size" : 33 397 | 398 | > db.number.deleteMany({}) 399 | 400 | **Output** 401 | { acknowledged: true, deletedCount: 1 } 402 | ``` 403 | 404 | `NumberDecimal` will use give **high precision** but consume more space 405 | 406 | ```bash 407 | > db.number.insertOne({num: NumberDecimal("0.1")}) 408 | 409 | **Output** 410 | { 411 | acknowledged: true, 412 | insertedId: ObjectId("634c23574ff2758a4da740ac") 413 | } 414 | 415 | > db.number.stats() 416 | "size" : 41 417 | ``` -------------------------------------------------------------------------------- /MongoDB/Performance.md: -------------------------------------------------------------------------------- 1 | # Mastering MongoDB Queries 2 | - **Indexing**: 3 | - Create indexes on fields used for frequent sorting, searching, and filtering. 4 | - **Efficient Projections**: 5 | - Fetch only required fields, using projection to limit data transferred. 6 | - **Aggregation Pipelines**: 7 | - Take advantage of pipelines for complex data transformations. 8 | 9 | # Others 10 | - **Data Modeling**: 11 | - Embrace proper MongoDB schema design (embedding vs. referencing). Keep related data together. 12 | - **Connection Pooling**: 13 | - Maintain open MongoDB connections for optimal reuse without setup overhead. 14 | 15 | 16 | 17 | # Resources 18 | * [Advanced Node.JS](https://interactivecares.com/courseDetails/246?title=Advanced_Node.JS) -------------------------------------------------------------------------------- /MongoDB/Performance_Fault_Tolerancy_Deployment.md: -------------------------------------------------------------------------------- 1 | # Performance Fault Tolerancy Deployment 2 | 3 | ### What influences Performance 4 | 5 | **Developer/ DB Admin** 6 | 7 | 1. Efficient Queries/Operations 8 | 2. 
Indexes 9 | 3. Fitting Data Schema 10 | 11 | **DB Admin/ System Admin** 12 | 13 | 1. Hardware & Network 14 | 2. Sharding 15 | 3. Replica Sets 16 | 17 | ### Understanding Capped Collections 18 | 19 | 23 | 24 | - The oldest data is automatically deleted when new data comes in. 25 | - This is efficient for application logs where the most recent logs are stored, or as caching some data. 26 | 27 | > For capped collection, it returns documents as the same order as it was inserted. But for normal collection it is not always true. 28 | > 29 | 30 | Let's create a collection with capped, `max` is optional 31 | 32 | > Default `size` 4 MB, `max` is the number of documents that can be stored there. 33 | > 34 | 35 | ```cpp 36 | > use performance 37 | switched to db performance 38 | 39 | > db.createCollection('capped', {capped: true, size: 10000, max: 3}) 40 | **Output** 41 | { ok: 1 } 42 | ``` 43 | 44 | Access capped collections 45 | 46 | ```cpp 47 | > db.capped.insertOne({name: 'Max'}) 48 | **Output** 49 | { 50 | acknowledged: true, 51 | insertedId: ObjectId("6353cc9a88121c3743e45cef") 52 | } 53 | 54 | > db.capped.insertOne({name: 'Manu'}) 55 | **Output** 56 | { 57 | acknowledged: true, 58 | insertedId: ObjectId("6353ccb588121c3743e45cf0") 59 | } 60 | 61 | db.capped.insertOne({name: 'Anna'}) 62 | **Output** 63 | { 64 | acknowledged: true, 65 | insertedId: ObjectId("6353cccc88121c3743e45cf1") 66 | } 67 | ``` 68 | 69 | ```cpp 70 | > db.capped.find().pretty() 71 | 72 | **Output** 73 | [ 74 | { _id: ObjectId("6353cc9a88121c3743e45cef"), name: 'Max' }, 75 | { _id: ObjectId("6353ccb588121c3743e45cf0"), name: 'Manu' }, 76 | { _id: ObjectId("6353cccc88121c3743e45cf1"), name: 'Anna' } 77 | ] 78 | ``` 79 | 80 | If any time need to order the documents to descending order 81 | 82 | ```cpp 83 | > db.capped.find().sort({$natural: -1}).pretty() 84 | **Output** 85 | [ 86 | { _id: ObjectId("6353cccc88121c3743e45cf1"), name: 'Anna' }, 87 | { _id: ObjectId("6353ccb588121c3743e45cf0"), name: 
'Manu' }, 88 | { _id: ObjectId("6353cc9a88121c3743e45cef"), name: 'Max' } 89 | ] 90 | ``` 91 | 92 | Now if we insert an new document then the first document will automatically get deleted(name: 'Max') and will not show error msg. 93 | 94 | ```cpp 95 | > db.capped.insertOne({name: 'Maria'}) 96 | 97 | **Output** 98 | { 99 | acknowledged: true, 100 | insertedId: ObjectId("6353fac444b95bf60083ef55") 101 | } 102 | 103 | > db.capped.find().pretty() 104 | 105 | **Output** 106 | [ 107 | { _id: ObjectId("6353ccb588121c3743e45cf0"), name: 'Manu' }, 108 | { _id: ObjectId("6353cccc88121c3743e45cf1"), name: 'Anna' }, 109 | { _id: ObjectId("6353fac444b95bf60083ef55"), name: 'Maria' } 110 | ] 111 | ``` 112 | 113 | But the data is nor permanently deleted, If by using Cache the can retrieve the full data 114 | 115 | ### Replica Sets 116 | 117 | Client(shell , Driver) <--Read--> MongoDB Server Primary Node --> Replica Set(Primary Node --> 1. Secondary Node 2. Secondary Node 118 | 119 | ![Screenshot from 2022-10-22 22-45-07.png](Performance_Fault_Tolerancy_Deployment/Screenshot_from_2022-10-22_22-45-07.png) 120 | 121 | > If we add replica node then if we make a write request then it will be go to the Client then MongoDB server then primary node where we have the database. And if we have secondary node MongoDB asynchronously(not instantly) will replicate(copy) to in those secondary node. If primary node goes offline then data read from replica (Secondary Node). 122 | > 123 | 124 | Why Replica Sets? 125 | 126 | 1. Backup / Fault Tolerance 127 | 2. Improve Read Performance 128 | 129 | To read data as first as possible can over distribute read request between the primary and secondary Node. If there are huge amount of read request then this will help to reduce the pressure at primary node. 130 | 131 | ### Sharding (Horizontal Scaling) 132 | 133 | More memory into a single machine. Horizontal scaling which means need more servers. 
134 | 135 | ``` 136 | MongoDB Server 137 | | 138 | ------------------------------------------------------ 139 | server1 server2 server3 server4 server5 server6 140 | 141 | ``` 142 | 143 | That issue here of course is this might sound logical but severs now don't duplicate that data, they are not backups, they split that data. So server 1 on the left, let's say stores data for the same application as the other server but a different chunk of it. So with sharding, we have multiple computers who all run MongoDB servers but these servers don’t work alone but work together and split the available data so that data is distributed across shards not replicated. So queries where find data but also insert update and delete operations therefore have to be run against all the servers or the right servers because each chunk manages its data and its range of the data. 144 | 145 | ## Data is distributed (not replicated) across Shards 146 | 147 | Each chunk manages its data and its a range of data 148 | 149 | ## Queries are run across all Shards 150 | 151 | Data can be stored as A-J, J-K, K-S.. to the sequence of a chunk(server). 152 | 153 | ### How Sharding Works 154 | 155 | ``` 156 | ---------- 157 | | Client | 158 | ---------- 159 | | 160 | ------------------ 161 | | mongos(Router) | 162 | ------------------ 163 | | 164 | ------------------------------------------------------------------------- 165 | | | | 166 | MongoDB(Server/Shard) MongoDB(Server/Shard) MongoDB(Server/Shard) 167 | -------------------------------------------------------------------------- 168 | | Shard Key | | Shard Key | | Shard Key | 169 | --------------------------------------------------------------------------- 170 | // shard key: shard key is essential just a field that's added to every document which kid of is important. 
171 | 172 | ------------------------Queries and Sharding------------------------- 173 | 174 | ---------- 175 | | find() | 176 | ---------- 177 | | 178 | ------------------ 179 | | mongos(Router) | 180 | ------------------ 181 | | 182 | ------------------------------------------------------- 183 | | | 184 | --------------------------- --------------------------- 185 | | Option1: Operation does | | Option2: Operation does | 186 | | not contain Shared Key | | contain Shared Key | 187 | --------------------------- --------------------------- 188 | | | | Directly Send ---> | 189 | BroadCast > | | to right Shard | 190 | | | | | 191 | -------------------------- -------------------- 192 | shared shared shared shared 193 | ``` 194 | 195 | # Deploying a MongoDB Server 196 | 197 | localhost(mongod) -------> Atlas(mongod) 198 | 199 | Have to manage 200 | 201 | 1. Manage Shards 202 | 2. Secure User / Auth Setup 203 | 3. Project Web Server / Network 204 | 4. Manage Replica Set 205 | 5. Encryption(Transportation & Rest) 206 | 6. Regular Backups 207 | 7. Update Software 208 | 209 | # Performance & Fault Tolerancy 210 | 211 | 1. Consider Capped Collections for cases where want to clear old data automatically. 212 | 2. Performance is all about having efficient queries / operations, fitting data formats and a best-practice MongoDB server config. 213 | 3. Replica sets provide fault tolerancy (with automatic recovery) and improved read performance. 214 | 4. Sharding allows to scale MongoDB server horizontally. 215 | 216 | # Deployment $ MongoDB Atlas 217 | 218 | 1. Deployment is a complex matter since it involves many tasks - some of them are not even directly related to MongoDB. 219 | 2. Unless are a experienced admin (or got one), should consider a managed solution like MongoDB Atlas. 220 | 3. Atlas is a managed service where can configure a MongoDB environment and pay at a by-usage basis. 
-------------------------------------------------------------------------------- /MongoDB/Read.md: -------------------------------------------------------------------------------- 1 | # Read 2 | 3 | 1. Methods, Filters & Operations 4 | 2. Query Selectors (READ) 5 | 3. Projection Operators(READ) 6 | 7 | 8 | >💡 **Aggregation:** This is reading data from the database but it allows to set up of a pipeline of stages to funnel the data throw and permits a bunch of operations that allow the user to shape the data and get back from the database to the form needed in a particular app. 9 | 10 | 11 | ```bash 12 | db.myCollection.find({age: 30) 13 | => here {age: 30 } 14 | --> Filter 15 | age -> Field, 16 | 32 --> Value 17 | 18 | db.myCollection.find({age: { $gt: 30}}) 19 | => {age: { $gt: 30}} 20 | --> Filter(Range) 21 | > --> Operator 22 | ``` 23 | 24 | 25 | ### Operators 26 | 27 | | Read | Update | | | 28 | | --- | --- | --- | --- | 29 | | Query & Projection | Update | Query Modifiers | Aggregation | 30 | | Query Selectors | Fields | Change Query Behaviors | Pipeline Stages | 31 | | Projection Operators | Arrays | This is Deprecated now | Pipeline Operators | 32 | 33 | ### How Operators Impact Our Data 34 | 35 | | Type | Purpose | Changes Data | Example | 36 | | --- | --- | --- | --- | 37 | | Query Operator | Locate Data | blocked | $eq | 38 | | Projection Operator | Modify data presentation | blocked | $ | 39 | | Update Operator | Modify + add additional data | not blocked | &inc | 40 | 41 |
42 | `findOne()` ⇒ First matching document(if filter given) or first element of document | Simple movie sample 43 | 44 | ```bash 45 | > use movieData 46 | switched to db movieData 47 | > cls 48 | 49 | // findOne() => First matching document 50 | 51 | > db.movies.findOne() 52 | { 53 | "_id" : ObjectId("5f15a22a9bfbc37d06f66616"), 54 | "id" : 1, 55 | "url" : "http://www.tvmaze.com/shows/1/under-the-dome", 56 | "name" : "Under the Dome", 57 | "type" : "Scripted", 58 | "language" : "English", 59 | "genres" : [ 60 | "Drama", 61 | "Science-Fiction", 62 | "Thriller" 63 | ], 64 | "status" : "Ended", 65 | "runtime" : 60, 66 | "premiered" : "2013-06-24", 67 | "officialSite" : "http://www.cbs.com/shows/under-the-dome/", 68 | "schedule" : { 69 | "time" : "22:00", 70 | "days" : [ 71 | "Thursday" 72 | ] 73 | }, 74 | "rating" : { 75 | "average" : 6.5 76 | }, 77 | "weight" : 91, 78 | "network" : { 79 | "id" : 2, 80 | "name" : "CBS", 81 | "country" : { 82 | "name" : "United States", 83 | "code" : "US", 84 | "timezone" : "America/New_York" 85 | } 86 | }, 87 | "webChannel" : null, 88 | "externals" : { 89 | "tvrage" : 25988, 90 | "thetvdb" : 264492, 91 | "imdb" : "tt1553656" 92 | }, 93 | "image" : { 94 | "medium" : "http://static.tvmaze.com/uploads/images/medium_portrait/0/1.jpg", 95 | "original" : "http://static.tvmaze.com/uploads/images/original_untouched/0/1.jpg" 96 | }, 97 | "summary" : "

Under the Dome is the story of a small town that is suddenly and inexplicably sealed off from the rest of the world by an enormous transparent dome. The town's inhabitants must deal with surviving the post-apocalyptic conditions while searching for answers about the dome, where it came from and if and when it will go away.

", 98 | "updated" : 1529612668, 99 | "_links" : { 100 | "self" : { 101 | "href" : "http://api.tvmaze.com/shows/1" 102 | }, 103 | "previousepisode" : { 104 | "href" : "http://api.tvmaze.com/episodes/185054" 105 | } 106 | } 107 | } 108 | ``` 109 | 110 |
111 | 112 | 113 | 114 | 115 | # Query Selectors & Projection 116 | 117 | ## Query Selectors 118 | 119 | [Comparison](Read/Comparison.md) 120 | 121 | [Logical](Read/Logical.md) 122 | 123 | [Element](Read/Element.md) 124 | 125 | [Evaluation Operators](Read/Evaluation_Operators.md) 126 | 127 | [Array](Read/Array.md) 128 | 129 | Comments 130 | 131 | Geospatial(special) 132 | 133 | ### Projections Operators 134 | 135 | 1. $ 136 | 2. $elemMatch 137 | 3. $meta 138 | 4. $slice 139 | 140 | [Projection](Read/Projection.md) 141 | 142 | # Understanding Cursors 143 | 144 | When we use `find()` can get the all data like 100 million 145 | 146 | It can reduce if we include a query. 147 | 148 | Here cursors a pointer. cursor request batch to the server every time to get that data. 149 | 150 | If have a query that meets 1000 documents, but let's say you have a website where you only display 10 items, let's say 10 products you fetched at a time anyways, then there is no need to load all thousand results that matched your query right at the start. Instead, you would only fetch the first 10, display them on the screen and then go ahead and fetch the next 10, when the user navigated to the next page or anything like that. This is the idea behind a cursor. 151 | 152 | ```cpp 153 | > use movieData 154 | > db.movies.find().count() 155 | 240 156 | ``` 157 | 158 | Basically it returns 20 elements 159 | 160 | ```cpp 161 | db.movies.find().pretty() 162 | // type it for more 163 | > it 164 | ``` 165 | 166 | Get exactly one document, because next() gives the next document. 167 | 168 | `db.movies.find().next()` => giving this command will return the same document over and over as it works from scratch(from first) every time. 
169 | 170 | ```cpp 171 | > db.movies.find().next() 172 | ``` 173 | 174 | We can use JavaScript syntax in the mongo shell(We will get documents one by one as we stored them) 175 | 176 | ```bash 177 | > const dataCursor = db.movies.find() 178 | > dataCursor.next() => first element 179 | > dataCursor.next() => second element 180 | ``` 181 | 182 | `printjson()` is a mongo shell function that helps to print something into shell it fetched all documents(so it will not return cursor) 183 | 184 | ```bash 185 | dataCursor.forEach(document => {printjson(document)}) 186 | ``` 187 | 188 | Check whether the cursor has any next value 189 | 190 | ```bash 191 | dataCursor.hasNext() 192 | ``` 193 | 194 | Fetching data with sort(), One means ascending 195 | 196 | ```bash 197 | db.movies.find().sort({'rating.average': 1}).pretty() 198 | ``` 199 | 200 | Minus one means descending 201 | 202 | ```bash 203 | db.movies.find().sort({'rating.average': -1}).pretty() 204 | ``` 205 | 206 | Sort with multiple documents, here first sort the `'rating.average'` and if `'rating.average'` have the same value into particular indexes and then runtime executes with descending if may exist. 207 | 208 | ```bash 209 | > db.movies.find().sort({'rating.average': 1, runtime: -1}).pretty() 210 | ``` 211 | 212 | The cursor also works with sort() 213 | 214 | ```bash 215 | > db.movies.find().sort({'rating.average': 1, runtime: -1}).next() 216 | ``` 217 | 218 | We can skip a certain amount of elements, it is effective in pagination, when we work with pagination on the second page we need data from the 11th element, we can skip the previous 10 elements. 219 | 220 | ```bash 221 | db.movies.find().sort({'rating.average': 1, runtime: -1}).skip(10).pretty() 222 | ``` 223 | 224 | `skip()` is used to specify the number of documents to skip from the beginning. 225 | We can still include `limit()`; limit returns the exact number of elements to be retrieved at a time.
226 | 227 | ```bash 228 | db.movies.find().sort({'rating.average': 1,runtime: -1}).skip(100).limit(10).pretty() 229 | ``` 230 | 231 | Here the order in which we chain these methods does not matter: MongoDB always applies them in the order `sort()` -> `skip()` -> `limit()` (as in the previous example), regardless of which method we **write first**. -------------------------------------------------------------------------------- /MongoDB/Read/Array.md: -------------------------------------------------------------------------------- 1 | # Array 2 | 3 | `$elemMatch` 4 | 5 | `$size` 6 | 7 | `$all` 8 | 9 |<details>
10 | Initial 11 | 12 | ```schema 13 | > use user 14 | switched to db user 15 | ``` 16 | 17 |
18 | 19 | 20 | Find all hobbies that are sports. 21 | 22 | This won't work as this will try to get 100% matching object `{ hobbies: 'Sports' }` which does not match. 23 | 24 | ```bash 25 | > db.users.find({hobbies: 'Sports'}).pretty() 26 | ``` 27 | 28 | Also find nothing when using a nested document(as it does not has a 100% match) as it also has `“frequency”: 3` 29 | 30 | ```bash 31 | > db.users.find({hobbies: {title:'Sports'}}).pretty() 32 | ``` 33 | 34 | This also can not be a perfect value(as frequency can have other values but our goal is not that) 35 | 36 | ```bash 37 | > db.users.find({hobbies: {title:'Sports', frequency: 2}}).pretty() 38 | ``` 39 | 40 | Act as an **embedded document**. This is path embedded approach not only on directly embedded documents. This is similar to multiple embedded documents query. 41 | 42 | ```cpp 43 | > db.users.find({'hobbies.title': 'Sports'}).pretty() 44 | **Output** 45 | { 46 | "_id" : ObjectId("5f172a343a76a40cd42b836a"), 47 | "name" : "Max", 48 | "hobbies" : [ 49 | { 50 | "title" : "Sports", 51 | "frequency" : 3 52 | }, 53 | { 54 | "title" : "Cooking", 55 | "frequency" : 6 56 | } 57 | ], 58 | "phone" : 123495334 59 | } 60 | { 61 | "_id" : ObjectId("5f172c593a76a40cd42b836c"), 62 | "name" : "Anna", 63 | "hobbies" : [ 64 | { 65 | "title" : "Sports", 66 | "frequency" : 2 67 | }, 68 | { 69 | "title" : "Yoga", 70 | "frequency" : 3 71 | } 72 | ], 73 | "phone" : 1234953345, 74 | "age" : null 75 | } 76 | ``` 77 | 78 | # `$size` 79 | 80 | ### Want to find all users who have exactly 3 hobbies 81 | 82 | ```cpp 83 | //Inserting data 84 | > db.users.insertOne({name: 'Chris', hobbies: ['Sports', 'Cooking', 'Hiking']}) 85 | //Query 86 | > db.users.find({'hobbies': {$size: 3}}).pretty() 87 | ``` 88 | 89 | > If want to query hobbies greater than 3 or smaller. It does not support MongoDB 90 | > 91 | 92 | # `$all` 93 | 94 |
95 | Initialization 96 | 97 | ```schema 98 | > use boxOffice 99 | > db.moviestarts.find().pretty() 100 | ``` 101 |
102 | 103 | 104 | Want to find movies that have a genre of exactly action and thriller, in exactly this order: first action, then thriller. **Order matters**. 105 | 106 | ```cpp 107 | > db.moviestarts.find({genre: ['action', 'thriller']}).pretty() 108 | ``` 109 | 110 | `$all` matches if the array contains all the listed elements like 'action' and 'thriller'; the **order** does **not matter**, and the array may also contain additional elements. 111 | 112 | ```cpp 113 | > db.moviestarts.find({genre: {$all: ['action', 'thriller']}}).pretty() 114 | ``` 115 | 116 | Want to find all users who have a hobby of sports and the frequency should be greater than or equal to 2. 117 | 118 | ```cpp 119 | // if we change the query replace 2 with 3. does not work properly 120 | > db.users.find({$and: [{'hobbies.title': 'Sports'},{'hobbies.frequency': {$gte:2}}]}).pretty() 121 | { 122 | "_id" : ObjectId("5f172a343a76a40cd42b836a"), 123 | "name" : "Max", 124 | "hobbies" : [ 125 | { 126 | "title" : "Sports", 127 | "frequency" : 3 128 | }, 129 | { 130 | "title" : "Cooking", 131 | "frequency" : 6 132 | } 133 | ], 134 | "phone" : 123495334 135 | } 136 | { 137 | "_id" : ObjectId("5f172c593a76a40cd42b836c"), 138 | "name" : "Anna", 139 | "hobbies" : [ 140 | { 141 | "title" : "Sports", 142 | "frequency" : 2 143 | }, 144 | { 145 | "title" : "Yoga", 146 | "frequency" : 3 147 | } 148 | ], 149 | "phone" : 1234953345, 150 | "age" : null 151 | } 152 | ``` 153 | 154 | > The conditions are evaluated against all (available) embedded documents independently, not necessarily the same one — so it does not give the expected result. 155 | > 156 | 157 | For "Anna", in the first hobby the title is "Sports" but the frequency is 2 (we want greater than or equal to 3), while in the second hobby the frequency is 3 — so the document still matches, which we do not want. The conditions do not have to be satisfied by a single array element; they can be satisfied by different elements of the same document. 
158 | 159 | ```cpp 160 | > db.users.find({$and: [{'hobbies.title': 'Sports'},{'hobbies.frequency': {$gte:3}}]}).pretty() 161 | 162 | { 163 | "_id" : ObjectId("5f172a343a76a40cd42b836a"), 164 | "name" : "Max", 165 | "hobbies" : [ 166 | { 167 | "title" : "Sports", 168 | "frequency" : 3 169 | }, 170 | { 171 | "title" : "Cooking", 172 | "frequency" : 6 173 | } 174 | ], 175 | "phone" : 123495334 176 | } 177 | { 178 | "_id" : ObjectId("5f172c593a76a40cd42b836c"), 179 | "name" : "Anna", 180 | "hobbies" : [ 181 | { 182 | "title" : "Sports", 183 | "frequency" : 2 184 | }, 185 | { 186 | "title" : "Yoga", 187 | "frequency" : 3 188 | } 189 | ], 190 | "phone" : 1234953345, 191 | "age" : null 192 | } 193 | 194 | > db.users.find({$and: [{'hobbies.title': 'Sports'},{'hobbies.frequency': {$gte:3}}]}).pretty().count() 195 | ``` 196 | 197 | # `$elemMatch` 198 | 199 | we want to ensure that query/condition have to perform into same document/element at a time. If all conditions are true then will return that document. 200 | 201 | Performing queries into a single document. 
202 | 203 | ```cpp 204 | > db.users.find({hobbies: {$elemMatch:{title: 'Sports', frequency: {$gte: 3}}}}).pretty() 205 | **Output** 206 | { 207 | "_id" : ObjectId("5f172a343a76a40cd42b836a"), 208 | "name" : "Max", 209 | "hobbies" : [ 210 | { 211 | "title" : "Sports", 212 | "frequency" : 3 213 | }, 214 | { 215 | "title" : "Cooking", 216 | "frequency" : 6 217 | } 218 | ], 219 | "phone" : 123495334 220 | } 221 | ``` -------------------------------------------------------------------------------- /MongoDB/Read/Comparison.md: -------------------------------------------------------------------------------- 1 | # Comparison 2 | 3 | `$ne` `$eq` `$lt` `$lte` `$gt` `$gte` `$in` `$nin` 4 | 5 | - `$ne` ⇒ not equal 6 | - `$eq` ⇒ equal 7 | - `$lt` ⇒ less/lower than 8 | - `$lte` ⇒ lower than or equal 9 | - `$gt` ⇒ greater than 10 | - `$gte` ⇒ greater than or equal 11 | - `$in` ⇒ selects the documents where the value of a field **equals any value** in the **specified array**. 12 | - `$nin` ⇒ selects the documents where the value of a field **does not equal any value** in the **specified array**. 13 | 14 | `find()` ⇒ returns cursor and technically gives us all documents but we have to iterate. For the shell, it gives the first 20 documents. 15 | 16 | `findOne()` ⇒ same as the find and give back the first matched document also no cursor. 17 | 18 | ### Code Examples 19 | 20 | ```bash 21 | > db.movies.find({runtime: 60}).pretty() 22 | > db.movies.findOne({runtime: 60}) 23 | 24 | > db.movies.find({runtime: {$ne: 60}}).pretty() 25 | > db.movies.find({runtime: {$lt: 40}}).pretty() 26 | > db.movies.find({runtime: {$lte: 40}}).pretty() 27 | > db.movies.find({runtime: {$gt: 40}}).pretty() 28 | > db.movies.find({runtime: {$gte: 40}}).pretty() 29 | ``` 30 | 31 | ### Query into Embedded array 32 | 33 | Will return documents that have Drama in genres array. There can be other element in genres array(like "genres": [ "Drama", "Action", "Anime", "Horror" ]) or can be only Drama. 
34 | 35 | ```bash 36 | > db.movies.find({genres:"Drama"}).pretty() 37 | ``` 38 | 39 | To exact query (exactly this "genres": ["Drama"] array in genres). Will be no other element in the array 40 | 41 | ```bash 42 | > db.movies.find({genres:["Drama"]}).pretty() 43 | ``` 44 | 45 | It will find us all documents that have a runtime of 30 or 42 but not 60 46 | 47 | ```cpp 48 | //[30,42] --> this is set of values not a range 49 | //{$in:[30,42]} => will select all document in which runtime have 30 or 42. 50 | //{$nin:[30,42]} => will select all document in which runtime have not 30 or 42. 51 | > db.movies.find({runtime: {$in:[30,42]}}).pretty() 52 | > db.movies.find({runtime: {$nin:[30,42]}}).pretty() 53 | ``` 54 | 55 | ### Embedded Documents 56 | 57 | In embedded documents have to use must quotes. 58 | 59 | ```bash 60 | > db.movies.find({"rating.average": {$gt: 7}}).pretty() 61 | ``` -------------------------------------------------------------------------------- /MongoDB/Read/Element.md: -------------------------------------------------------------------------------- 1 | # Element 2 | 3 | `$exists` `$type` 4 | 5 | ### `$exists` 6 |
7 | Inserting data 8 | 9 | ```bash 10 | > db.users.insertMany([ 11 | { 12 | name: 'Max', 13 | hobbies: [ 14 | {title: 'Sports', frequency: 3}, 15 | {title: 'Cooking', frequency: 6} 16 | ], 17 | phone: 0123495334 18 | }, 19 | { 20 | name: 'Manuel', 21 | hobbies: [ 22 | {title: 'Cooking', frequency: 5}, 23 | {title: 'Cars', frequency: 6} 24 | ], 25 | phone: '043453495334', 26 | age: 30 27 | } 28 | ]) 29 | **Output** 30 | { 31 | "acknowledged" : true, 32 | "insertedIds" : [ 33 | ObjectId("5f172a343a76a40cd42b836a"), 34 | ObjectId("5f172a343a76a40cd42b836b") 35 | ] 36 | } 37 | 38 | > db.users.find().pretty() 39 | { 40 | "_id" : ObjectId("5f172a343a76a40cd42b836a"), 41 | "name" : "Max", 42 | "hobbies" : [ 43 | { 44 | "title" : "Sports", 45 | "frequency" : 3 46 | }, 47 | { 48 | "title" : "Cooking", 49 | "frequency" : 6 50 | } 51 | ], 52 | "phone" : 123495334 53 | } 54 | { 55 | "_id" : ObjectId("5f172a343a76a40cd42b836b"), 56 | "name" : "Manuel", 57 | "hobbies" : [ 58 | { 59 | "title" : "Cooking", 60 | "frequency" : 5 61 | }, 62 | { 63 | "title" : "Cars", 64 | "frequency" : 6 65 | } 66 | ], 67 | "phone" : "043453495334", 68 | "age" : 30 69 | } 70 | ``` 71 | 72 |
73 | 74 | 75 | 76 | 77 | Here checking whether an element exists or not 78 | 79 | ```bash 80 | > db.users.find({age: {$exists: true}}).pretty() 81 | **Output** 82 | { 83 | "_id" : ObjectId("5f172a343a76a40cd42b836b"), 84 | "name" : "Manuel", 85 | "hobbies" : [ 86 | { 87 | "title" : "Cooking", 88 | "frequency" : 5 89 | }, 90 | { 91 | "title" : "Cars", 92 | "frequency" : 6 93 | } 94 | ], 95 | "phone" : "043453495334", 96 | "age" : 30 97 | } 98 | ``` 99 | 100 | Can also check multiple logic element age exits and also greater than 30 101 | 102 | ```cpp 103 | > db.users.find({age: {$exists: true, $gt: 30}}).pretty() 104 | > db.users.find({age: {$exists: true, $gte: 30}}).pretty() 105 | ``` 106 | 107 | `null` ⇒ `true` for `$exists: true` but `false` for `$exists: false` 108 | 109 | ```bash 110 | // if the element value is null then it also be exists( 111 | > db.users.insertMany([{name: 'Anna', hobbies: [{title: 'Sports', frequency: 2},{title: 'Yoga', frequency: 3}], phone: 01234953345, age: null}]) 112 | **Output** 113 | { 114 | "acknowledged" : true, 115 | "insertedIds" : [ 116 | ObjectId("5f172c593a76a40cd42b836c") 117 | ] 118 | } 119 | 120 | > db.users.find({age: {$exists: true}}).pretty() 121 | **Output** 122 | { 123 | "_id" : ObjectId("62e167dbc5b6842cf5a061c0"), 124 | "name" : "Anna", 125 | "hobbies" : [ 126 | { 127 | "title" : "Sports", 128 | "frequency" : 2 129 | }, 130 | { 131 | "title" : "Yoga", 132 | "frequency" : 3 133 | } 134 | ], 135 | "phone" : 1234953345, 136 | "age" : null 137 | } 138 | ``` 139 | 140 | But if we check with exists value is false and if an element value has null but exists then it also does not show 141 | 142 | ```cpp 143 | > db.users.find({age: {$exists: false}}).pretty() 144 | **Output** => No output as age is null 145 | ``` 146 | 147 | Checking is age exits and the value is not `null` 148 | 149 | ```bash 150 | > db.users.find({age: {$exists: true, $ne: null}}).pretty() 151 | ``` 152 | 153 | # Types 154 | 155 | ```bash 156 | Type Number 
Alias Notes 157 | Double 1 “double” 158 | String 2 “string” 159 | Object 3 “object” 160 | Array 4 “array” 161 | Binary data 5 “binData” 162 | Undefined 6 “undefined” Deprecated. 163 | ObjectId 7 “objectId” 164 | Boolean 8 “bool” 165 | Date 9 “date” 166 | Null 10 “null” 167 | Regular Expression 11 “regex” 168 | DBPointer 12 “dbPointer” Deprecated. 169 | JavaScript 13 “javascript” 170 | Symbol 14 “symbol” Deprecated. 171 | JavaScript (with scope) 15 “javascriptWithScope” 172 | 32-bit integer 16 “int” 173 | Timestamp 17 “timestamp” 174 | 64-bit integer 18 “long” 175 | Decimal128 19 “decimal” New in version 3.4. 176 | Min key -1 “minKey” 177 | Max key 127 “maxKey” 178 | ``` 179 | 180 | ### Checking with alias 181 | 182 | Will return document which has key phone which value is number type. 183 | 184 | ```bash 185 | // checking with alias 186 | > db.users.find({phone: {$type: 'number'}}).pretty() 187 | ``` 188 | 189 | As **shell** is **based** **on** **JavaScript** **number** and **double** would be the **same answer**. In the **database**, the number is stored into a **floating point** number as **double**. JS drivers only know it always double 190 | 191 | ```cpp 192 | > db.users.find({phone: {$type: 'double'}}).pretty() 193 | ``` 194 | 195 | Also can add multiple types of double or string(both cases true) 196 | 197 | ```cpp 198 | > db.users.find({phone: {$type: ['double','string']}}).pretty() 199 | ``` -------------------------------------------------------------------------------- /MongoDB/Read/Evaluation_Operators.md: -------------------------------------------------------------------------------- 1 | # Evaluation Operators 2 | 3 | `$expr` `$regex` `$text` `$where` 4 | 5 | `$expr` ⇒ Compare two fields inside one document then return that fields. 6 | 7 | `$regex` ⇒ Allows text search. 8 | 9 | ### `$regex` 10 | 11 | Return the document that found the word. 
It is not the best (most efficient) way to find text — for better performance use a text index with `$text`. 12 | 13 | ```bash 14 | > db.movies.find({summary: {$regex: /musical/}}).pretty() 15 | ``` 16 | 17 | ### `$expr` 18 |<details>
19 | Inserting Demo Data 20 | 21 | ```schema 22 | > use financialData 23 | switched to db financialData 24 | > db.sales.insertMany([{volume: 100, target: 120},{volume: 89, target: 80},{volume: 200, target: 177}]) 25 | 26 | > db.sales.find().pretty() 27 | { 28 | "_id" : ObjectId("5f17491c3a76a40cd42b836d"), 29 | "volume" : 100, 30 | "target" : 120 31 | } 32 | { 33 | "_id" : ObjectId("5f17491c3a76a40cd42b836e"), 34 | "volume" : 89, 35 | "target" : 80 36 | } 37 | { 38 | "_id" : ObjectId("5f17491c3a76a40cd42b836f"), 39 | "volume" : 200, 40 | "target" : 177 41 | } 42 | ``` 43 |</details>
44 | 45 | 46 | 47 | 48 | We want to find all entries and all items in this collection where the volume is above the target. 49 | 50 | we have to use single/double quotes in the query, have to pass reference fields name have use dollar signs before fields also. This worked successfully. 51 | 52 | ```bash 53 | > db.sales.find({$expr: {$gt: ['$volume', '$target']}}).pretty() 54 | **Output** 55 | { 56 | "_id" : ObjectId("5f17491c3a76a40cd42b836e"), 57 | "volume" : 89, 58 | "target" : 80 59 | } 60 | { 61 | "_id" : ObjectId("5f17491c3a76a40cd42b836f"), 62 | "volume" : 200, 63 | "target" : 177 64 | } 65 | ``` 66 | 67 | This will not work cause do not use the `$` sign. 68 | 69 | ```bash 70 | > db.sales.find({$expr: {$gt: ['volume', 'target']}}).pretty() 71 | ``` 72 | 73 | ### `$cond` **⇒ conditional because we are in the document.** 74 | 75 | This is actually from the aggregation framework. 76 | 77 | > If the volume is greater than or equal to 190 then subtract 10 from the volume and use subtracted value for further operation else use the original value of volume for further operation and we get volume dynamically. 78 | > 79 | 80 | ```bash 81 | {$cond: {if: {$gte: ['$volume', 190]}, then: {$subtract: ['$volume', 10]}, else: '$volume'}} 82 | // db.sales.find({$expr: {$gt: ['$volume', '$target']}}).pretty() 83 | ``` 84 | 85 | ```bash 86 | > db.sales.find({$expr: {$gt: [{$cond: {if: {$gte: ['$volume', 190]}, then: {$subtract: ['$volume', 10]}, else: '$volume'}}, '$target']}}).pretty() 87 | **Output** 88 | { 89 | "_id" : ObjectId("5f17491c3a76a40cd42b836e"), 90 | "volume" : 89, 91 | "target" : 80 92 | } 93 | { 94 | "_id" : ObjectId("5f17491c3a76a40cd42b836f"), 95 | "volume" : 200, 96 | "target" : 177 97 | } 98 | ``` 99 | 100 | > If I increase the subtracted value logically then the result might be changed. 
101 | > 102 | 103 | ```bash 104 | > db.sales.find({$expr: {$gt: [{$cond: {if: {$gte: ['$volume', 190]}, then: {$subtract: ['$volume', 30]}, else: '$volume'}}, '$target']}}).pretty() 105 | **Output** 106 | { 107 | "_id" : ObjectId("5f17491c3a76a40cd42b836e"), 108 | "volume" : 89, 109 | "target" : 80 110 | } 111 | ``` -------------------------------------------------------------------------------- /MongoDB/Read/Logical.md: -------------------------------------------------------------------------------- 1 | # Logical 2 | 3 | `$or` `$and` `$not` `$nor` 4 | 5 | - `$nor` => operation on an array of one or more query expressions and selects the documents that fail all the query expressions in the array. 6 | 7 | # Multiple Query 8 | 9 | ### `$or`, `$nor` 10 | 11 | ```bash 12 | //Total number of documents with an average rating less than 5 or greater than 9.3 nothing in between 13 | 14 | > db.movies.find({$or: [{"rating.average": {$lt: 5}},{"rating.average": {$gt: 9.3}}]}).count() 15 | > db.movies.find({$or: [{"rating.average": {$lt: 5}},{"rating.average": {$gt: 9.3}}]}).pretty() 16 | 17 | //Total number of documents with no average rating less than 5 or not greater than 9.3 means none of the conditions matched. 
18 | > db.movies.find({$nor: [{"rating.average": {$lt: 5}},{"rating.average": {$gt: 9.3}}]}).count() 19 | ``` 20 | 21 | ### `$and` 22 | 23 | ```bash 24 | // this is the older command 25 | 26 | //Total number of documents with an average rating greater than 9 and genres is Drama 27 | 28 | > db.movies.find({$and: [{"rating.average": {$gt: 9}},{genres : "Drama"}]}).count() 29 | > db.movies.find({$and: [{"rating.average": {$gt: 9}},{genres : "Drama"}]}).pretty() 30 | 31 | // latest command (using only one document) result is same as above({$and: [{"rating.average": {$gt: 9}},{genres : "Drama"}]}) 32 | 33 | //Total number of documents with an average rating greater than 9 and genres is Drama 34 | > db.movies.find({"rating.average": {$gt: 9}, genres : "Drama"}).count() 35 | ``` 36 | 37 | This does not work and can give the wrong value (sometimes): the same JSON key is not permitted twice in one document, so the second genres value replaces the first one when the query is parsed 38 | 39 | ```bash 40 | > db.movies.find({genres : "Drama", genres: 'Horror'}).count() 41 | 42 | // this becomes like this 43 | > db.movies.find({genres: 'Horror'}).count() 44 | ``` 45 | 46 | When we query the same field/key more than once, we must use `$and`. 
(In this case, we must have to use and) 47 | 48 | ```bash 49 | > db.movies.find({$and: [{genres : "Drama"}, {genres: 'Horror'}]}).count() 50 | > db.movies.find({runtime: {$not :{$eq: 60}}}).count() 51 | ``` 52 | 53 | ### `$not` 54 | 55 | `$not` just inverse the logic of the query operator, this is also equal to the 56 | 57 | ```cpp 58 | > db.movies.find({runtime: {$not: {$eq: 60}}}).count() 59 | //equals to db.movies.find({runtime: {$ne: 60}}).count() 60 | ``` -------------------------------------------------------------------------------- /MongoDB/Read/Projection.md: -------------------------------------------------------------------------------- 1 | # Projection 2 | 3 | # Using Projection to Share our Results 4 | 5 | Want to retrieve elements of specific fields, no need to check the other fields. they are executed by default but objectId always includes them by default. 6 | 7 | ```cpp 8 | > db.movies.find({}, {name: 1, genres: 1, runtime: 1, rating: 1}).pretty() 9 | ``` 10 | 11 | To exclude the objectId 12 | 13 | ```cpp 14 | > db.movies.find({}, {name: 1, genres: 1, runtime: 1, rating: 1, _id: 0}).pretty() 15 | ``` 16 | 17 | Can also use embedded documents with path notation 18 | 19 | ```cpp 20 | > db.movies.find({}, {name: 1, genres: 1, runtime: 1, 'rating.average': 1, _id: 0}).pretty() 21 | ``` 22 | 23 | The schedule has a time and date field but only the time element will be returned. 24 | 25 | ```cpp 26 | > db.movies.find({}, {name: 1, genres: 1, runtime: 1, 'rating.average': 1, 'schedule.time': 1,_id: 0}).pretty() 27 | ``` 28 | 29 | Can also add logic for the filter. 
30 | 31 | ```cpp 32 | > db.movies.find({'rating.average': {$gt: 8}}, {name: 1, genres: 1, runtime: 1, 'rating.average': 1, 'schedule.time': 1,_id: 0}).pretty() 33 | ``` 34 | 35 | ## Projection in Arrays 36 | 37 | Simply array query 38 | 39 | ```bash 40 | > db.movies.find({genres: 'Drama'}).pretty() 41 | ``` 42 | 43 | Return the array projection of the related query(in genres array will be only one element others will be neglected). 44 | 45 | ```bash 46 | > db.movies.find({genres: 'Drama'},{'genres.$': 1}).pretty() 47 | **Output** 48 | { "_id" : ObjectId("61beab75a2d88f2d3d82d451"), "genres" : [ "Drama" ] } 49 | { "_id" : ObjectId("61beab75a2d88f2d3d82d452"), "genres" : [ "Drama" ] } 50 | { "_id" : ObjectId("61beab75a2d88f2d3d82d454"), "genres" : [ "Drama" ] } 51 | { "_id" : ObjectId("61beab75a2d88f2d3d82d458"), "genres" : [ "Drama" ] } 52 | and so on. 53 | ``` 54 | 55 | It does not work properly(will return only for "Horror" as "Drama" will be replaced by "Horror"). 56 | 57 | ```cpp 58 | > db.movies.find({genres: {$all: ['Drama', 'Horror']}},{'genres.$': 1}).pretty() 59 | { "_id" : ObjectId("61beab75a2d88f2d3d82d455"), "genres" : [ "Horror" ] } 60 | { "_id" : ObjectId("61beab75a2d88f2d3d82d45c"), "genres" : [ "Horror" ] } 61 | and so on. 62 | ``` 63 | 64 | This projection is the element-wise and exact query 65 | 66 | `{$elemMatch: {$eq: 'Horror'}}` --> this thing decide which item is displayed or not 67 | 68 | Those document which has only Drama in genres will just print the id and those which has both drama and horror in genres in the genres will print "genres" : ["Horror"]. 69 | 70 | ```bash 71 | > db.movies.find({genres: 'Drama'},{genres: {$elemMatch: {$eq: 'Horror'}}}).pretty() 72 | **Ouput** 73 | { "_id" : ObjectId("61beab75a2d88f2d3d82d451") } => has only genres "Drama" which is must-have. 
74 | { "_id" : ObjectId("61beab75a2d88f2d3d82d452") } 75 | { "_id" : ObjectId("61beab75a2d88f2d3d82d454") } 76 | { "_id" : ObjectId("61beab75a2d88f2d3d82d455"), "genres" : [ "Horror" ] } => has both genres "Drama" and "Horror" 77 | and so on. 78 | ``` 79 | 80 | Can also check with other element 81 | 82 | ```cpp 83 | db.movies.find({'rating.average':{$gt: 9}},{genres: {$elemMatch: {$eq: 'Horror'}}}).pretty() 84 | ``` 85 | 86 | ## Projection Slice 87 | 88 | Slicing array that I want, can add any number 89 | `$slice: 2` --> how many array elements we want to show from first 90 | 91 | ```bash 92 | db.movies.find({'rating.average':{$gt: 9}}, {genres: {$slice: 2}, name: 1}) 93 | { "_id" : ObjectId("61beab75a2d88f2d3d82d46f"), "name" : "Berserk", "genres" : [ "Anime", "Fantasy" ] } 94 | { "_id" : ObjectId("61beab75a2d88f2d3d82d4a6"), "name" : "Game of Thrones", "genres" : [ "Drama", "Adventure" ] } 95 | { "_id" : ObjectId("61beab75a2d88f2d3d82d4fa"), "name" : "Firefly", "genres" : [ "Adventure", "Science-Fiction" ] } 96 | { "_id" : ObjectId("61beab75a2d88f2d3d82d4fb"), "name" : "The Wire", "genres" : [ "Drama", "Crime" ] } 97 | and so on. 98 | ``` 99 | 100 | Can also be executed with an array from 101 | // 1--> What lengths of elements do we want to skip (index - start from 1) 102 | // 2--> How many elements do we want to show 103 | 104 | ```cpp 105 | db.movies.find({'rating.average':{$gt: 9}}, {genres: {$slice: [1, 2]}, name: 1}) 106 | { "_id" : ObjectId("61beab75a2d88f2d3d82d46f"), "name" : "Berserk", "genres" : [ "Fantasy", "Horror" ] } 107 | { "_id" : ObjectId("61beab75a2d88f2d3d82d4a6"), "name" : "Game of Thrones", "genres" : [ "Adventure", "Fantasy" ] } 108 | { "_id" : ObjectId("61beab75a2d88f2d3d82d4fa"), "name" : "Firefly", "genres" : [ "Science-Fiction", "Western" ] } 109 | and so on. 
110 | // checking 111 | > db.movies.find({_id: ObjectId("5f15a22a9bfbc37d06f66662")},{genres: 1}).pretty() 112 | { 113 | "_id" : ObjectId("5f15a22a9bfbc37d06f66662"), 114 | "genres" : [ 115 | "Drama", 116 | "Adventure", 117 | "Fantasy" 118 | ] 119 | } 120 | ``` 121 | 122 | ```cpp 123 | > db.movies.find({'rating.average':{$gt: 9}}, {genres: {$slice: [2, 2]}, name: 1}).pretty() 124 | 125 | { 126 | "_id" : ObjectId("5f15a22a9bfbc37d06f6662d"), 127 | "name" : "Berserk", 128 | "genres" : [ 129 | "Horror" 130 | ] 131 | } 132 | { 133 | "_id" : ObjectId("5f15a22a9bfbc37d06f66662"), 134 | "name" : "Game of Thrones", 135 | "genres" : [ 136 | "Fantasy" 137 | ] 138 | } 139 | { 140 | "_id" : ObjectId("5f15a22a9bfbc37d06f666b7"), 141 | "name" : "Breaking Bad", 142 | "genres" : [ 143 | "Thriller" 144 | ] 145 | } 146 | { 147 | "_id" : ObjectId("5f15a22a9bfbc37d06f666c0"), 148 | "name" : "The Wire", 149 | "genres" : [ ] 150 | } 151 | { 152 | "_id" : ObjectId("5f15a22a9bfbc37d06f666c1"), 153 | "name" : "Firefly", 154 | "genres" : [ 155 | "Western" 156 | ] 157 | } 158 | { 159 | "_id" : ObjectId("5f15a22a9bfbc37d06f666d8"), 160 | "name" : "Stargate SG-1", 161 | "genres" : [ 162 | "Science-Fiction" 163 | ] 164 | } 165 | { 166 | "_id" : ObjectId("5f15a22a9bfbc37d06f666e2"), 167 | "name" : "Rick and Morty", 168 | "genres" : [ 169 | "Science-Fiction" 170 | ] 171 | } 172 | ``` -------------------------------------------------------------------------------- /MongoDB/Schema_&_Relations.md: -------------------------------------------------------------------------------- 1 | # Schema & Relations 2 | 3 | # Relations 4 | | Relation Type | Example 5 | | --- |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 6 | | One-to-one relationship | **Patient:** Summary of diseases

**Person:** <=> Car (One car can be owned by only one person)<br>

Can be done like embedded documents or two in different collections and referencing each other by id. | 7 | | One to many relationship | Question and Answer(Embedded works with no problem).

Book and Author (a reference should be used, as the author data may change).<br>

City and Citizens (Two collections with references will work better). | 8 | Many to many relationship | **Customers:** Products(Orders). One customer has many products(via orders), a product belongs to many customers.<br>

**Book:** Author(A book can be written by multiple authors). | 9 | 10 |<details>
11 | Initializing two collections for join 12 | Creating authors collection 13 | 14 | ```scheme 15 | db.authors.insertMany( 16 | [ 17 | {name: 'Max Scwarz',age: 29, address:{street: 'Main'}}, 18 | {name: 'Manuel Lor',age: 30, address:{street: 'Tree'}} 19 | ] 20 | ) 21 | ``` 22 | **Output** 23 | ```scheme 24 | { 25 | "acknowledged" : true, 26 | "insertedIds" : [ 27 | ObjectId("5f145a7c231893e15e9e53fe"), 28 | ObjectId("5f145a7c231893e15e9e53ff") 29 | ] 30 | } 31 | ``` 32 |
33 | 34 | ```scheme 35 | > db.authors.find().pretty() 36 | ``` 37 | **Output** 38 | 39 | ```scheme 40 | { 41 | "_id" : ObjectId("5f145a7c231893e15e9e53fe"), 42 | "name" : "Max Scwarz", 43 | "age" : 29, 44 | "address" : { 45 | "street" : "Main" 46 | } 47 | } 48 | { 49 | "_id" : ObjectId("5f145a7c231893e15e9e53ff"), 50 | "name" : "Manuel Lor", 51 | "age" : 30, 52 | "address" : { 53 | "street" : "Tree" 54 | } 55 | } 56 | ``` 57 | 58 | Creating books collection 59 | 60 | ```scheme 61 | db.books.insertOne( 62 | { 63 | name: 'My favorite Book', 64 | authors: 65 | [ 66 | ObjectId("5f145a7c231893e15e9e53fe"), 67 | ObjectId("5f145a7c231893e15e9e53ff") 68 | ] 69 | } 70 | ) 71 | ``` 72 | **Output** 73 | 74 | ```scheme 75 | { 76 | "acknowledged" : true, 77 | "insertedId" : ObjectId("5f145b5f231893e15e9e5400") 78 | } 79 | ``` 80 |
81 | 82 | ```scheme 83 | db.books.find().pretty() 84 | ``` 85 | **Output** 86 | ```scheme 87 | { 88 | "_id" : ObjectId("5f145b5f231893e15e9e5400"), 89 | "name" : "My favorite Book", 90 | "authors" : [ 91 | ObjectId("5f145a7c231893e15e9e53fe"), 92 | ObjectId("5f145a7c231893e15e9e53ff") 93 | ] 94 | } 95 | ``` 96 |
97 | 98 | 99 | # Lookup function 100 | 101 | | from | Collection name(From which collection/foreign collection) | 102 | | --- | --- | 103 | | localField | Current documents key in which the _id's(reference for other collection) of other collection are stored. | 104 | | foreignField | Field we are targeting in foreign collection. | 105 | | as | Will be created this key automatically(by the given name) with the targeted document from another(targeted) collection. | 106 | 107 | ```scheme 108 | > db.books.aggregate([ 109 | { 110 | $lookup: 111 | { 112 | from: 'authors', 113 | localField:'authors', 114 | foreignField:"_id", 115 | as:'creators' 116 | } 117 | } 118 | ]).pretty() 119 | ``` 120 | **Output** 121 | ```scheme 122 | { 123 | "_id" : ObjectId("5f145b5f231893e15e9e5400"), 124 | "name" : "My favorite Book", 125 | "authors" : [ 126 | ObjectId("5f145a7c231893e15e9e53fe"), 127 | ObjectId("5f145a7c231893e15e9e53ff") 128 | ], 129 | "creators" : [ 130 | { 131 | "_id" : ObjectId("5f145a7c231893e15e9e53fe"), 132 | "name" : "Max Scwarz", 133 | "age" : 29, 134 | "address" : { 135 | "street" : "Main" 136 | } 137 | }, 138 | { 139 | "_id" : ObjectId("5f145a7c231893e15e9e53ff"), 140 | "name" : "Manuel Lor", 141 | "age" : 30, 142 | "address" : { 143 | "street" : "Tree" 144 | } 145 | } 146 | ] 147 | } 148 | ``` 149 | 150 | # Schema Validation 151 | 152 | | Validation Level | Validation Mode | 153 | |------------------|----------------------------------------------------------------------------------------------| 154 | | validationLevel ⇒ Which document get validated? | strict ⇒ All inserts & updates.
moderate ⇒ All inserts, and updates only to documents that already fulfill the validation schema. | 155 | | validationAction ⇒ What happens if validation fails? | error ⇒ Throw error and deny insert/update<br>
warn ⇒ Log warning but proceed. | 156 | 157 | Adding validation(should be set before inserting) 158 | 159 | ```scheme 160 | > db.createCollection("posts", { 161 | validator: { 162 | $jsonSchema: { 163 | bsonType: "object", 164 | required: ["title", "text", "creator", "comments"], 165 | properties : { 166 | title: { 167 | bsonType : "string", 168 | description: "must be a string and is required." 169 | }, 170 | text: { 171 | bsonType : "string", 172 | description: "must be a string and is required." 173 | }, 174 | creator: { 175 | bsonType : "objectId", 176 | description: "must be a objectId and is required." 177 | }, 178 | comments: { 179 | bsonType : "array", 180 | description: "must be an array and is required.", 181 | items: { 182 | bsonType: "object", 183 | required: ["text", "author"], 184 | properties: { 185 | text: { 186 | bsonType : "string", 187 | description: "must be a string and is required." 188 | }, 189 | author: { 190 | bsonType : "objectId", 191 | description: "must be an objectId and is required." 192 | }, 193 | } 194 | } 195 | } 196 | } 197 | } 198 | } 199 | }); 200 | ``` 201 | 202 | ### Modifying existing validator 203 | 204 | `collMod`(collectionModifier) ⇒ Name of the collection whose validation rule will be updated. 205 | Note: like as validator as previous. 
206 | 207 | ```scheme 208 | db.runCommand({ 209 | collMod: "posts", 210 | validator: { 211 | $jsonSchema: { 212 | bsonType: "object", 213 | required: ["title", "text", "creator", "comments"], 214 | properties: { 215 | title: { 216 | bsonType: "string", 217 | description: "must be a string and is required.", 218 | }, 219 | text: { 220 | bsonType: "string", 221 | description: "must be a string and is required.", 222 | }, 223 | creator: { 224 | bsonType: "objectId", 225 | description: "must be a objectId and is required.", 226 | }, 227 | comments: { 228 | bsonType: "array", 229 | description: "must be an array and is required.", 230 | items: { 231 | bsonType: "object", 232 | required: ["text", "author"], 233 | properties: { 234 | text: { 235 | bsonType: "string", 236 | description: "must be a string and is required.", 237 | }, 238 | author: { 239 | bsonType: "objectId", 240 | description: "must be an objectId and is required.", 241 | }, 242 | }, 243 | }, 244 | }, 245 | }, 246 | }, 247 | }, 248 | validationAction: "warn", 249 | }); 250 | ``` 251 | 252 | # Data Types 253 | 254 | - Text 255 | - Boolean 256 | - true 257 | - false 258 | - Number 259 | - Integer(int32) 260 | - NumberLong(int64) 261 | - NumberDecimal 262 | - ObjectId 263 | - Time 264 | - ISODate 265 | - Timestamp 266 | - Embedded Document 267 | - Array -------------------------------------------------------------------------------- /MongoDB/Security.md: -------------------------------------------------------------------------------- 1 | # Security 2 | 3 | **Not enough taught in this course and not all commands worked on my machine.** 4 | 5 | # Security Check List 6 | 7 | 1. Authentication and Authorization 8 | 2. Transport Encryption 9 | 1. Data sending from my app to server should be encrypted. 10 | 3. Encryption at Rest 11 | 1. Data in the database also should be encrypted as if anyone get the data could not read those. 12 | 4. Auditing 13 | 5. Server & Network Config and Setup 14 | 6. 
Backups & Software Updates 15 | 16 | > The first 3 are the most important checklist items in terms of security 17 | > 18 | 19 | # Authentication & Authorization 20 | 21 | Authentication : Identifies valid users of the database. For example, you are an employee and therefore may access the office. 22 | Authorization : Identifies what these users may actually do in the database. For example, you are employed as an accountant and therefore may access the office and process orders. 23 | 24 | # Role Based Access Control 25 | 26 | ## MongoDB Server 27 | 28 | (Auth Enabled) 29 | 30 | Shop database : 31 | 1. Products Collection 32 | 2. Customers Collection 33 | Blog database 34 | 1. Posts Collection 35 | 2. Authors Collection 36 | Admin database 37 | 38 | Why Roles? 39 | **Different Types of Database users** 40 | 41 | Administrator 42 | 43 | 1. Needs to be able to manage the database config, create users etc. 44 | 2. Does not need to be able to insert or fetch data. 45 | 46 | Developers / Your App 47 | 48 | 1. Needs to be able to insert, update, delete or fetch data(CRUD). 49 | 2. Does not need to be able to create users or manage the database config. 50 | 51 | Data Scientist 52 | 53 | 1. Needs to be able to fetch data. 54 | 2. Does not need to be able to create users, manage the database config or insert, edit or delete data. 55 | 56 | # Creating & Editing Users 57 | 58 | createUser(), updateUser() --> ('name': 'Max') --> (Roles and Privileges) --> Database (e.g admin) --> Access is not limited to the authentication database 59 | 60 | To enable authentication for a MongoDB database. 61 | 62 | > After running this command we will be able to access the database, meaning MongoDB will ask for a username and password. The very first time no username and password exist yet, so we have to create them. After adding them, MongoDB will ask for a username and password every time we access the db. 
63 | > 64 | 65 | ```bash 66 | sudo mongod --auth 67 | ``` 68 | 69 | This does not work 70 | 71 | ```cpp 72 | mongo -u max -p 123456 73 | ``` 74 | 75 | Creating a user. This is a granted user 76 | 77 | ```cpp 78 | > use admin 79 | switched to db admin 80 | 81 | > db.createUser( { user: 'Jahid', pwd: 'Jahid', roles: ['userAdminAnyDatabase'] } ) 82 | **Output** 83 | { ok: 1 } 84 | ``` 85 | 86 | Let authenticate as an admin 87 | 88 | ```cpp 89 | > db.auth('Jahid', 'Jahid') 90 | **Output** 91 | { ok: 1 } 92 | ``` 93 | 94 | # Built-in-Roles 95 | 96 | 1. Database User 97 | 1. read 98 | 2. readWrite 99 | 2. Database Admin 100 | 1. dbAdmin 101 | 2. userAdmin 102 | 3. dbOwner 103 | 3. All Database Roles 104 | 1. readAnyDatabase 105 | 2. readWriteAnyDatabase 106 | 3. userAdminAnyDatabase 107 | 4. dbAdminAnyDatabase 108 | 4. Cluster Admin 109 | 1. clusterManager 110 | 2. clusterMonitor 111 | 3. hostManager 112 | 4. clusterAdmin 113 | 5. Backup/Restore 114 | 1. backup 115 | 2. restore 116 | 6. Superuser 117 | 1. dbOwner (admin) 118 | 2. userAdmin(admin) 119 | 3. userAdminAnyDatabase 120 | 4. 
root 121 | 122 | // to logout 123 | 124 | # Assigning Roles to Users Database 125 | 126 | Let's go to the database 127 | 128 | ```cpp 129 | mongo --help 130 | ``` 131 | 132 | Adding user for a particular database 133 | 134 | ```cpp 135 | mongo -u Jahid -p jahid123 --authenticationDatabase admin 136 | // now logged in 137 | ``` 138 | 139 | Assigning roles to the sub-admin(access permission to any single database) 140 | 141 | ```cpp 142 | > use shop 143 | switched to db shop 144 | 145 | db.createUser({user: 'jahid', pwd: 'jahid123', roles: ['readWrite']}) 146 | ``` 147 | 148 | Let authenticate the user 149 | 150 | ```cpp 151 | mongo -u jahid -p jahid123 --authenticationDatabase shop 152 | ``` 153 | 154 | ```cpp 155 | > use shop 156 | db.products.insertOne({name: 'book'}) 157 | { 158 | "acknowledged" : true, 159 | "insertedId" : ObjectId("5f3f747a8f6d2a27804cd909") 160 | } 161 | ``` 162 | 163 | # Updating Extending Roles to Other Database 164 | 165 | Every time when try to log in then have to switch the db 166 | 167 | Here basically readWrite replace 168 | 169 | ```cpp 170 | db.updateUser('jahid', {roles: ['readWrite', {role: 'readWrite', db: 'blog'}]}) 171 | ``` 172 | 173 | So first have to be an admin 174 | 175 | ```cpp 176 | > db.logout() 177 | 178 | > use admin 179 | switched to db admin 180 | 181 | db.auth('jahid', 'jahid123') 182 | ``` 183 | 184 | ```cpp 185 | > use shop 186 | db.updateUser('jahid', {roles: ['readWrite', {role: 'readWrite', db: 'blog'}]}) 187 | ``` 188 | 189 | Can see which database permission have 190 | 191 | ```cpp 192 | db.getUser('jahid') 193 | ``` 194 | 195 | ```cpp 196 | > db.getUser('jahid') 197 | { 198 | "_id" : "shop.Mijanur1", 199 | "userId" : UUID("b0abc10a-dba1-4609-aec7-f9714c046505"), 200 | "user" : "Mijanur1", 201 | "db" : "shop", 202 | "roles" : [ 203 | { 204 | "role" : "readWrite", 205 | "db" : "shop" 206 | }, 207 | { 208 | "role" : "readWrite", 209 | "db" : "blog" 210 | } 211 | ], 212 | "mechanisms" : [ 213 | "SCRAM-SHA-1", 214 
| "SCRAM-SHA-256" 215 | ] 216 | } 217 | ``` 218 | 219 | Now first login the database 220 | 221 | ```cpp 222 | mongo -u jahid -p jahid123 --authenticationDatabase shop 223 | ``` 224 | 225 | ```cpp 226 | > use blog 227 | switched to db blog 228 | > db.posts.insertOne({title: 'This works'}) 229 | { 230 | "acknowledged" : true, 231 | "insertedId" : ObjectId("5f3f7ad50d7fd790369f97ea") 232 | } 233 | ``` 234 | 235 | # Transport Encryption 236 | 237 | Client(app <=> MongoDb Driver --- Encrypted--)--> MongoDb Server 238 | 239 | In this part have an issue for the MongoDb version 4.0 with my tutorial 240 | 241 | There are couple of commands 242 | 243 | 1. openssl genrsa -out mongodb-test-ca.key 4096 244 | 2. openssl req -new -x509 -days 1826 -key mongodb-test-ca.key -out mongodb-test-ca.crt -config openssl-test-ca.cnf 245 | 3. openssl genrsa -out mongodb-test-ia.key 4096 246 | sudo mongod --tlsMode requireTLS --tlsCertificateKeyFile test-ca.pem 247 | 4. openssl x509 -sha256 -req -days 730 -in mongodb-test-ia.csr -CA mongodb-test-ca.crt -CAkey mongodb-test-ca.key -set_serial 01 -out mongodb-test-ia.crt -extfile openssl-test-ca.cnf -extensions v3_ca 248 | 5. cat mongodb-test-ca.crt mongodb-test-ia.crt > test-ca.pem 249 | 250 | To execute the database(command does not work properly) 251 | sudo mongod --tlsMode requireTLS --tlsCertificateKeyFile test-ca.pem 252 | mongo --tls --tlsCAFile test-ca.pem --host localhost 253 | -------------------Encryption at Rest------------------- 254 | 255 | Can encrypt full storage. 
This is for MongoDB Enterprise. 256 | 257 | Common Name --> have to use the web server address, but if working on localhost just write localhost 258 | 259 | otherwise you will get an error 260 | 261 | >> Storage << (Encrypted) 262 | 263 | ```cpp 264 | { 265 | email: 'test1@test.com', 266 | password: 'ad50rr4fdf84bfsdbsd44' <--- Encrypted/Hashed 267 | } 268 | 269 | { 270 | email: 'test2@test.com', 271 | password: 'a5ymb7fdf47fbryfvsfff' 272 | } 273 | ``` 274 | 275 | # Users & Roles 276 | 277 | 1. MongoDB uses a Role Based Access Control approach. 278 | 2. You create users on a database and then log in with credentials (against that database). 279 | 3. Users have no rights by default; you need to add roles to allow certain operations. 280 | 4. Permissions that are granted by roles ('Privileges') are only granted for the database the user was added to, unless you explicitly grant access to other databases. 281 | 5. Can use 'AnyDatabase' roles for cross-database access. 282 | 283 | # Encryption 284 | 285 | 1. Can encrypt data during transportation and at rest. 286 | 2. During transportation, can use TLS/SSL to encrypt data. 287 | 3. For production, should use SSL certificates issued by a certificate authority (NOT self-signed certificates) 288 | 4. For encryption at rest, can encrypt both the files that hold data (made simple with "MongoDB Enterprise") and the values inside documents. -------------------------------------------------------------------------------- /MongoDB/Testing.md: -------------------------------------------------------------------------------- 1 | We can use `mongodb-memory-server` to run MongoDB in memory for testing purposes. This is useful for running tests 2 | without having to install MongoDB on your machine. 
3 | 4 | 5 | 6 | # Resources 7 | * [Advanced Node.JS](https://interactivecares.com/courseDetails/246?title=Advanced_Node.JS) -------------------------------------------------------------------------------- /MongoDB/Transactions.md: -------------------------------------------------------------------------------- 1 | # Transactions 2 | 3 | Will delete user so we also have to delete all posts of that user. 4 | 5 | ``` 6 | User deletes Accounts 7 | Users Collection Posts Collection 8 | -------------------------------------------------------- 9 | | { User Document } -----------> { Post Document } | 10 | | \ | 11 | | related \ | 12 | | \--------> { Post Document } | 13 | | Should be deleted together | 14 | --------------------------------------------------------- 15 | ``` 16 | 17 | 18 | Always have to change IP address into MongoDB cloud cluster. First have access to the MongoDB cloud 19 | mongo "mongodb+srv://mytestingcluster.n7v1t.mongodb.net/" --username jahid 20 | 21 | ```cpp 22 | > use blog 23 | > db.users.insertOne({name: 'Max'}) 24 | > db.posts.insertMany([{title: 'A js post', views: 23, userId: ObjectId("5f4163d6526c4846e4c6fe1b")}, {title: 'Group discussion', views: 2, userId: ObjectId("5f4163d6526c4846e4c6fe1b")}]) 25 | ``` 26 | 27 | Have to execute the mongo session to work with the Transactions 28 | 29 | ```cpp 30 | const session = db.getMongo().startSession() 31 | session.startTransaction() 32 | ``` 33 | 34 | ```cpp 35 | const usersCol = session.getDatabase('blog').users 36 | const postsCol = session.getDatabase('blog').posts 37 | ``` 38 | 39 | This is basically will remove/delete the user 40 | 41 | ```cpp 42 | usersCol.deleteOne({_id: ObjectId("5f4163d6526c4846e4c6fe1b")}) 43 | ``` 44 | 45 | This command also successfully executes (this comes from the cloud, not the session), but we deleted the user before 46 | 47 | ```bash 48 | > postsCol.deleteMany({userId: ObjectId("5f4163d6526c4846e4c6fe1b")}) 49 | > usersCol.deleteOne({_id: 
ObjectId("5f4163d6526c4846e4c6fe1b")}) 50 | { "acknowledged" : true, "deletedCount" : 1 } 51 | ``` 52 | 53 | It was basically deleted from the session's view but not from the real server, as we have not committed the transaction yet. MongoDB just keeps tracking (or acknowledging) what it has to do in this transaction. 54 | 55 | ```bash 56 | db.users.find().pretty() 57 | { "_id" : ObjectId("5f4163d6526c4846e4c6fe1b"), "name" : "Max" } 58 | ``` 59 | 60 | To fully execute the delete on the cloud, we have to commit the transaction 61 | 62 | ```bash 63 | session.commitTransaction() 64 | ``` 65 | 66 | Now deleted from the cloud 67 | 68 | ```cpp 69 | db.users.find().pretty() 70 | ``` 71 | 72 | Can also abort the transaction --> then none of the pending operations are applied 73 | 74 | So these actions either all succeed together or all fail together. That is the idea behind transactions. 75 | 76 | This basically comes from atomicity. 77 | We get atomicity at the operation level, not just at the document level. 78 | So we need cross-operation consistency -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # MongoDB 2 | 3 | [Initial Concepts](readme.md) 4 | 5 | [Basic CRUD, Projection, Embedded/Nested Documents, Array](MongoDB/Basic_CRUD,_Projection,_Embedded_Nested_Documents.md) 6 | 7 | [Schema & Relations](MongoDB/Schema_&_Relations.md) 8 | 9 | [Exploring The Shell and The Server](MongoDB/Exploring_The_Shell_and_The_Server.md) 10 | 11 | [Create/Insert, Write concern, Atomicity](MongoDB/Create_Insert,_Write_concern,_Atomicity.md) 12 | 13 | [Read](MongoDB/Read.md) 14 | 15 | [Update](MongoDB/Update.md) 16 | 17 | [Delete](MongoDB/Delete.md) 18 | 19 | [Index and others](MongoDB/Index_and_Others.md) 20 | 21 | [Geospatial Data](MongoDB/Geospatial_Data.md) 22 | 23 | [Aggregation Framework](MongoDB/Aggregation_Framework.md) 24 | 25 | [Numeric Data](MongoDB/Numeric_Data.md) 26 | 27 | 
[Security](MongoDB/Security%202d5e71d574ce4b9d8e00b7531b088618.md) 28 | 29 | [Performance Fault Tolerancy Deployment](MongoDB/Performance_Fault_Tolerancy_Deployment.md) 30 | 31 | [Transactions](MongoDB/Transactions.md) 32 | 33 | [Performance](MongoDB/Performance.md) 34 | 35 | SKIPPED 36 | 37 | - Shell to drive 38 | - Introducing Stitch 39 | 40 | 41 | 42 | # Initial Concepts 43 | 44 | 58 | 59 | > **Normalizing**: Storing and distributing data in multiple tables where every table has a clear schema and use lots of relations(in SQL). 60 | > 61 | 62 | As MongoDB saves all in one single document so for fetching it does not need any complex operation like join(SQL) so MongoDB can fetch query is more simple, more flexible, and more efficient(as no need to merge collection/table) than SQL. 63 | 64 | MongoDB is popular for **read and write heavy** applications (like sensors which send data every second/online shop/blog). 65 | 66 | # Some very basic commands 67 | 68 | ### Start MongoDB on the shell 69 | 70 | ```powershell 71 | mongo 72 | ``` 73 | 74 | ### Show all databases(admin, config, and local are predefined there) 75 | 76 | ```powershell 77 | show dbs 78 | ``` 79 | 80 | ### Use existing database or create a new one 81 | 82 | Will connected with database name 'shop' if already exits or if the database is not exited yet MongoDB will create that 83 | 84 | ```powershell 85 | use shop 86 | ``` 87 | 88 | Output will be 89 | 90 | > switched to db shop 91 | > 92 | 93 | **Note:** If the database is not created previously this command will create that database but the shop database will not create instantly(as a result `show dbs` will not list shop db). When we will first add data to the shops database than MongoDB implicitly will create that database and also `show dbs` commands that will list the database. 94 | 95 | ### Get all documents of a collection. 96 | 97 | Will give all data as we add not add any argument. 
98 | 99 | ```powershell 100 | db.products.find() 101 | ``` 102 | 103 | ### Get JSON like format 104 | 105 | Will return exactly the same data as JSON like format which is easy to read. 106 | 107 | ```powershell 108 | db.products.find().pretty 109 | ``` 110 | 111 | ### Clear the screen 112 | 113 | ```powershell 114 | cls 115 | ``` 116 | 117 | ## MongoDB Ecosystem 118 | 119 | ### MongoDB Database 120 | 121 | 1. Self - Managed / Enterprise 122 | 1. CloudManager / OpsManager 123 | 2. Atlas(Cloud) 124 | 3. Mobile 125 | 126 | Others options 127 | 128 | 1. Compass 129 | 2. BI Connectors 130 | 3. MongoDB Charts 131 | 132 | ### Stitch 133 | 134 | 138 | 139 | That gives 140 | 141 | 145 | 146 | 150 | 151 | 155 | 156 | 160 | 161 | ## How MongoDB works 162 | 163 | ### Application 164 | 165 | - Frontend(UI) 166 | - Backend(Server) ⇒ Which interacts/communicates with MongoDB server using their corresponding drivers 167 | - Node.js 168 | - Java 169 | - Python 170 | - MongoDB Shell(does not depends on any specific programming language). 171 | 172 | ### Data 173 | 174 | - MongoDB server 175 | - MongoDB server use a storage engine default for MongoDB is **Wired Tiger**. Which access file/data where the data kept(database information collection, table, etc) 176 | - Storage Engine 177 | - Read + Write Data to Files(slow) 178 | - Read + Write Data in Memory(fast) ⇒ Memory(RAM) 179 | 180 | ### Documents Created Implicity 181 | 182 | JSON data converts into BSON data 183 | 184 | `{ 185 | "name" : "MAX", 186 | "age" : 29 187 | }` 188 | 189 | it converts into BSON 190 | 191 | BSON 192 | 193 | 1. Binary data 194 | 2. Extends JSON Types(e.g more detailed Number Types) 195 | 3. 
Efficient Storage 196 | 197 | ### MongoD service 198 | 199 | 205 | 206 | ### Start mongod process(default port is 27017) 207 | 208 | ```bash 209 | sudo mongod => To start in default port 210 | sudo mongod --port 27018 => can use if in any case default port 27017 not available 211 | ``` 212 | 213 | ### Start MongoDB on the server 214 | 215 | ```bash 216 | mongo => to start in default port 27017 217 | mongo --port 27018 => to start at 27018 port 218 | ``` --------------------------------------------------------------------------------