├── .github └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.org ├── TODO.org ├── benches └── keys_benchmark.rs ├── proptest-regressions ├── asyncresp.txt ├── bloom.txt └── engine.txt ├── redis_benchmark.sh ├── rustfmt.toml ├── scripts ├── example.x7 ├── project_euler.x7 └── semaphore.x7 └── src ├── asyncresp.rs ├── blocking.rs ├── bloom.rs ├── data_structures ├── mod.rs ├── receipt_map.rs ├── sorted_set.rs └── stack.rs ├── database.rs ├── hashes.rs ├── hyperloglog.rs ├── keys.rs ├── lib.rs ├── lists.rs ├── logger.rs ├── macros.rs ├── main.rs ├── misc.rs ├── ops.rs ├── scripting.rs ├── server.rs ├── sets.rs ├── sorted_sets.rs ├── stack.rs ├── startup.rs ├── state.rs ├── timeouts.rs └── types.rs /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Redis Oxide Pipeline 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | check: 7 | name: Check 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v1 11 | - uses: actions-rs/toolchain@v1 12 | with: 13 | profile: minimal 14 | toolchain: nightly 15 | override: true 16 | - uses: actions-rs/cargo@v1 17 | with: 18 | command: check 19 | 20 | test: 21 | name: Test Suite 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v1 25 | - uses: actions-rs/toolchain@v1 26 | with: 27 | profile: minimal 28 | toolchain: nightly 29 | override: true 30 | - uses: actions-rs/cargo@v1 31 | with: 32 | command: test 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "redis-oxide" 3 | version = "0.2.0" 4 | authors = ["David Briggs "] 5 | edition = "2018" 
6 | 7 | [lib] 8 | name = "redis_oxide" 9 | path = "src/lib.rs" 10 | 11 | [[bin]] 12 | name = "redis-oxide" 13 | path = "src/main.rs" 14 | 15 | [dependencies] 16 | tokio = { version = "1.15", features = ["full", "tracing"] } 17 | tokio-util = { version = "0.6.9", features = ["codec"] } 18 | shlex = "0.1.1" 19 | promptly = "0.1.5" 20 | rand = "0.7.2" 21 | bytes = { version = "1.0.0", features = ["serde"] } 22 | sloggers = "0.3.4" 23 | lazy_static = "1.4.0" 24 | slog = "2.5.2" 25 | serde = { version = "1.0.103", features = ["rc"] } 26 | serde_derive = "1.0.103" 27 | bincode = "1.2.0" 28 | structopt = "0.3.5" 29 | directories = "2.0.2" 30 | parking_lot = { version = "0.9.0", features = ["serde"] } 31 | rmp-serde = "0.14.0" 32 | spin = "0.5.2" 33 | growable-bloom-filter = "2.0.1" 34 | seahash = "3.0.6" 35 | futures = "0.3.1" 36 | futures-util = "0.3.1" 37 | serde_bytes = "0.11.3" 38 | dashmap = { version = "4.0.2", features = ["serde"] } 39 | memchr = "2.3.0" 40 | smallvec = "1.4.1" 41 | x7 = { git = "https://github.com/dpbriggs/x7.git", rev = "f1a410d0" } 42 | # x7 = { path = "../x7" } 43 | num-traits = "0.2.14" 44 | amadeus-streaming = "0.4.3" 45 | console-subscriber = "0.1.0" 46 | 47 | [dev-dependencies] 48 | pretty_assertions = "0.6.1" 49 | proptest = "0.9.4" 50 | criterion = "0.3.0" 51 | 52 | [[bench]] 53 | name = "keys_benchmark" 54 | harness = false 55 | 56 | # [profile.release] 57 | # debug = true -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 
12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. 
For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 
83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 
117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 
146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 
216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. 
If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 
336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 
428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | Redis-Oxide, the rusty redis clone 635 | Copyright (C) 2019 David Briggs 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | redis-oxide Copyright (C) 2019 David Briggs (email@dpbriggs.ca) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.org: -------------------------------------------------------------------------------- 1 | #+AUTHOR: David Briggs 2 | #+STARTUP: SHOWALL 3 | 4 | * Redis Oxide 5 | 6 | [[https://github.com/dpbriggs/redis-oxide/actions][https://github.com/dpbriggs/redis-oxide/workflows/Redis%20Oxide%20Pipeline/badge.svg]] 7 | 8 | A multi-threaded implementation of redis written in rust 🦀. 9 | 10 | This project is intended to be a drop-in replacement for redis. 11 | It's under construction at the moment. 
12 | 13 | [[https://i.imgur.com/8Zb0gu5.png][https://i.imgur.com/8Zb0gu5.png]] 14 | 15 | ** Design 16 | 17 | 18 | =redis-oxide= is a black-box multi-threaded re-implementation of redis, backed by [[https://tokio.rs/][tokio]]. 19 | It features data-structure key-space/lock granularity, written entirely in safe rust. 20 | It's currently protocol compatible with redis, so you should be able to test it out with your favourite tools. 21 | 22 | The multi-threaded nature has advantages and disadvantages. 23 | On one hand, =KEYS *= isn't particularly crippling for the server as it'll just keep a thread busy. 24 | On the other hand, there's some lock-juggling overhead, especially for writes, which messes with tokio. 25 | 26 | ** Building / Running 27 | 28 | There's currently no official release for the project. You can compile and install it yourself with the following command: 29 | 30 | : cargo install --git https://github.com/dpbriggs/redis-oxide 31 | 32 | Note: This project requires the rust nightly. You can use [[https://rustup.rs/][rustup]] to install it. 33 | 34 | Once it compiles you should be able to run it with =~ redis-oxide=. 35 | 36 | If you wish to download and run it yourself, you can do the following 37 | 38 | #+begin_example 39 | ~ git clone https://github.com/dpbriggs/redis-oxide 40 | ~ cd redis-oxide 41 | ~ cargo run 42 | #+end_example 43 | 44 | Then use your favorite redis client. Eg. 
=redis-cli=: 45 | 46 | #+begin_example 47 | ~ redis-cli 48 | 127.0.0.1:6379> set foo bar 49 | OK 50 | 127.0.0.1:6379> get foo 51 | "bar" 52 | #+end_example 53 | 54 | Or using the redis library for python: 55 | 56 | #+begin_src python 57 | import redis 58 | from pprint import pprint 59 | 60 | r = redis.Redis() 61 | r.set('foobar', 'foobar') 62 | pprint(r.get('foobar')) 63 | 64 | for i in range(100): 65 | r.rpush('list', i) 66 | 67 | list_res = r.lrange('list', 0, -1) 68 | 69 | pprint(list_res[0:3]) 70 | pprint(sum(map(int, list_res))) 71 | 72 | total = 0 73 | for i in range(100): 74 | total += int(r.lpop('list')) 75 | pprint(total) 76 | #+end_src 77 | 78 | Which will print: 79 | 80 | #+begin_src python 81 | b'foobar' 82 | [b'0', b'1', b'2'] 83 | 4950 84 | 4950 85 | #+end_src 86 | 87 | ** Things left to do 88 | 89 | *** Basic Datastructures 90 | 91 | - [X] Keys 92 | - [X] Sets 93 | - [X] Lists 94 | - [X] Hashes 95 | - [ ] HyperLogLog 96 | - [ ] Geo 97 | - [-] Sorted Sets 98 | - [X] Basic Functionality 99 | - [ ] Still need some operations 100 | - [ ] Strings 101 | 102 | We should solidify the above before working on the more complex bits, but contributions are welcome :) 103 | 104 | *** Redis Compatibility 105 | 106 | - [X] Resp / server 107 | - [ ] Database compatibility 108 | - [ ] Unsure if this is a good thing -- may be better to port existing dumps. 109 | - [ ] Blocking / Concurrent Ops (ttl/save-on-x-ops) 110 | - [ ] CLI / config compatibility 111 | - [ ] Authentication 112 | 113 | ** Contribution Guide 114 | 115 | Conduct: =Have fun, please don't be a jerk.= 116 | 117 | Contact: Make an issue or PR against this repo, or send an email to =david@dpbriggs.ca=. If you know of a better forum, please suggest it! 118 | 119 | NOTE: *DO NOT USE THE REDIS SOURCE CODE IN ANY WAY!* 120 | 121 | This project is under active development, so things are a little messy. 
122 | 123 | The general design of =redis-oxide= is: 124 | 125 | - A Command (=set foo bar=) is read off the socket and passed to the translate function in =src/ops.rs=. 126 | - The parser generates a =RedisValue=, which is the lingua franca of =redis-oxide=. 127 | - This gets converted to an =Ops::XYZ(XYZOps::Foobar(..))= enum object, which is consumed by the =op_interact= function. 128 | - A macro is used to provide automate this. 129 | - This operation is executed against the global =State= object (using the =op_interact= function) 130 | - This will return an =ReturnValue= type, which is a more convenient form of =RedisValue=. 131 | - This =ReturnValue= is converted and sent back to the client. 132 | 133 | Therefore, if you want to do something like implement =hashes=, you will need to: 134 | 135 | 1. Add a new struct member in =State=. 136 | 1. You first define the type: =type KeyHash = DashMap>= 137 | 2. Then add it to State: =pub hashes: KeyHash= 138 | 3. Define a new file for your data type, =src/hashes.rs=. 139 | 1. Keep your type definitions in =src/types.rs=! 140 | 4. Create an enum to track your commands, =op_variants! { HashOps, HGet(Key, Key), HSet(Key, Key, Value) }= 141 | 5. Implement parsing for your enum in =src/ops.rs=. 142 | 1. You should be able to follow the existing parsing infrastructure. Should just be extra entries in =translate_array= in =src/ops.rs=. 143 | 2. You will need to add your return type to the =ok!= macro. Just copy/paste an existing line. 144 | 3. You should return something like =ok!(HashOps::HSet(x, y, z))=. 145 | 4. A stretch goal is to automate parsing. 146 | 6. Implement a =async *_interact= for your type; I would follow existing implementations (eg. =src/keys.rs=). 147 | 1. I would keep the redis docs open, and play around with the commands in the web console (or wherever) to determine behavior. 148 | 2. Add a new match entry in the =async op_interact= function in =src/ops.rs=. 149 | 7. Test it! 
(follow existing testing bits; eg. =src/keys.rs=). 150 | 8. Please add the commands to the list below. 151 | 1. If you're using emacs, just fire up the server and evaluate the babel block below (see =README.org= source) 152 | 2. Alternatively, copy the script into a terminal and copy/paste the output below. (see raw =README.org=) 153 | 154 | ** Implemented Commands 155 | 156 | #+BEGIN_SRC python :results output raw :format org :exports results 157 | import redis 158 | 159 | r = redis.StrictRedis(decode_responses=True) 160 | 161 | all_commands = r.execute_command('printcmds') 162 | 163 | for command in all_commands: 164 | command_name, ops = command[0], command[1:] 165 | print(f'*** {command_name}\n') 166 | for op in ops: 167 | print(f'- ={op}=') 168 | print('\n') 169 | #+END_SRC 170 | 171 | #+RESULTS: 172 | *** KeyOps 173 | 174 | - =Set (Key, Value)= 175 | - =MSet (RVec<(Key, Value)>)= 176 | - =Get (Key)= 177 | - =MGet (RVec)= 178 | - =Del (RVec)= 179 | - =Rename (Key, Key)= 180 | - =RenameNx (Key, Key)= 181 | 182 | 183 | *** ListOps 184 | 185 | - =LIndex (Key, Index)= 186 | - =LLen (Key)= 187 | - =LPop (Key)= 188 | - =LPush (Key, RVec)= 189 | - =LPushX (Key, Value)= 190 | - =LRange (Key, Index, Index)= 191 | - =LSet (Key, Index, Value)= 192 | - =LTrim (Key, Index, Index)= 193 | - =RPop (Key)= 194 | - =RPush (Key, RVec)= 195 | - =RPushX (Key, Value)= 196 | - =RPopLPush (Key, Key)= 197 | - =BLPop (Key, UTimeout)= 198 | - =BRPop (Key, UTimeout)= 199 | 200 | 201 | *** HashOps 202 | 203 | - =HGet (Key, Key)= 204 | - =HSet (Key, Key, Value)= 205 | - =HExists (Key, Key)= 206 | - =HGetAll (Key)= 207 | - =HMGet (Key, RVec)= 208 | - =HKeys (Key)= 209 | - =HMSet (Key, RVec<(Key, Value)>)= 210 | - =HIncrBy (Key, Key, Count)= 211 | - =HLen (Key)= 212 | - =HDel (Key, RVec)= 213 | - =HVals (Key)= 214 | - =HStrLen (Key, Key)= 215 | - =HSetNX (Key, Key, Value)= 216 | 217 | 218 | *** SetOps 219 | 220 | - =SAdd (Key, RVec)= 221 | - =SCard (Key)= 222 | - =SDiff (RVec)= 223 | - 
=SDiffStore (Key, RVec)= 224 | - =SInter (RVec)= 225 | - =SInterStore (Key, RVec)= 226 | - =SIsMember (Key, Value)= 227 | - =SMembers (Key)= 228 | - =SMove (Key, Key, Value)= 229 | - =SPop (Key, Option)= 230 | - =SRandMembers (Key, Option)= 231 | - =SRem (Key, RVec)= 232 | - =SUnion (RVec)= 233 | - =SUnionStore (Key, RVec)= 234 | 235 | 236 | *** ZSetOps 237 | 238 | - =ZAdd (Key, RVec<(Score, Key)>)= 239 | - =ZRem (Key, RVec)= 240 | - =ZRange (Key, Score, Score)= 241 | - =ZCard (Key)= 242 | - =ZScore (Key, Key)= 243 | - =ZPopMax (Key, Count)= 244 | - =ZPopMin (Key, Count)= 245 | - =ZRank (Key, Key)= 246 | 247 | 248 | *** BloomOps 249 | 250 | - =BInsert (Key, Value)= 251 | - =BContains (Key, Value)= 252 | 253 | 254 | *** StackOps 255 | 256 | - =STPush (Key, Value)= 257 | - =STPop (Key)= 258 | - =STPeek (Key)= 259 | - =STSize (Key)= 260 | 261 | 262 | *** HyperLogLogOps 263 | 264 | - =PfAdd (Key, RVec)= 265 | - =PfCount (RVec)= 266 | - =PfMerge (Key, RVec)= 267 | 268 | 269 | *** MiscOps 270 | 271 | - =Keys ()= 272 | - =Exists (Vec)= 273 | - =Pong ()= 274 | - =FlushAll ()= 275 | - =FlushDB ()= 276 | - =Echo (Value)= 277 | - =PrintCmds ()= 278 | - =Select (Index)= 279 | - =Script (Value)= 280 | - =EmbeddedScript (Value, Vec)= 281 | - =Info ()= 282 | -------------------------------------------------------------------------------- /TODO.org: -------------------------------------------------------------------------------- 1 | * Ideas 2 | 3 | ** Use sled for actual paging to disk 4 | - Use it for the actual database? 5 | - Actually we lose some features we'd want. 
We'll just represent gs 6 | -------------------------------------------------------------------------------- /benches/keys_benchmark.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use bytes::BytesMut; 3 | use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; 4 | use redis_oxide::asyncresp::RespParser; 5 | use redis_oxide::keys::{key_interact, KeyOps}; 6 | use redis_oxide::ops::{op_interact, translate}; 7 | use redis_oxide::types::{RedisValueRef, ReturnValue, State}; 8 | use std::sync::Arc; 9 | use tokio_util::codec::Decoder; 10 | 11 | fn bench_parsing(c: &mut Criterion) { 12 | let buf: String = std::iter::repeat("a").take(100).collect(); 13 | let mut decoder = RespParser::default(); 14 | let mut group = c.benchmark_group("decoding"); 15 | group.throughput(Throughput::Bytes(buf.len() as u64 + 3)); 16 | group.bench_function("simple_string", |b| { 17 | let _ = b.iter(|| { 18 | let mut buf = BytesMut::from(format!("+{}\r\n", buf).as_str()); 19 | decoder 20 | .decode(black_box(&mut buf)) 21 | .expect("parsing to work"); 22 | }); 23 | }); 24 | group.finish(); 25 | } 26 | 27 | fn bench_translate(c: &mut Criterion) { 28 | let value: Bytes = std::iter::repeat("a").take(200).collect::().into(); 29 | let value = RedisValueRef::Array(vec![ 30 | RedisValueRef::SimpleString(Bytes::from_static(b"set")), 31 | RedisValueRef::SimpleString(Bytes::from_static(b"foo")), 32 | RedisValueRef::SimpleString(value), 33 | ]); 34 | let mut group = c.benchmark_group("translate"); 35 | group.throughput(Throughput::Bytes(212)); 36 | group.bench_function("translate", |b| { 37 | b.iter(|| translate(black_box(value.clone()))); 38 | }); 39 | group.finish(); 40 | } 41 | 42 | fn bench_interact(c: &mut Criterion) { 43 | let s = Arc::new(State::default()); 44 | c.bench_function("KeyOps::Set", |b| { 45 | b.iter(|| async { 46 | let f = KeyOps::Set(Bytes::from_static(b"foo"), Bytes::from_static(b"bar")); 47 | 
key_interact(black_box(f), black_box(s.clone())).await; 48 | }); 49 | }); 50 | } 51 | 52 | fn bench_full_life_cycle(c: &mut Criterion) { 53 | c.bench_function("full_life_cycle", |b| { 54 | b.iter(|| async { 55 | let mut decoder = RespParser::default(); 56 | let s = Arc::new(State::default()); 57 | let scc = "*3\r\n$3\r\nset\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"; 58 | let mut buf = BytesMut::from(format!("{}", scc).as_str()); 59 | let res = decoder 60 | .decode(black_box(&mut buf)) 61 | .expect("parsing to work") 62 | .unwrap(); 63 | let op = translate(black_box(res)).unwrap(); 64 | let res = op_interact(black_box(op), black_box(s.clone())).await; 65 | assert_eq!(res, ReturnValue::Ok); 66 | }); 67 | }); 68 | } 69 | 70 | criterion_group!( 71 | benches, 72 | bench_parsing, 73 | bench_translate, 74 | bench_interact, 75 | bench_full_life_cycle 76 | ); 77 | criterion_main!(benches); 78 | -------------------------------------------------------------------------------- /proptest-regressions/asyncresp.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 
7 | cc b3801ce41718fe24743eb6a12356b685979de361af48a6d0265c38e51f4d81fe # shrinks to input = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 8 | cc 07017a9b78f39b5504241540c0fc87c43791af9b384af258595da54973f1fb99 # shrinks to input = " ⷘa𞸩 𘠀𐏈𞣇0 0\u{1e000}લ𝒮 a 0𛱰A®aⷐ" 9 | -------------------------------------------------------------------------------- /proptest-regressions/bloom.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 968b3144caca87bef21a208f357f94042bfd087cdc5e00d5353865bb94b7c6ba # shrinks to key = [], v = [] 8 | -------------------------------------------------------------------------------- /proptest-regressions/engine.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 
4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc d40be14e09a15adc77372690c4a2244737679dc496ec12d6e767e0c6a25250ba # shrinks to l = [], unused = [] 8 | -------------------------------------------------------------------------------- /redis_benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | redis-benchmark -t set,get,incr,lpush,rpush,lpop,rpop,sadd,hset,spop,zadd,zpopmin,lrange,lrange_100,lrange_300,lrange_500,lrange_600,lrange,lrange_100,lrange,lrange_300,lrange,lrange_500,lrange,lrange_600,mset 3 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2018" -------------------------------------------------------------------------------- /scripts/example.x7: -------------------------------------------------------------------------------- 1 | (def-redis-fn sum-list 2 | (li) 3 | (apply + (map int (redis "lrange" li 0 -1)))) 4 | 5 | (defn any? 6 | (p l) 7 | (reduce (fn (a b) (or a b)) (map p l))) 8 | 9 | (def-redis-fn is-prime? 10 | (key) 11 | (bind 12 | (n (int (redis "get" key))) 13 | (not 14 | (any? 
(fn (x) (= 0 (% n x))) 15 | (range 2 n))))) 16 | 17 | (def-redis-fn sum-args 18 | (& args) 19 | (apply + (map int args))) 20 | -------------------------------------------------------------------------------- /scripts/project_euler.x7: -------------------------------------------------------------------------------- 1 | (def-redis-fn eulerp1 2 | (n) 3 | (reduce + 4 | (filter 5 | (fn (x) (or (= 0 (% x 3)) (= 0 (% x 5)))) 6 | (range (int n))))) 7 | -------------------------------------------------------------------------------- /scripts/semaphore.x7: -------------------------------------------------------------------------------- 1 | (defn repeat 2 | "Generate a list of length n by falling f repeatedly." 3 | (f n) 4 | (map (fn (ignored) (f)) (range n))) 5 | 6 | (def semaphore--name-segment-length 8) 7 | (def semaphore--name-segment-count 5) 8 | 9 | (defn semaphore--random-segment 10 | "Generate a single segment for the semaphore." 11 | () 12 | (apply + (repeat (fn () (str (random_int 0 10))) semaphore--name-segment-length))) 13 | 14 | (defn semaphore--random-name 15 | "Generate a name for the semaphore. Looks like 01234567-12121212-..." 
16 | () 17 | (bind 18 | (seq (repeat 19 | (fn () (semaphore--random-segment)) 20 | semaphore--name-segment-count)) 21 | (+ (head seq) 22 | (apply + (map (fn (seg) (+ "-" seg)) (tail seq)))))) 23 | 24 | ;; Make it easy to debug this file when not running inside of redis-oxide 25 | (if (not (ident-exists def-redis-fn)) 26 | (do 27 | (println "In debug mode!") 28 | (def def-redis-fn defn)) 29 | ()) 30 | 31 | (def-redis-fn sema/new 32 | () 33 | (semaphore--random-name)) 34 | 35 | (def-redis-fn sema/inc 36 | (sema-name) 37 | (redis "lpush" sema-name "-")) 38 | 39 | (def-redis-fn sema/dec 40 | (sema-name duration) 41 | (redis "blpop" sema-name duration)) 42 | -------------------------------------------------------------------------------- /src/asyncresp.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use memchr::memchr; 3 | use std::convert::From; 4 | use std::io; 5 | use std::str; 6 | 7 | use crate::types::{RedisValueRef, NULL_ARRAY, NULL_BULK_STRING}; 8 | 9 | use bytes::BytesMut; 10 | use tokio_util::codec::{Decoder, Encoder}; 11 | 12 | #[derive(Debug)] 13 | pub enum RESPError { 14 | UnexpectedEnd, 15 | UnknownStartingByte, 16 | IOError(std::io::Error), 17 | IntParseFailure, 18 | BadBulkStringSize(i64), 19 | BadArraySize(i64), 20 | } 21 | 22 | impl From for RESPError { 23 | fn from(e: std::io::Error) -> RESPError { 24 | RESPError::IOError(e) 25 | } 26 | } 27 | 28 | #[derive(Default)] 29 | pub struct RespParser; 30 | 31 | type RedisResult = Result, RESPError>; 32 | 33 | enum RedisBufSplit { 34 | String(BufSplit), 35 | Error(BufSplit), 36 | Array(Vec), 37 | NullBulkString, 38 | NullArray, 39 | Int(i64), 40 | } 41 | 42 | impl RedisBufSplit { 43 | fn redis_value(self, buf: &Bytes) -> RedisValueRef { 44 | match self { 45 | RedisBufSplit::String(bfs) => RedisValueRef::BulkString(bfs.as_bytes(buf)), 46 | RedisBufSplit::Error(bfs) => RedisValueRef::Error(bfs.as_bytes(buf)), 47 | RedisBufSplit::Array(arr) => { 48 | 
RedisValueRef::Array(arr.into_iter().map(|bfs| bfs.redis_value(buf)).collect()) 49 | } 50 | RedisBufSplit::NullArray => RedisValueRef::NullArray, 51 | RedisBufSplit::NullBulkString => RedisValueRef::NullBulkString, 52 | RedisBufSplit::Int(i) => RedisValueRef::Int(i), 53 | } 54 | } 55 | } 56 | 57 | /// Fundamental struct for viewing byte slices 58 | /// 59 | /// Used for zero-copy redis values. 60 | struct BufSplit(usize, usize); 61 | 62 | impl BufSplit { 63 | /// Get a lifetime appropriate slice of the underlying buffer. 64 | /// 65 | /// Constant time. 66 | #[inline] 67 | fn as_slice<'a>(&self, buf: &'a BytesMut) -> &'a [u8] { 68 | &buf[self.0..self.1] 69 | } 70 | 71 | /// Get a Bytes object representing the appropriate slice 72 | /// of bytes. 73 | /// 74 | /// Constant time. 75 | #[inline] 76 | fn as_bytes(&self, buf: &Bytes) -> Bytes { 77 | buf.slice(self.0..self.1) 78 | } 79 | } 80 | 81 | #[inline] 82 | fn word(buf: &BytesMut, pos: usize) -> Option<(usize, BufSplit)> { 83 | if buf.len() <= pos { 84 | return None; 85 | } 86 | memchr(b'\r', &buf[pos..]).and_then(|end| { 87 | if end + 1 < buf.len() { 88 | Some((pos + end + 2, BufSplit(pos, pos + end))) 89 | } else { 90 | None 91 | } 92 | }) 93 | } 94 | 95 | fn int(buf: &BytesMut, pos: usize) -> Result, RESPError> { 96 | match word(buf, pos) { 97 | Some((pos, word)) => { 98 | let s = str::from_utf8(word.as_slice(buf)).map_err(|_| RESPError::IntParseFailure)?; 99 | let i = s.parse().map_err(|_| RESPError::IntParseFailure)?; 100 | Ok(Some((pos, i))) 101 | } 102 | None => Ok(None), 103 | } 104 | } 105 | 106 | fn bulk_string(buf: &BytesMut, pos: usize) -> RedisResult { 107 | match int(buf, pos)? 
{ 108 | Some((pos, -1)) => Ok(Some((pos, RedisBufSplit::NullBulkString))), 109 | Some((pos, size)) if size >= 0 => { 110 | let total_size = pos + size as usize; 111 | if buf.len() < total_size + 2 { 112 | Ok(None) 113 | } else { 114 | let bb = RedisBufSplit::String(BufSplit(pos, total_size)); 115 | Ok(Some((total_size + 2, bb))) 116 | } 117 | } 118 | Some((_pos, bad_size)) => Err(RESPError::BadBulkStringSize(bad_size)), 119 | None => Ok(None), 120 | } 121 | } 122 | 123 | #[allow(clippy::unnecessary_wraps)] 124 | fn simple_string(buf: &BytesMut, pos: usize) -> RedisResult { 125 | Ok(word(buf, pos).map(|(pos, word)| (pos, RedisBufSplit::String(word)))) 126 | } 127 | 128 | #[allow(clippy::unnecessary_wraps)] 129 | fn error(buf: &BytesMut, pos: usize) -> RedisResult { 130 | Ok(word(buf, pos).map(|(pos, word)| (pos, RedisBufSplit::Error(word)))) 131 | } 132 | 133 | fn resp_int(buf: &BytesMut, pos: usize) -> RedisResult { 134 | Ok(int(buf, pos)?.map(|(pos, int)| (pos, RedisBufSplit::Int(int)))) 135 | } 136 | 137 | fn array(buf: &BytesMut, pos: usize) -> RedisResult { 138 | match int(buf, pos)? { 139 | None => Ok(None), 140 | Some((pos, -1)) => Ok(Some((pos, RedisBufSplit::NullArray))), 141 | Some((pos, num_elements)) if num_elements >= 0 => { 142 | let mut values = Vec::with_capacity(num_elements as usize); 143 | let mut curr_pos = pos; 144 | for _ in 0..num_elements { 145 | match parse(buf, curr_pos)? 
{ 146 | Some((new_pos, value)) => { 147 | curr_pos = new_pos; 148 | values.push(value); 149 | } 150 | None => return Ok(None), 151 | } 152 | } 153 | Ok(Some((curr_pos, RedisBufSplit::Array(values)))) 154 | } 155 | Some((_pos, bad_num_elements)) => Err(RESPError::BadArraySize(bad_num_elements)), 156 | } 157 | } 158 | 159 | /* Dispatch on the RESP type byte at `pos`. Returns Ok(None) when the buffer does not yet hold a complete frame, so the Decoder can wait for more bytes. */ 160 | fn parse(buf: &BytesMut, pos: usize) -> RedisResult { 161 | /* FIX: was `buf.is_empty()`, which only protects the pos == 0 call from `decode`. During array parsing, `array` re-enters `parse` with curr_pos advanced past each element; a partial frame such as "*2\r\n:1\r\n" leaves curr_pos == buf.len(), and `buf[pos]` then panics with an out-of-bounds index. Bounds-check pos itself and report "need more bytes" instead. */ 162 | if buf.len() <= pos { 163 | return Ok(None); 164 | } 165 | 166 | match buf[pos] { 167 | b'+' => simple_string(buf, pos + 1), 168 | b'-' => error(buf, pos + 1), 169 | b'$' => bulk_string(buf, pos + 1), 170 | b':' => resp_int(buf, pos + 1), 171 | b'*' => array(buf, pos + 1), 172 | _ => Err(RESPError::UnknownStartingByte), 173 | } 174 | } 175 | 176 | impl Decoder for RespParser { 177 | type Item = RedisValueRef; 178 | type Error = RESPError; 179 | fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { 180 | if buf.is_empty() { 181 | return Ok(None); 182 | } 183 | 184 | match parse(buf, 0)? { 185 | Some((pos, value)) => { 186 | let our_data = buf.split_to(pos); 187 | Ok(Some(value.redis_value(&our_data.freeze()))) 188 | } 189 | None => Ok(None), 190 | } 191 | } 192 | } 193 | 194 | impl Encoder for RespParser { 195 | type Error = io::Error; 196 | 197 | fn encode(&mut self, item: RedisValueRef, dst: &mut BytesMut) -> io::Result<()> { 198 | write_redis_value(item, dst); 199 | Ok(()) 200 | } 201 | } 202 | 203 | fn write_redis_value(item: RedisValueRef, dst: &mut BytesMut) { 204 | match item { 205 | RedisValueRef::Error(e) => { 206 | dst.extend_from_slice(b"-"); 207 | dst.extend_from_slice(&e); 208 | dst.extend_from_slice(b"\r\n"); 209 | } 210 | RedisValueRef::ErrorMsg(e) => { 211 | dst.extend_from_slice(b"-"); 212 | dst.extend_from_slice(&e); 213 | dst.extend_from_slice(b"\r\n"); 214 | } 215 | RedisValueRef::SimpleString(s) => { 216 | dst.extend_from_slice(b"+"); 217 | dst.extend_from_slice(&s); 218 | dst.extend_from_slice(b"\r\n"); 219 | } 220 | RedisValueRef::BulkString(s) => {
dst.extend_from_slice(b"$"); 220 | dst.extend_from_slice(s.len().to_string().as_bytes()); 221 | dst.extend_from_slice(b"\r\n"); 222 | dst.extend_from_slice(&s); 223 | dst.extend_from_slice(b"\r\n"); 224 | } 225 | RedisValueRef::Array(array) => { 226 | dst.extend_from_slice(b"*"); 227 | dst.extend_from_slice(array.len().to_string().as_bytes()); 228 | dst.extend_from_slice(b"\r\n"); 229 | for redis_value in array { 230 | write_redis_value(redis_value, dst); 231 | } 232 | } 233 | RedisValueRef::Int(i) => { 234 | dst.extend_from_slice(b":"); 235 | dst.extend_from_slice(i.to_string().as_bytes()); 236 | dst.extend_from_slice(b"\r\n"); 237 | } 238 | RedisValueRef::NullArray => dst.extend_from_slice(NULL_ARRAY.as_bytes()), 239 | RedisValueRef::NullBulkString => dst.extend_from_slice(NULL_BULK_STRING.as_bytes()), 240 | } 241 | } 242 | 243 | #[cfg(test)] 244 | mod resp_parser_tests { 245 | use crate::asyncresp::RespParser; 246 | use crate::types::{RedisValueRef, Value}; 247 | use bytes::{Bytes, BytesMut}; 248 | use tokio_util::codec::{Decoder, Encoder}; 249 | 250 | fn generic_test(input: &'static str, output: RedisValueRef) { 251 | let mut decoder = RespParser::default(); 252 | let result_read = decoder.decode(&mut BytesMut::from(input)); 253 | 254 | let mut encoder = RespParser::default(); 255 | let mut buf = BytesMut::new(); 256 | let result_write = encoder.encode(output.clone(), &mut buf); 257 | 258 | assert!( 259 | result_write.as_ref().is_ok(), 260 | "{:?}", 261 | result_write.unwrap_err() 262 | ); 263 | 264 | assert_eq!(input.as_bytes(), buf.as_ref()); 265 | 266 | assert!( 267 | result_read.as_ref().is_ok(), 268 | "{:?}", 269 | result_read.unwrap_err() 270 | ); 271 | // let values = result_read.unwrap().unwrap(); 272 | 273 | // let generic_arr_test_case = vec![output.clone(), output.clone()]; 274 | // let doubled = input.to_owned() + &input.to_owned(); 275 | 276 | // assert_eq!(output, values); 277 | // generic_test_arr(&doubled, generic_arr_test_case) 278 | } 279 | 
280 | fn generic_test_arr(input: &str, output: Vec) { 281 | // TODO: Try to make this occur randomly 282 | let first: usize = input.len() / 2; 283 | let second = input.len() - first; 284 | let mut first = BytesMut::from(&input[0..=first]); 285 | let mut second = Some(BytesMut::from(&input[second..])); 286 | 287 | let mut decoder = RespParser::default(); 288 | let mut res: Vec = Vec::new(); 289 | loop { 290 | match decoder.decode(&mut first) { 291 | Ok(Some(value)) => { 292 | res.push(value.into()); 293 | break; 294 | } 295 | Ok(None) => { 296 | if let None = second { 297 | panic!("Test expected more bytes than expected!"); 298 | } 299 | first.extend(second.unwrap()); 300 | second = None; 301 | } 302 | Err(e) => panic!("Should not error, {:?}", e), 303 | } 304 | } 305 | if let Some(second) = second { 306 | first.extend(second); 307 | } 308 | loop { 309 | match decoder.decode(&mut first) { 310 | Ok(Some(value)) => { 311 | res.push(value.into()); 312 | break; 313 | } 314 | Err(e) => panic!("Should not error, {:?}", e), 315 | _ => break, 316 | } 317 | } 318 | assert_eq!(output, res); 319 | } 320 | 321 | fn ezs() -> Value { 322 | Bytes::from_static(b"hello") 323 | } 324 | 325 | // XXX: Simple String has been removed. 
326 | // #[test] 327 | // fn test_simple_string() { 328 | // let t = RedisValue::BulkString(ezs()); 329 | // let s = "+hello\r\n"; 330 | // generic_test(s, t); 331 | 332 | // let t0 = RedisValue::BulkString(ezs()); 333 | // let t1 = RedisValue::BulkString("abcdefghijklmnopqrstuvwxyz".as_bytes().to_vec()); 334 | // let s = "+hello\r\n+abcdefghijklmnopqrstuvwxyz\r\n"; 335 | // generic_test_arr(s, vec![t0, t1]); 336 | // } 337 | 338 | #[test] 339 | fn test_error() { 340 | let t = RedisValueRef::Error(ezs()); 341 | let s = "-hello\r\n"; 342 | generic_test(s, t); 343 | 344 | let t0 = RedisValueRef::Error(Bytes::from_static(b"abcdefghijklmnopqrstuvwxyz")); 345 | let t1 = RedisValueRef::Error(ezs()); 346 | let s = "-abcdefghijklmnopqrstuvwxyz\r\n-hello\r\n"; 347 | generic_test_arr(s, vec![t0, t1]); 348 | } 349 | 350 | #[test] 351 | fn test_bulk_string() { 352 | let t = RedisValueRef::BulkString(ezs()); 353 | let s = "$5\r\nhello\r\n"; 354 | generic_test(s, t); 355 | 356 | let t = RedisValueRef::BulkString(Bytes::from_static(b"")); 357 | let s = "$0\r\n\r\n"; 358 | generic_test(s, t); 359 | } 360 | 361 | #[test] 362 | fn test_int() { 363 | let t = RedisValueRef::Int(0); 364 | let s = ":0\r\n"; 365 | generic_test(s, t); 366 | 367 | let t = RedisValueRef::Int(123); 368 | let s = ":123\r\n"; 369 | generic_test(s, t); 370 | 371 | let t = RedisValueRef::Int(-123); 372 | let s = ":-123\r\n"; 373 | generic_test(s, t); 374 | } 375 | 376 | #[test] 377 | fn test_array() { 378 | let t = RedisValueRef::Array(vec![]); 379 | let s = "*0\r\n"; 380 | generic_test(s, t); 381 | 382 | let inner = vec![ 383 | RedisValueRef::BulkString(Bytes::from_static(b"foo")), 384 | RedisValueRef::BulkString(Bytes::from_static(b"bar")), 385 | ]; 386 | let t = RedisValueRef::Array(inner); 387 | let s = "*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"; 388 | generic_test(s, t); 389 | 390 | let inner = vec![ 391 | RedisValueRef::Int(1), 392 | RedisValueRef::Int(2), 393 | RedisValueRef::Int(3), 394 | ]; 395 | let t = 
RedisValueRef::Array(inner); 396 | let s = "*3\r\n:1\r\n:2\r\n:3\r\n"; 397 | generic_test(s, t); 398 | 399 | let inner = vec![ 400 | RedisValueRef::Int(1), 401 | RedisValueRef::Int(2), 402 | RedisValueRef::Int(3), 403 | RedisValueRef::Int(4), 404 | RedisValueRef::BulkString(Bytes::from_static(b"foobar")), 405 | ]; 406 | let t = RedisValueRef::Array(inner); 407 | let s = "*5\r\n:1\r\n:2\r\n:3\r\n:4\r\n$6\r\nfoobar\r\n"; 408 | generic_test(s, t); 409 | 410 | let inner = vec![ 411 | RedisValueRef::Array(vec![ 412 | RedisValueRef::Int(1), 413 | RedisValueRef::Int(2), 414 | RedisValueRef::Int(3), 415 | ]), 416 | RedisValueRef::Array(vec![ 417 | RedisValueRef::BulkString(Bytes::from_static(b"Foo")), 418 | RedisValueRef::Error(Bytes::from_static(b"Bar")), 419 | ]), 420 | ]; 421 | let t = RedisValueRef::Array(inner); 422 | let s = "*2\r\n*3\r\n:1\r\n:2\r\n:3\r\n*2\r\n$3\r\nFoo\r\n-Bar\r\n"; 423 | generic_test(s, t); 424 | 425 | let inner = vec![ 426 | RedisValueRef::BulkString(Bytes::from_static(b"foo")), 427 | RedisValueRef::NullBulkString, 428 | RedisValueRef::BulkString(Bytes::from_static(b"bar")), 429 | ]; 430 | let t = RedisValueRef::Array(inner); 431 | let s = "*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n"; 432 | generic_test(s, t); 433 | 434 | let t = RedisValueRef::NullArray; 435 | let s = "*-1\r\n"; 436 | generic_test(s, t); 437 | } 438 | } 439 | -------------------------------------------------------------------------------- /src/blocking.rs: -------------------------------------------------------------------------------- 1 | use crate::data_structures::receipt_map::{KeyTypes, Receipt}; 2 | use crate::types::{Key, ReturnValue, StateRef}; 3 | 4 | use std::future::Future; 5 | use std::pin::Pin; 6 | use std::task::{Context, Poll}; 7 | 8 | pub type YieldingFn = Box Option + Send>; 9 | 10 | pub struct KeyBlocking { 11 | f: Box Option + Send>, 12 | state: StateRef, 13 | key: Key, 14 | receipt: Receipt, 15 | } 16 | 17 | impl KeyBlocking { 18 | pub fn new(f: YieldingFn, 
state: StateRef, key: Key, receipt: Receipt) -> KeyBlocking { 19 | KeyBlocking { 20 | f, 21 | state, 22 | key, 23 | receipt, 24 | } 25 | } 26 | } 27 | 28 | impl Future for KeyBlocking { 29 | type Output = ReturnValue; 30 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 31 | if self.state.receipt_timed_out(self.receipt) { 32 | return Poll::Ready(ReturnValue::Nil); 33 | } 34 | match (self.f)() { 35 | Some(ret) => Poll::Ready(ret), 36 | None => { 37 | let mut rm = self.state.reciept_map.lock(); 38 | rm.insert(self.receipt, cx.waker().clone(), KeyTypes::list(&self.key)); 39 | Poll::Pending 40 | } 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/bloom.rs: -------------------------------------------------------------------------------- 1 | use crate::types::{Key, RedisBool, ReturnValue, StateRef, Value}; 2 | use crate::{make_reader, op_variants}; 3 | use growable_bloom_filter::GrowableBloom; 4 | 5 | op_variants! { 6 | BloomOps, 7 | BInsert(Key, Value), 8 | BContains(Key, Value) 9 | } 10 | 11 | const DESIRED_FAILURE_RATE: f64 = 0.05; 12 | const EST_INSERTS: usize = 10; 13 | 14 | make_reader!(blooms, read_blooms); 15 | 16 | pub async fn bloom_interact(bloom_op: BloomOps, state: StateRef) -> ReturnValue { 17 | match bloom_op { 18 | BloomOps::BInsert(bloom_key, value) => { 19 | state 20 | .blooms 21 | .entry(bloom_key) 22 | .or_insert_with(|| GrowableBloom::new(DESIRED_FAILURE_RATE, EST_INSERTS)) 23 | .insert(value); 24 | ReturnValue::Ok 25 | } 26 | BloomOps::BContains(bloom_key, value) => read_blooms!(state, &bloom_key) 27 | .map(|bloom| bloom.contains(value) as RedisBool) 28 | .unwrap_or(0) 29 | .into(), 30 | } 31 | } 32 | 33 | #[cfg(test)] 34 | mod test_bloom { 35 | use crate::bloom::{bloom_interact, BloomOps}; 36 | use crate::types::{ReturnValue, State}; 37 | use bytes::Bytes; 38 | use std::sync::Arc; 39 | 40 | #[tokio::test] 41 | async fn test_insert() { 42 | let (key, v) = 
(Bytes::from_static(b"key"), Bytes::from_static(b"v")); 43 | let eng = Arc::new(State::default()); 44 | let res = bloom_interact(BloomOps::BInsert(key, v), eng.clone()).await; 45 | assert_eq!(res, ReturnValue::Ok); 46 | } 47 | 48 | #[tokio::test] 49 | async fn test_contains() { 50 | let (key, v) = (Bytes::from_static(b"key"), Bytes::from_static(b"v")); 51 | let eng = Arc::new(State::default()); 52 | let res = bloom_interact(BloomOps::BContains(key.clone(), v.clone()), eng.clone()).await; 53 | assert_eq!(res, ReturnValue::IntRes(0)); 54 | bloom_interact(BloomOps::BInsert(key.clone(), v.clone()), eng.clone()).await; 55 | let res = bloom_interact(BloomOps::BContains(key, v), eng.clone()).await; 56 | assert_eq!(res, ReturnValue::IntRes(1)); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/data_structures/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod receipt_map; 2 | pub mod sorted_set; 3 | pub mod stack; 4 | -------------------------------------------------------------------------------- /src/data_structures/receipt_map.rs: -------------------------------------------------------------------------------- 1 | use seahash::hash; 2 | use std::collections::HashMap; 3 | use std::collections::HashSet; 4 | use std::task::Waker; 5 | 6 | pub type Receipt = u32; 7 | 8 | #[derive(Hash, Debug, PartialEq, Eq)] 9 | pub enum KeyTypes { 10 | List(u64), 11 | } 12 | 13 | impl KeyTypes { 14 | pub fn list(key: &[u8]) -> KeyTypes { 15 | KeyTypes::List(hash(key)) 16 | } 17 | } 18 | 19 | #[derive(Default, Debug)] 20 | pub struct RecieptMap { 21 | counter: Receipt, 22 | wakers: HashMap, 23 | timed_out: HashSet, 24 | keys: HashMap>, 25 | } 26 | 27 | impl RecieptMap { 28 | pub fn get_receipt(&mut self) -> Receipt { 29 | self.counter += 1; 30 | self.counter 31 | } 32 | 33 | pub fn insert(&mut self, receipt: Receipt, item: Waker, key: KeyTypes) { 34 | self.wakers.insert(receipt, item); 35 
| self.keys.entry(key).or_default().push(receipt); 36 | } 37 | 38 | pub fn receipt_timed_out(&self, receipt: Receipt) -> bool { 39 | self.timed_out.contains(&receipt) 40 | } 41 | 42 | pub fn wake_with_key(&mut self, key: KeyTypes) { 43 | let v = self.keys.get_mut(&key); 44 | if v.is_none() { 45 | return; 46 | } 47 | let v = v.unwrap(); 48 | while let Some(receipt) = v.pop() { 49 | match self.wakers.remove(&receipt) { 50 | Some(waker) => { 51 | waker.wake(); 52 | break; 53 | } 54 | None => continue, 55 | }; 56 | } 57 | } 58 | 59 | pub fn timeout_receipt(&mut self, receipt: Receipt) { 60 | self.timed_out.insert(receipt); 61 | if let Some(waker) = self.wakers.remove(&receipt) { 62 | waker.wake(); 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/data_structures/sorted_set.rs: -------------------------------------------------------------------------------- 1 | // Clippy does not like SortedSet. TODO: Figure out if we can fix this. 2 | #![allow(clippy::mutable_key_type)] 3 | 4 | use crate::ops::RVec; 5 | use crate::types::{Count, Index, Key, Score}; 6 | use std::cmp::Ordering; 7 | use std::collections::hash_map::Entry; 8 | use std::collections::{BTreeSet, HashMap}; 9 | 10 | // TODO: Use convenient-skiplist 11 | 12 | // TODO: Why doesn't this actually allow it? 
13 | #[allow(clippy::mutable_key_type)] 14 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] 15 | pub struct SortedSetMember { 16 | pub score: Score, 17 | pub member: String, 18 | } 19 | 20 | impl PartialOrd for SortedSetMember { 21 | fn partial_cmp(&self, other: &Self) -> Option { 22 | let score_cmp = self.score.cmp(&other.score); 23 | if let Ordering::Equal = score_cmp { 24 | return Some(self.member.cmp(&other.member)); 25 | } 26 | Some(score_cmp) 27 | } 28 | } 29 | 30 | impl Ord for SortedSetMember { 31 | fn cmp(&self, other: &Self) -> Ordering { 32 | self.partial_cmp(other).unwrap() 33 | } 34 | } 35 | 36 | // TODO: Look into using RangeBounds properly 37 | // impl RangeBounds for SortedSetMember { 38 | // fn start_bound(&self) -> Bound<&Score> { 39 | // Included(&self.score) 40 | // } 41 | 42 | // fn end_bound(&self) -> Bound<&Score> { 43 | // Included(&self.score) 44 | // } 45 | // fn contains(&self, item: &U) -> bool 46 | // where 47 | // U: PartialOrd + ?Sized, 48 | // { 49 | // if let Ordering::Equal = item.partial_cmp(&self.score).unwrap() { 50 | // true 51 | // } else { 52 | // false 53 | // } 54 | // } 55 | // } 56 | 57 | // impl RangeBounds for Range { 58 | // fn start_bound(&self) -> Bound<&SortedSetMember> { 59 | // let f = SortedSetMember::new(&b"".to_vec(), self.start); 60 | // Included(f) 61 | // } 62 | // fn end_bound(&self) -> Bound<&SortedSetMember> { 63 | // let f = SortedSetMember::new(&b"".to_vec(), self.end); 64 | // Included(&f) 65 | // } 66 | 67 | // fn contains(&self, item: &U) -> bool 68 | // where 69 | // U: PartialOrd + ?Sized { 70 | // if let Ordering::Equal = item.partial_cmp(&self.score).unwrap() { 71 | // true 72 | // } else { 73 | // false 74 | // } 75 | // } 76 | // } 77 | 78 | impl SortedSetMember { 79 | fn new(key: &[u8], score: Score) -> Self { 80 | SortedSetMember { 81 | score, 82 | member: String::from_utf8_lossy(key).to_string(), 83 | } 84 | } 85 | } 86 | 87 | #[derive(Debug, Serialize, Deserialize, Default)] 
88 | pub struct SortedSet { 89 | members_hash: HashMap, 90 | scores: BTreeSet, 91 | } 92 | 93 | #[allow(unused)] 94 | impl SortedSet { 95 | /// Create a new SortedSet 96 | pub fn new() -> Self { 97 | SortedSet::default() 98 | } 99 | 100 | /// Add the following keys and scores to the sorted set 101 | pub fn add(&mut self, key_scores: RVec<(Score, Key)>) -> Count { 102 | key_scores 103 | .into_iter() 104 | .map(|(score, key)| match self.members_hash.entry(key) { 105 | Entry::Vacant(ent) => { 106 | self.scores.insert(SortedSetMember::new(ent.key(), score)); 107 | ent.insert(score); 108 | 1 109 | } 110 | Entry::Occupied(_) => 0, 111 | }) 112 | .sum() 113 | } 114 | 115 | /// Remove the following keys from the sorted set 116 | pub fn remove(&mut self, keys: &[Key]) -> Count { 117 | keys.iter() 118 | .map(|key| match self.members_hash.remove(key) { 119 | None => 0, 120 | Some(score) => { 121 | let tmp = SortedSetMember::new(key, score); 122 | self.scores.remove(&tmp); 123 | 1 124 | } 125 | }) 126 | .sum() 127 | } 128 | 129 | fn remove_one(&mut self, key: &Key) { 130 | self.members_hash.remove(key); 131 | } 132 | 133 | /// Returns the number of members stored in the set. 134 | pub fn card(&self) -> Count { 135 | self.members_hash.len() as Count 136 | } 137 | 138 | /// Return the score of the member in the sorted set 139 | pub fn score(&self, key: Key) -> Option { 140 | self.members_hash.get(&key).cloned() 141 | } 142 | 143 | /// Get all members between (lower, upper) scores 144 | pub fn range(&self, range: (Score, Score)) -> RVec { 145 | // TODO: Use a more efficient method. I should use a skiplist or an AVL tree. 146 | // Another option is to retackle the rangebounds stuff, but the semantics are different. 147 | // I want to be able to compare by score AND member when inserting/removing, 148 | // but only by score in this case. Need to figure out how to encode that. 
149 | self.scores 150 | .iter() 151 | .filter(|mem| range.0 <= mem.score && mem.score <= range.1) 152 | .cloned() 153 | .collect() 154 | } 155 | 156 | /// Remove count (default: 1) maximum members from the sorted set 157 | pub fn pop_max(&mut self, count: Count) -> Vec { 158 | let count = count as usize; // TODO: What if it's negative? 159 | let ret: Vec = self.scores.iter().rev().take(count).cloned().collect(); 160 | for key in ret.iter().map(|s| s.member.clone()) { 161 | self.remove(&[key.into()]); 162 | } 163 | ret 164 | } 165 | 166 | /// Remove count (default: 1) minimum members from the sorted set 167 | pub fn pop_min(&mut self, count: Count) -> Vec { 168 | let count = count as usize; // TODO: What if it's negative? 169 | let ret: Vec = self.scores.iter().take(count).cloned().collect(); 170 | for key in ret.iter().map(|s| s.member.clone()) { 171 | self.remove(&[key.into()]); 172 | } 173 | ret 174 | } 175 | 176 | // /// Get the maximum score in the sorted set 177 | // pub fn max_score(&self) -> Option { 178 | // self.scores.iter().rev().next().cloned().map(|m| m.score) 179 | // } 180 | 181 | /// Get the rank of a given key in the sorted set 182 | pub fn rank(&self, key: Key) -> Option { 183 | self.scores 184 | .iter() 185 | .position(|s| s.member.as_bytes() == &*key) 186 | .map(|pos| pos as Index) 187 | } 188 | } 189 | 190 | #[cfg(test)] 191 | mod test_sorted_sets_ds { 192 | use crate::data_structures::sorted_set::{SortedSet, SortedSetMember}; 193 | use crate::ops::RVec; 194 | use crate::types::{Key, Score}; 195 | use bytes::Bytes; 196 | use smallvec::smallvec; 197 | 198 | fn get_multiple_entries() -> RVec<(Score, Key)> { 199 | smallvec![ 200 | (1, Bytes::from_static(b"hi_0")), 201 | (3, Bytes::from_static(b"hi_1")), 202 | (5, Bytes::from_static(b"hi_2")), 203 | ] 204 | } 205 | 206 | #[allow(unused)] 207 | fn get_multiple_sorted_set_entries() -> RVec { 208 | get_multiple_entries() 209 | .into_iter() 210 | .map(|(score, key)| SortedSetMember::new(&key, score)) 
211 | .collect() 212 | } 213 | 214 | #[test] 215 | fn test_add() { 216 | let mut ss = SortedSet::new(); 217 | assert_eq!(1, ss.add(smallvec![(2, Bytes::from_static(b"hi"))])); 218 | assert_eq!( 219 | get_multiple_entries().len() as i64, 220 | ss.add(get_multiple_entries()) 221 | ); 222 | assert_eq!(0, ss.add(get_multiple_entries())); 223 | } 224 | 225 | #[test] 226 | fn test_range() { 227 | let mut ss = SortedSet::new(); 228 | 229 | ss.add(smallvec![ 230 | (1, Bytes::from_static(b"hi_0")), 231 | (3, Bytes::from_static(b"hi_1")), 232 | (5, Bytes::from_static(b"hi_2")), 233 | ]); 234 | let expected: RVec = smallvec![ 235 | SortedSetMember::new(&Bytes::from_static(b"hi_0"), 1), 236 | SortedSetMember::new(&Bytes::from_static(b"hi_1"), 3), 237 | SortedSetMember::new(&Bytes::from_static(b"hi_2"), 5), 238 | ]; 239 | assert_eq!(ss.range((1, 5)), expected); 240 | let expected: RVec = smallvec![SortedSetMember::new(&b"hi_1".to_vec(), 3)]; 241 | assert_eq!(ss.range((2, 4)), expected); 242 | let empty_vec: RVec = RVec::new(); 243 | assert_eq!(ss.range((20, 40)), empty_vec); 244 | } 245 | 246 | #[test] 247 | fn test_remove() { 248 | let mut ss = SortedSet::new(); 249 | let all_keys: Vec = get_multiple_entries() 250 | .into_iter() 251 | .map(|(_, key)| key) 252 | .collect(); 253 | assert_eq!(0, ss.remove(&all_keys.clone())); 254 | ss.add(get_multiple_entries()); 255 | assert_eq!(1, ss.remove(&[all_keys[1].clone()])); 256 | assert_eq!(2, ss.card()); 257 | assert_eq!(2, ss.remove(&all_keys)); 258 | assert_eq!(0, ss.card()); 259 | } 260 | 261 | // XXX: Fix test case. Am moving to proper skiplist later. 262 | #[test] 263 | fn test_pop_max() { 264 | let mut ss = SortedSet::new(); 265 | assert_eq!(ss.pop_max(10), Vec::new()); 266 | ss.add(get_multiple_entries()); 267 | let entries = get_multiple_sorted_set_entries(); 268 | let first_two: Vec = entries.iter().cloned().collect(); 269 | ss.pop_max(2); // TODO: Fix this test case. 
270 | // assert_eq!(ss.pop_max(2).as_slice(), &first_two[1..]); 271 | assert_eq!(ss.pop_max(2).as_slice(), &[first_two[0].clone()]); 272 | } 273 | #[test] 274 | fn test_pop_min() { 275 | let mut ss = SortedSet::new(); 276 | assert_eq!(ss.pop_min(10), Vec::new()); 277 | ss.add(get_multiple_entries()); 278 | let entries = get_multiple_sorted_set_entries(); 279 | let last_two: Vec = entries.iter().cloned().collect(); 280 | assert_eq!(ss.pop_min(2).as_slice(), &last_two[..2]); 281 | assert_eq!(ss.pop_min(2).as_slice(), &[last_two[2].clone()]); 282 | } 283 | } 284 | -------------------------------------------------------------------------------- /src/data_structures/stack.rs: -------------------------------------------------------------------------------- 1 | use crate::types::Count; 2 | #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Default)] 3 | pub struct Stack { 4 | inner: Vec, 5 | } 6 | 7 | impl Stack { 8 | pub fn new() -> Stack { 9 | Stack { inner: Vec::new() } 10 | } 11 | 12 | pub fn push(&mut self, item: T) -> Count { 13 | self.inner.push(item); 14 | self.inner.len() as Count 15 | } 16 | 17 | pub fn pop(&mut self) -> Option { 18 | self.inner.pop() 19 | } 20 | 21 | pub fn peek(&self) -> Option { 22 | self.inner.last().cloned() 23 | } 24 | 25 | pub fn size(&self) -> Count { 26 | self.inner.len() as Count 27 | } 28 | } 29 | 30 | #[cfg(test)] 31 | mod test_stack { 32 | use crate::data_structures::stack::Stack; 33 | 34 | #[test] 35 | fn test_push_pop() { 36 | let mut s = Stack::new(); 37 | s.push(3); 38 | assert_eq!(s.pop(), Some(3)); 39 | assert_eq!(s.pop(), None); 40 | } 41 | 42 | #[test] 43 | fn test_peek_size() { 44 | let mut s = Stack::new(); 45 | assert_eq!(s.size(), 0); 46 | assert_eq!(s.peek(), None); 47 | s.push(3); 48 | assert_eq!(s.peek(), Some(3)); 49 | assert_eq!(s.peek(), Some(3)); 50 | assert_eq!(s.size(), 1); 51 | s.push(4); 52 | assert_eq!(s.peek(), Some(4)); 53 | assert_eq!(s.size(), 2); 54 | } 55 | } 56 | 
-------------------------------------------------------------------------------- /src/database.rs: -------------------------------------------------------------------------------- 1 | use crate::logger::LOGGER; 2 | use crate::startup::Config; 3 | use crate::types::{DumpFile, StateStore, StateStoreRef}; 4 | use directories::ProjectDirs; 5 | use parking_lot::Mutex; 6 | use std::error::Error; 7 | use std::fs::File; 8 | use std::fs::OpenOptions; 9 | use std::io::Seek; 10 | use std::io::SeekFrom; 11 | use std::path::Path; 12 | use std::path::PathBuf; 13 | use std::sync::atomic::Ordering; 14 | use std::sync::Arc; 15 | use std::time::Duration; 16 | use tokio::task; 17 | use tokio::time::interval; 18 | 19 | const SAVE_STATE_PERIOD_SEC: u64 = 60; 20 | const SAVE_STATE_PERIOD: u64 = SAVE_STATE_PERIOD_SEC * 1000; 21 | 22 | /// Convenience macro to panic with error messages. 23 | macro_rules! fatal_panic { 24 | ($msg:expr) => {{ 25 | error!(LOGGER, "{}", $msg); 26 | println!("{}", $msg); 27 | panic!("Fatal Error, cannot continue..."); 28 | }}; 29 | ($msg:expr, $err:expr) => {{ 30 | error!(LOGGER, "{} {}", $msg, $err); 31 | println!("{}", $msg); 32 | panic!("Fatal Error, cannot continue..."); 33 | }}; 34 | } 35 | 36 | /// Dump the current state to the dump_file 37 | fn dump_state(state: StateStoreRef, dump_file: &mut File) -> Result<(), Box> { 38 | dump_file.seek(SeekFrom::Start(0))?; 39 | rmps::encode::write(dump_file, &state) 40 | .map_err(|e| fatal_panic!("Could not write state!", e.to_string())) 41 | .unwrap(); 42 | Ok(()) 43 | } 44 | 45 | /// Load state from the dump_file 46 | pub fn load_state(dump_file: DumpFile, config: &Config) -> Result> { 47 | let mut contents = dump_file.lock(); 48 | if contents.metadata()?.len() == 0 { 49 | return Ok(Arc::new(StateStore::default())); 50 | } 51 | 52 | contents.seek(SeekFrom::Start(0))?; 53 | let mut state_store: StateStore = rmps::decode::from_read(&*contents)?; 54 | state_store.commands_threshold = config.ops_until_save; 55 | 
state_store.memory_only = config.memory_only; 56 | 57 | Ok(Arc::new(state_store)) 58 | } 59 | 60 | /// Make the data directory (directory where the dump file lives) 61 | fn make_data_dir(data_dir: &Path) { 62 | match std::fs::create_dir_all(&data_dir) { 63 | Ok(_) => { 64 | info!( 65 | LOGGER, 66 | "Created config dir path {}", 67 | data_dir.to_string_lossy() 68 | ); 69 | } 70 | Err(e) => { 71 | let err_msg = format!( 72 | "Error! Cannot create path {}, error {}", 73 | data_dir.to_string_lossy(), 74 | e, 75 | ); 76 | fatal_panic!(err_msg); 77 | } 78 | } 79 | } 80 | 81 | fn default_data_dir() -> PathBuf { 82 | let data_dir = ProjectDirs::from("ca", "dpbriggs", "redis-oxide").expect("to fetch a default"); 83 | let mut p = PathBuf::new(); 84 | p.push(data_dir.data_dir()); 85 | p 86 | } 87 | 88 | /// Get the dump file 89 | /// 90 | /// Panics if a data directory cannot be found, or file cannot be opened. 91 | pub fn get_dump_file(config: &Config) -> DumpFile { 92 | let data_dir: PathBuf = match &config.data_dir { 93 | Some(dir) => dir.to_path_buf(), 94 | None => default_data_dir(), 95 | }; 96 | if !data_dir.exists() { 97 | make_data_dir(&data_dir); 98 | } 99 | 100 | let dump_file = data_dir.join("dump.rodb"); 101 | info!(LOGGER, "Dump File Location: {:?}", dump_file); 102 | let opened_file = match OpenOptions::new() 103 | .read(true) 104 | .write(true) 105 | .create(true) 106 | .append(false) 107 | .open(dump_file) 108 | { 109 | Ok(f) => f, 110 | Err(e) => fatal_panic!(format!("Failed to open dump file! 
{}", e)), 111 | }; 112 | // TODO: Use tokio locks here 113 | Arc::new(Mutex::new(opened_file)) 114 | } 115 | 116 | pub fn save_state(state: StateStoreRef, dump_file: DumpFile) { 117 | info!( 118 | LOGGER, 119 | "Saving state ({}s or >={} ops ran)...", SAVE_STATE_PERIOD_SEC, state.commands_threshold 120 | ); 121 | match dump_file.try_lock() { 122 | Some(mut file) => { 123 | if let Err(e) = task::block_in_place(|| dump_state(state, &mut file)) { 124 | fatal_panic!("FAILED TO DUMP STATE!", e.to_string()); 125 | } 126 | } 127 | None => debug!( 128 | LOGGER, 129 | "Failed to save state! Someone else is currently writing..." 130 | ), 131 | } 132 | } 133 | 134 | /// Save the current State to Dumpfile. 135 | /// 136 | /// Panics if state fails to dump. 137 | pub async fn save_state_interval(state: StateStoreRef, dump_file: DumpFile) { 138 | let mut interval = interval(Duration::from_millis(SAVE_STATE_PERIOD)); 139 | loop { 140 | interval.tick().await; 141 | let commands_ran_since_save = state.commands_ran_since_save.load(Ordering::SeqCst); 142 | if commands_ran_since_save != 0 { 143 | state.commands_ran_since_save.store(0, Ordering::SeqCst); 144 | save_state(state.clone(), dump_file.clone()); 145 | } 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/hashes.rs: -------------------------------------------------------------------------------- 1 | use crate::op_variants; 2 | use crate::ops::RVec; 3 | use crate::types::{Count, Key, ReturnValue, StateRef, Value}; 4 | use crate::{make_reader, make_writer}; 5 | use std::collections::hash_map::Entry; 6 | 7 | op_variants! 
{ 8 | HashOps, 9 | HGet(Key, Key), 10 | HSet(Key, Key, Value), 11 | HExists(Key, Key), 12 | HGetAll(Key), 13 | HMGet(Key, RVec), 14 | HKeys(Key), 15 | HMSet(Key, RVec<(Key, Value)>), 16 | HIncrBy(Key, Key, Count), 17 | HLen(Key), 18 | HDel(Key, RVec), 19 | HVals(Key), 20 | HStrLen(Key, Key), 21 | HSetNX(Key, Key, Value) 22 | } 23 | 24 | make_reader!(hashes, read_hashes); 25 | make_writer!(hashes, write_hashes); 26 | 27 | pub async fn hash_interact(hash_op: HashOps, state: StateRef) -> ReturnValue { 28 | match hash_op { 29 | HashOps::HGet(key, field) => match read_hashes!(state, &key) { 30 | None => ReturnValue::Nil, 31 | Some(hash) => hash 32 | .get(&field) 33 | .map_or(ReturnValue::Nil, |f| ReturnValue::StringRes(f.clone())), 34 | }, 35 | HashOps::HSet(key, field, value) => { 36 | state.hashes.entry(key).or_default().insert(field, value); 37 | ReturnValue::Ok 38 | } 39 | HashOps::HExists(key, field) => read_hashes!(state) 40 | .get(&key) 41 | .map(|hashes| hashes.contains_key(&field)) 42 | .map_or(ReturnValue::IntRes(0), |v: bool| { 43 | ReturnValue::IntRes(if v { 1 } else { 0 }) 44 | }), 45 | 46 | HashOps::HGetAll(key) => match read_hashes!(state, &key) { 47 | Some(hash) => { 48 | let mut ret = Vec::with_capacity(hash.len() * 2); 49 | for (key, value) in hash.iter() { 50 | ret.push(key.clone()); 51 | ret.push(value.clone()); 52 | } 53 | ReturnValue::MultiStringRes(ret) 54 | } 55 | None => ReturnValue::MultiStringRes(Vec::with_capacity(0)), 56 | }, 57 | // HashOps::HGetAll(key) => { 58 | // read_hashes!(state, &key, hash); 59 | // if hash.is_none() { 60 | // return ; 61 | // } 62 | // let mut ret = Vec::new(); 63 | // for (key, val) in hash.unwrap().iter() { 64 | // ret.push(key.clone()); 65 | // ret.push(val.clone()); 66 | // } 67 | // ReturnValue::MultiStringRes(ret) 68 | // } 69 | HashOps::HMGet(key, fields) => ReturnValue::Array(match read_hashes!(state, &key) { 70 | None => std::iter::repeat_with(|| ReturnValue::Nil) 71 | .take(fields.len()) 72 | .collect(), 
73 | Some(hash) => fields 74 | .iter() 75 | .map(|field| { 76 | hash.get(field) 77 | .map_or(ReturnValue::Nil, |v| ReturnValue::StringRes(v.clone())) 78 | }) 79 | .collect(), 80 | }), 81 | HashOps::HKeys(key) => match read_hashes!(state, &key) { 82 | Some(hash) => { 83 | ReturnValue::Array(hash.keys().cloned().map(ReturnValue::StringRes).collect()) 84 | } 85 | None => ReturnValue::Array(vec![]), 86 | }, 87 | HashOps::HMSet(key, key_values) => { 88 | state.hashes.entry(key).or_default().extend(key_values); 89 | ReturnValue::Ok 90 | } 91 | HashOps::HIncrBy(key, field, count) => { 92 | let mut hash = state.hashes.entry(key).or_default(); 93 | let mut curr_value = match hash.get(&field) { 94 | Some(value) => { 95 | let i64_repr = std::str::from_utf8(value) 96 | .map(|e| e.parse::()) 97 | .unwrap(); 98 | if i64_repr.is_err() { 99 | return ReturnValue::Error(b"Bad Type!"); 100 | } 101 | i64_repr.unwrap() 102 | } 103 | None => 0, 104 | }; 105 | curr_value += count; 106 | let new_value = Value::from(curr_value.to_string()); 107 | hash.insert(field, new_value); 108 | ReturnValue::Ok 109 | } 110 | HashOps::HLen(key) => read_hashes!(state, &key) 111 | .map_or(0, |hash| hash.len() as Count) 112 | .into(), 113 | 114 | // HashOps::HLen(key) => read_hashes!(state, &key) 115 | // .map(|hash| hash.len() as Count) 116 | // .unwrap_or(0) 117 | // .into(), 118 | // HashOps::HLen(key) => match read_hashes!(state, &key) { 119 | // Some(hash) => ReturnValue::IntRes(hash.len() as Count), 120 | // None => ReturnValue::IntRes(0), 121 | // }, 122 | HashOps::HDel(key, fields) => match write_hashes!(state, &key) { 123 | Some(mut hash) => { 124 | let res = fields.iter().filter_map(|field| hash.remove(field)).count(); 125 | ReturnValue::IntRes(res as Count) 126 | } 127 | None => ReturnValue::IntRes(0), 128 | }, 129 | HashOps::HVals(key) => match read_hashes!(state, &key) { 130 | Some(hash) => { 131 | ReturnValue::Array(hash.values().cloned().map(ReturnValue::StringRes).collect()) 132 | } 133 | 
None => ReturnValue::Array(vec![]), 134 | }, 135 | // XXX: For some reason there's lifetime issues when doing the usual combinator chain. 136 | HashOps::HStrLen(key, field) => match read_hashes!(state, &key) { 137 | None => ReturnValue::Nil, 138 | Some(hash) => hash 139 | .get(&field) 140 | .map_or(ReturnValue::Nil, |f| ReturnValue::IntRes(f.len() as Count)), 141 | }, 142 | HashOps::HSetNX(key, field, value) => { 143 | if let Entry::Vacant(ent) = state.hashes.entry(key).or_default().entry(field) { 144 | ent.insert(value); 145 | ReturnValue::IntRes(1) 146 | } else { 147 | ReturnValue::IntRes(0) 148 | } 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/hyperloglog.rs: -------------------------------------------------------------------------------- 1 | use crate::ops::RVec; 2 | use crate::types::{Key, ReturnValue, StateRef, Value}; 3 | use crate::{make_reader, op_variants}; 4 | 5 | op_variants! { 6 | HyperLogLogOps, 7 | PfAdd(Key, RVec), 8 | PfCount(RVec), 9 | PfMerge(Key, RVec) 10 | } 11 | 12 | make_reader!(hyperloglogs, read_hyperloglogs); 13 | 14 | // Error ratio from http://antirez.com/news/75 15 | const HYPERLOGLOG_ERROR_RATIO: f64 = 0.0081; 16 | 17 | fn default_hyperloglog() -> amadeus_streaming::HyperLogLog { 18 | amadeus_streaming::HyperLogLog::new(HYPERLOGLOG_ERROR_RATIO) 19 | } 20 | 21 | pub async fn hyperloglog_interact(hyperloglog_op: HyperLogLogOps, state: StateRef) -> ReturnValue { 22 | match hyperloglog_op { 23 | HyperLogLogOps::PfAdd(key, values) => { 24 | let mut pf_ref = state 25 | .hyperloglogs 26 | .entry(key) 27 | .or_insert_with(default_hyperloglog); 28 | let curr_card = pf_ref.len() as i64; 29 | values.into_iter().for_each(|e| pf_ref.push(&e)); 30 | let new_card = pf_ref.len() as i64; 31 | ReturnValue::IntRes((new_card != curr_card).into()) 32 | } 33 | HyperLogLogOps::PfCount(keys) => { 34 | // If there's only key, read that. redis appears to return zero if it doesn't exist. 
35 | if keys.len() == 1 { 36 | return read_hyperloglogs!(state, &keys[0]) 37 | .map(|pf| pf.len() as i64) 38 | .unwrap_or(0) 39 | .into(); 40 | } 41 | let res = keys 42 | .iter() 43 | .filter_map(|key| read_hyperloglogs!(state, key)) 44 | .fold(default_hyperloglog(), |mut acc, curr_pf| { 45 | acc.union(&curr_pf); 46 | acc 47 | }) 48 | .len() as i64; 49 | ReturnValue::IntRes(res) 50 | } 51 | HyperLogLogOps::PfMerge(dest_key, source_keys) => { 52 | let mut dest_pf = state 53 | .hyperloglogs 54 | .entry(dest_key) 55 | .or_insert_with(default_hyperloglog); 56 | source_keys 57 | .iter() 58 | .filter_map(|key| read_hyperloglogs!(state, key)) 59 | .for_each(|ref pf| dest_pf.union(pf)); 60 | ReturnValue::Ok 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/keys.rs: -------------------------------------------------------------------------------- 1 | use crate::op_variants; 2 | use crate::ops::RVec; 3 | use crate::types::{Count, Key, ReturnValue, StateRef, Value}; 4 | 5 | op_variants! 
{ 6 | KeyOps, 7 | Set(Key, Value), 8 | MSet(RVec<(Key, Value)>), 9 | Get(Key), 10 | MGet(RVec), 11 | Del(RVec), 12 | Rename(Key, Key), 13 | RenameNx(Key, Key) 14 | } 15 | 16 | pub async fn key_interact(key_op: KeyOps, state: StateRef) -> ReturnValue { 17 | match key_op { 18 | KeyOps::Get(key) => state.kv.get(&key).map_or(ReturnValue::Nil, |v| { 19 | ReturnValue::StringRes(v.value().clone()) 20 | }), 21 | KeyOps::MGet(keys) => { 22 | let vals = keys 23 | .iter() 24 | .map(|key| match state.kv.get(key) { 25 | Some(v) => ReturnValue::StringRes(v.value().clone()), 26 | None => ReturnValue::Nil, 27 | }) 28 | .collect(); 29 | ReturnValue::Array(vals) 30 | } 31 | KeyOps::Set(key, value) => { 32 | state.kv.insert(key, value); 33 | ReturnValue::Ok 34 | } 35 | KeyOps::MSet(key_vals) => { 36 | let kv = &state.kv; 37 | for (key, val) in key_vals.into_iter() { 38 | kv.insert(key, val); 39 | } 40 | ReturnValue::Ok 41 | } 42 | KeyOps::Del(keys) => { 43 | let deleted = keys 44 | .iter() 45 | .map(|x| state.kv.remove(x)) 46 | .filter(Option::is_some) 47 | .count(); 48 | ReturnValue::IntRes(deleted as Count) 49 | } 50 | KeyOps::Rename(key, new_key) => match state.kv.remove(&key) { 51 | Some((_, value)) => { 52 | state.kv.insert(new_key, value); 53 | ReturnValue::Ok 54 | } 55 | None => ReturnValue::Error(b"no such key"), 56 | }, 57 | KeyOps::RenameNx(key, new_key) => { 58 | if state.kv.contains_key(&new_key) { 59 | return ReturnValue::IntRes(0); 60 | } 61 | match state.kv.remove(&key) { 62 | Some((_, value)) => { 63 | state.kv.insert(new_key, value); 64 | ReturnValue::IntRes(1) 65 | } 66 | None => ReturnValue::Error(b"no such key"), 67 | } 68 | } 69 | } 70 | } 71 | 72 | #[cfg(test)] 73 | mod test_keys { 74 | use crate::keys::{key_interact, KeyOps}; 75 | use crate::types::{ReturnValue, State}; 76 | use bytes::Bytes; 77 | use smallvec::smallvec; 78 | use std::sync::Arc; 79 | 80 | #[tokio::test] 81 | async fn test_get() { 82 | let v = Bytes::from_static(b"hello"); 83 | let eng = 
Arc::new(State::default()); 84 | assert_eq!( 85 | ReturnValue::Nil, 86 | key_interact(KeyOps::Get(v.clone()), eng.clone()).await 87 | ); 88 | key_interact(KeyOps::Set(v.clone(), v.clone()), eng.clone()).await; 89 | assert_eq!( 90 | ReturnValue::StringRes(v.clone()), 91 | key_interact(KeyOps::Get(v.clone()), eng.clone()).await 92 | ); 93 | } 94 | 95 | #[tokio::test] 96 | async fn test_set() { 97 | let (l, r) = (Bytes::from_static(b"l"), Bytes::from_static(b"r")); 98 | let eng = Arc::new(State::default()); 99 | key_interact(KeyOps::Set(l.clone(), r.clone()), eng.clone()).await; 100 | assert_eq!( 101 | ReturnValue::StringRes(r.clone()), 102 | key_interact(KeyOps::Get(l.clone()), eng.clone()).await 103 | ); 104 | } 105 | 106 | #[tokio::test] 107 | async fn test_del() { 108 | let (l, unused) = (Bytes::from_static(b"l"), Bytes::from_static(b"r")); 109 | let eng = Arc::new(State::default()); 110 | key_interact(KeyOps::Set(l.clone(), l.clone()), eng.clone()).await; 111 | 112 | assert_eq!( 113 | ReturnValue::IntRes(1), 114 | key_interact(KeyOps::Del(smallvec![l.clone()]), eng.clone()).await 115 | ); 116 | assert_eq!( 117 | ReturnValue::IntRes(0), 118 | key_interact(KeyOps::Del(smallvec![unused]), eng.clone()).await 119 | ); 120 | } 121 | 122 | #[tokio::test] 123 | async fn test_rename() { 124 | let (old, v, new) = ( 125 | Bytes::from_static(b"old"), 126 | Bytes::from_static(b"v"), 127 | Bytes::from_static(b"new"), 128 | ); 129 | let eng = Arc::new(State::default()); 130 | key_interact(KeyOps::Set(old.clone(), v.clone()), eng.clone()).await; 131 | // TODO: Make testing Exec_OptionRes tractable 132 | // assert(ir(eng.clone().exec_op(gp(KeyOps::Rename(new.clone()), old.clone()))).is_error()); 133 | key_interact(KeyOps::Rename(old.clone(), new.clone()), eng.clone()).await; 134 | assert_eq!( 135 | ReturnValue::StringRes(v.clone()), 136 | key_interact(KeyOps::Get(new), eng.clone()).await 137 | ); 138 | } 139 | } 140 | 
-------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![forbid(unsafe_code)] 2 | #![warn(rust_2018_idioms)] 3 | #![warn(clippy::all)] 4 | 5 | #[macro_use] 6 | extern crate serde_derive; 7 | 8 | #[macro_use] 9 | extern crate lazy_static; 10 | 11 | #[macro_use] 12 | extern crate slog; 13 | 14 | extern crate rmp_serde as rmps; 15 | 16 | pub mod asyncresp; 17 | pub mod blocking; 18 | pub mod bloom; 19 | pub mod database; 20 | pub mod hashes; 21 | pub mod keys; 22 | pub mod lists; 23 | pub mod logger; 24 | #[macro_use] 25 | pub mod macros; 26 | pub mod data_structures; 27 | pub mod hyperloglog; 28 | pub mod misc; 29 | pub mod ops; 30 | pub mod scripting; 31 | pub mod server; 32 | pub mod sets; 33 | pub mod sorted_sets; 34 | pub mod stack; 35 | pub mod startup; 36 | pub mod state; 37 | pub mod timeouts; 38 | pub mod types; 39 | -------------------------------------------------------------------------------- /src/lists.rs: -------------------------------------------------------------------------------- 1 | use crate::ops::RVec; 2 | use crate::timeouts::blocking_key_timeout; 3 | use crate::types::{Count, Index, Key, ReturnValue, StateRef, UTimeout, Value}; 4 | use crate::{make_reader, make_writer, op_variants}; 5 | 6 | op_variants! 
{ 7 | ListOps, 8 | LIndex(Key, Index), 9 | LLen(Key), 10 | LPop(Key), 11 | LPush(Key, RVec), 12 | LPushX(Key, Value), 13 | LRange(Key, Index, Index), 14 | LSet(Key, Index, Value), 15 | LTrim(Key, Index, Index), 16 | RPop(Key), 17 | RPush(Key, RVec), 18 | RPushX(Key, Value), 19 | RPopLPush(Key, Key), 20 | BLPop(Key, UTimeout), 21 | BRPop(Key, UTimeout) 22 | } 23 | 24 | make_reader!(lists, read_lists); 25 | make_writer!(lists, write_lists); 26 | 27 | #[allow(clippy::cognitive_complexity)] 28 | pub async fn list_interact(list_op: ListOps, state: StateRef) -> ReturnValue { 29 | match list_op { 30 | ListOps::LPush(key, vals) => { 31 | let mut list = state.lists.entry(key.clone()).or_default(); 32 | for val in vals { 33 | list.push_front(val); 34 | } 35 | state.wake_list(&key); 36 | ReturnValue::IntRes(list.len() as Count) 37 | } 38 | ListOps::LPushX(key, val) => match state.lists.get_mut(&key) { 39 | Some(mut list) => { 40 | list.push_front(val); 41 | state.wake_list(&key); 42 | ReturnValue::IntRes(list.len() as Count) 43 | } 44 | None => ReturnValue::IntRes(0), 45 | }, 46 | ListOps::RPushX(key, val) => match state.lists.get_mut(&key) { 47 | Some(mut list) => { 48 | list.push_back(val); 49 | state.wake_list(&key); 50 | ReturnValue::IntRes(list.len() as Count) 51 | } 52 | None => ReturnValue::IntRes(0), 53 | }, 54 | ListOps::LLen(key) => match read_lists!(state, &key) { 55 | Some(l) => ReturnValue::IntRes(l.len() as Count), 56 | None => ReturnValue::IntRes(0), 57 | }, 58 | ListOps::LPop(key) => match write_lists!(state, &key).and_then(|mut v| v.pop_front()) { 59 | Some(v) => ReturnValue::StringRes(v), 60 | None => ReturnValue::Nil, 61 | }, 62 | ListOps::RPop(key) => match write_lists!(state, &key).and_then(|mut v| v.pop_back()) { 63 | Some(v) => ReturnValue::StringRes(v), 64 | None => ReturnValue::Nil, 65 | }, 66 | ListOps::RPush(key, vals) => { 67 | let mut list = state.lists.entry(key).or_default(); 68 | for val in vals { 69 | list.push_back(val) 70 | } 71 | 
ReturnValue::IntRes(list.len() as Count) 72 | } 73 | ListOps::LIndex(key, index) => match write_lists!(state, &key) { 74 | Some(list) => { 75 | let llen = list.len() as i64; 76 | let real_index = if index < 0 { llen + index } else { index }; 77 | if !(0 <= real_index && real_index < llen) { 78 | return ReturnValue::Error(b"Bad Range!"); 79 | } 80 | let real_index = real_index as usize; 81 | ReturnValue::StringRes(list[real_index].clone()) 82 | } 83 | None => ReturnValue::Nil, 84 | }, 85 | ListOps::LSet(key, index, value) => match write_lists!(state, &key) { 86 | Some(mut list) => { 87 | let llen = list.len() as i64; 88 | let real_index = if index < 0 { llen + index } else { index }; 89 | if !(0 <= real_index && real_index < llen) { 90 | return ReturnValue::Error(b"Bad Range!"); 91 | } 92 | let real_index = real_index as usize; 93 | list[real_index] = value; 94 | ReturnValue::Ok 95 | } 96 | None => ReturnValue::Error(b"No list at key!"), 97 | }, 98 | ListOps::LRange(key, start_index, end_index) => match read_lists!(state, &key) { 99 | Some(list) => { 100 | let start_index = 101 | std::cmp::max(0, if start_index < 0 { 0 } else { start_index } as usize); 102 | let end_index = std::cmp::min( 103 | list.len(), 104 | if end_index < 0 { 105 | list.len() as i64 + end_index 106 | } else { 107 | end_index 108 | } as usize, 109 | ); 110 | let mut ret = Vec::new(); 111 | for (index, value) in list.iter().enumerate() { 112 | if start_index <= index && index <= end_index { 113 | ret.push(value.clone()); 114 | } 115 | if index > end_index { 116 | break; 117 | } 118 | } 119 | ReturnValue::MultiStringRes(ret) 120 | } 121 | None => ReturnValue::MultiStringRes(vec![]), 122 | }, 123 | ListOps::LTrim(key, start_index, end_index) => { 124 | match write_lists!(state, &key) { 125 | Some(mut list) => { 126 | let start_index = 127 | std::cmp::max(0, if start_index < 0 { 0 } else { start_index } as usize); 128 | let end_index = std::cmp::min( 129 | list.len(), 130 | if end_index < 0 { 131 | 
list.len() as i64 + end_index 132 | } else { 133 | end_index 134 | } as usize, 135 | ) + 1; 136 | // Deal with right side 137 | list.truncate(end_index); 138 | // Deal with left side 139 | for _ in 0..start_index { 140 | list.pop_front(); 141 | } 142 | ReturnValue::Ok 143 | } 144 | None => ReturnValue::Ok, 145 | } 146 | } 147 | ListOps::RPopLPush(source, dest) => match state.lists.get_mut(&source) { 148 | None => ReturnValue::Nil, 149 | Some(mut source_list) => match source_list.pop_back() { 150 | None => ReturnValue::Nil, 151 | Some(value) => { 152 | if source == dest { 153 | source_list.push_back(value.clone()); 154 | } else { 155 | state 156 | .lists 157 | .entry(dest.clone()) 158 | .or_default() 159 | .push_back(value.clone()); 160 | state.wake_list(&dest); 161 | } 162 | ReturnValue::StringRes(value) 163 | } 164 | }, 165 | }, 166 | ListOps::BLPop(key, timeout) => { 167 | let state_clone = state.clone(); 168 | let key_clone = key.clone(); 169 | let bl = move || { 170 | write_lists!(state, &key) 171 | .and_then(|mut v| v.pop_front()) 172 | .map(ReturnValue::StringRes) 173 | }; 174 | blocking_key_timeout(Box::new(bl), state_clone, key_clone, timeout).await 175 | } 176 | ListOps::BRPop(key, timeout) => { 177 | let state_clone = state.clone(); 178 | let key_clone = key.clone(); 179 | let bl = move || { 180 | write_lists!(state, &key) 181 | .and_then(|mut v| v.pop_back()) 182 | .map(ReturnValue::StringRes) 183 | }; 184 | blocking_key_timeout(Box::new(bl), state_clone, key_clone, timeout).await 185 | } 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /src/logger.rs: -------------------------------------------------------------------------------- 1 | use lazy_static::lazy_static; 2 | use slog::Logger; 3 | use sloggers::terminal::{Destination, TerminalLoggerBuilder}; 4 | #[allow(unused_imports)] // Emacs is convinced this is unused. 
5 | use sloggers::types::{Severity, SourceLocation}; 6 | use sloggers::Build; 7 | 8 | #[cfg(debug_assertions)] 9 | fn get_logger() -> Logger { 10 | let mut builder = TerminalLoggerBuilder::new(); 11 | builder.level(Severity::Debug); 12 | builder.destination(Destination::Stdout); 13 | 14 | builder.build().unwrap() 15 | } 16 | 17 | #[cfg(not(debug_assertions))] 18 | fn get_logger() -> Logger { 19 | let mut builder = TerminalLoggerBuilder::new(); 20 | builder.level(Severity::Info); 21 | builder.destination(Destination::Stdout); 22 | builder.source_location(SourceLocation::None); 23 | 24 | let logger = builder.build().unwrap(); 25 | logger 26 | } 27 | 28 | lazy_static! { 29 | pub static ref LOGGER: Logger = get_logger(); 30 | } 31 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | /// Reader Macro 3 | /// Use this to easily read from a given datastructure in State. 4 | macro_rules! make_reader { 5 | ($id:ident, $name:ident) => { 6 | macro_rules! $name { 7 | ($state:expr) => { 8 | &$state.$id 9 | }; 10 | ($state:expr, $key:expr) => { 11 | $state.$id.get($key) 12 | }; 13 | } 14 | }; 15 | } 16 | 17 | #[macro_export] 18 | /// Writer Macro 19 | /// Use this to easily write into a given datastructure in State. 20 | macro_rules! make_writer { 21 | ($id:ident, $name:ident) => { 22 | macro_rules! $name { 23 | ($state:expr) => { 24 | &$state.$id 25 | }; 26 | ($state:expr, $key:expr) => { 27 | $state.$id.get_mut($key) 28 | }; 29 | } 30 | }; 31 | } 32 | 33 | #[macro_export] 34 | /// Macro to generate the enum AND store each variant in OP_VARIANTS 35 | macro_rules! op_variants { 36 | ($name:ident, $($variant_name:ident($($arg:ty),*)),*) => { 37 | lazy_static!
{ 38 | pub static ref OP_VARIANTS: Vec = { 39 | let mut v = Vec::new(); 40 | v.push(format!("{}", stringify!($name))); 41 | $( 42 | v.push(format!("{}", stringify!($variant_name($($arg),*)))); 43 | )* 44 | v 45 | }; 46 | } 47 | crate::as_item! { 48 | #[derive(Debug, Clone)] 49 | pub enum $name { $($variant_name($($arg),*),)* } 50 | } 51 | }; 52 | } 53 | 54 | #[macro_export] 55 | macro_rules! as_item { 56 | ($i:item) => { 57 | $i 58 | }; 59 | } 60 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use redis_oxide::database::{get_dump_file, load_state}; 2 | use redis_oxide::logger::LOGGER; 3 | use redis_oxide::scripting::{handle_redis_cmd, ScriptingBridge}; 4 | use redis_oxide::server::socket_listener; 5 | use redis_oxide::startup::{startup_message, Config}; 6 | use redis_oxide::{database::save_state_interval, scripting::ScriptingEngine}; 7 | use tokio::sync::mpsc::channel; 8 | #[macro_use] 9 | extern crate slog; 10 | 11 | use structopt::StructOpt; 12 | 13 | #[tokio::main] 14 | async fn main() -> Result<(), Box> { 15 | // 1. Get the args. 16 | let opt = Config::from_args(); 17 | // 2. Print the fancy logo. 18 | startup_message(&opt); 19 | // 3. Get the database file, making folders if necessary. 20 | info!(LOGGER, "Initializing State..."); 21 | let dump_file = get_dump_file(&opt); 22 | // 4. Load database state if it exists. 23 | info!(LOGGER, "Opening Datafile..."); 24 | let state = load_state(dump_file.clone(), &opt)?; 25 | // 5. Spawn the save-occasionally service. 26 | info!(LOGGER, "Starting Server..."); 27 | if !opt.memory_only { 28 | info!(LOGGER, "Spawning database saving task..."); 29 | tokio::spawn(save_state_interval(state.clone(), dump_file.clone())); 30 | } else { 31 | warn!( 32 | LOGGER, 33 | "Database is in memory-only mode. STATE WILL NOT BE SAVED!" 34 | ); 35 | } 36 | // 6. 
Create the channels for scripting 37 | let (prog_string_sx, prog_string_rx) = channel(12); 38 | let (cmd_result_sx, cmd_result_rx) = channel(12); 39 | 40 | let scripting_engine = 41 | ScriptingEngine::new(prog_string_rx, cmd_result_sx, state.clone(), &opt)?; 42 | 43 | info!(LOGGER, "ScriptingEngine main loop started"); 44 | std::thread::spawn(|| scripting_engine.main_loop()); 45 | 46 | let scripting_bridge = ScriptingBridge::new(prog_string_sx); 47 | 48 | tokio::spawn(handle_redis_cmd( 49 | cmd_result_rx, 50 | state.clone(), 51 | dump_file.clone(), 52 | scripting_bridge.clone(), 53 | )); 54 | 55 | // 7. Start the server! It will start listening for connections. 56 | socket_listener(state.clone(), dump_file.clone(), opt, scripting_bridge).await; 57 | Ok(()) 58 | } 59 | -------------------------------------------------------------------------------- /src/misc.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::scripting::{Program, ScriptingBridge}; 4 | use crate::types::{Count, Index, Key, RedisValueRef, ReturnValue, StateRef, StateStoreRef, Value}; 5 | 6 | op_variants! { 7 | MiscOps, 8 | Keys(), // TODO: Add optional glob 9 | Exists(Vec), 10 | Pong(), 11 | FlushAll(), 12 | FlushDB(), 13 | // SwapDB(Index, Index), // TODO: Need to figure out how to best sync clients. 14 | Echo(Value), 15 | PrintCmds(), 16 | Select(Index), 17 | Script(Value), 18 | EmbeddedScript(Value, Vec), 19 | Info() 20 | } 21 | 22 | macro_rules! create_commands_list { 23 | ($($ops:ident),*) => { 24 | { 25 | let mut res = Vec::new(); 26 | $( 27 | let tmp = $ops.iter().cloned().map(|s| s.into()).collect(); 28 | res.push(ReturnValue::MultiStringRes(tmp)); 29 | )* 30 | ReturnValue::Array(res) 31 | } 32 | }; 33 | } 34 | 35 | /// Easily get all keys out of each passed type. 36 | macro_rules! 
get_all_keys { 37 | ($state:expr, $($type:ident),*) => { 38 | { 39 | let mut all = Vec::new(); 40 | $( 41 | all.extend($state.$type.iter().map(|r| r.key().clone())); 42 | )* 43 | all 44 | } 45 | } 46 | } 47 | 48 | lazy_static! { 49 | static ref ALL_COMMANDS: ReturnValue = { 50 | use crate::keys::OP_VARIANTS as KEY_VARIANTS; 51 | use crate::sets::OP_VARIANTS as SET_VARIANTS; 52 | use crate::lists::OP_VARIANTS as LIST_VARIANTS; 53 | use crate::hashes::OP_VARIANTS as HASH_VARIANTS; 54 | use crate::sorted_sets::OP_VARIANTS as ZSET_VARIANTS; 55 | use crate::bloom::OP_VARIANTS as BLOOM_VARIANTS; 56 | use crate::stack::OP_VARIANTS as STACK_VARIANTS; 57 | use crate::hyperloglog::OP_VARIANTS as HYPERLOGLOG_VARIANTS; 58 | create_commands_list!( 59 | KEY_VARIANTS, 60 | LIST_VARIANTS, 61 | HASH_VARIANTS, 62 | SET_VARIANTS, 63 | ZSET_VARIANTS, 64 | BLOOM_VARIANTS, 65 | STACK_VARIANTS, 66 | HYPERLOGLOG_VARIANTS, 67 | OP_VARIANTS // Misc variants 68 | ) 69 | }; 70 | } 71 | 72 | pub async fn misc_interact( 73 | misc_op: MiscOps, 74 | state: &mut StateRef, 75 | state_store: StateStoreRef, 76 | scripting_bridge: Arc, 77 | ) -> ReturnValue { 78 | match misc_op { 79 | MiscOps::Pong() => ReturnValue::StringRes(Value::from_static(b"PONG")), 80 | MiscOps::FlushAll() => { 81 | let clear = |state: &StateRef| { 82 | state.kv.clear(); 83 | state.sets.clear(); 84 | state.lists.clear(); 85 | state.hashes.clear(); 86 | state.zsets.clear(); 87 | state.blooms.clear(); 88 | }; 89 | for state in state_store.states.iter_mut() { 90 | clear(&state); 91 | } 92 | // let state_guard = state_store.states.lock(); 93 | // for state in state_guard.values() { 94 | // clear(state); 95 | // } 96 | ReturnValue::Ok 97 | } 98 | MiscOps::FlushDB() => { 99 | *state = Default::default(); 100 | ReturnValue::Ok 101 | } 102 | MiscOps::Exists(keys) => ReturnValue::IntRes( 103 | keys.iter() 104 | .map(|key| state.kv.contains_key(key)) 105 | .filter(|exists| *exists) 106 | .count() as Count, 107 | ), 108 | MiscOps::Keys() 
=> { 109 | let kv_keys = get_all_keys!(state, kv, sets, lists, hashes, zsets, blooms); 110 | ReturnValue::MultiStringRes(kv_keys) 111 | } 112 | MiscOps::PrintCmds() => (*ALL_COMMANDS).clone(), 113 | MiscOps::Select(index) => { 114 | let state_store = state_store.get_or_create(index); 115 | *state = state_store; 116 | ReturnValue::Ok 117 | } 118 | MiscOps::Echo(val) => ReturnValue::StringRes(val), 119 | MiscOps::Info() => { 120 | let info: String = [ 121 | concat!("redis_version", ":", env!("CARGO_PKG_VERSION")), 122 | "arch_bits:64", 123 | ] 124 | .join("\r\n"); 125 | ReturnValue::StringRes(info.into()) 126 | } 127 | MiscOps::Script(program) => { 128 | let prog_str = String::from_utf8_lossy(&program).to_string(); 129 | let res = scripting_bridge 130 | .handle_script_cmd(Program::String(prog_str)) 131 | .await; 132 | ReturnValue::Ident(res) 133 | } 134 | MiscOps::EmbeddedScript(fn_name, fn_args) => { 135 | // We need to send the program over the scripting bridge 136 | // and wait for the result 137 | let fn_name = String::from_utf8_lossy(&fn_name).to_string(); 138 | let res = scripting_bridge 139 | .handle_script_cmd(Program::Function(fn_name, fn_args)) 140 | .await; 141 | ReturnValue::Ident(res) 142 | } 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/ops.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use std::convert::TryFrom; 3 | use std::fmt::Debug; 4 | 5 | use crate::bloom::{bloom_interact, BloomOps}; 6 | use crate::hashes::{hash_interact, HashOps}; 7 | use crate::hyperloglog::{hyperloglog_interact, HyperLogLogOps}; 8 | use crate::keys::{key_interact, KeyOps}; 9 | use crate::lists::{list_interact, ListOps}; 10 | use crate::misc::MiscOps; 11 | use crate::sets::{set_interact, SetOps}; 12 | use crate::sorted_sets::{zset_interact, ZSetOps}; 13 | use crate::stack::{stack_interact, StackOps}; 14 | use crate::types::{ReturnValue, StateRef, 
StateStoreRef}; 15 | 16 | use crate::types::{Count, Index, Key, RedisValueRef, Score, UTimeout, Value}; 17 | 18 | #[derive(Debug, Clone)] 19 | pub enum Ops { 20 | Keys(KeyOps), 21 | Sets(SetOps), 22 | Lists(ListOps), 23 | Misc(MiscOps), 24 | Hashes(HashOps), 25 | ZSets(ZSetOps), 26 | Blooms(BloomOps), 27 | Stacks(StackOps), 28 | HyperLogLogs(HyperLogLogOps), 29 | } 30 | 31 | /// Top level interaction function. Used by the server to run 32 | /// operations against state. 33 | pub async fn op_interact(op: Ops, state: StateRef) -> ReturnValue { 34 | match op { 35 | Ops::Keys(op) => key_interact(op, state).await, 36 | Ops::Sets(op) => set_interact(op, state).await, 37 | Ops::Lists(op) => list_interact(op, state).await, 38 | Ops::Hashes(op) => hash_interact(op, state).await, 39 | Ops::ZSets(op) => zset_interact(op, state).await, 40 | Ops::Blooms(op) => bloom_interact(op, state).await, 41 | Ops::Stacks(op) => stack_interact(op, state).await, 42 | Ops::HyperLogLogs(op) => hyperloglog_interact(op, state).await, 43 | _ => unreachable!(), 44 | } 45 | } 46 | 47 | #[derive(Debug)] 48 | pub enum OpsError { 49 | InvalidStart, 50 | Noop, 51 | UnknownOp, 52 | NotEnoughArgs(usize, usize), // req, given 53 | WrongNumberOfArgs(usize, usize), 54 | InvalidArgPattern(&'static str), 55 | InvalidType, 56 | SyntaxError, 57 | InvalidArgs(String), 58 | } 59 | 60 | // impl Error for OpsError {} 61 | 62 | impl From for RedisValueRef { 63 | fn from(op: OpsError) -> RedisValueRef { 64 | match op { 65 | OpsError::InvalidStart => RedisValueRef::ErrorMsg(b"Invalid start!".to_vec()), 66 | OpsError::UnknownOp => RedisValueRef::ErrorMsg(b"Unknown Operation!".to_vec()), 67 | OpsError::InvalidArgPattern(explain) => { 68 | let f = format!("Invalid Arg Pattern, {}", explain); 69 | RedisValueRef::ErrorMsg(f.as_bytes().to_vec()) 70 | } 71 | OpsError::NotEnoughArgs(req, given) => { 72 | let f = format!("Not enough arguments, {} required, {} given!", req, given); 73 | 
RedisValueRef::ErrorMsg(f.as_bytes().to_vec()) 74 | } 75 | OpsError::WrongNumberOfArgs(required, given) => { 76 | let f = format!( 77 | "Wrong number of arguments! ({} required, {} given)", 78 | required, given 79 | ); 80 | RedisValueRef::ErrorMsg(f.as_bytes().to_vec()) 81 | } 82 | OpsError::InvalidType => RedisValueRef::ErrorMsg(b"Invalid Type!".to_vec()), 83 | OpsError::SyntaxError => RedisValueRef::ErrorMsg(b"Syntax Error!".to_vec()), 84 | OpsError::Noop => RedisValueRef::ErrorMsg(b"".to_vec()), 85 | OpsError::InvalidArgs(s) => RedisValueRef::ErrorMsg(s.as_bytes().to_vec()), 86 | } 87 | } 88 | } 89 | 90 | // impl ToString for RedisValueRef { 91 | // fn to_string(&self) -> String { 92 | // match self { 93 | // RedisValueRef::SimpleString(s) => format!("+{}\r\n", String::from_utf8_lossy(s)), 94 | // RedisValueRef::Error(e) => format!("-{}\r\n", String::from_utf8_lossy(e)), 95 | // RedisValueRef::BulkString(s) => { 96 | // format!("${}\r\n{}\r\n", s.len(), String::from_utf8_lossy(s)) 97 | // } 98 | // RedisValueRef::Int(i) => format!(":{}\r\n", i.to_string()), 99 | // RedisValueRef::Array(a) => { 100 | // if a.is_empty() { 101 | // return EMPTY_ARRAY.to_string(); 102 | // } 103 | // let contents: String = a 104 | // .iter() 105 | // .map(ToString::to_string) 106 | // .collect::>() 107 | // .join(""); 108 | // if contents.ends_with("\r\n") { 109 | // return format!("*{:?}\r\n{}", a.len(), contents); 110 | // } 111 | // format!("*{:?}\r\n{:?}\r\n", a.len(), contents) 112 | // } 113 | // RedisValueRef::NullBulkString => NULL_BULK_STRING.to_string(), 114 | // RedisValueRef::NullArray => NULL_ARRAY.to_string(), 115 | // } 116 | // } 117 | // } 118 | 119 | impl TryFrom for Bytes { 120 | type Error = OpsError; 121 | 122 | fn try_from(r: RedisValueRef) -> Result { 123 | match r { 124 | RedisValueRef::BulkString(s) => Ok(s), 125 | _ => Err(OpsError::InvalidType), 126 | } 127 | } 128 | } 129 | 130 | impl TryFrom<&RedisValueRef> for Bytes { 131 | type Error = OpsError; 132 | 
133 | fn try_from(r: &RedisValueRef) -> Result { 134 | match r { 135 | RedisValueRef::BulkString(r) => Ok(r.clone()), 136 | _ => Err(OpsError::InvalidType), 137 | } 138 | } 139 | } 140 | 141 | impl TryFrom for String { 142 | type Error = OpsError; 143 | 144 | fn try_from(r: RedisValueRef) -> Result { 145 | match r { 146 | RedisValueRef::BulkString(s) => Ok(String::from_utf8_lossy(&s).to_string()), 147 | _ => Err(OpsError::InvalidType), 148 | } 149 | } 150 | } 151 | 152 | impl TryFrom<&RedisValueRef> for String { 153 | type Error = OpsError; 154 | 155 | fn try_from(r: &RedisValueRef) -> Result { 156 | String::try_from(r.clone()) 157 | } 158 | } 159 | 160 | impl TryFrom<&RedisValueRef> for Count { 161 | type Error = OpsError; 162 | 163 | fn try_from(r: &RedisValueRef) -> Result { 164 | match r { 165 | RedisValueRef::Int(e) => Ok(*e as Count), 166 | // TODO: Not copy here 167 | RedisValueRef::BulkString(s) => match String::from_utf8(s.to_owned().to_vec()) { 168 | Ok(s) => s.parse().map_err(|_| OpsError::InvalidType), 169 | Err(_) => Err(OpsError::InvalidType), 170 | }, 171 | _ => Err(OpsError::InvalidType), 172 | } 173 | } 174 | } 175 | 176 | /// Ensure the passed collection has an even number of arguments. 
177 | #[inline] 178 | fn ensure_even(v: &[T]) -> Result<(), OpsError> { 179 | if v.len() % 2 != 0 { 180 | return Err(OpsError::InvalidArgPattern( 181 | "even number of arguments required!", 182 | )); 183 | } 184 | Ok(()) 185 | } 186 | 187 | use smallvec::SmallVec; 188 | const DEFAULT_SMALL_VEC_SIZE: usize = 2; 189 | pub type RVec = SmallVec<[T; DEFAULT_SMALL_VEC_SIZE]>; 190 | 191 | fn collect_from_tail<'a, ValueType>(tail: &[&'a RedisValueRef]) -> Result, OpsError> 192 | where 193 | ValueType: TryFrom<&'a RedisValueRef, Error = OpsError>, 194 | { 195 | let mut items: SmallVec<[ValueType; DEFAULT_SMALL_VEC_SIZE]> = SmallVec::new(); 196 | for item in tail.iter() { 197 | let value = ValueType::try_from(item)?; 198 | items.push(value); 199 | } 200 | Ok(items) 201 | } 202 | 203 | fn values_from_tail<'a, ValueType>(tail: &[&'a RedisValueRef]) -> Result, OpsError> 204 | where 205 | ValueType: TryFrom<&'a RedisValueRef, Error = OpsError>, 206 | { 207 | let mut items: Vec = Vec::new(); 208 | for item in tail.iter() { 209 | let value = ValueType::try_from(item)?; 210 | items.push(value); 211 | } 212 | Ok(items) 213 | } 214 | 215 | /// Verify that the collection v has _at least_ min_size values. 216 | /// e.g. If you wanted to verify that there's two or more items, min_size would be 2. 217 | fn verify_size_lower(v: &[T], min_size: usize) -> Result<(), OpsError> { 218 | if v.len() < min_size { 219 | return Err(OpsError::NotEnoughArgs(min_size, v.len())); 220 | } 221 | Ok(()) 222 | } 223 | 224 | /// Verify the exact size of a sequence. 
225 | /// Useful for some commands that require an exact number of arguments (like get and set) 226 | fn verify_size(v: &[T], size: usize) -> Result<(), OpsError> { 227 | if v.len() != size { 228 | return Err(OpsError::WrongNumberOfArgs(size, v.len())); 229 | } 230 | Ok(()) 231 | } 232 | 233 | /// Get a tuple of (KeyType, ValueType) 234 | /// Mainly used for the thousand 2-adic ops 235 | fn get_key_and_value<'a, KeyType, ValueType>( 236 | array: &'a [RedisValueRef], 237 | ) -> Result<(KeyType, ValueType), OpsError> 238 | where 239 | KeyType: TryFrom<&'a RedisValueRef, Error = OpsError>, 240 | ValueType: TryFrom<&'a RedisValueRef, Error = OpsError>, 241 | { 242 | if array.len() < 3 { 243 | return Err(OpsError::WrongNumberOfArgs(2, array.len() - 1)); 244 | } 245 | let key = KeyType::try_from(&array[1])?; 246 | let val = ValueType::try_from(&array[2])?; 247 | Ok((key, val)) 248 | } 249 | 250 | /// Transform &[RedisValueRef] into (KeyType, Vec) 251 | /// Used for commands like DEL arg1 arg2... 252 | fn get_key_and_tail<'a, KeyType, TailType>( 253 | array: &'a [RedisValueRef], 254 | ) -> Result<(KeyType, RVec), OpsError> 255 | where 256 | KeyType: TryFrom<&'a RedisValueRef, Error = OpsError>, 257 | TailType: TryFrom<&'a RedisValueRef, Error = OpsError>, 258 | { 259 | if array.len() < 3 { 260 | return Err(OpsError::WrongNumberOfArgs(3, array.len())); 261 | } 262 | let set_key = KeyType::try_from(&array[1])?; 263 | let mut tail = RVec::new(); 264 | for tail_item in array.iter().skip(2) { 265 | let tmp = TailType::try_from(tail_item)?; 266 | tail.push(tmp) 267 | } 268 | Ok((set_key, tail)) 269 | } 270 | 271 | /// Transform a sequence of [Key1, Val1, Key2, Val2, ...] 
-> Vec<(Key, Value)> 272 | fn get_key_value_pairs<'a, KeyType, ValueType>( 273 | tail: &[&'a RedisValueRef], 274 | ) -> Result, OpsError> 275 | where 276 | KeyType: TryFrom<&'a RedisValueRef, Error = OpsError> + Debug, 277 | ValueType: TryFrom<&'a RedisValueRef, Error = OpsError> + Debug, 278 | { 279 | ensure_even(tail)?; 280 | let keys = tail.iter().step_by(2); 281 | let vals = tail.iter().skip(1).step_by(2); 282 | let mut ret = RVec::new(); 283 | for (&key, &val) in keys.zip(vals) { 284 | let key = KeyType::try_from(key)?; 285 | let val = ValueType::try_from(val)?; 286 | ret.push((key, val)) 287 | } 288 | Ok(ret) 289 | } 290 | 291 | /// Convenience macro to automatically construct the right variant 292 | /// of Ops. 293 | macro_rules! ok { 294 | (KeyOps::$OpName:ident($($OpArg:expr),*)) => { 295 | Ok(Ops::Keys(KeyOps::$OpName($( $OpArg ),*))) 296 | }; 297 | (MiscOps::$OpName:ident($($OpArg:expr),*)) => { 298 | Ok(Ops::Misc(MiscOps::$OpName($( $OpArg ),*))) 299 | }; 300 | (MiscOps::$OpName:ident) => { 301 | Ok(Ops::Misc(MiscOps::$OpName)) 302 | }; 303 | (SetOps::$OpName:ident($($OpArg:expr),*)) => { 304 | Ok(Ops::Sets(SetOps::$OpName($( $OpArg ),*))) 305 | }; 306 | (HashOps::$OpName:ident($($OpArg:expr),*)) => { 307 | Ok(Ops::Hashes(HashOps::$OpName($( $OpArg ),*))) 308 | }; 309 | (ListOps::$OpName:ident($($OpArg:expr),*)) => { 310 | Ok(Ops::Lists(ListOps::$OpName($( $OpArg ),*))) 311 | }; 312 | (ZSetOps::$OpName:ident($($OpArg:expr),*)) => { 313 | Ok(Ops::ZSets(ZSetOps::$OpName($( $OpArg ),*))) 314 | }; 315 | (StackOps::$OpName:ident($($OpArg:expr),*)) => { 316 | Ok(Ops::Stacks(StackOps::$OpName($( $OpArg ),*))) 317 | }; 318 | (BloomOps::$OpName:ident($($OpArg:expr),*)) => { 319 | Ok(Ops::Blooms(BloomOps::$OpName($( $OpArg ),*))) 320 | }; 321 | (HyperLogLogOps::$OpName:ident($($OpArg:expr),*)) => { 322 | Ok(Ops::HyperLogLogs(HyperLogLogOps::$OpName($( $OpArg ),*))) 323 | }; 324 | } 325 | 326 | fn translate_array(array: &[RedisValueRef], state_store: 
StateStoreRef) -> Result { 327 | if array.is_empty() { 328 | return Err(OpsError::Noop); 329 | } 330 | let head = Value::try_from(&array[0])?; 331 | let head_s = String::from_utf8_lossy(&head); 332 | if state_store.contains_foreign_function(&head_s) { 333 | return ok!(MiscOps::EmbeddedScript(head, array[1..].to_vec())); 334 | } 335 | let tail: Vec<&RedisValueRef> = array.iter().skip(1).collect(); 336 | match head_s.to_lowercase().as_ref() { 337 | "ping" => ok!(MiscOps::Pong()), 338 | "keys" => ok!(MiscOps::Keys()), 339 | "flushall" => ok!(MiscOps::FlushAll()), 340 | "flushdb" => ok!(MiscOps::FlushDB()), 341 | "script" => { 342 | verify_size(&tail, 1)?; 343 | let program = Value::try_from(tail[0])?; 344 | ok!(MiscOps::Script(program)) 345 | } 346 | // Key-Value 347 | "set" => { 348 | let (key, val) = get_key_and_value(array)?; 349 | ok!(KeyOps::Set(key, val)) 350 | } 351 | "mset" => ok!(KeyOps::MSet(get_key_value_pairs(&tail)?)), 352 | "get" => { 353 | verify_size(&tail, 1)?; 354 | let key = Key::try_from(tail[0])?; 355 | ok!(KeyOps::Get(key)) 356 | } 357 | "mget" => { 358 | verify_size_lower(&tail, 1)?; 359 | let keys = collect_from_tail(&tail)?; 360 | ok!(KeyOps::MGet(keys)) 361 | } 362 | "del" => { 363 | verify_size_lower(&tail, 1)?; 364 | let keys = collect_from_tail(&tail)?; 365 | ok!(KeyOps::Del(keys)) 366 | } 367 | "rename" => { 368 | verify_size(&tail, 2)?; 369 | let key = Key::try_from(tail[0])?; 370 | let new_key = Key::try_from(tail[1])?; 371 | ok!(KeyOps::Rename(key, new_key)) 372 | } 373 | "renamenx" => { 374 | verify_size(&tail, 2)?; 375 | let key = Key::try_from(tail[0])?; 376 | let new_key = Key::try_from(tail[1])?; 377 | ok!(KeyOps::RenameNx(key, new_key)) 378 | } 379 | "exists" => { 380 | verify_size_lower(&tail, 1)?; 381 | let keys = values_from_tail(&tail)?; 382 | ok!(MiscOps::Exists(keys)) 383 | } 384 | "printcmds" => ok!(MiscOps::PrintCmds()), 385 | // Sets 386 | "sadd" => { 387 | let (set_key, vals) = get_key_and_tail(array)?; 388 | 
ok!(SetOps::SAdd(set_key, vals)) 389 | } 390 | "srem" => { 391 | let (set_key, vals) = get_key_and_tail(array)?; 392 | ok!(SetOps::SRem(set_key, vals)) 393 | } 394 | "smembers" => { 395 | verify_size(&tail, 1)?; 396 | let set_key = Key::try_from(tail[0])?; 397 | ok!(SetOps::SMembers(set_key)) 398 | } 399 | "scard" => { 400 | verify_size(&tail, 1)?; 401 | let key = Key::try_from(tail[0])?; 402 | ok!(SetOps::SCard(key)) 403 | } 404 | "sdiff" => { 405 | verify_size_lower(&tail, 2)?; 406 | let keys = collect_from_tail(&tail)?; 407 | ok!(SetOps::SDiff(keys)) 408 | } 409 | "sunion" => { 410 | verify_size_lower(&tail, 2)?; 411 | let keys = collect_from_tail(&tail)?; 412 | ok!(SetOps::SUnion(keys)) 413 | } 414 | "sinter" => { 415 | verify_size_lower(&tail, 2)?; 416 | let keys = collect_from_tail(&tail)?; 417 | ok!(SetOps::SInter(keys)) 418 | } 419 | "sdiffstore" => { 420 | let (set_key, sets) = get_key_and_tail(array)?; 421 | ok!(SetOps::SDiffStore(set_key, sets)) 422 | } 423 | "sunionstore" => { 424 | let (set_key, sets) = get_key_and_tail(array)?; 425 | ok!(SetOps::SUnionStore(set_key, sets)) 426 | } 427 | "sinterstore" => { 428 | let (set_key, sets) = get_key_and_tail(array)?; 429 | ok!(SetOps::SInterStore(set_key, sets)) 430 | } 431 | "spop" => { 432 | verify_size_lower(&tail, 1)?; 433 | let key = Key::try_from(tail[0])?; 434 | let count = match tail.get(1) { 435 | Some(c) => Some(Count::try_from(*c)?), 436 | None => None, 437 | }; 438 | ok!(SetOps::SPop(key, count)) 439 | } 440 | "sismember" => { 441 | let (key, member) = get_key_and_value(array)?; 442 | ok!(SetOps::SIsMember(key, member)) 443 | } 444 | "smove" => { 445 | verify_size(&tail, 3)?; 446 | let src = Key::try_from(tail[0])?; 447 | let dest = Key::try_from(tail[1])?; 448 | let member = Value::try_from(tail[2])?; 449 | ok!(SetOps::SMove(src, dest, member)) 450 | } 451 | "srandmember" => { 452 | verify_size_lower(&tail, 1)?; 453 | let key = Key::try_from(tail[0])?; 454 | let count = match tail.get(1) { 455 | 
                Some(c) => Some(Count::try_from(*c)?),
                None => None,
            };
            ok!(SetOps::SRandMembers(key, count))
        }
        // Lists: bulk pushes take (key, one-or-more values).
        "lpush" => {
            let (key, vals) = get_key_and_tail(array)?;
            ok!(ListOps::LPush(key, vals))
        }
        "rpush" => {
            let (key, vals) = get_key_and_tail(array)?;
            ok!(ListOps::RPush(key, vals))
        }
        // The *X variants push exactly one value (only if the list exists,
        // per the op's semantics elsewhere).
        "lpushx" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let val = Value::try_from(tail[1])?;
            ok!(ListOps::LPushX(key, val))
        }
        "rpushx" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let val = Value::try_from(tail[1])?;
            ok!(ListOps::RPushX(key, val))
        }
        "llen" => {
            verify_size(&tail, 1)?;
            let key = Key::try_from(tail[0])?;
            ok!(ListOps::LLen(key))
        }
        "lpop" => {
            verify_size(&tail, 1)?;
            let key = Key::try_from(tail[0])?;
            ok!(ListOps::LPop(key))
        }
        // Blocking pops carry a timeout as the second argument.
        "blpop" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let timeout = UTimeout::try_from(tail[1])?;
            ok!(ListOps::BLPop(key, timeout))
        }
        "brpop" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let timeout = UTimeout::try_from(tail[1])?;
            ok!(ListOps::BRPop(key, timeout))
        }
        "rpop" => {
            verify_size(&tail, 1)?;
            let key = Key::try_from(tail[0])?;
            ok!(ListOps::RPop(key))
        }
        // FIXME(review): "linsert" is a copy-paste of the "lpop" arm — it
        // verifies a single argument and dispatches to ListOps::LPop. Real
        // LINSERT takes `key BEFORE|AFTER pivot element` (4 args) and needs a
        // dedicated ListOps variant. Confirm whether ListOps defines one
        // before wiring this up.
        "linsert" => {
            verify_size(&tail, 1)?;
            let key = Key::try_from(tail[0])?;
            ok!(ListOps::LPop(key))
        }
        "lindex" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let index = Index::try_from(tail[1])?;
            ok!(ListOps::LIndex(key, index))
        }
        "lset" => {
            verify_size(&tail, 3)?;
            let key = Key::try_from(tail[0])?;
            let index = Index::try_from(tail[1])?;
            let value = Value::try_from(tail[2])?;
            ok!(ListOps::LSet(key, index, value))
524 | } 525 | "lrange" => { 526 | verify_size(&tail, 3)?; 527 | let key = Key::try_from(tail[0])?; 528 | let start_index = Index::try_from(tail[1])?; 529 | let end_index = Index::try_from(tail[2])?; 530 | ok!(ListOps::LRange(key, start_index, end_index)) 531 | } 532 | "ltrim" => { 533 | verify_size(&tail, 3)?; 534 | let key = Key::try_from(tail[0])?; 535 | let start_index = Index::try_from(tail[1])?; 536 | let end_index = Index::try_from(tail[2])?; 537 | ok!(ListOps::LTrim(key, start_index, end_index)) 538 | } 539 | "rpoplpush" => { 540 | verify_size(&tail, 2)?; 541 | let source = Key::try_from(tail[0])?; 542 | let dest = Key::try_from(tail[1])?; 543 | ok!(ListOps::RPopLPush(source, dest)) 544 | } 545 | "hget" => { 546 | verify_size(&tail, 2)?; 547 | let key = Key::try_from(tail[0])?; 548 | let field = Key::try_from(tail[1])?; 549 | ok!(HashOps::HGet(key, field)) 550 | } 551 | "hset" => { 552 | verify_size(&tail, 3)?; 553 | let key = Key::try_from(tail[0])?; 554 | let field = Key::try_from(tail[1])?; 555 | let value = Key::try_from(tail[2])?; 556 | ok!(HashOps::HSet(key, field, value)) 557 | } 558 | "hsetnx" => { 559 | verify_size(&tail, 3)?; 560 | let key = Key::try_from(tail[0])?; 561 | let field = Key::try_from(tail[1])?; 562 | let value = Key::try_from(tail[2])?; 563 | ok!(HashOps::HSetNX(key, field, value)) 564 | } 565 | "hmset" => { 566 | verify_size_lower(&tail, 3)?; 567 | let key = Key::try_from(tail[0])?; 568 | // let args = tails_as_strings(&tail[1..])?; 569 | // // TODO: Avoid cloning here 570 | // let mut key_value_tuples: Vec<(Key, Value)> = Vec::new(); 571 | // for i in args.chunks(2) { 572 | // let key_value = (i[0].clone(), i[1].clone()); 573 | // key_value_tuples.push(key_value); 574 | // } 575 | let key_value_tuples = get_key_value_pairs(&tail[1..])?; 576 | ok!(HashOps::HMSet(key, key_value_tuples)) 577 | } 578 | "hexists" => { 579 | verify_size(&tail, 2)?; 580 | let key = Key::try_from(tail[0])?; 581 | let field = Key::try_from(tail[1])?; 582 | 
ok!(HashOps::HExists(key, field)) 583 | } 584 | "hgetall" => { 585 | verify_size(&tail, 1)?; 586 | let key = Key::try_from(tail[0])?; 587 | ok!(HashOps::HGetAll(key)) 588 | } 589 | "hmget" => { 590 | verify_size_lower(&tail, 2)?; 591 | let key = Key::try_from(tail[0])?; 592 | let fields = collect_from_tail(&tail[1..])?; 593 | ok!(HashOps::HMGet(key, fields)) 594 | } 595 | "hkeys" => { 596 | verify_size(&tail, 1)?; 597 | let key = Key::try_from(tail[0])?; 598 | ok!(HashOps::HKeys(key)) 599 | } 600 | "hlen" => { 601 | verify_size(&tail, 1)?; 602 | let key = Key::try_from(tail[0])?; 603 | ok!(HashOps::HLen(key)) 604 | } 605 | "hdel" => { 606 | verify_size_lower(&tail, 2)?; 607 | let key = Key::try_from(tail[0])?; 608 | let fields = collect_from_tail(&tail[1..])?; 609 | ok!(HashOps::HDel(key, fields)) 610 | } 611 | "hvals" => { 612 | verify_size(&tail, 1)?; 613 | let key = Key::try_from(tail[0])?; 614 | ok!(HashOps::HVals(key)) 615 | } 616 | "hstrlen" => { 617 | // verify_size(&tail, 2)?; 618 | // let key = Key::try_from(tail[0])?; 619 | // let field = Key::try_from(tail[1])?; 620 | 621 | // ok!(HashOps::HStrLen(key, field)) 622 | // get_key_and_value 623 | let (key, field) = get_key_and_value(array)?; 624 | ok!(HashOps::HStrLen(key, field)) 625 | } 626 | "hincrby" => { 627 | verify_size(&tail, 3)?; 628 | let key = Key::try_from(tail[0])?; 629 | let field = Key::try_from(tail[1])?; 630 | let value = Count::try_from(tail[2])?; 631 | Ok(Ops::Hashes(HashOps::HIncrBy(key, field, value))) 632 | } 633 | // Sorted Sets 634 | "zadd" => { 635 | verify_size_lower(&tail, 3)?; 636 | let key = Key::try_from(tail[0])?; 637 | let member_scores = get_key_value_pairs(&tail[1..])?; 638 | ok!(ZSetOps::ZAdd(key, member_scores)) 639 | } 640 | "zrem" => { 641 | verify_size_lower(&tail, 2)?; 642 | let (key, keys_to_rem) = get_key_and_tail(&array[1..])?; 643 | ok!(ZSetOps::ZRem(key, keys_to_rem)) 644 | } 645 | "zrange" => { 646 | verify_size(&tail, 3)?; 647 | let key = 
Key::try_from(tail[0])?;
            let lower = Score::try_from(tail[1])?;
            let upper = Score::try_from(tail[2])?;
            ok!(ZSetOps::ZRange(key, lower, upper))
        }
        "zcard" => {
            verify_size(&tail, 1)?;
            let key = Key::try_from(tail[0])?;
            ok!(ZSetOps::ZCard(key))
        }
        "zscore" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let score = Key::try_from(tail[1])?;
            ok!(ZSetOps::ZScore(key, score))
        }
        // Consistency fix: ZPOPMAX's count is optional (defaults to 1), the
        // same as the "zpopmin" arm below and real Redis. Two-argument calls
        // behave exactly as before.
        "zpopmax" => {
            verify_size_lower(&tail, 1)?;
            let key = Key::try_from(tail[0])?;
            let count = if tail.len() == 1 {
                1.into()
            } else {
                Count::try_from(tail[1])?
            };
            ok!(ZSetOps::ZPopMax(key, count))
        }
        "zpopmin" => {
            verify_size_lower(&tail, 1)?;
            let key = Key::try_from(tail[0])?;
            let count = if tail.len() == 1 {
                1.into()
            } else {
                Count::try_from(tail[1])?
            };
            ok!(ZSetOps::ZPopMin(key, count))
        }
        "zrank" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let member_key = Key::try_from(tail[1])?;
            ok!(ZSetOps::ZRank(key, member_key))
        }
        // Bloom filter ops
        "binsert" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let value = Value::try_from(tail[1])?;
            ok!(BloomOps::BInsert(key, value))
        }
        "bcontains" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let value = Value::try_from(tail[1])?;
            ok!(BloomOps::BContains(key, value))
        }
        "select" => {
            verify_size(&tail, 1)?;
            let new_db = Index::try_from(tail[0])?;
            ok!(MiscOps::Select(new_db))
        }
        "echo" => {
            verify_size(&tail, 1)?;
            let val = Value::try_from(tail[0])?;
            ok!(MiscOps::Echo(val))
        }
        "info" => {
            verify_size(&tail, 0)?;
            ok!(MiscOps::Info())
        }
        // StackOps
        "stpush" => {
            verify_size(&tail, 2)?;
            let key = Key::try_from(tail[0])?;
            let val = Value::try_from(tail[1])?;
ok!(StackOps::STPush(key, val)) 717 | } 718 | "stpop" => { 719 | verify_size(&tail, 1)?; 720 | let key = Key::try_from(tail[0])?; 721 | ok!(StackOps::STPop(key)) 722 | } 723 | "stpeek" => { 724 | verify_size(&tail, 1)?; 725 | let key = Key::try_from(tail[0])?; 726 | ok!(StackOps::STPeek(key)) 727 | } 728 | "stsize" => { 729 | verify_size(&tail, 1)?; 730 | let key = Key::try_from(tail[0])?; 731 | ok!(StackOps::STSize(key)) 732 | } 733 | // HyperLogLog 734 | "pfadd" => { 735 | verify_size_lower(&tail, 1)?; 736 | // TODO: Handle zero values case 737 | let key = Key::try_from(tail[0])?; 738 | let vals = collect_from_tail(&tail[1..])?; 739 | ok!(HyperLogLogOps::PfAdd(key, vals)) 740 | } 741 | "pfcount" => { 742 | verify_size_lower(&tail, 1)?; 743 | ok!(HyperLogLogOps::PfCount(collect_from_tail(&tail)?)) 744 | } 745 | "pfmerge" => { 746 | verify_size_lower(&tail, 2)?; 747 | let dest = Key::try_from(tail[0])?; 748 | let sources = collect_from_tail(&tail[1..])?; 749 | ok!(HyperLogLogOps::PfMerge(dest, sources)) 750 | } 751 | _ => Err(OpsError::UnknownOp), 752 | } 753 | } 754 | 755 | pub fn translate(rv: RedisValueRef, state_store: StateStoreRef) -> Result { 756 | match rv { 757 | RedisValueRef::Array(vals) => translate_array(&vals, state_store), 758 | bs @ RedisValueRef::BulkString(_) => translate_array(&[bs], state_store), 759 | _ => Err(OpsError::UnknownOp), 760 | } 761 | } 762 | -------------------------------------------------------------------------------- /src/scripting.rs: -------------------------------------------------------------------------------- 1 | use crate::server::process_command; 2 | use num_traits::cast::ToPrimitive; 3 | use std::{error::Error, sync::Arc}; 4 | use tokio::sync::mpsc::{Receiver, Sender}; 5 | 6 | use crate::startup::Config; 7 | use crate::types::DumpFile; 8 | use crate::types::RedisValueRef; 9 | use crate::{logger::LOGGER, types::StateStoreRef}; 10 | use x7::ffi::{ForeignData, IntoX7Function, Variadic, X7Interpreter}; 11 | use 
x7::symbols::Expr; 12 | 13 | fn bytes_to_string(s: &[u8]) -> String { 14 | String::from_utf8_lossy(s).to_string() 15 | } 16 | 17 | struct FFIError { 18 | reason: String, 19 | } 20 | 21 | impl FFIError { 22 | fn boxed(reason: String) -> Box { 23 | Box::new(Self { reason }) 24 | } 25 | } 26 | 27 | impl std::fmt::Debug for FFIError { 28 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 29 | write!(f, "{:?}", self.reason) 30 | } 31 | } 32 | 33 | impl std::fmt::Display for FFIError { 34 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 35 | write!(f, "{:?}", self.reason) 36 | } 37 | } 38 | 39 | impl Error for FFIError {} 40 | 41 | impl ForeignData for RedisValueRef { 42 | fn to_x7(&self) -> Result> { 43 | let res = match self { 44 | RedisValueRef::BulkString(s) | RedisValueRef::SimpleString(s) => { 45 | Expr::String(bytes_to_string(s)) 46 | } 47 | RedisValueRef::Error(e) => { 48 | return Err(FFIError::boxed(bytes_to_string(e))); 49 | } 50 | RedisValueRef::ErrorMsg(e) => { 51 | return Err(FFIError::boxed(bytes_to_string(e))); 52 | } 53 | RedisValueRef::Int(i) => Expr::Integer(*i), 54 | RedisValueRef::Array(a) => { 55 | Expr::Tuple(a.iter().map(|ele| ele.to_x7()).collect::>()?) 
56 | } 57 | RedisValueRef::NullArray | RedisValueRef::NullBulkString => Expr::Nil, 58 | }; 59 | Ok(res) 60 | } 61 | 62 | fn from_x7(expr: &Expr) -> Result> { 63 | let res = 64 | match expr { 65 | Expr::Nil => RedisValueRef::NullArray, 66 | Expr::Num(n) => RedisValueRef::Int(n.to_i64().ok_or_else(|| { 67 | FFIError::boxed(format!("Failed to convert {} into an i64", n)) 68 | })?), 69 | Expr::Integer(n) => RedisValueRef::Int(*n), 70 | Expr::String(s) => RedisValueRef::BulkString(s.clone().into()), 71 | Expr::Symbol(s) => RedisValueRef::BulkString(s.read().into()), 72 | Expr::List(l) | Expr::Tuple(l) | Expr::Quote(l) => RedisValueRef::Array( 73 | l.iter() 74 | .map(ForeignData::from_x7) 75 | .collect::>()?, 76 | ), 77 | Expr::Bool(b) => RedisValueRef::BulkString(format!("{}", b).into()), 78 | bad_type => { 79 | return Err(FFIError::boxed(format!( 80 | "redis-oxide cannot reason about this type: {:?}", 81 | bad_type 82 | ))) 83 | } 84 | }; 85 | Ok(res) 86 | } 87 | } 88 | 89 | #[allow(clippy::type_complexity)] 90 | pub struct ScriptingBridge { 91 | prog_send: Sender<( 92 | Program, 93 | OneShotSender>>, 94 | )>, 95 | } 96 | 97 | impl ScriptingBridge { 98 | #[allow(clippy::type_complexity)] 99 | pub fn new( 100 | prog_send: Sender<( 101 | Program, 102 | OneShotSender>>, 103 | )>, 104 | ) -> Arc { 105 | let sb = Self { prog_send }; 106 | Arc::new(sb) 107 | } 108 | 109 | pub async fn handle_script_cmd(&self, cmd: Program) -> RedisValueRef { 110 | let (sx, rx) = oneshot_channel(); 111 | if let Err(e) = self.prog_send.send((cmd, sx)).await { 112 | error!(LOGGER, "Failed to send program: {}", e); 113 | } 114 | match rx.await { 115 | Ok(x7_result) => match x7_result { 116 | Ok(r) => r, 117 | Err(e) => RedisValueRef::Error(format!("{}", e).into()), 118 | }, 119 | Err(e) => { 120 | RedisValueRef::Error(format!("Failed to receive a response from x7 {}", e).into()) 121 | } 122 | } 123 | } 124 | } 125 | 126 | use tokio::sync::oneshot::{channel as oneshot_channel, Sender as 
OneShotSender}; 127 | 128 | use tokio::sync::oneshot::error::TryRecvError; 129 | pub async fn handle_redis_cmd( 130 | mut cmd_recv: Receiver<(Vec, OneShotSender)>, 131 | state_store: StateStoreRef, 132 | dump_file: DumpFile, 133 | scripting_engine: Arc, 134 | ) { 135 | // TODO: Support, or return an error when interacting with 136 | // change db commands 137 | let mut state = state_store.get_default(); 138 | while let Some((cmd, return_channel)) = cmd_recv.recv().await { 139 | debug!(LOGGER, "Recieved redis command: {:?}", cmd); 140 | let res = process_command( 141 | &mut state, 142 | state_store.clone(), 143 | dump_file.clone(), 144 | scripting_engine.clone(), 145 | RedisValueRef::Array(cmd), 146 | ) 147 | .await; 148 | if let Err(e) = return_channel.send(res) { 149 | error!(LOGGER, "Failed to write response! {:?}", e); 150 | } 151 | } 152 | } 153 | 154 | #[derive(Debug)] 155 | pub enum Program { 156 | String(String), 157 | Function(String, Vec), 158 | } 159 | 160 | pub struct ScriptingEngine { 161 | interpreter: X7Interpreter, 162 | #[allow(clippy::type_complexity)] 163 | prog_revc: Receiver<( 164 | Program, 165 | OneShotSender>>, 166 | )>, 167 | // prog_send: Sender>>, 168 | cmd_send: Arc, OneShotSender)>>, 169 | } 170 | 171 | impl ScriptingEngine { 172 | #[allow(clippy::type_complexity)] 173 | pub fn new( 174 | prog_revc: Receiver<( 175 | Program, 176 | OneShotSender>>, 177 | )>, 178 | cmd_send: Sender<(Vec, OneShotSender)>, 179 | state_store: StateStoreRef, 180 | opts: &Config, 181 | ) -> Result> { 182 | let res = Self { 183 | interpreter: X7Interpreter::new(), 184 | prog_revc, 185 | cmd_send: Arc::new(cmd_send), 186 | }; 187 | res.setup_interpreter(state_store); 188 | res.load_scripts_dir(opts)?; 189 | Ok(res) 190 | } 191 | 192 | pub fn main_loop(mut self) { 193 | loop { 194 | if let Some((program, return_channel)) = self.prog_revc.blocking_recv() { 195 | debug!(LOGGER, "Recieved this program: {:?}", program); 196 | self.spawn_handling_thread(program, 
return_channel);
            }
        }
    }

    /// Load every x7 script in the configured scripts directory, if one was
    /// given on the command line; otherwise a no-op.
    fn load_scripts_dir(&self, opts: &Config) -> Result<(), Box<dyn Error>> {
        if let Some(path) = &opts.scripts_dir {
            info!(LOGGER, "Loading scripts in {:?}", path);
            self.interpreter.load_lib_dir(path)
        } else {
            Ok(())
        }
    }

    /// Register the `redis` builtin in the x7 interpreter.
    ///
    /// The closure ships the command over `cmd_send` to the redis task and
    /// blocks the scripting thread until the reply arrives on the oneshot.
    fn add_redis_fn(&self) {
        let send_clone = self.cmd_send.clone();
        let send_fn = move |args: Variadic| {
            let args = args.into_vec();
            let (sx, rx) = oneshot_channel();
            if let Err(e) = send_clone.blocking_send((args, sx)) {
                return Err(FFIError::boxed(format!(
                    "redis-oxide failed to send the command: {}",
                    e
                )));
            }
            // Perf fix: the original spun on rx.try_recv() in a tight loop,
            // pegging a core while waiting. blocking_recv parks the thread
            // until the redis task responds (or the sender is dropped).
            // (Typo fix in the error message: "redix" -> "redis".)
            rx.blocking_recv()
                .map_err(|_| FFIError::boxed("redis-oxide failed to return a value!".into()))
        };
        self.interpreter.add_function("redis", send_fn.to_x7_fn());
    }

    /// Add the "def-redis-fn" function to the interpreter
    ///
    /// e.g.
script '(def-redis-fn my-sum (a b) (+ a b))' 239 | /// >>> my-sum "hello " world 240 | /// "hello world" 241 | fn embed_foreign_script(&self, state_store: StateStoreRef) { 242 | // (def-redis-fn my-sum (a b) (+ a b)) 243 | let interpreter_clone = self.interpreter.clone(); 244 | let f = move |args: Variadic| { 245 | let args = args.into_vec(); 246 | let fn_name = match args[0].get_symbol_string() { 247 | Ok(sym) => sym, 248 | Err(e) => return Err(e), 249 | }; 250 | let f_args = args[1].clone(); // (arg1 arg2) 251 | let f_body = args[2].clone(); // (redis "set" arg1 arg2) 252 | let res = interpreter_clone.add_dynamic_function(&fn_name, f_args, f_body); 253 | if res.is_ok() { 254 | state_store.add_foreign_function(&fn_name.read()); 255 | } 256 | res 257 | }; 258 | self.interpreter 259 | .add_unevaled_function("def-redis-fn", f.to_x7_fn()); 260 | } 261 | 262 | fn setup_interpreter(&self, state_store: StateStoreRef) { 263 | // "redis" 264 | self.add_redis_fn(); 265 | // "def-redis-fn" 266 | self.embed_foreign_script(state_store); 267 | } 268 | 269 | fn spawn_handling_thread( 270 | &self, 271 | program: Program, 272 | return_channel: OneShotSender>>, 273 | ) { 274 | let interpreter = self.interpreter.clone(); 275 | std::thread::spawn(move || { 276 | let res = match program { 277 | Program::String(s) => interpreter.run_program::(&s), 278 | Program::Function(fn_name, fn_args) => interpreter.run_function(&fn_name, &fn_args), 279 | }; 280 | if let Err(e) = return_channel.send(res) { 281 | error!(LOGGER, "Failed to send program result! {:?}", e) 282 | } 283 | }); 284 | } 285 | } 286 | -------------------------------------------------------------------------------- /src/server.rs: -------------------------------------------------------------------------------- 1 | use crate::database::save_state; 2 | use crate::misc::misc_interact; 3 | use crate::ops::{op_interact, Ops}; 4 | /// Server launch file. Starts the services to make redis-oxide work. 
use crate::{asyncresp::RespParser, scripting::ScriptingBridge};
use crate::{logger::LOGGER, types::StateRef};
use crate::{
    ops::translate,
    startup::Config,
    types::{DumpFile, RedisValueRef, ReturnValue, StateStoreRef},
};
use futures::StreamExt;
use futures_util::sink::SinkExt;
use std::sync::atomic::Ordering;
use std::{net::SocketAddr, sync::Arc};
use tokio::net::{TcpListener, TcpStream};
use tokio_util::codec::Decoder;

/// Bump the commands-since-last-save counter and trigger a background save
/// once it reaches `state.commands_threshold`.
///
/// The counter is reset to zero with `compare_exchange`, so exactly one task
/// wins the save for a given threshold crossing; everyone else just
/// increments and moves on.
fn incr_and_save_if_required(state: StateStoreRef, dump_file: DumpFile) {
    state.commands_ran_since_save.fetch_add(1, Ordering::SeqCst);
    let should_save = state.commands_ran_since_save.compare_exchange(
        state.commands_threshold,
        0,
        Ordering::SeqCst,
        Ordering::SeqCst,
    );
    if should_save.is_ok() {
        let state_clone = state;
        let dump_file_clone = dump_file;
        tokio::spawn(async {
            save_state(state_clone, dump_file_clone);
        });
    }
}

/// Translate one parsed RESP value into an op, execute it against `state`,
/// and return the RESP reply.
///
/// Misc ops get the full state store (they can switch DBs, run scripts,
/// etc.); all other ops only need the current database handle.
/// Translation failures are mapped straight into a RESP error reply.
pub async fn process_command(
    state: &mut StateRef,
    state_store: StateStoreRef,
    dump_file: DumpFile,
    scripting_bridge: Arc<ScriptingBridge>,
    redis_value: RedisValueRef,
) -> RedisValueRef {
    match translate(redis_value, state_store.clone()) {
        Ok(op) => {
            // NOTE(review): op.clone() exists only so `op` can still be
            // matched below; a borrow in the debug format would avoid it.
            debug!(LOGGER, "running op {:?}", op.clone());
            // Step 1: Execute the operation (from translate above)
            let res: ReturnValue = match op {
                Ops::Misc(op) => {
                    misc_interact(op, state, state_store.clone(), scripting_bridge.clone()).await
                }
                _ => op_interact(op, state.clone()).await,
            };
            // Step 2: Update commands_ran_since_save counter, and save if necessary
            if !state_store.memory_only {
                incr_and_save_if_required(state_store.clone(), dump_file.clone());
            }
            // Step 3: Finally Return
            res.into()
        }
        Err(e) => RedisValueRef::from(e),
    }
}

/// Spawn a RESP handler for the given socket.
65 | /// 66 | /// This will synchronously process requests / responses for this 67 | /// connection only. Other connections will be spread across the 68 | /// thread pool. 69 | async fn process( 70 | socket: TcpStream, 71 | state_store: StateStoreRef, 72 | dump_file: DumpFile, 73 | scripting_bridge: Arc, 74 | ) { 75 | tokio::spawn(async move { 76 | let mut state = state_store.get_default(); 77 | let mut transport = RespParser::default().framed(socket); 78 | while let Some(redis_value) = transport.next().await { 79 | if let Err(e) = redis_value { 80 | error!(LOGGER, "Error recieving redis value {:?}", e); 81 | continue; 82 | } 83 | let res = process_command( 84 | &mut state, 85 | state_store.clone(), 86 | dump_file.clone(), 87 | scripting_bridge.clone(), 88 | redis_value.unwrap(), 89 | ) 90 | .await; 91 | // let res = match translate(redis_value.unwrap()) { 92 | // Ok(op) => { 93 | // debug!(LOGGER, "running op {:?}", op.clone()); 94 | // // Step 1: Execute the operation the operation (from translate above) 95 | // let res: ReturnValue = match op { 96 | // Ops::Misc(op) => { 97 | // misc_interact( 98 | // op, 99 | // &mut state, 100 | // state_store.clone(), 101 | // scripting_bridge.clone(), 102 | // ) 103 | // .await 104 | // } 105 | // _ => op_interact(op, state.clone()).await, 106 | // }; 107 | // // Step 2: Update commands_ran_since_save counter, and save if necessary 108 | // if !state_store.memory_only { 109 | // incr_and_save_if_required(state_store.clone(), dump_file.clone()); 110 | // } 111 | // // Step 3: Finally Return 112 | // res.into() 113 | // } 114 | // Err(e) => RedisValueRef::from(e), 115 | // }; 116 | if let Err(e) = transport.send(res).await { 117 | error!(LOGGER, "Failed to send data to client! {:?}", e) 118 | }; 119 | } 120 | }); 121 | } 122 | 123 | /// The listener for redis-oxide. Accepts connections and spawns handlers. 
124 | pub async fn socket_listener( 125 | state_store: StateStoreRef, 126 | dump_file: DumpFile, 127 | config: Config, 128 | scripting_bridge: Arc, 129 | ) { 130 | // First, get the address determined and parsed. 131 | let addr_str = format!("{}:{}", "127.0.0.1", config.port); 132 | let addr = match addr_str.parse::() { 133 | Ok(s) => s, 134 | Err(e) => { 135 | error!( 136 | LOGGER, 137 | "Could not start server! Could not parse {} as listening address, given error: {}", 138 | addr_str, 139 | e 140 | ); 141 | return; 142 | } 143 | }; 144 | 145 | // Second, bind/listen on that address 146 | let listener = match TcpListener::bind(&addr).await { 147 | Ok(s) => s, 148 | Err(e) => { 149 | error!( 150 | LOGGER, 151 | "Could not start server! Could not bind to {}, given error: {}", addr_str, e 152 | ); 153 | if config.port <= 1024 { 154 | info!(LOGGER, "Hint: You're attempting to bind to a privileged port. Try using -p 6379 or -p 8888"); 155 | } 156 | return; 157 | } 158 | }; 159 | // Finally, loop over each TCP accept and spawn a handler. 160 | info!(LOGGER, "Listening on: {}", addr); 161 | loop { 162 | match listener.accept().await { 163 | Ok((socket, _)) => { 164 | debug!(LOGGER, "Accepted connection!"); 165 | process( 166 | socket, 167 | state_store.clone(), 168 | dump_file.clone(), 169 | scripting_bridge.clone(), 170 | ) 171 | .await; 172 | } 173 | Err(e) => error!(LOGGER, "Failed to establish connectin: {:?}", e), 174 | }; 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /src/sets.rs: -------------------------------------------------------------------------------- 1 | use crate::op_variants; 2 | use crate::ops::RVec; 3 | use crate::types::{Count, Key, ReturnValue, StateRef, Value}; 4 | use std::collections::HashSet; 5 | 6 | op_variants! 
{ 7 | SetOps, 8 | SAdd(Key, RVec), 9 | SCard(Key), 10 | SDiff(RVec), 11 | SDiffStore(Key, RVec), 12 | SInter(RVec), 13 | SInterStore(Key, RVec), 14 | SIsMember(Key, Value), 15 | SMembers(Key), 16 | SMove(Key, Key, Value), 17 | SPop(Key, Option), 18 | SRandMembers(Key, Option), 19 | SRem(Key, RVec), 20 | SUnion(RVec), 21 | SUnionStore(Key, RVec) 22 | } 23 | 24 | pub enum SetAction { 25 | Diff, 26 | Union, 27 | Inter, 28 | } 29 | 30 | make_reader!(sets, read_sets); 31 | make_writer!(sets, write_sets); 32 | 33 | fn many_set_op(state: &StateRef, keys: RVec, op: SetAction) -> Option> { 34 | let sets_that_exist: Vec<_> = keys 35 | .iter() 36 | .filter(|&k| state.sets.contains_key(k)) 37 | .collect(); 38 | if sets_that_exist.is_empty() { 39 | return None; 40 | } 41 | #[allow(clippy::mutable_key_type)] 42 | let mut head: HashSet = state 43 | .sets 44 | .get_mut(sets_that_exist[0]) 45 | .unwrap() 46 | .value_mut() 47 | .clone(); 48 | // TODO: Make this _way_ cleaner. 49 | for set_key in sets_that_exist.into_iter().skip(1) { 50 | head = match op { 51 | SetAction::Diff => head 52 | .difference(state.sets.get(set_key).unwrap().value()) 53 | .cloned() 54 | .collect(), 55 | SetAction::Union => head 56 | .union(state.sets.get(set_key).unwrap().value()) 57 | .cloned() 58 | .collect(), 59 | SetAction::Inter => head 60 | .intersection(state.sets.get(set_key).unwrap().value()) 61 | .cloned() 62 | .collect(), 63 | } 64 | } 65 | Some(head) 66 | } 67 | 68 | pub async fn set_interact(set_op: SetOps, state: StateRef) -> ReturnValue { 69 | match set_op { 70 | SetOps::SAdd(set_key, vals) => { 71 | let mut set = state.sets.entry(set_key).or_default(); 72 | vals.into_iter() 73 | .fold(0, |acc, val| acc + set.insert(val) as Count) 74 | .into() 75 | } 76 | SetOps::SMembers(set_key) => read_sets!(state, &set_key) 77 | .map(|set| set.iter().cloned().collect()) 78 | .unwrap_or_else(RVec::new) 79 | .into(), 80 | SetOps::SCard(set_key) => read_sets!(state, &set_key) 81 | .map(|set| set.len() as 
Count)
            .unwrap_or(0)
            .into(),
        // BUG FIX: SRem previously folded with `set.insert(val)`, which
        // *added* members instead of removing them (and always reported the
        // number of newly-added elements). Use `remove` and count how many
        // members were actually deleted.
        SetOps::SRem(set_key, vals) => write_sets!(state, &set_key)
            .map(|mut set| {
                vals.into_iter()
                    .fold(0, |acc, val| acc + set.remove(&val) as Count)
            })
            .unwrap_or(0)
            .into(),
        SetOps::SDiff(keys) => many_set_op(&state, keys, SetAction::Diff)
            .map(|set| set.into_iter().collect())
            .unwrap_or_else(RVec::new)
            .into(),
        SetOps::SUnion(keys) => many_set_op(&state, keys, SetAction::Union)
            .map(|set| set.into_iter().collect())
            .unwrap_or_else(RVec::new)
            .into(),
        SetOps::SInter(keys) => many_set_op(&state, keys, SetAction::Inter)
            .map(|set| set.into_iter().collect())
            .unwrap_or_else(RVec::new)
            .into(),
        // The three *Store variants compute the set operation and overwrite
        // `to_store` with the result, returning its cardinality.
        SetOps::SDiffStore(to_store, keys) => match many_set_op(&state, keys, SetAction::Diff) {
            Some(hash_set) => {
                let hash_set_size = hash_set.len();
                write_sets!(state).insert(to_store, hash_set);
                ReturnValue::IntRes(hash_set_size as Count)
            }
            None => ReturnValue::IntRes(0),
        },
        SetOps::SUnionStore(to_store, keys) => match many_set_op(&state, keys, SetAction::Union) {
            Some(hash_set) => {
                let hash_set_size = hash_set.len();
                write_sets!(state).insert(to_store, hash_set);
                ReturnValue::IntRes(hash_set_size as Count)
            }
            None => ReturnValue::IntRes(0),
        },
        SetOps::SInterStore(to_store, keys) => match many_set_op(&state, keys, SetAction::Inter) {
            Some(hash_set) => {
                let hash_set_size = hash_set.len();
                write_sets!(state).insert(to_store, hash_set);
                ReturnValue::IntRes(hash_set_size as Count)
            }
            None => ReturnValue::IntRes(0),
        },
        // There's some surprising complexity behind this command
        SetOps::SPop(key, count) => {
            let mut set = match state.sets.get_mut(&key) {
                Some(s) => s,
                None => return ReturnValue::Nil,
            };
            if set.is_empty() && count.is_some() {
                return
ReturnValue::MultiStringRes(vec![]);
            } else if set.is_empty() {
                return ReturnValue::Nil;
            }
            let count = count.unwrap_or(1);
            if count < 0 {
                return ReturnValue::Error(b"Count cannot be less than 0!");
            }
            // NOTE(review): HashSet iteration order is arbitrary, so the
            // members popped here are effectively unspecified — consistent
            // with SPOP's "random members" contract.
            let eles: Vec<Value> = set.iter().take(count as usize).cloned().collect();
            for ele in eles.iter() {
                set.remove(ele);
            }
            ReturnValue::MultiStringRes(eles)
        }
        SetOps::SIsMember(key, member) => match read_sets!(state, &key) {
            Some(set) => match set.get(&member) {
                Some(_) => ReturnValue::IntRes(1),
                None => ReturnValue::IntRes(0),
            },
            None => ReturnValue::IntRes(0),
        },
        SetOps::SMove(src, dest, member) => {
            let sets = read_sets!(state);
            if !sets.contains_key(&src) || !sets.contains_key(&dest) {
                return ReturnValue::IntRes(0);
            }

            // TODO: Why are we allowed to unwrap here? It may not be alive at this time.
            // NOTE(review): this holds the `src` get_mut guard while calling
            // sets.get_mut(&dest) below; if the sets map is a sharded lock
            // (e.g. dashmap) and src/dest hash to the same shard this can
            // deadlock — confirm the map's locking semantics.
            let mut src_set = state.sets.get_mut(&src).unwrap();
            match src_set.take(&member) {
                Some(res) => {
                    sets.get_mut(&dest).unwrap().insert(res);
                    ReturnValue::IntRes(1)
                }
                None => ReturnValue::IntRes(0),
            }
        }
        SetOps::SRandMembers(key, count) => match read_sets!(state, &key) {
            Some(set) => {
                let count = count.unwrap_or(1);
                // Negative count: cycle() allows repeated members, mirroring
                // the allow-duplicates behavior of a negative COUNT.
                if count < 0 {
                    return ReturnValue::MultiStringRes(
                        set.iter().cycle().take(-count as usize).cloned().collect(),
                    );
                };
                ReturnValue::MultiStringRes(set.iter().take(count as usize).cloned().collect())
            }
            None => ReturnValue::Nil,
        },
    }
}
--------------------------------------------------------------------------------
/src/sorted_sets.rs:
--------------------------------------------------------------------------------
use crate::ops::RVec;
use crate::types::{Count, Index, Key, ReturnValue, Score, StateRef};
use crate::{make_reader, make_writer, op_variants};

op_variants! { 6 | ZSetOps, 7 | ZAdd(Key, RVec<(Score, Key)>), 8 | ZRem(Key, RVec), 9 | ZRange(Key, Score, Score), 10 | ZCard(Key), 11 | ZScore(Key, Key), 12 | ZPopMax(Key, Count), 13 | ZPopMin(Key, Count), 14 | ZRank(Key, Key) 15 | } 16 | 17 | make_reader!(zsets, read_zsets); 18 | make_writer!(zsets, write_zsets); 19 | 20 | fn deal_with_negative_indices(coll_size: Count, bounds: (Index, Index)) -> (Index, Index) { 21 | let (start, end) = bounds; 22 | let start = if start < 0 { start + coll_size } else { start }; 23 | let end = if end < 0 { end + coll_size } else { end }; 24 | (start, end) 25 | } 26 | 27 | pub async fn zset_interact(zset_op: ZSetOps, state: StateRef) -> ReturnValue { 28 | match zset_op { 29 | ZSetOps::ZAdd(zset_key, member_scores) => { 30 | let mut zset = state.zsets.entry(zset_key).or_default(); 31 | let num_added = zset.add(member_scores); 32 | ReturnValue::IntRes(num_added) 33 | } 34 | ZSetOps::ZRem(zset_key, keys) => write_zsets!(state, &zset_key) 35 | .map(|mut zset| zset.remove(&keys)) 36 | .unwrap_or(0) 37 | .into(), 38 | ZSetOps::ZRange(zset_key, start, stop) => read_zsets!(state, &zset_key) 39 | .map(|zset| { 40 | let (start, stop) = deal_with_negative_indices(zset.card(), (start, stop)); 41 | zset.range((start, stop)) 42 | .into_iter() 43 | .map(|item| item.member) 44 | .collect::>() 45 | }) 46 | .unwrap_or_default() 47 | .into(), 48 | ZSetOps::ZCard(zset_key) => read_zsets!(state, &zset_key) 49 | .map(|zset| zset.card()) 50 | .unwrap_or(0) 51 | .into(), 52 | ZSetOps::ZScore(zset_key, member_key) => read_zsets!(state, &zset_key) 53 | .and_then(|zset| zset.score(member_key)) 54 | .map(ReturnValue::IntRes) 55 | .unwrap_or(ReturnValue::Nil), 56 | ZSetOps::ZPopMax(zset_key, count) => write_zsets!(state, &zset_key) 57 | .map(|mut zset| { 58 | zset.pop_max(count) 59 | .into_iter() 60 | .fold(Vec::new(), |mut acc, zset_mem| { 61 | acc.push(ReturnValue::IntRes(zset_mem.score)); 62 | acc.push(ReturnValue::StringRes(zset_mem.member.into())); 63 | 
acc 64 | }) 65 | }) 66 | .map(ReturnValue::Array) 67 | .unwrap_or_else(|| ReturnValue::Array(vec![])), 68 | ZSetOps::ZPopMin(zset_key, count) => write_zsets!(state, &zset_key) 69 | .map(|mut zset| { 70 | zset.pop_min(count) 71 | .into_iter() 72 | .fold(Vec::new(), |mut acc, zset_mem| { 73 | acc.push(ReturnValue::IntRes(zset_mem.score)); 74 | acc.push(ReturnValue::StringRes(zset_mem.member.into())); 75 | acc 76 | }) 77 | }) 78 | .map(ReturnValue::Array) 79 | .unwrap_or_else(|| ReturnValue::Array(vec![])), 80 | ZSetOps::ZRank(zset_key, mem_key) => read_zsets!(state, &zset_key) 81 | .and_then(|zset| zset.rank(mem_key)) 82 | .map(ReturnValue::IntRes) 83 | .unwrap_or(ReturnValue::Nil), 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/stack.rs: -------------------------------------------------------------------------------- 1 | use crate::make_reader; 2 | use crate::op_variants; 3 | use crate::types::{Key, ReturnValue, StateRef, Value}; 4 | 5 | op_variants! 
{ 6 | StackOps, 7 | STPush(Key, Value), 8 | STPop(Key), 9 | STPeek(Key), 10 | STSize(Key) 11 | } 12 | 13 | make_reader!(stacks, read_stacks); 14 | 15 | pub async fn stack_interact(stack_op: StackOps, state: StateRef) -> ReturnValue { 16 | match stack_op { 17 | StackOps::STPush(key, value) => state.stacks.entry(key).or_default().push(value).into(), 18 | StackOps::STPop(key) => state 19 | .stacks 20 | .get_mut(&key) 21 | .and_then(|mut st| st.pop()) 22 | .map(ReturnValue::StringRes) 23 | .unwrap_or(ReturnValue::Nil), 24 | StackOps::STPeek(key) => read_stacks!(state, &key) 25 | .and_then(|st| st.peek()) 26 | .map(ReturnValue::StringRes) 27 | .unwrap_or(ReturnValue::Nil), 28 | StackOps::STSize(key) => read_stacks!(state, &key) 29 | .map(|st| st.size()) 30 | .map(ReturnValue::IntRes) 31 | .unwrap_or(ReturnValue::Nil), 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/startup.rs: -------------------------------------------------------------------------------- 1 | use structopt::StructOpt; 2 | 3 | use crate::logger::LOGGER; 4 | use std::path::PathBuf; 5 | 6 | #[derive(Debug, StructOpt)] 7 | #[structopt( 8 | name = "redis-oxide", 9 | about = "A multi-threaded implementation of redis written in rust 🦀" 10 | )] 11 | pub struct Config { 12 | /// Database Dump File Directory Location 13 | #[structopt(short = "d", long = "dump-file", parse(from_os_str))] 14 | pub data_dir: Option, 15 | /// Don't show the starting graphic 16 | #[structopt(short = "g", long = "no-graphic")] 17 | pub dont_show_graphic: bool, 18 | #[structopt(short = "s", long = "ops-until-save", default_value = "10000")] 19 | pub ops_until_save: u64, 20 | #[structopt(short = "p", long = "port", default_value = "6379")] 21 | pub port: u64, 22 | /// Run in memory only mode. 
Don't save database state to disk 23 | #[structopt(short = "m", long = "memory-only")] 24 | pub memory_only: bool, 25 | #[structopt(short = "f", long = "scripts-dir")] 26 | pub scripts_dir: Option, 27 | } 28 | 29 | pub fn startup_message(config: &Config) { 30 | if !config.dont_show_graphic { 31 | info!( 32 | LOGGER, 33 | r#" 34 | ____/\\\\\\\\\_____ _______/\\\\\______ ____________________ 35 | __/\\\///////\\\___ _____/\\\///\\\____ ____________________ 36 | _\/\\\_____\/\\\___ ___/\\\/__\///\\\__ ____________________ 37 | _\/\\\\\\\\\\\/____ __/\\\______\//\\\_ ____/\\\\\\\\\______ 38 | _\/\\\//////\\\____ _\/\\\_______\/\\\_ __/\\\/__////\______ 39 | _\/\\\____\//\\\___ _\//\\\______/\\\__ _______///\/________ 40 | _\/\\\_____\//\\\__ __\///\\\__/\\\____ _____/\\\/__________ 41 | _\/\\\______\//\\\_ ____\///\\\\\/_____ ___/\\\/____________ 42 | _\///________\///__ ______\/////_______ __\/////////________ 43 | "# 44 | ); 45 | } 46 | info!(LOGGER, "Redis Oxide starting..."); 47 | } 48 | -------------------------------------------------------------------------------- /src/state.rs: -------------------------------------------------------------------------------- 1 | use crate::data_structures::receipt_map::{KeyTypes, Receipt}; 2 | use crate::types::{Index, ReturnValue, State, StateRef, StateStore}; 3 | use std::fmt; 4 | 5 | const DEFAULT_DB: Index = 0; 6 | 7 | impl fmt::Display for ReturnValue { 8 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 9 | match self { 10 | ReturnValue::Ok => write!(f, "OK"), 11 | ReturnValue::StringRes(s) => write!(f, "{:?}", s), 12 | ReturnValue::IntRes(i) => write!(f, "{:?}", i), 13 | ReturnValue::MultiStringRes(ss) => write!(f, "{:?}", ss), 14 | ReturnValue::Nil => write!(f, "(nil)"), 15 | ReturnValue::Error(e) => write!(f, "ERR {:?}", e), 16 | ReturnValue::Array(a) => write!(f, "{:?}", a), 17 | ReturnValue::Ident(r) => write!(f, "{:?}", r), 18 | } 19 | } 20 | } 21 | 22 | impl State { 23 | pub fn get_receipt(&self) -> 
Receipt {
        let mut rm = self.reciept_map.lock();
        rm.get_receipt()
    }

    /// True if the given receipt has already been timed out.
    pub fn receipt_timed_out(&self, receipt: Receipt) -> bool {
        let rm = self.reciept_map.lock();
        rm.receipt_timed_out(receipt)
    }

    /// Wake every blocked client waiting on the given list key.
    pub fn wake_list(&self, list_key: &[u8]) {
        let mut rm = self.reciept_map.lock();
        rm.wake_with_key(KeyTypes::list(list_key));
    }
}

impl StateStore {
    /// Fetch the state for `index`, creating an empty one on first use.
    pub fn get_or_create(&self, index: Index) -> StateRef {
        self.states.entry(index).or_default().clone()
    }

    /// Convenience accessor for the default database.
    pub fn get_default(&self) -> StateRef {
        self.get_or_create(DEFAULT_DB)
    }

    /// True if a scripting (foreign) function with this symbol is registered.
    pub fn contains_foreign_function(&self, function_symbol: &str) -> bool {
        self.foreign_functions.read().contains(function_symbol)
    }

    /// Register a scripting (foreign) function symbol.
    pub fn add_foreign_function(&self, function_symbol: &str) {
        self.foreign_functions
            .write()
            .insert(function_symbol.into());
    }
}

// --------------------------------------------------------------------------------
// src/timeouts.rs
// --------------------------------------------------------------------------------
use crate::blocking::{KeyBlocking, YieldingFn};
use crate::data_structures::receipt_map::Receipt;
use crate::types::{Key, ReturnValue, StateRef, UTimeout};
use std::future::Future;
use std::time::Duration;
use tokio::time;

/// Run the key-blocking future built from `f`, giving up after `seconds`.
pub async fn blocking_key_timeout(
    f: YieldingFn,
    state: StateRef,
    key: Key,
    seconds: UTimeout,
) -> ReturnValue {
    let receipt = state.get_receipt();
    let kb = KeyBlocking::new(f, state.clone(), key.clone(), receipt);
    timeout(kb, seconds, state, receipt).await
}

/// Await `fut`; if it does not complete within `secs` seconds, time out the
/// receipt (so the blocked client is cleaned up) and reply Nil.
async fn timeout<T: Future<Output = ReturnValue>>(
    fut: T,
    secs: UTimeout,
    state: StateRef,
    receipt: Receipt,
) -> ReturnValue {
    // NOTE(review): `secs` is signed; a negative value wraps to a huge u64
    // here (effectively "wait forever"). Assumed to be validated upstream —
    // confirm at the command-parsing layer.
    match time::timeout(Duration::from_secs(secs as u64), fut).await {
        Ok(ret) => ret,
        Err(_) => {
            let mut rm = state.reciept_map.lock();
            rm.timeout_receipt(receipt);
            ReturnValue::Nil
        }
    }
}

// --------------------------------------------------------------------------------
// src/types.rs
// --------------------------------------------------------------------------------
use bytes::Bytes;
use dashmap::DashMap;
use growable_bloom_filter::GrowableBloom;
/// Common Types in the project.
use std::collections::{HashMap, HashSet, VecDeque};
use std::convert::From;
use std::sync::atomic::AtomicU64;
use std::sync::Arc;

use parking_lot::{Mutex, RwLock};
use std::fs::File;

use crate::data_structures::receipt_map::RecieptMap;
use crate::data_structures::sorted_set::SortedSet;
use crate::data_structures::stack::Stack;

/// These types are used by state and ops to actually perform useful work.
pub type Value = Bytes;
/// Key is the standard type to index our structures
pub type Key = Bytes;
/// Count is used for commands that count.
pub type Count = i64;
/// Index is used to represent indices in structures.
pub type Index = i64;
/// Score is used in sorted sets
pub type Score = i64;
/// Timeout unit
pub type UTimeout = i64;
/// Bool type
pub type RedisBool = i64;

/// DumpFile type alias: the on-disk dump handle, shared behind a mutex.
pub type DumpFile = Arc<Mutex<File>>;

/// RedisValueRef is the canonical type for values flowing
/// through the system. Inputs are converted into RedisValues,
/// and outputs are converted into RedisValues.
38 | #[derive(PartialEq, Clone)] 39 | pub enum RedisValueRef { 40 | BulkString(Bytes), 41 | SimpleString(Bytes), 42 | Error(Bytes), 43 | ErrorMsg(Vec), 44 | Int(i64), 45 | Array(Vec), 46 | NullArray, 47 | NullBulkString, 48 | } 49 | 50 | impl std::fmt::Debug for RedisValueRef { 51 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 52 | match self { 53 | RedisValueRef::BulkString(s) => write!( 54 | f, 55 | "RedisValueRef::BulkString({:?})", 56 | String::from_utf8_lossy(s) 57 | ), 58 | RedisValueRef::SimpleString(s) => write!( 59 | f, 60 | "RedisValueRef::SimpleString({:?})", 61 | String::from_utf8_lossy(s) 62 | ), 63 | RedisValueRef::Error(s) => { 64 | write!(f, "RedisValueRef::Error({:?})", String::from_utf8_lossy(s)) 65 | } 66 | RedisValueRef::ErrorMsg(s) => write!(f, "RedisValueRef::ErrorMsg({:?})", s), 67 | 68 | RedisValueRef::Int(i) => write!(f, "RedisValueRef::Int({:?})", i), 69 | RedisValueRef::NullBulkString => write!(f, "RedisValueRef::NullBulkString"), 70 | RedisValueRef::NullArray => write!(f, "RedisValueRef::NullArray"), 71 | RedisValueRef::Array(arr) => { 72 | write!(f, "RedisValueRef::Array(")?; 73 | for item in arr { 74 | write!(f, "{:?}", item)?; 75 | write!(f, ",")?; 76 | } 77 | write!(f, ")")?; 78 | Ok(()) 79 | } 80 | } 81 | } 82 | } 83 | 84 | // // TODO: Get rid of this 85 | // impl<'a> From for RedisValueRef { 86 | // fn from(other: RedisValueRef) -> RedisValueRef { 87 | // match other { 88 | // RedisValueRef::String(v) => RedisValueRef::BulkString(v.to_vec()), 89 | // RedisValueRef::Error(e) => RedisValueRef::Error(e.to_vec()), 90 | // RedisValueRef::Int(i) => RedisValueRef::Int(i), 91 | // RedisValueRef::Array(a) => RedisValueRef::Array(a.into_iter().map(|i| i.into()).collect()), 92 | // RedisValueRef::NullBulkString => RedisValueRef::NullBulkString, 93 | // RedisValueRef::NullArray => RedisValueRef::NullArray, 94 | // } 95 | // } 96 | // } 97 | 98 | /// Special constants in the RESP protocol. 
99 | pub const NULL_BULK_STRING: &str = "$-1\r\n"; 100 | pub const NULL_ARRAY: &str = "*-1\r\n"; 101 | pub const EMPTY_ARRAY: &str = "*0\r\n"; 102 | 103 | use crate::ops::RVec; 104 | 105 | /// Convenience type for returns value. Maps directly to RedisValues. 106 | #[derive(Debug, PartialEq, Clone)] 107 | pub enum ReturnValue { 108 | Ok, 109 | StringRes(Value), 110 | Error(&'static [u8]), 111 | MultiStringRes(Vec), 112 | Array(Vec), 113 | IntRes(i64), 114 | Nil, 115 | Ident(RedisValueRef), 116 | } 117 | 118 | /// Convenience trait to convert Count to ReturnValue. 119 | impl From for ReturnValue { 120 | fn from(int: Count) -> ReturnValue { 121 | ReturnValue::IntRes(int) 122 | } 123 | } 124 | 125 | /// Convenience trait to convert ReturnValues to ReturnValue. 126 | impl From> for ReturnValue { 127 | fn from(vals: RVec) -> ReturnValue { 128 | ReturnValue::Array(vals.into_iter().map(ReturnValue::StringRes).collect()) 129 | } 130 | } 131 | 132 | /// Convenience trait to convert ReturnValues to ReturnValue. 133 | impl From> for ReturnValue { 134 | fn from(strings: Vec) -> ReturnValue { 135 | let strings_to_bytes: Vec = strings 136 | .into_iter() 137 | .map(|s| s.as_bytes().to_vec().into()) 138 | .collect(); 139 | ReturnValue::MultiStringRes(strings_to_bytes) 140 | } 141 | } 142 | 143 | /// Convenience method to determine an error. Used in testing. 144 | impl ReturnValue { 145 | pub fn is_error(&self) -> bool { 146 | if let ReturnValue::Error(_) = *self { 147 | return true; 148 | } 149 | false 150 | } 151 | } 152 | 153 | /// Canonical type for Key-Value storage. 154 | type KeyString = DashMap; 155 | /// Canonical type for Key-Set storage. 156 | type KeySet = DashMap>; 157 | /// Canonical type for Key-List storage. 158 | type KeyList = DashMap>; 159 | /// Canonical type for Key-Hash storage. 160 | type KeyHash = DashMap>; 161 | /// Canonical type for Key-Hash storage. 162 | type KeyZSet = DashMap; 163 | /// Canonical type for Key-Bloom storage. 
164 | type KeyBloom = DashMap; 165 | type KeyStack = DashMap>; 166 | type KeyHyperLogLog = DashMap>; 167 | 168 | /// Top level database struct. 169 | /// Holds all StateRef dbs, and will hand them out on request. 170 | #[derive(Default, Serialize, Deserialize)] 171 | pub struct StateStore { 172 | pub states: DashMap, 173 | #[serde(skip)] 174 | pub commands_ran_since_save: AtomicU64, 175 | #[serde(skip)] 176 | pub commands_threshold: u64, 177 | #[serde(skip)] 178 | pub memory_only: bool, 179 | #[serde(skip)] 180 | pub foreign_functions: RwLock>, 181 | } 182 | 183 | /// Reference type for `StateStore` 184 | pub type StateStoreRef = Arc; 185 | 186 | /// Reference type for `State` 187 | pub type StateRef = Arc; 188 | 189 | /// The state stored by redis-oxide. These fields are the ones 190 | /// used by the various datastructure files (keys.rs, etc) 191 | #[derive(Default, Serialize, Deserialize)] 192 | pub struct State { 193 | #[serde(default)] 194 | pub kv: KeyString, 195 | #[serde(default)] 196 | pub sets: KeySet, 197 | #[serde(default)] 198 | pub lists: KeyList, 199 | #[serde(default)] 200 | pub hashes: KeyHash, 201 | #[serde(default)] 202 | pub zsets: KeyZSet, 203 | #[serde(default)] 204 | pub blooms: KeyBloom, 205 | #[serde(default)] 206 | pub stacks: KeyStack, 207 | #[serde(default)] 208 | pub hyperloglogs: KeyHyperLogLog, 209 | #[serde(skip)] 210 | pub reciept_map: Mutex, 211 | } 212 | 213 | /// Mapping of a ReturnValue to a RedisValueRef. 
214 | impl From for RedisValueRef { 215 | fn from(state_res: ReturnValue) -> Self { 216 | match state_res { 217 | ReturnValue::Ok => RedisValueRef::SimpleString(Bytes::from_static(b"OK")), 218 | ReturnValue::Nil => RedisValueRef::NullBulkString, 219 | ReturnValue::StringRes(s) => RedisValueRef::BulkString(s), 220 | ReturnValue::MultiStringRes(a) => { 221 | RedisValueRef::Array(a.into_iter().map(RedisValueRef::BulkString).collect()) 222 | } 223 | ReturnValue::IntRes(i) => RedisValueRef::Int(i as i64), 224 | ReturnValue::Error(e) => RedisValueRef::Error(Bytes::from_static(e)), 225 | ReturnValue::Array(a) => { 226 | RedisValueRef::Array(a.into_iter().map(RedisValueRef::from).collect()) 227 | } 228 | ReturnValue::Ident(r) => r, 229 | } 230 | } 231 | } 232 | --------------------------------------------------------------------------------