├── .babelrc ├── .editorconfig ├── .eslintrc.json ├── .gitignore ├── .nojekyll ├── LICENSE ├── README.md ├── _navbar.md ├── _sidebar.md ├── assets ├── geut.png ├── replicate.png ├── saga.jpg └── theolaf.jpg ├── i18n └── es │ ├── README.md │ ├── _sidebar.md │ └── terms.md ├── index.html ├── lib └── for-each-chunk.js ├── package-lock.json ├── package.json ├── problems ├── 10 │ ├── README.es.md │ ├── README.md │ └── olaf │ │ ├── .env.example │ │ ├── assets │ │ └── favicon │ │ │ ├── android-icon-144x144.png │ │ │ ├── android-icon-192x192.png │ │ │ ├── android-icon-36x36.png │ │ │ ├── android-icon-48x48.png │ │ │ ├── android-icon-72x72.png │ │ │ ├── android-icon-96x96.png │ │ │ ├── apple-icon-114x114.png │ │ │ ├── apple-icon-120x120.png │ │ │ ├── apple-icon-144x144.png │ │ │ ├── apple-icon-152x152.png │ │ │ ├── apple-icon-180x180.png │ │ │ ├── apple-icon-57x57.png │ │ │ ├── apple-icon-60x60.png │ │ │ ├── apple-icon-72x72.png │ │ │ ├── apple-icon-76x76.png │ │ │ ├── apple-icon-precomposed.png │ │ │ ├── apple-icon.png │ │ │ ├── browserconfig.xml │ │ │ ├── favicon-16x16.png │ │ │ ├── favicon-32x32.png │ │ │ ├── favicon-96x96.png │ │ │ ├── favicon.ico │ │ │ ├── manifest.json │ │ │ ├── ms-icon-144x144.png │ │ │ ├── ms-icon-150x150.png │ │ │ ├── ms-icon-310x310.png │ │ │ └── ms-icon-70x70.png │ │ ├── index.html │ │ ├── manifest.webmanifest │ │ └── src │ │ ├── assets │ │ └── icon.png │ │ ├── components │ │ ├── header.js │ │ ├── icons │ │ │ ├── clipboard.js │ │ │ ├── key.js │ │ │ ├── moon.js │ │ │ ├── sun.js │ │ │ └── users.js │ │ ├── init-modal.js │ │ ├── input-msg.js │ │ ├── key-modal.js │ │ ├── message.js │ │ ├── user.js │ │ ├── users.js │ │ └── view-messages.js │ │ ├── config.js │ │ ├── index.css │ │ ├── index.js │ │ ├── lib │ │ ├── db-names.js │ │ ├── saga.js │ │ └── theme.js │ │ ├── stores │ │ ├── chat.js │ │ ├── chat.solution.js │ │ └── ui.js │ │ └── views │ │ ├── 404.js │ │ └── main.js ├── 11 │ ├── README.es.md │ └── README.md ├── 01 │ ├── README.es.md │ ├── README.md │ 
└── cool_cats │ │ ├── olaf.jpg │ │ └── saga.jpg ├── 02 │ ├── README.es.md │ ├── README.md │ ├── index.js │ ├── solution.js │ └── test.js ├── 03 │ ├── README.es.md │ ├── README.md │ ├── index.js │ ├── solution.js │ └── test.js ├── 04 │ ├── README.es.md │ ├── README.md │ ├── index.js │ ├── solution.js │ └── test.js ├── 05 │ ├── README.es.md │ ├── README.md │ ├── index.js │ ├── solution.js │ └── test.js ├── 06 │ ├── README.es.md │ ├── README.md │ ├── index.js │ ├── solution.js │ └── test.js ├── 07 │ ├── README.es.md │ ├── README.md │ ├── index.js │ ├── solution.js │ └── test.js ├── 08 │ ├── README.es.md │ ├── README.md │ ├── index.js │ ├── solution.js │ └── test.js └── 09 │ ├── README.es.md │ ├── README.md │ ├── chat.js │ ├── chat.solution.js │ ├── index.js │ ├── solution.js │ └── test.js ├── sw.js └── terms.md /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": [ 3 | "@babel/preset-env" 4 | ], 5 | "plugins": [ 6 | "@babel/plugin-proposal-class-properties" 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | trim_trailing_whitespace = true 7 | charset = utf-8 8 | indent_style = space 9 | indent_size = 2 10 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "parser": "babel-eslint", 3 | "extends": "standard", 4 | "env": { 5 | "browser": true, 6 | "node": true, 7 | "jest": true 8 | }, 9 | "plugins": ["babel"], 10 | "rules": { 11 | "strict": 0 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | 
logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | 8 | # Runtime data 9 | pids 10 | *.pid 11 | *.seed 12 | *.pid.lock 13 | 14 | # Directory for instrumented libs generated by jscoverage/JSCover 15 | lib-cov 16 | 17 | # Coverage directory used by tools like istanbul 18 | coverage 19 | 20 | # nyc test coverage 21 | .nyc_output 22 | 23 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 24 | .grunt 25 | 26 | # Bower dependency directory (https://bower.io/) 27 | bower_components 28 | 29 | # node-waf configuration 30 | .lock-wscript 31 | 32 | # Compiled binary addons (https://nodejs.org/api/addons.html) 33 | build/Release 34 | 35 | # Dependency directories 36 | node_modules/ 37 | jspm_packages/ 38 | 39 | # TypeScript v1 declaration files 40 | typings/ 41 | 42 | # Optional npm cache directory 43 | .npm 44 | 45 | # Optional eslint cache 46 | .eslintcache 47 | 48 | # Optional REPL history 49 | .node_repl_history 50 | 51 | # Output of 'npm pack' 52 | *.tgz 53 | 54 | # Yarn Integrity file 55 | .yarn-integrity 56 | 57 | # dotenv environment variables file 58 | .env 59 | 60 | # next.js build output 61 | .next 62 | 63 | # parcel 64 | .cache 65 | dist 66 | -------------------------------------------------------------------------------- /.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/.nojekyll -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 
7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 
43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". 
"Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 
117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 
146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 
216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. 
If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 
336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 
428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # dat-workshop 2 | > Welcome to `building web apps using Dat` workshop 3 | 4 | Sponsored by 5 | 6 |
7 | geut logo 8 |
9 |
10 | This workshop was originally crafted for NodeConf Ar 2018. 11 |
12 |
13 | 14 | ___ 15 | 16 | ## Goal 17 | 18 | Introduce the audience to Dat and its core modules. Learn about P2P architectures and its differences with the common client/server scenario of regular web apps. Also learn how to combine Dat and web app business logic creating this way, P2P web apps. 19 | 20 | ## About the app 21 | We will be working on a P2P/WebRTC chat app, the final result looks like: [Olaf](https://olafchat.netlify.com/) and it works like a P2P PWA, one user creates a room then shares it (dat link) with the rest and that's it! :cat2: 22 | [Try olaf chat](https://olafchat.netlify.com/) 23 | 24 | ## Getting started 25 | 26 | 1. `$ git clone https://github.com/geut/dat-workshop.git` 27 | 2. `npm install` 28 | 29 | (This way we are going to save installation time on the workshop and we will be ready to start immediately.) 30 | 31 | ## Running the workshop 32 | 33 | 1. `npm start`, will launch a docsify instance with the whole workshop on it. 34 | 2. There is no step 2 :stuck_out_tongue_closed_eyes: 35 | 36 | ### What is Dat? 37 | 38 | Dat is a community and an open source data-sharing protocol for applications of the future. 39 | 40 | ## Cool people behind Dat 41 | 42 | It's important to say thanks, so here is a big thanks to: 43 | 44 | - Karissa McKelvey 45 | - Mathias Buus 46 | - Tara Vancil 47 | - Paul Frazee 48 | - Jim Pick 49 | - Max Ogden 50 | - **and many others!** 51 | 52 | Special thanks to Karissa McKelvey, her work on [chatmesh](https://github.com/karissa/chatmesh-db) (and early work on cabal) was a huge inspiration for this workshop. 
53 | 54 | 55 | -------------------------------------------------------------------------------- /_navbar.md: -------------------------------------------------------------------------------- 1 | - Translations 2 | - [:uk: English](/) 3 | - [:es: Spanish](/es/) 4 | -------------------------------------------------------------------------------- /_sidebar.md: -------------------------------------------------------------------------------- 1 | * [Intro](/) 2 | * Problems 3 | * [Problem 1](/problems/01/) 4 | * [Problem 2](/problems/02/) 5 | * [Problem 3](/problems/03/) 6 | * [Problem 4](/problems/04/) 7 | * [Problem 5](/problems/05/) 8 | * [Problem 6](/problems/06/) 9 | * [Problem 7](/problems/07/) 10 | * [Problem 8](/problems/08/) 11 | * [Problem 9](/problems/09/) 12 | * [Problem 10](/problems/10/) 13 | * [Problem 11](/problems/11/) 14 | * [Terms](/terms) 15 | * Modules 16 | * [dat](/dat) 17 | * [hypercore](/hypercore) 18 | * [hyperdb](/hyperdb) 19 | * [random-access-memory](/random-access-memory) 20 | * [pump](/pump) 21 | * [flush-write-stream](/flush-write-stream) 22 | * [hyperid](/hyperid) 23 | * [codecs](/codecs) 24 | * [geut-discovery-swarm-webrtc](/geut-discovery-swarm-webrtc) 25 | -------------------------------------------------------------------------------- /assets/geut.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/assets/geut.png -------------------------------------------------------------------------------- /assets/replicate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/assets/replicate.png -------------------------------------------------------------------------------- /assets/saga.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/assets/saga.jpg -------------------------------------------------------------------------------- /assets/theolaf.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/assets/theolaf.jpg -------------------------------------------------------------------------------- /i18n/es/README.md: -------------------------------------------------------------------------------- 1 | # dat-workshop 2 | > Bienvenido al workshop: `Construyendo aplicaciones web P2P usando Dat` 3 | 4 | Sponsored by 5 | 6 |
7 | geut logo 8 |
9 |
10 | Este workshop fue originalmente creado para NodeConf Ar 2018. 11 |
12 |
13 | 14 | ___ 15 | 16 | ## Objetivo 17 | 18 | Familiarizar a la audiencia con el proyecto Dat y sus modulos principales. Aprender sobre arquitecturas P2P y las diferencias que hay con escenarios mas comunes como el típico cliente/servidor de las aplicaciones web. Ademas, veremos como combinar los conceptos detras de Dat con la lógica de negocios de nuestro proyecto, creando de esta manera, aplicaciones web P2P. 19 | 20 | ## Sobre la app 21 | Vamos a estar trabajando en un chat P2P/WebRTC, el resultado final seria como: [Olaf](https://olafchat.netlify.com/) 22 | y podes utilizarla como PWA, un usuario crea una sala y comparte el dat link para que se conecten y eso es todo! :cat2: 23 | [Prueba olaf chat](https://olafchat.netlify.com/) 24 | 25 | ## Comenzando 26 | 27 | 1. `$ git clone https://github.com/geut/dat-workshop.git` 28 | 2. `npm install` 29 | (De esta forma vamos a ahorrar tiempo de instalación al comienzo del workshop.) 30 | 31 | ## Corriendo el workshop 32 | 33 | Es muy simple, 34 | 35 | 1. `npm start`, para levantar una instancia local de docsify. 36 | 2. No hay paso 2 :stuck_out_tongue_closed_eyes: 37 | 38 | ### Qué es Dat? 39 | 40 | Dat es una comunidad y un protocolo open source para compartir archivos pensado para las aplicaciones del futuro. 41 | 42 | ## Personas importantes de la comunidad 43 | 44 | Es importante decir _gracias_, por eso aquí tenemos un **gracias especial** para: 45 | 46 | - Karissa McKelvey 47 | - Mathias Buus 48 | - Tara Vancil 49 | - Paul Frazee 50 | - Jim Pick 51 | - Max Ogden 52 | - **Y muchos mas!** 53 | 54 | Finalmente, una mención especial para Karissa McKelvey, su trabajo en [chatmesh](https://github.com/karissa/chatmesh-db) (y todo el trabajo inicial de cabal), fueron una gran inspiración para este workshop. 
55 | 56 | -------------------------------------------------------------------------------- /i18n/es/_sidebar.md: -------------------------------------------------------------------------------- 1 | * [Introducción](/es/) 2 | * Problemas 3 | * [Problema 1](/es/problems/01/) 4 | * [Problema 2](/es/problems/02/) 5 | * [Problema 3](/es/problems/03/) 6 | * [Problema 4](/es/problems/04/) 7 | * [Problema 5](/es/problems/05/) 8 | * [Problema 6](/es/problems/06/) 9 | * [Problema 7](/es/problems/07/) 10 | * [Problema 8](/es/problems/08/) 11 | * [Problema 9](/es/problems/09/) 12 | * [Problema 10](/es/problems/10/) 13 | * [Problema 11](/es/problems/11/) 14 | * [Terminología](/es/terms) 15 | * Módulos 16 | * [dat](/dat) 17 | * [hypercore](/hypercore) 18 | * [hyperdb](/hyperdb) 19 | * [random-access-memory](/random-access-memory) 20 | * [pump](/pump) 21 | * [flush-write-stream](/flush-write-stream) 22 | * [hyperid](/hyperid) 23 | * [codecs](/codecs) 24 | * [geut-discovery-swarm-webrtc](/geut-discovery-swarm-webrtc) 25 | -------------------------------------------------------------------------------- /i18n/es/terms.md: -------------------------------------------------------------------------------- 1 | # Terminologia 2 | 3 | ## feed 4 | Nuestra instancia de hypercore es un feed de datos. Los feeds son estructuras de datos que 5 | pueden ser compartidas y sincronizadas por medio de una red. 6 | 7 | ## owner 8 | Peer que tiene permisos de escritura en el feed. Es decir, que tiene una secret key para escribir en el feed 9 | o que su public key fue autorizada en una instancia de Hyperdb. 10 | 11 | ## pipe 12 | Termino que recibe el conectar 2 o mas streams. 13 | ``` 14 | a | b | c 15 | ``` 16 | 17 | ## replicar stream 18 | Los feed pueden crear con `replicate()` un stream que puede ser conectado (piped) a un peer con un feed remoto. 19 | Es utilizado para sincronizar feeds. 
20 | 21 | ## swarm 22 | Podemos definir a un swarm como un grupo de peers conectados para un propósito, servicio o recurso mutuo. 23 | 24 | ## peer 25 | Llamamos _peer_ a cualquier nodo con IP:PORT conectado a una red. 26 | -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | dat-workshop 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 112 | 113 | 114 | -------------------------------------------------------------------------------- /lib/for-each-chunk.js: -------------------------------------------------------------------------------- 1 | const { Writable } = require('stream') 2 | 3 | class ForEachChunk extends Writable { 4 | constructor (opts, cb) { 5 | if (!cb) { 6 | cb = opts 7 | opts = {} 8 | } 9 | super(opts) 10 | 11 | this.cb = cb 12 | } 13 | 14 | _write (chunk, enc, next) { 15 | this.cb(chunk, enc, next) 16 | } 17 | } 18 | 19 | module.exports = (...args) => new ForEachChunk(...args) 20 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dat-workshop", 3 | "version": "1.0.0", 4 | "description": "A hands-on introduction to creating decentralized apps using Dat. By GEUT", 5 | "main": "index.js", 6 | "browserslist": [ 7 | "> 5%" 8 | ], 9 | "dependencies": { 10 | "@geut/discovery-swarm-webrtc": "^1.0.1", 11 | "anchorme": "^1.1.2", 12 | "balloon-css": "^0.5.0", 13 | "choo": "^6.13.0", 14 | "choo-devtools": "^2.5.1", 15 | "choo-service-worker": "^2.4.0", 16 | "color-contrast": "0.0.1", 17 | "copy-to-clipboard": "^3.0.8", 18 | "file-type": "^9.0.0", 19 | "flush-write-stream": "^1.0.3", 20 | "hypercore": "^6.19.1", 21 | "hyperdb": "^3.5.0", 22 | "hyperid": "^1.4.1", 23 | "pump": "^3.0.0", 24 | "qrcode": "^1.3.0", 25 | "random-access-idb": "^1.2.0", 26 | "random-access-memory": "^3.0.0", 27 | "random-color": "^1.0.1", 28 | "signalhub": "^4.9.0", 29 | "signalhubws": "^1.0.4", 30 | "tachyons": "^4.11.1", 31 | "tinydate": "^1.0.0" 32 | }, 33 | "devDependencies": { 34 | "@babel/core": "^7.0.0", 35 | "@babel/plugin-proposal-class-properties": "^7.0.0", 36 | "@babel/preset-env": "^7.0.0", 37 | "babel-core": "^7.0.0-bridge.0", 38 | "babel-eslint": "^10.0.1", 39 | "babel-jest": "^23.6.0", 40 | "docsify-cli": 
"^4.2.1", 41 | "eslint": "^5.6.1", 42 | "eslint-config-standard": "^12.0.0", 43 | "eslint-plugin-babel": "^5.2.1", 44 | "eslint-plugin-import": "^2.14.0", 45 | "eslint-plugin-node": "^7.0.1", 46 | "eslint-plugin-promise": "^4.0.1", 47 | "eslint-plugin-standard": "^4.0.0", 48 | "jest": "^23.6.0", 49 | "parcel-bundler": "^1.12.3", 50 | "parcel-plugin-sw-precache": "^1.0.1" 51 | }, 52 | "scripts": { 53 | "test": "jest", 54 | "start": "docsify serve --port 5000", 55 | "lint": "eslint problems", 56 | "olaf": "parcel problems/10/olaf/index.html -p 3000", 57 | "signal": "signalhubws listen -p 4000" 58 | }, 59 | "repository": { 60 | "type": "git", 61 | "url": "git+https://github.com/geut/dat-workshop.git" 62 | }, 63 | "author": "", 64 | "license": "GPL-3.0", 65 | "bugs": { 66 | "url": "https://github.com/geut/dat-workshop/issues" 67 | }, 68 | "homepage": "https://github.com/geut/dat-workshop#readme" 69 | } 70 | -------------------------------------------------------------------------------- /problems/01/README.es.md: -------------------------------------------------------------------------------- 1 | # 1 - Dat Cli 2 | > A distributed **dat**a community 3 | 4 | ## Instalación 5 | 6 | ``` 7 | $ npm install -g dat 8 | ``` 9 | 10 | ## Compartiendo archivos 11 | 12 | _Entrando en calor_. Hagamos un recorrido rápido por las [_features_](dat) que nos ofrece Dat mediante el uso de su CLI. 13 | Para esto vamos a compartir contenido, modificarlo y sincronizarlo. Tambien veremos como podemos obtener contenido especifico. 14 | 15 | ### dat share 16 | 17 | Dat cuenta con el comando [share](dat#sharing-data) para compartir directorios. 18 | 19 | ### dat sync 20 | 21 | La otra mitad de la historia sobre compartir archivos es descargarlos. Para esto veremos la _feature_ [clone](dat#downloading-data). Para saber *qué* data vamos a descargar necesitaremos una dirección dat o _dat link_. 22 | 23 | ## Ejercicios 24 | 25 | Formaremos grupos de 2 personas :smiley_cat: :smile_cat:. 
26 | 27 | Junto a este readme encontrarán un directorio `/cool_cats` con información de suma importancia para compartir :laughing:. 28 | 29 | ### 1- Compartir 30 | 31 | Uno de ustedes será el encargado de compartir nuestro directorio `/cool_cats`. 32 | 33 | ### 2 - Clonar 34 | 35 | La otra persona será quien clonará el dat, usando el dat link que le facilitó su compa. 36 | 37 | ### 3 - Crear nuevo contenido 38 | 39 | Uno de ustedes creará un archivo nuevo dentro del directorio compartido. Llamaremos a este archivo: `top_secret.md`. 40 | Aquí les dejamos una idea para el contenido: 41 | 42 | ``` 43 | # TOP SECRET 44 | 45 | > este archivo no se autodestruirá en 5 segundos. 46 | 47 | Y está siendo distribuido. :cool: 48 | ``` 49 | 50 | ### 4 - Obtener nuevo contenido 51 | 52 | Para hacerlo probemos con el comando [pull](dat#updating-downloaded-archives) 53 | 54 | ### 5 - Qué ocurre si nuestro compa modifica el archivo? 55 | 56 | :wink: sync 57 | 58 | ### Extra 59 | 60 | 1. Qué tal si compartimos el dat link con un nuevo compa y solo quiere acceder al archivo `top_secret.md`? 61 | 62 | ### Tip 63 | 64 | 1. Podemos combinar la opción _sparse_ con el archivo especial [`.datdownload`](dat#selecting-files) 65 | 66 | -------------------------------------------------------------------------------- /problems/01/README.md: -------------------------------------------------------------------------------- 1 | # 1 - Dat Cli 2 | > A distributed **dat**a community 3 | 4 | ## Playing with the CLI 5 | 6 | ``` 7 | $ npm install -g dat 8 | ``` 9 | 10 | ## Sharing files 11 | 12 | _Warming up_. Let's do a quick tour through dat CLI [_features_](dat). 13 | To do this, we are going to share some content, change it and then synchronize it. Also, we will see how we can get specific content. 14 | 15 | ### dat share 16 | 17 | Dat comes with a [share](dat#sharing-data) command which can be used to share entire directories. 18 | 19 | ### dat sync 20 | 21 | The other side of the story is download. 
To do this we are going to use the [clone](dat#downloading-data) feature. To know *what* data we are going to download we need an address, or _dat link_. 22 | 23 | ## Exercise 24 | 25 | We will make groups of 2 people (at least) :smiley_cat: :smile_cat:. 26 | 27 | Along with this readme you will find a directory called `/cool_cats` with some _high value_ content to share :laughing:. 28 | 29 | ### 1- Sharing 30 | 31 | One of you (let's say **A member**) in the group will be in charge of sharing our directory called `/cool_cats`. 32 | 33 | ### 2 - Cloning 34 | 35 | The other buddy (the **B member**) will be cloning the dat using the dat link. 36 | 37 | ### 3 - Creating new content 38 | 39 | Now A, will create a new file inside our shared directory called `top_secret.md`. 40 | 41 | Some sample content: 42 | 43 | ``` 44 | # TOP SECRET 45 | 46 | > This file will be destroyed in 5 seconds. 47 | 48 | Also it is being distributed. :cool: 49 | ``` 50 | 51 | ### 4 - Getting updates 52 | 53 | To get new updates we can run the [pull](dat#updating-downloaded-archives) command. 54 | 55 | ### 5 - What happens if the A member updates the file? 56 | 57 | :wink: sync 58 | 59 | ### Extra 60 | 61 | 1. What if we share our dat link with a new computer and they only want to get the `top_secret.md` file? 62 | 63 | ### Tip 64 | 65 | 1. We can combine the _sparse_ option with the special file [`.datdownload`](dat#selecting-files). 
66 | 67 | -------------------------------------------------------------------------------- /problems/01/cool_cats/olaf.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/01/cool_cats/olaf.jpg -------------------------------------------------------------------------------- /problems/01/cool_cats/saga.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/01/cool_cats/saga.jpg -------------------------------------------------------------------------------- /problems/02/README.es.md: -------------------------------------------------------------------------------- 1 | # 2 - Hypercore: un log para gobernarlos a todos 2 | 3 | [Hypercore](hypercore) es un modulo fundamental en la arquitectura de Dat. 4 | 5 | Es un [append-only log](https://engineering.linkedin.com/distributed-systems/log-what-every-software-engineer-should-know-about-real-time-datas-unifying) 6 | que provee los mecanismos necesarios para trabajar y compartir grandes volúmenes de datos de forma segura y distribuida. 7 | 8 | Vamos a conocer algunas `funciones` especificas del modulo para poder avanzar en el desarrollo de nuestro backend peer-to-peer. 9 | 10 | ```javascript 11 | const hypercore = require('hypercore') 12 | const feed = hypercore('./my-first-dataset', {valueEncoding: 'utf-8'}) 13 | ``` 14 | 15 | ## Ejercicio 16 | 17 | 1. Crear una función que retorne un `feed` de hypercore utilizando un `storage` en memoria. 18 | 1. Configurar la instancia para que sus datos se codifiquen y decodifiquen en `json`. 19 | 1. Antes de retornar el `feed` agregar al log el objeto: `{ title: 'dat-is-freedom' }`. 20 | 21 | ## Tips 22 | 23 | 1. 
Hypercore requiere que se especifique como primer parámetro un `storage` que respete la interfaz de [random-access-storage](/random-access-storage). 24 | 1. Si en vez de pasarle un `storage` le definimos un path, utilizara internamente el modulo [random-access-file](/random-access-file). 25 | 1. Entre las opciones de Hypercore podemos definirle que tipo de [codec](/codecs) vamos a utilizar para serializar/deserializar nuestra data. 26 | 27 | 28 | ## **Test** 29 | 30 | ``` 31 | $ npm test ./02 32 | ``` 33 | 34 | [test.js](./test.js ':include') 35 | 36 | ## **Solución** 37 | 38 | [solution.js](./solution.js ':include') 39 | 40 | 41 | -------------------------------------------------------------------------------- /problems/02/README.md: -------------------------------------------------------------------------------- 1 | # 2 - Hypercore: one log to rule them all 2 | 3 | [Hypercore](hypercore) is a fundamental module in Dat's ecosystem. 4 | 5 | It works as an [append-only log](https://engineering.linkedin.com/distributed-systems/log-what-every-software-engineer-should-know-about-real-time-datas-unifying) providing all the mechanisms necessary to work with and share big volumes of data in a distributed and secure manner. 6 | 7 | First, let's take a look at what `hypercore` looks like. It's just a function we can call and will return a [feed](/terms). 8 | 9 | ```javascript 10 | const hypercore = require('hypercore') 11 | const feed = hypercore('./my-first-dataset', {valueEncoding: 'utf-8'}) 12 | ``` 13 | ## Exercise 14 | 15 | 1. Write a function that returns an hypercore `feed` using a memory `storage`. 16 | 1. Setup the instance for handling `json` data. 17 | 1. Before returning the `feed`, add the following object: `{ title: 'dat-is-freedom' }`. 18 | 19 | ## Tips 20 | 21 | 1. `hypercore` takes as a first parameter a function that defines the `storage`. This function must follow the [random-access-storage](/random-access-storage) interface. 22 | 1. 
If we give hypercore a _filepath_ instead of a `storage` function, `hypercore` will use [random-access-file](/random-access-file) as a default storage. 23 | 1. If you take a look into the `hypercore` options you will see that we can specify the type of [codec](/codecs) to use for parsing the data. 24 | 25 | 26 | ## **Test** 27 | 28 | ``` 29 | $ npm test ./02 30 | ``` 31 | 32 | [test.js](./test.js ':include') 33 | 34 | ## **Solution** 35 | 36 | [solution.js](./solution.js ':include') 37 | 38 | 39 | -------------------------------------------------------------------------------- /problems/02/index.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | // tip: you need a ram storage 3 | 4 | module.exports = () => { 5 | // .. 6 | } 7 | -------------------------------------------------------------------------------- /problems/02/solution.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | 4 | module.exports = () => { 5 | const feed = hypercore(ram, { valueEncoding: 'json' }) 6 | feed.append({ title: 'dat-is-freedom' }) 7 | return feed 8 | } 9 | -------------------------------------------------------------------------------- /problems/02/test.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | const createFeed = require('.') 4 | 5 | describe('problem 02', () => { 6 | const feed = createFeed() 7 | 8 | test('return feed', () => { 9 | expect(feed).toBeInstanceOf(hypercore) 10 | }) 11 | 12 | test('RAM storage', () => { 13 | expect(feed._storage.key).toBeInstanceOf(ram) 14 | }) 15 | 16 | test('append log', (done) => { 17 | feed.get(0, (err, data) => { 18 | if (err) throw err 19 | expect(data).toEqual({ title: 'dat-is-freedom' }) 20 | done() 21 | }) 
22 | }) 23 | }) 24 | -------------------------------------------------------------------------------- /problems/03/README.es.md: -------------------------------------------------------------------------------- 1 | # 3 - La clave es la clave 2 | 3 | En el problema anterior vimos cómo instanciar un `feed` de Hypercore y 4 | mencionamos la palabra `seguridad`. 5 | 6 | Tanto Hypercore como todo el ecosistema de Dat trabajan con el concepto de 7 | clave asimétrica para la firma y encriptación de datos. 8 | 9 | Esto nos permite asegurar la protección de nuestros datos al momento de 10 | compartirlos con otros [peers](/es/terms). 11 | 12 | Existen 3 keys para tener en cuenta: 13 | 14 | ### Public Key :key: 15 | 16 | Es una clave única. 17 | 18 | Es la clave que reconocemos de los links de Dat `dat://`. 19 | 20 | La utilizamos para encriptar/desencriptar nuestros mensajes, de forma que solo quienes tengan la Public Key podrán leer nuestro feed. 21 | 22 | Es nuestra responsabilidad compartirla de forma segura con nuestros `peers` de confianza. 23 | 24 | ### Secret Key :closed_lock_with_key: 25 | 26 | Dat actualmente maneja una arquitectura de single-writer / multiple-readers. Solo el `owner` del `feed` es 27 | decir, el dueño de la Secret Key podrá escribir sobre el log. 28 | 29 | > Hablamos de single-writer, pero eso pronto va a cambiar en Dat a multiple-writer. Nosotros no te queremos 30 | hacer esperar, así que mas adelante veremos como utilizar el futuro de Dat, hoy :rocket:. 31 | 32 | ### Discovery Key :earth_americas: 33 | 34 | Mas adelante veremos mejor el concepto de `discovery` pero en este momento consideremos que los peers son 35 | computadoras aisladas que quieren encontrarse y compartir un recurso, en este caso nuestro `feed`. 36 | 37 | Podríamos reconocer a nuestro `feed` por su clave única `public key` y que nuestros peers intercambien información sobre quien 38 | tiene dicha clave. 
39 | 40 | Pero podría suceder que algún peer "maligno" se apodere de nuestra key y acceda a nuestros datos. 41 | 42 | Para evitar compartir la public key, Dat ideo una tercera key que permite a los peers utilizarla para descubrirse 43 | entre ellos sin poner en riesgo nuestros datos encriptados. 44 | 45 | ## Hypercore y la Public Key 46 | 47 | Como dijimos anteriormente, la public key sirve para encriptar/desencriptar los mensajes de nuestro feed. Eso quiere decir que 48 | si tenemos la key de un `feed remoto` (el feed de un peer de confianza) podríamos desencriptar los mensajes de su feed y 49 | leerlos. 50 | 51 | Para lograrlo, hypercore acepta un segundo parámetro en su construcción: 52 | ```javascript 53 | hypercore(,,) 54 | ``` 55 | 56 | > Mas adelante veremos como es el proceso de sincronizar nuestro feed local con un feed remoto. 57 | 58 | ## Ejercicio 59 | 60 | 1. Instanciar un feed usando la key remota. 61 | 2. Retornar una [Promise](https://developer.mozilla.org/es/docs/Web/JavaScript/Referencia/Objetos_globales/Promise#S%C3%BAper_simple_(%C2%A110_l%C3%ADneas!)) 62 | con el objeto: `{ publicKey, discoveryKey }`. 63 | * Cada key debe ser retornada en base hexadecimal (`hex`). 64 | 65 | ### Si te queda tiempo 66 | 67 | Que pasaría si intentas acceder al `secretKey`? :speak_no_evil: 68 | 69 | ## Tips 70 | 71 | 1. Una vez que nuestro feed esta `ready`, podremos acceder a nuestras keys. 72 | 1. Podemos saber si nuestro `feed` esta `ready` utilizando el evento `feed.on('ready', ...)`. 73 | Como podemos ver, tanto `hypercore` como muchos otros módulos 74 | heredan la interfaz de [EventEmitter](https://nodejs.org/api/events.html). 75 | 1. Las keys son Node [Buffers](https://nodejs.org/api/buffer.html#buffer_buf_tostring_encoding_start_end), 76 | podemos utilizar `toString` para convertirlos a lo que necesitemos. 
77 | 78 | 79 | ## **Test** 80 | 81 | ``` 82 | $ npm test ./03 83 | ``` 84 | 85 | [test.js](./test.js ':include') 86 | 87 | ## **Solución** 88 | 89 | [solution.js](./solution.js ':include') 90 | 91 | -------------------------------------------------------------------------------- /problems/03/README.md: -------------------------------------------------------------------------------- 1 | # 3 - The key is the key 2 | 3 | In the last problem, we saw how to create an hypercore feed (and we also mention the word security). 4 | 5 | `hypercore` and the whole Dat ecosystem work by using an asymmetric keys encription scheme to sign and encrypt all the data. 6 | 7 | This guarantees data integrity when we share data with another [peer](/terms). 8 | 9 | So, let's dive deeper. There are 3 keys to have in mind in the _Dat-verse_ :stars:: 10 | 11 | ### Public Key :key: 12 | 13 | This key is **unique**. 14 | 15 | It's the key from the dat links: `dat://`. 16 | 17 | It is used to encrypt/decrypt our messages (data) in a way that only those who have this Public Key (`PK`, for short) will be able to read our feed. 18 | 19 | It is our responsibility to share this key only with _well known_ `peers`. 20 | 21 | ### Secret Key :closed_lock_with_key: 22 | 23 | These days, Dat works following a single-writer / multiple-readers scheme -- i.e, only the feed `owner` (Secret Key's owner) can write the log. 24 | 25 | > We mention single-writer, but that is about to change. The people behind Dat are working on a built-in multi-writer solution. In this workshop we don't want to make you wait for it, so you will see _how_ today. :rocket:. 26 | 27 | ### Discovery Key :earth_americas: 28 | 29 | Later, we will see more about the whole `discovery` concept, but for now let's assume that peers are isolated computers that want to find each other to share some resource, in this case our `feed`. 
30 | 31 | We could recognize our `feed` by its _unique_ `PK` and allow only the peers who know this key to be able to exchange information. 32 | 33 | But it could happen that an "evil" peer takes control of our key, allowing it access to our data. :pensive: 34 | 35 | To avoid such a scenario (sharing the PK for discovery purposes), Dat makes use of another key, the `discovery key`. This key is used to discover peers without putting our data at risk. 36 | 37 | ## Hypercore and the Public Key 38 | 39 | As we said earlier, the `PK` is used to encrypt/decrypt data from our feed. That means that if we own the key from a `remote feed` (another peer's feed) we could decrypt and read all the messages on their feed. 40 | 41 | To achieve this, `hypercore` accepts a second parameter in its constructor: 42 | ```javascript 43 | hypercore(,,) 44 | ``` 45 | 46 | > Later, we will see how to sync our local feed with a remote one. 47 | 48 | ## Exercise 49 | 50 | 1. Create a new hypercore feed using the key passed as a parameter. 51 | 2. Return a [Promise](https://developer.mozilla.org/es/docs/Web/JavaScript/Referencia/Objetos_globales/Promise#S%C3%BAper_simple_(%C2%A110_l%C3%ADneas!)) with the object: `{ publicKey, discoveryKey }`. 52 | * Each key should be returned converted to `hex`. 53 | 54 | ### If you have more time... :alarm_clock: 55 | 56 | What would happen if you try to access the `secretKey`? :speak_no_evil: 57 | 58 | 59 | 60 | ## Tips 61 | 62 | 1. Once our feed is `ready` we can have access to the keys. 63 | 1. To know if the feed is `ready` we can listen for the `ready` event: `feed.on('ready', ...)`. `hypercore` and many other Dat modules inherit from the [EventEmitter](https://nodejs.org/api/events.html) interface. 64 | 1. Keys are node.js [Buffers](https://nodejs.org/api/buffer.html#buffer_buf_tostring_encoding_start_end). So, we can use `toString` to cast them to whatever we need. 
65 | 66 | 67 | ## **Test** 68 | 69 | ``` 70 | $ npm test ./03 71 | ``` 72 | 73 | [test.js](./test.js ':include') 74 | 75 | ## **Solution** 76 | 77 | [solution.js](./solution.js ':include') 78 | 79 | 80 | -------------------------------------------------------------------------------- /problems/03/index.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | 4 | module.exports = remoteKey => { 5 | return new Promise(resolve => { 6 | // your code goes here :) 7 | }) 8 | } 9 | -------------------------------------------------------------------------------- /problems/03/solution.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | 4 | module.exports = key => { 5 | const feed = hypercore(ram, key) 6 | 7 | return new Promise(resolve => { 8 | feed.on('ready', () => { 9 | resolve({ 10 | publicKey: feed.key.toString('hex'), 11 | discoveryKey: feed.discoveryKey.toString('hex') 12 | }) 13 | }) 14 | }) 15 | } 16 | -------------------------------------------------------------------------------- /problems/03/test.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | const getKeys = require('.') 4 | 5 | describe('problem 03', () => { 6 | const feed = hypercore(ram) 7 | 8 | let keys 9 | 10 | beforeAll(done => { 11 | feed.on('ready', () => { 12 | keys = { 13 | publicKey: feed.key.toString('hex'), 14 | discoveryKey: feed.discoveryKey.toString('hex') 15 | } 16 | done() 17 | }) 18 | }) 19 | 20 | test('return keys', async () => { 21 | expect.assertions(1) 22 | const expectedKeys = await getKeys(keys.publicKey) 23 | 24 | expect(expectedKeys).toEqual(keys) 25 | }) 26 | }) 27 | 
-------------------------------------------------------------------------------- /problems/04/README.es.md: -------------------------------------------------------------------------------- 1 | # 4 - Todo se trata de streams 2 | 3 | ## Introducción a streams 4 | 5 | Si has programado en Node.js es muy probable que hayas usado [streams](https://nodejs.org/api/stream.html) 6 | sin siquiera saberlo. 7 | 8 | De hecho, son tan importantes que forman parte del core de Node. Cada _request_ o _response_ de tu 9 | servidor, cada `console.log` u operación sobre el filesystem involucra algún tipo de stream. :boom: 10 | 11 | Un stream es una interfaz que representa una secuencia de datos `a---b---c` en el tiempo y en donde 12 | la información fluye desde una _fuente_ hacia un _destino_. 13 | 14 | Los streams nos permiten por ejemplo leer un archivo por partes (chunks) a través de un `ReadableStream`, 15 | aplicarle algún tipo de transformación por medio de un `TransformStream` y escribir cada chunk modificado 16 | en un destino particular con un `WritableStream`. 17 | 18 | Los streams pueden operar en un solo sentido como un ReadableStream que solo lee de una fuente 19 | y envía sus datos al siguiente stream: 20 | 21 | ``` 22 | ReadableStream ---> ( DuplexStream | TransformStream ) ---> WriteableStream 23 | ``` 24 | 25 | Pero también existen los `DuplexStream` que permiten operaciones tanto de lectura como escritura. 26 | Un ejemplo seria un [Socket](https://nodejs.org/api/net.html#net_new_net_socket_options) 27 | 28 | !> Lo importante es tener en cuenta que dichas interfaces existen para definir una única forma de operar 29 | sobre datos de forma eficiente y escalable. No importa si los datos se leen o escriben desde el disco o de una 30 | conexión de red, **los streams hablan un solo lenguaje** y eso nos permite combinarlos como necesitemos. 
31 | 32 | :link: No es parte del workshop avanzar demasiado en este tema pero si querés aprender mas te recomendamos el 33 | [stream-handbook](https://github.com/substack/stream-handbook). 34 | 35 | ## Streams en Hypercore 36 | 37 | Internamente `hypercore` utiliza streams para cumplir sus objetivos. 38 | 39 | ### Leyendo nuestros logs 40 | 41 | Podemos leer los datos de nuestro `feed` utilizando `feed.createReadStream` y mostrar los datos en pantalla: 42 | 43 | ```javascript 44 | feed.createReadStream().pipe(process.stdout) 45 | ``` 46 | 47 | Como veras, `console.log` es un `WritableStream` en donde el destino es escribir en pantalla. 48 | 49 | Utilizamos el método `pipe` para conectar y definir el flujo de datos de nuestros streams. 50 | 51 | ```javascript 52 | // a --> b --> c 53 | // unix like: a | b | c 54 | a.pipe(b).pipe(c) 55 | ``` 56 | 57 | ### Replicar 58 | 59 | Supongamos que tenemos un feed local que utiliza la key pública de un feed remoto. En algún momento vamos 60 | a querer leer sus datos, tenemos su key por lo que podemos desencriptarlos. 61 | 62 | Pero antes de desencriptar los datos deberíamos poder obtenerlos, traerlos a nuestro feed local y unificarlos 63 | con los datos que ya tenemos. A este proceso lo llamamos `replicacion`. 64 | 65 | ![replicant scene from blade runner](https://media.giphy.com/media/xtpNfxNz7rTSo/giphy.gif) 66 | 67 | Queremos _replicar_ los datos del feed remoto en nuestro feed local. 68 | 69 | Para poder lograrlo, volvemos a utilizar streams. Hypercore API ofrece un `feed.replicate()` que retorna un 70 | _replication stream_ el cual lee la data de un feed remoto, la incorpora a su feed local y finalmente pasa el resultado 71 | al siguiente stream, es decir se comporta como un `DuplexStream`. 
72 | 73 | ![replicate](/assets/replicate.png) 74 | 75 | ### Sincronizar 76 | 77 | Con `replicate()` podemos replicar los datos de un _feed remoto_ en nuestro _feed local_ pero tambien debemos 78 | pensar que el feed remoto puede estar _desactualizado_. 79 | 80 | !> Todos los peers deberían tener, eventualmente, la última versión de los datos. 81 | 82 | Si tomamos en cuenta que la conexión entre dos peers es **bidireccional** podríamos hacer lo siguiente: 83 | ```javascript 84 | // (1) (2) 85 | const r1 = remoteFeed.replicate() 86 | const r2 = localFeed.replicate() 87 | r1.pipe(r2).pipe(r1) 88 | ``` 89 | 90 | 1. Primero recibimos los datos de un feed remoto y los replicamos en nuestro feed local. 91 | 2. Una vez que tenemos nuestro feed actualizado, enviamos los datos nuevamente al feed remoto 92 | para que se actualice en caso de tener data inconsistente. 93 | 3. Al final, ambos feed tienen la misma versión de los datos. 94 | 95 | ## Ejercicio 96 | 97 | Vamos a simular leer mensajes que otro peer escribio. Para eso: 98 | 99 | 1. Vamos a sincronizar el feed local (con el del peer). 100 | 2. Una vez finalizada la sincronización, leeremos los datos del feed y cargaremos 101 | cada mensaje en un array. 102 | 3. Una vez finalizada la lectura del feed, retornar el listado de mensajes. 103 | 104 | 105 | ## Tips 106 | 107 | ### 1 - Pump 108 | 109 | Por implementación de Node, si tenemos streams conectados por `.pipe` y uno de ellos 110 | se destruye, el resto sigue funcionando. 111 | 112 | Nosotros queremos que si algún stream se destruye (intencionalmente o por error) que todos 113 | los streams conectados también lo hagan. Por eso vamos a utilizar el modulo [pump](/pump) 114 | para remplazar a pipe. 115 | 116 | Pump nos permite _pipear_ nuestros streams y asegurarnos que en caso de que uno se destruya, todos lo hagan. :cool: 117 | 118 | > Como feature extra, el último argumento de pump puede ser una función que se ejecuta 119 | cuando finalizan todos los streams. 
120 | 121 | ```javascript 122 | a.pipe(b).pipe(c) 123 | 124 | // to 125 | 126 | pump(a, b, c, err => { 127 | console.log('all streams have finished') 128 | }) 129 | ``` 130 | 131 | ### 2 - Lectura/Escritura de datos 132 | 133 | Un WritableStream nos permite iterar sobre los _chunks_ que fluyen en los streams y 134 | escribirlos en donde queramos: disco, network, screen o inclusive en nuestra memoria. 135 | 136 | Sabiendo esto, podemos definir un WritableStream que itere sobre los _chunks_ de forma similar a un `[].forEach` 137 | y guardarlos en la estructura (un `Map` por ejemplo) que necesitemos. 138 | 139 | Les recomendamos que investiguen `forEachChunk`, una función que armamos para ayudarlos a cumplir su objetivo. 140 | 141 | 142 | ## **Test** 143 | 144 | ``` 145 | $ npm test ./04 146 | ``` 147 | 148 | [test.js](./test.js ':include') 149 | 150 | ## **Solución** 151 | 152 | [solution.js](./solution.js ':include') 153 | 154 | 155 | -------------------------------------------------------------------------------- /problems/04/README.md: -------------------------------------------------------------------------------- 1 | # 4 - It's all about streams 2 | 3 | ## Quick introduction to streams 4 | 5 | Well, if you have written even a small node.js app, chances are, that you have used [streams](https://nodejs.org/api/stream.html) somewhere in the road, being aware of it or not. 6 | 7 | In fact, `streams` are part of the node.js core. Every _request_ or _response_ from a server, every `console.log` or filesystem operation are using streams in one way or another. :boom: 8 | 9 | We can picture a stream as an interface for a data _flow_ `a---b---c` which can change over _time_ and where data flows from a _source_ to a _destination_. 
10 | 11 | For example, using `streams` we can read a file by _chunks_ making use of a `ReadableStream`, apply some kind of _transformation_ with a `TransformStream` and finally, write every _chunk_ of data into a specific destination using a `WritableStream`. 12 | 13 | Some `streams` operate in a single way, like a `ReadableStream` which only _reads_ from a _source_ and pass the data to the next stream: 14 | 15 | ``` 16 | ReadableStream ---> ( DuplexStream | TransformStream ) ---> WriteableStream 17 | ``` 18 | 19 | Other types of `streams` can operate in bi-directional manner, ie, can perform _reads_ and _writes_. This type of `stream` is known as a `DuplexStream`. One example is the well known [Socket](https://nodejs.org/api/net.html#net_new_net_socket_options). 20 | 21 | !> It is important to keep in mind that this kind of _interface_ exists only to define a common, scalable, and efficient way to communicate data. `streams` help us to abstract the source and destination. It does not matter if we are reading or writing from/to a file or the net. `streams` **speak a unique language** and this allows us to **combine them** the way we need. 22 | 23 | :link: If you want to keep on learning about streams, we recommend you read the 24 | [stream-handbook](https://github.com/substack/stream-handbook). 25 | 26 | ## Streams in Hypercore 27 | 28 | Inside, `hypercore` uses streams to work. 29 | 30 | ### Reading the logs 31 | 32 | We can read our `feed` using `feed.createReadStream` and display that info on the console using `process.stdout`: 33 | 34 | ```javascript 35 | feed.createReadStream().pipe(process.stdout) 36 | ``` 37 | 38 | As you can see, we are using the `pipe` method to _connect_ and define our data flow in our streams. 39 | 40 | ```javascript 41 | // a --> b --> c 42 | // unix like: a | b | c 43 | a.pipe(b).pipe(c) 44 | ``` 45 | 46 | ### Replication 47 | 48 | Ok, let's suppose we have local feed and a remote public key. 
At some point we want to read data from this other feed; since we have its PK, we can decrypt them. 49 | 50 | But before decrypting them, we need to fetch and merge them into our local feed. This process is called `replication`. 51 | 52 | ![replicant scene from blade runner](https://media.giphy.com/media/xtpNfxNz7rTSo/giphy.gif) 53 | 54 | We want to _replicate_ remote feed data into our local feed. 55 | 56 | In order to do this, we are going to use streams. Hypercore API has a `feed.replicate()` method which returns a _replication stream_ that reads the remote feed, syncs it with the local feed and finally passes the result to the next stream. In other words, it behaves like a `DuplexStream`. 57 | 58 | ![replicate](/assets/replicate.png) 59 | 60 | ### Sync 61 | 62 | With `replicate()` we can combine the _remote feed_ with our _local feed_, but we need to be aware that the data in our _remote feed_ may be out of date. 63 | 64 | !> Eventually, all the peers should have the same (up to date) data. 65 | 66 | If we see the connection between two peers as a bi-directional connection, we can do the following: 67 | ```javascript 68 | // (1) (2) 69 | const r1 = remoteFeed.replicate() 70 | const r2 = localFeed.replicate() 71 | r1.pipe(r2).pipe(r1) 72 | ``` 73 | 74 | 1. First, data is received from the remote feed and replicated into our local feed. 75 | 2. Once our feed is updated, data is sent to the remote feed. This is done to ensure consistency. 76 | 3. Finally, both feeds have the same data. 77 | 78 | ## Exercise 79 | 80 | We are going to simulate reading messages from another peer. To do that we need to: 81 | 82 | 1. Sync the local feed with the remote one. 83 | 2. Once sync is done, read data from our local feed and push each message into an array. 84 | 3. When we finish reading our feed, we need to return the messages list. 85 | 86 | 87 | 88 | ## Tips 89 | 90 | ### 1 - Pump 91 | 92 | By design, if we have streams connected by `.pipe` and one of them fails, the rest keep working. 
93 | 94 | This can lead to multiple error conditions. We want to destroy all the streams if one fails. That's why we are going to use 95 | [pump](/pump) instead of `.pipe`. 96 | 97 | !> As an extra feature, with `pump` we can pass a function as the last argument. This function will be executed when all the streams finish. 98 | 99 | ```javascript 100 | a.pipe(b).pipe(c) 101 | 102 | // becomes 103 | 104 | pump(a, b, c, err => { 105 | console.log('all streams have finished') 106 | }) 107 | ``` 108 | 109 | ### 2 - Reading/Writing data 110 | 111 | A `WritableStream` iterates through all the _chunks_ of data flowing in our streams and we can write them wherever we want, eg: filesystem, network, memory, etc. 112 | 113 | We have made a special function: `forEachChunk`, which can be seen as a little helper to write data (and of course, it is a `WritableStream`) 114 | 115 | 116 | ## **Test** 117 | 118 | ``` 119 | $ npm test ./04 120 | ``` 121 | 122 | [test.js](./test.js ':include') 123 | 124 | ## **Solution** 125 | 126 | [solution.js](./solution.js ':include') 127 | 128 | 129 | -------------------------------------------------------------------------------- /problems/04/index.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | const pump = require('pump') 4 | const forEachChunk = require('../../lib/for-each-chunk') 5 | 6 | /** 7 | * Note: if you are not familiar with `pump` please check 8 | * the tips section! 9 | */ 10 | module.exports = (key, peer) => { 11 | const feed = hypercore(ram, key, { valueEncoding: 'utf8' }) 12 | 13 | return new Promise((resolve, reject) => { 14 | const onFinishSync = () => { 15 | // Cool, our feed is syncronized, now we can continue... 
16 | const messageList = [] 17 | 18 | // 2 - You need a ReadableStream to read the data from the feed 19 | // const reader = 20 | 21 | const ws = forEachChunk((chunk, enc, next) => { 22 | // 3 - Append each message into the list 23 | // Think about this writer as a normal [].forEach 24 | // but remember to call next() after processing your chunk 25 | }) 26 | 27 | // 4 - Pump your streams 28 | // pump(reader, /* a stream */, err => { 29 | // if (err) return reject(err) 30 | // resolve(/* data */) 31 | // }) 32 | } 33 | 34 | // 1 - Pipe your streams and replicate! 35 | // pump(peer, /* replication */, /* target */, onFinishSync) 36 | }) 37 | } 38 | -------------------------------------------------------------------------------- /problems/04/solution.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | const pump = require('pump') 4 | const forEachChunk = require('../../lib/for-each-chunk') 5 | 6 | module.exports = (key, peer) => { 7 | const feed = hypercore(ram, key, { valueEncoding: 'utf8' }) 8 | 9 | return new Promise((resolve, reject) => { 10 | // we need to sync our database with the remote one 11 | const onFinishSync = () => { 12 | // when is done we can retrieve the log 13 | const messages = [] 14 | 15 | const reader = feed.createReadStream() 16 | 17 | const ws = forEachChunk((data, enc, next) => { 18 | messages.push(data) 19 | next() 20 | }) 21 | 22 | pump(reader, ws, err => { 23 | if (err) return reject(err) 24 | resolve(messages) 25 | }) 26 | } 27 | 28 | pump(peer, feed.replicate(), peer, onFinishSync) 29 | }) 30 | } 31 | -------------------------------------------------------------------------------- /problems/04/test.js: -------------------------------------------------------------------------------- 1 | const hypercore = require('hypercore') 2 | const ram = require('random-access-memory') 3 | const getMessages = require('.') 4 | 5 
| describe('problem 04', () => { 6 | const feed = hypercore(ram) 7 | 8 | beforeAll(done => { 9 | feed.on('ready', () => { 10 | feed.append('dat') 11 | feed.append('is') 12 | feed.append('the') 13 | feed.append('future', err => { 14 | if (err) throw err 15 | done() 16 | }) 17 | }) 18 | }) 19 | 20 | test('read and replicate', async () => { 21 | expect.assertions(1) 22 | const messages = await getMessages(feed.key, feed.replicate()) 23 | expect(messages.join(' ')).toBe('dat is the future') 24 | }) 25 | }) 26 | -------------------------------------------------------------------------------- /problems/05/README.es.md: -------------------------------------------------------------------------------- 1 | # 5 - De un log distribuido a una db distribuida 2 | 3 | Felicitaciones :tada::fireworks:!! 4 | 5 | Si llegaste hasta aquí significa que ya sos una hypercore padawan. Es tiempo de ampliar tu 6 | conocimiento peer-to-peer y convertirte en una gran Jedi. 7 | 8 | Vimos que hypercore nos ofrece un log distribuido pero existen situaciones en donde eso 9 | no nos alcanza, por suerte, nada nos limita a crear estructuras mucho mas complejas 10 | arriba de hypercore. Una de ellas es nuestra :star: del workshop: [hyperdb](/hyperdb) 11 | 12 | ## Hyperdb 13 | 14 | Hyperdb nos ofrece una base de datos key-value distribuida sobre un conjunto 15 | de feeds de hypercore. 16 | 17 | Una de la features mas importantes es, que agrega la posibilidad de tener `multiple-writters` lo cual 18 | llevaría a Dat al siguiente nivel, en donde múltiples usuarios **previamente autorizados** podrían 19 | modificar el contenido de un recurso Dat. **Ya no existiría un solo owner**. 20 | 21 | ## Sobre qué vamos a construir 22 | 23 | En este workshop estamos aprendiendo cómo usar Dat para escrbir una aplicación web P2P. Nuestro objetivo será construir juntos un chat P2P. El codename del chat será: _Olaf_ :smiley_cat:. 24 | 25 | Para construir nuestro chat vamos a usar lo que venimos aprendiendo. 
En este modulo vamos a introducir una parte fundamental que iremos desarrollando. Les presentamos a _saga_ :heart_eyes_cat: 26 | 27 | ## Saga 28 | 29 | ![saga](/assets/saga.jpg) 30 | 31 | Hyperdb es lo que necesitamos para poder avanzar en nuestro chat peer-to-peer. 32 | 33 | Vamos a definir una API que permita a múltiples usuarios escribir sobre un hyperdb distribuido. 34 | 35 | En este caso enfoquemonos en los requerimientos que tiene un chat para 36 | considerarse como tal: 37 | 38 | 1. Podemos escribir mensajes en el chat. 39 | 1. Podemos leer los mensajes en _real-time_. 40 | 1. Podemos conectar y desconectar usuarios al canal (_room_). 41 | 42 | Entonces, comencemos el desarrollo de nuestra API, aka: **Saga** :heart_eyes_cat: 43 | 44 | ## Ejercicio 45 | 46 | 1. En la clase `Saga` agregar un constructor que reciba un `storage`, una `key` y un `username`. 47 | 2. Con el `storage` y la `key`, crear una instancia de hyperdb con un valueEncoding **json**. 48 | 3. Una instancia de Saga debe permitirnos acceder a: 49 | * la instancia de hyperdb, bajo la prop: `db` 50 | * el username, bajo la prop: `username` 51 | * un [Map](https://developer.mozilla.org/es/docs/Web/JavaScript/Referencia/Objetos_globales/Map) 52 | de usuarios (inicialmente vació): `users` 53 | * un `Map` de mensajes (inicialmente vació): `messages` 54 | * un timestamp con la fecha actual de creación: `timestamp` 55 | 56 | ## Tips 57 | 58 | 1. Vas a encontrar que hyperdb posee una API muy similar a hypercore. :wink: 59 | 2. Sobre `Map`, partes importantes: 60 | - un map puede contener objetos como claves. :cool: 61 | - constructor: `new Map([iterable])` 62 | - métodos: 63 | - `size()`, para obtener el tamaño. 64 | - `set(clave, valor)`, para agregar un _clave_ nueva con _valor_. 65 | - `has(clave)`, para saber si nuestro map contiene determinada clave. 66 | - `keys()`, para obtener todas las claves del map. 67 | - Hint: es :cool: usar `for...of` para iterar sobre los elementos del map. 
68 | 69 | 70 | ## **Test** 71 | 72 | ``` 73 | $ npm test ./05 74 | ``` 75 | 76 | [test.js](./test.js ':include') 77 | 78 | ## **Solución** 79 | 80 | [solution.js](./solution.js ':include') 81 | 82 | 83 | -------------------------------------------------------------------------------- /problems/05/README.md: -------------------------------------------------------------------------------- 1 | # 5 - From a distributed log to a distributed db 2 | 3 | Congratulations :tada::fireworks:!! 4 | 5 | If you are here it means you have become a hypercore padawan. It's time to increase your P2P knowledge and follow the road to be a great Jedi. 6 | 7 | We just saw that hypercore gives us a distributed log that we can work with. :cool: 8 | But there are situations when that is not enough. Luckily for us, there are more libs written on top of hypercore and one of those is here to help, meet [hyperdb](/hyperdb) :star: 9 | 10 | ## Hyperdb 11 | 12 | Hyperdb offers a key-value database distributed over a set of hypercores. :cool: 13 | 14 | One of the most important features is that it gives us the ability to have `multiple-writers`. This takes Dat to the next level, where multiple users, once authorized, are able to modify a Dat resource (i.e., **we have multiple owners**). 15 | 16 | ## A word about what we are building 17 | 18 | In this workshop we are learning how to use Dat to create a _P2P web app_. More specifically, our goal here is to build a P2P chat app together and the codename will be _Olaf_ :smiley_cat:. 19 | 20 | So, to build our chat app we are going to use all the things that we have learned. In this module, we are about to introduce a fundamental part, the core of our chat, and it has a codename too: _Saga_ :heart_eyes_cat:. 21 | 22 | ## Saga 23 | 24 | ![saga](/assets/saga.jpg) 25 | 26 | We are going to start using hyperdb to build _saga_. 27 | 28 | Ok, so first let's define a simple API that permits multiple users to write messages over a distributed hyperdb. 
29 | 30 | Let's focus on a list of possible requirements our chat can have: 31 | 32 | 1. We should be able to write messages. 33 | 1. We should be able to read messages in _real-time_. 34 | 1. We should be able to detect when a user connects and disconnects from a channel (_room_). 35 | 36 | Cool, let's start writing this new API, **saga**. :heart_eyes_cat: 37 | 38 | ## Exercise 39 | 40 | 1. Add a constructor to the class `Saga` that receives a `storage`, a `key` and a `username`. 41 | 2. With the `storage` and the `key`, create an instance of hyperdb using **json** as the valueEncoding. 42 | 3. A `Saga` instance should allow us to access: 43 | * the instance of hyperdb under the prop: `db` 44 | * the username, under the prop: `username` 45 | * a users [Map](https://developer.mozilla.org/es/docs/Web/JavaScript/Referencia/Objetos_globales/Map), initially empty, under the prop: `users`. 46 | * a messages `Map` (initially empty): `messages`. 47 | * a timestamp with the current datetime: `timestamp` 48 | 49 | ## Tips 50 | 51 | 1. `hyperdb` offers an API similar to hypercore. :wink: 52 | 2. About `Map` usage: 53 | - a map can have objects for keys :cool: 54 | - constructor: `new Map([iterable])` 55 | - important methods and properties: 56 | - `size`, a property for getting the size. 57 | - `set(key, value)`, for adding a new _key_ with a _value_. 58 | - `has(key)`, to know if a key exists in the map. 59 | - `keys()`, to get all the keys (an iterator) of the map. 60 | - Hint: it is :cool: to use `for...of` to iterate over the map. 
61 | 62 | 63 | ## **Test** 64 | 65 | ``` 66 | $ npm test ./05 67 | ``` 68 | 69 | [test.js](./test.js ':include') 70 | 71 | ## **Solution** 72 | 73 | [solution.js](./solution.js ':include') 74 | 75 | 76 | -------------------------------------------------------------------------------- /problems/05/index.js: -------------------------------------------------------------------------------- 1 | const hyperdb = require('hyperdb') 2 | 3 | module.exports = class Saga {} 4 | -------------------------------------------------------------------------------- /problems/05/solution.js: -------------------------------------------------------------------------------- 1 | const hyperdb = require('hyperdb') 2 | 3 | module.exports = class Saga { 4 | constructor (storage, key, username) { 5 | this.messages = new Map() 6 | this.users = new Map() 7 | this.username = username 8 | this.timestamp = Date.now() 9 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /problems/05/test.js: -------------------------------------------------------------------------------- 1 | const hyperdb = require('hyperdb') 2 | const ram = require('random-access-memory') 3 | const Saga = require('.') 4 | 5 | describe('problem 05', () => { 6 | const saga = new Saga(ram, null, 'peti') 7 | 8 | test('db instance', () => { 9 | expect(saga.db).toBeInstanceOf(hyperdb) 10 | }) 11 | 12 | test('RAM storage', () => { 13 | expect(saga.db._storage.name).toBe('RAM') 14 | }) 15 | 16 | test('users prop', () => { 17 | expect(saga.users).toEqual(new Map()) 18 | }) 19 | 20 | test('messages prop', () => { 21 | expect(saga.messages).toEqual(new Map()) 22 | }) 23 | 24 | test('username prop', () => { 25 | expect(saga.username).toBe('peti') 26 | }) 27 | 28 | test('timestamp prop', () => { 29 | expect(typeof saga.timestamp).toBe('number') 30 | }) 31 | }) 32 | 
-------------------------------------------------------------------------------- /problems/06/README.es.md: -------------------------------------------------------------------------------- 1 | # 6 - Escribiendo mensajes 2 | 3 | Una parte esencial en la API de Saga es la posibilidad de escribir mensajes en la db. 4 | 5 | Hyperdb provee diferentes métodos para escribir data en la db, uno de ellos es: 6 | 7 | ```javascript 8 | db.put(key, value, cb) 9 | ``` 10 | 11 | Como dijimos anteriormente hyperdb es una base de datos key-value, esto quiere decir que si 12 | ejecutamos: `.put('/test', 'hi!')` el valor quedara guardado en la key `/test`. 13 | 14 | :microscope: Vieron la `/` que agregamos a la key? `Hyperdb tiene soporte para namespaces`, similar a _redis_. 15 | 16 | Podemos, a modo de ejemplo, leer todos los valores relacionados a un _namespace_ particular y sus _subfolders_: 17 | 18 | ```javascript 19 | db.put('/test/1', 'hi') 20 | db.put('/test/2', 'how are you?', () => { 21 | db.createReadStream('/test').pipe(printInConsole) 22 | }) 23 | 24 | // console: 25 | // [ Node(key=test/1, value='hi', seq=1, feed=0)) ] 26 | // [ Node(key=test/2, value='how are you?', seq=2, feed=0)) ] 27 | ``` 28 | 29 | ## Ejercicio 30 | 31 | Definir dos nuevos métodos de instancia para Saga. 32 | 33 | 1. `ready -> Promise` 34 | Método que retorne una promesa y que se resuelve cuando la db esta `ready`. 35 | 2. `writeMessage(message: string) -> Promise` 36 | Método que recibe como parámetro un `string` y retorna una **Promise**. 37 | 38 | Pero debemos tener en cuenta algunas cosas mas: 39 | - La **promise** se resuelve cuando se termina la operación de escritura y debe retornar la key del mensaje. 40 | Si la escritura falla, debería hacer un `reject` con el error. 41 | - No solo debemos guardar el mensaje sino también información adicional sobre el mismo: 42 | `{ key, message, username, timestamp }` 43 | - Los mensajes se deben guardar bajo el namespace: `/messages/`. 
44 | - Las key deben ser únicas. 45 | 46 | ## Tips 47 | 48 | - La mejor forma de definir una key única es utilizar algún generador de IDs. Te recomendamos 49 | que investigues: [hyperid](hyperid) 50 | 51 | 52 | ## **Test** 53 | 54 | ``` 55 | $ npm test ./06 56 | ``` 57 | 58 | [test.js](./test.js ':include') 59 | 60 | ## **Solución** 61 | 62 | [solution.js](./solution.js ':include') 63 | 64 | 65 | -------------------------------------------------------------------------------- /problems/06/README.md: -------------------------------------------------------------------------------- 1 | # 6 - Writing messages 2 | 3 | An essential part of the API of Sage is the possibility to write messages in 4 | the db. 5 | 6 | Hyperdb provides different methods for writing data in the db, one of these is: 7 | 8 | ```javascript 9 | db.put(key, value, cb) 10 | ``` 11 | 12 | Like we said earlier, hyperdb is a key-value database, that means if we execute 13 | `.put('/test', 'hi!')` the valu stays saves in the key `/test`. 14 | 15 | :microscope: See the `/` that we add to the key? `Hyperdb has support for namespaces` similar to _redis_. 16 | 17 | We can, for example, read all the values related to a particular _namespace_ 18 | and their _subfolders_: 19 | 20 | ```javascript 21 | db.put('/test/1', 'hi') 22 | db.put('/test/2', 'how are you?', () => { 23 | db.createReadStream('/test').pipe(printInConsole) 24 | }) 25 | 26 | // console: 27 | // [ Node(key=test/1, value='hi', seq=1, feed=0)) ] 28 | // [ Node(key=test/2, value='how are you?', seq=2, feed=0)) ] 29 | ``` 30 | 31 | ## Exercise 32 | 33 | Define two new instance methods for Saga 34 | 35 | 1. `ready -> Promise` 36 | A method that returns a promise and that can resolve itself when the db is 37 | `ready`. 38 | 2. `writeMessage(message: string) -> Promise` 39 | A method that receives as a parameter a `string` and returns a **Promise**. 
40 | 41 | But we have to take into account a few more things: 42 | - The **promise** resolves itself when it terminates the writing operation 43 | and has returned the message key 44 | If the writing fails, we have to make a `reject` with the error 45 | - We can't just save the message if we don't also have aditional information 46 | over the same key: 47 | `{ key, message, username, timestamp }` 48 | - The messages have to be saved below the namespace `/messages/`. 49 | - The keys have to be unique. 50 | 51 | ## Tips 52 | 53 | - The best way to define a unique key is to use some ID generator. We recommend 54 | that you look at [hyperid](hyperid) 55 | 56 | 57 | ## **Test** 58 | 59 | ``` 60 | $ npm test ./06 61 | ``` 62 | 63 | [test.js](./test.js ':include') 64 | 65 | ## **Solution** 66 | 67 | [solution.js](./solution.js ':include') 68 | 69 | 70 | -------------------------------------------------------------------------------- /problems/06/index.js: -------------------------------------------------------------------------------- 1 | const hyperdb = require('hyperdb') 2 | const hyperid = require('hyperid')() 3 | 4 | module.exports = class Saga { 5 | constructor (storage, key, username) { 6 | this.messages = new Map() 7 | this.users = new Map() 8 | this.username = username 9 | this.timestamp = Date.now() 10 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 11 | } 12 | 13 | ready () {} 14 | 15 | writeMessage (message) { 16 | const key = '...' // this key should be unique 17 | const data = {} 18 | 19 | return new Promise((resolve, reject) => { 20 | this.db.put(key, data, err => { 21 | // ... 
22 | }) 23 | }) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /problems/06/solution.js: -------------------------------------------------------------------------------- 1 | const hyperdb = require('hyperdb') 2 | const hyperid = require('hyperid') 3 | const uuid = hyperid() 4 | 5 | module.exports = class Saga { 6 | constructor (storage, key, username) { 7 | this.messages = new Map() 8 | this.users = new Map() 9 | this.username = username 10 | this.timestamp = Date.now() 11 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 12 | } 13 | 14 | writeMessage (message) { 15 | const key = `/messages/${uuid()}` 16 | const data = { 17 | key, 18 | message, 19 | username: this.username, 20 | timestamp: Date.now() 21 | } 22 | 23 | return new Promise((resolve, reject) => { 24 | this.db.put(key, data, (err) => { 25 | if (err) return reject(err) 26 | resolve(key) 27 | }) 28 | }) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /problems/06/test.js: -------------------------------------------------------------------------------- 1 | const ram = require('random-access-memory') 2 | const Saga = require('.') 3 | 4 | describe('problem 06', () => { 5 | const saga = new Saga(ram, null, 'peti') 6 | 7 | const readMessage = id => { 8 | return new Promise((resolve, reject) => { 9 | saga.db.get(id, (err, data) => { 10 | if (err) return reject(err) 11 | resolve(data[0].value.message) 12 | }) 13 | }) 14 | } 15 | 16 | test('writeMessage method', async () => { 17 | expect.assertions(1) 18 | const [ id1, id2 ] = await Promise.all([ 19 | saga.writeMessage('hi'), 20 | saga.writeMessage('how are you?') 21 | ]) 22 | const messages = await Promise.all([ 23 | readMessage(id1), 24 | readMessage(id2) 25 | ]) 26 | expect(messages.join(' ')).toBe('hi how are you?') 27 | }) 28 | }) 29 | -------------------------------------------------------------------------------- /problems/07/README.es.md: 
-------------------------------------------------------------------------------- 1 | # 7 - Mensajes en tiempo real :watch: 2 | 3 | Ok :cool:, ya hemos conseguido escribir mensajes usando nuestra API `Saga`. En algún momento estos mensajes se replicarán y llegarán 4 | a cada peer conectado. 5 | 6 | ...Pero también deberíamos poder leer los mensajes de nuestra db. Y no solo eso, deberíamos poder leer los mensajes en _real-time_. 7 | Queremos que `Saga` detecte cuando un usuario envía/escribe un mensaje en el chat. 8 | 9 | El siguiente ejercicio consiste en lograr ese objetivo: **leer mensajes en real-time**. 10 | 11 | ## Ejercicio 12 | 13 | 1. `Saga` ahora cuenta con un método `initialize`, necesitamos que una vez que este `ready` nuestra db haga un update de los mensajes. Esto quiere decir, 14 | leer de hyperdb cuales fueron los últimos mensajes y agregarlos a nuestro `Map` de mensajes. 15 | 1. Una vez que tenemos el listado, deberíamos llamar una función que se encargue de escuchar por nuevos cambios 16 | en nuestra db y ante un cambio llame nuevamente al update de mensajes. De esta forma resolveríamos el problema de _real-time_. 17 | 1. Cada mensaje nuevo que `Saga` encuentre debería emitir un evento `message` junto al mensaje y la key como argumentos. 18 | 1. Deberás encontrar una forma de ser eficiente en el update de mensajes, finalizar el update cuando sea necesario, no queremos recorrer el 19 | stream entero cada vez que hay un cambio. 20 | 21 | ## Tips 22 | 23 | 1. Para leer de hyperdb te recomendamos utilizar `createHistoryStream` e iterar sobre los mensajes de forma **reversa**. 24 | 1. Ahora `Saga` extiende de EventEmitter, eso quiere decir que tienen a su disposición toda la funcionalidad para emitir eventos. 25 | 1. Cuando queremos finalizar un stream podemos llamar a: `.destroy()`. Si el stream forma parte de una cadena de streams conectados 26 | con `pump`, llamar a un `destroy` provocaría finalizar cada stream conectado. 27 | 1. 
Recuerda que `pump` acepta como último parámetro una función que se ejecuta cuando finalizan los streams. 28 | 1. No olvides de tener a mano la doc de [hyperdb](hyperdb) :shipit: 29 | 1. Podes preguntarnos lo que necesites! :rainbow: 30 | 31 | 32 | ## **Test** 33 | 34 | ``` 35 | $ npm test ./07 36 | ``` 37 | 38 | [test.js](./test.js ':include') 39 | 40 | ## **Solución** 41 | 42 | [solution.js](./solution.js ':include') 43 | 44 | 45 | -------------------------------------------------------------------------------- /problems/07/README.md: -------------------------------------------------------------------------------- 1 | # 7 - Messages in real time :watch: 2 | 3 | Ok :cool:, we have already written messages using our `Saga` API. at some point 4 | these messages replicate and arrive at each connected peer. 5 | 6 | ...But we also need to have the power to read the messages of our database. And 7 | not only that, we will need to read those messages in _real-time_. We want 8 | `Saga` to detect when the user receives/writes a message in the chat. 9 | 10 | The next exercise achieves this objective: **to read messages in real-time**. 11 | 12 | ## Exercise 13 | 14 | 1. `Saga` now has a method `initialize`, we need that once our db is `ready` 15 | our db updates the messages. This means, read the last messages from the 16 | hyperdb and add those to our message `Map`. 17 | 1. Once we have this list, we should call the function that listens to the new 18 | changes in the db and 19 | en nuestra db y ante un cambio llame nuevamente al update de mensajes. De esta forma resolveríamos el problema de _real-time_. 20 | 1. Each new message that `Saga` encounters would emit a new `message` event 21 | with the key of the message as arguments. 22 | 1. We will find a way to be efficient when updating messages, to finish the 23 | update when it seems necessary. We don't want to werun the entire stream 24 | each time there is a change. 25 | 26 | 27 | ## Tips 28 | 29 | 1. 
To read the hyperdb we recommend you use `createHistoryStream` and iterate over the messages in **reverse** order. 30 | 1. Now `Saga` extends EventEmitter, that means that it has all the 31 | functionality to emit events. 32 | 1. When we want to finalize a stream we can call `destroy()`. If the stream 33 | forms part of a chain of connected streams with `pump`, calling `destroy` 34 | would cause each connected stream to end. 35 | 1. Remember that `pump` accepts as it's final parameter a function that will be 36 | executied when all the streams end. 37 | 1. Don't forger to have at hand the documentation of [hyperdb](hyperdb) :shipit: 38 | 1. You can ask us anything! :rainbow: 39 | 40 | 41 | ## **Test** 42 | 43 | ``` 44 | $ npm test ./07 45 | ``` 46 | 47 | [test.js](./test.js ':include') 48 | 49 | ## **Solution** 50 | 51 | [solution.js](./solution.js ':include') 52 | 53 | 54 | -------------------------------------------------------------------------------- /problems/07/index.js: -------------------------------------------------------------------------------- 1 | const EventEmitter = require('events') 2 | const hyperdb = require('hyperdb') 3 | const hyperid = require('hyperid') 4 | const pump = require('pump') 5 | const forEachChunk = require('../../lib/for-each-chunk') 6 | 7 | const uuid = hyperid() 8 | 9 | /** 10 | * 0 - When the db is ready we need to update our messages history and start watching for new messages 11 | * 1 - Create a reader history stream and iterate in reverse mode 12 | * 2 - When we have all the messages, destroy the stream 13 | * 3 - Push each new message in the Map messages 14 | * 4 - Emit a `message` event with the data and the key as arguments 15 | * 5 - Pipe into our write stream and call a function when the process is done 16 | * 6 - We need to watch the namespace `messages/*` to detect when there are new messages 17 | */ 18 | module.exports = class Saga extends EventEmitter { 19 | constructor (storage, key, username) { 20 | super() 21 | 22 
// Saga: real-time chat core. Extends EventEmitter so consumers can react
// to `message` events as new entries land in the hyperdb.
module.exports = class Saga extends EventEmitter {
  /**
   * @param {Function|string} storage - random-access storage factory (e.g. RAM)
   * @param {Buffer|string|null} key - public key of an existing db, or null to create one
   * @param {string} username - display name of the local user
   */
  constructor (storage, key, username) {
    super()

    this.messages = new Map()
    this.users = new Map()
    this.username = username
    this.timestamp = Date.now()
    // JSON valueEncoding so message objects round-trip without manual (de)serialization.
    this.db = hyperdb(storage, key, { valueEncoding: 'json' })
  }

  /**
   * Waits for the db to be ready, loads the existing message history, and
   * only then starts watching for live changes (passed as the pump
   * completion callback so watching begins after the initial scan ends).
   */
  async initialize () {
    await this._ready()

    this._updateHistory(this._watchForMessages.bind(this))
  }

  /**
   * Persists a chat message under a unique key in the `messages/` namespace.
   * @param {string} message - text typed by the user
   * @returns {Promise<string>} resolves with the storage key; rejects on write error
   */
  writeMessage (message) {
    const key = `messages/${uuid()}`
    const data = {
      key,
      message,
      username: this.username,
      timestamp: Date.now()
    }

    return new Promise((resolve, reject) => {
      this.db.put(key, data, (err) => {
        if (err) return reject(err)
        resolve(key)
      })
    })
  }

  /**
   * Replays the db history newest-first and folds unseen messages into
   * `this.messages`, emitting a `message` event for each one.
   * @param {Function} [onFinish] - called by pump when the stream chain ends
   */
  _updateHistory (onFinish) {
    // Reverse order: newest entries first, so we can stop early.
    const h = this.db.createHistoryStream({ reverse: true })

    const ws = forEachChunk({ objectMode: true }, (data, enc, next) => {
      const { key, value } = data

      if (/messages/.test(key)) {
        if (this.messages.has(key)) {
          // Reading newest-first: once we hit a known key, everything
          // older is already in the Map — tear down the whole pipeline.
          // Note: deliberately NOT calling next(); destroy() ends it.
          h.destroy()
          return
        }

        this.messages.set(key, value)
        this.emit('message', value, key)
      }

      next()
    })

    // pump wires h -> ws and invokes onFinish when both streams end
    // (including the early destroy() above).
    pump(h, ws, onFinish)
  }

  /**
   * Re-runs the history scan whenever anything under `messages` changes,
   * which is what makes the chat real-time.
   */
  _watchForMessages () {
    this.db.watch('messages', () => {
      this._updateHistory()
    })
  }

  /**
   * @returns {Promise<void>} resolves once hyperdb has finished loading
   */
  _ready () {
    return new Promise(resolve => this.db.ready(resolve))
  }
}
expect(messages).toEqual(expect.arrayContaining(['message1', 'message2'])) 23 | }) 24 | 25 | test('`message` event on _updateHistory', done => { 26 | expect.assertions(1) 27 | saga.once('message', data => { 28 | expect(data.message).toBe('hi') 29 | done() 30 | }) 31 | saga.writeMessage('hi') 32 | }) 33 | }) 34 | -------------------------------------------------------------------------------- /problems/08/README.es.md: -------------------------------------------------------------------------------- 1 | # 8 - Autorizando a otros peers :horse: :hamster: :rabbit: 2 | 3 | Hasta ahora todo lo que hemos visto ha involucrado pruebas locales con nuestra db pero no hemos interactuado mucho con el resto de nuestros compas. 4 | 5 | Es tiempo de cambiar eso. 6 | 7 | ## Back to hyperdb & hypercore 8 | 9 | Recuerdan que estamos usando hypercore y encima de el, hyperdb. Hagamos un breve repaso juntos, hasta ahora sabemos que: 10 | 11 | - Dat es _singlewriter_, por ahora. 12 | - `hypercore` es el :heartpulse: de Dat. 13 | - `hypercore` nos permite crear un _feed_ (estructura de datos) donde podemos escribir/leer y que podemos compartir. 14 | - Cada _feed_ nos da una clave pública que nos sirve para identificarnos. 15 | - Un _feed_ puede ser visto como un _append-only log_. 16 | - `hyperdb` es una db que opera sobre un **conjunto** de feeds (hypercores). 17 | - `hyperdb` habilita a Dat a soportar _multiwriters_ 18 | 19 | ## Cómo permitimos a otros peers qué escriban en nuestro feed? 20 | 21 | Funciona de la siguiente manera: una operación de escritura en el feed del autor original (el creador de nuestro chat, en nuestro caso), indica que determinado peer puede escribir. La forma de identificar al peer es mediante su clave pública (`PK`), es decir, este peer :horse: debe pasarme su `PK` de alguna forma. 22 | 23 | Internamente, hyperdb **escribe** en el feed, como si fuera un mensaje mas (aunque es un mensaje especial), que determinado peer :horse: puede escribir. 
24 | 25 | ## Ejercicio 26 | 27 | 1. `Saga` debe soportar un nuevo metodo (API) que le permitira autorizar a un peer. Este método `_authorize` recibirá como parámetro una `PK`. 28 | 2. Este nuevo método retorna una promesa que cuando resuelve exitosamente entrega un string: `AUTHORIZED`, y cuando hace el reject, devuelve el error. 29 | 30 | ## Tips 31 | 32 | Usaremos dos métodos de la API de hyperdb: 33 | 34 | - [`authorize`](/hyperdb#dbauthorizekey-callback), nos sirve para autorizar. 35 | - [`authorized`](/hyperdb#dbauthorizedkey-callback), nos sirve para chequear si ya autorizamos determinada key. 36 | 37 | 38 | ## **Test** 39 | 40 | ``` 41 | $ npm test ./08 42 | ``` 43 | 44 | [test.js](./test.js ':include') 45 | 46 | ## **Solución** 47 | 48 | [solution.js](./solution.js ':include') 49 | 50 | 51 | -------------------------------------------------------------------------------- /problems/08/README.md: -------------------------------------------------------------------------------- 1 | # 8 - Authorizing other peers :horse: :hamster: :rabbit: 2 | 3 | Until now, all that we've seen has involved local tests with our database but 4 | we have not interacted much with the rest of our computers. 5 | 6 | It's time to change that. 7 | 8 | ## Back to hyperdb & hypercore 9 | 10 | Remember that we are using hypercore and on top of that, hyperdb. Let's make 11 | a quick pass over both, until know we know that 12 | 13 | - Dat is _singlewriter_, for now. 14 | - `hypercore` is the :heartpulse: of Dat. 15 | - `hypercore` allows us to create a _feed_ (data structure) where we can write/read and share. 16 | - Each _feed_ gives us a public key that can serve to identify it (and us). 17 | - A _feed_ can be seen like an _append-only log_. 18 | - `hyperdb` is a db that operates over a **group** of feeds (hypercores). 19 | - `hyperdb` allows DAt to support _multiwriter_ 20 | 21 | ## How can we let other peers write on our feed? 
22 | 23 | It works in the following way: an writing operation in the original author's 24 | feed (the creator of our chat, in our case), indicates the peer that wants to 25 | write. The form to identify the peer is through their public key (`PK`), to 26 | say, this peer :horse: could pass their `PK` of their way. 27 | 28 | Internally, hyperdb **writes** in the feed, as if it was another message (but 29 | a special message) which determines a peer :horse: can write. 30 | 31 | ## Exercise 32 | 33 | 1. `Saga` should support a new method (API) which permits it to authorize a peer. This method `_authorize` receives a parameter `PK`. 34 | 2. This new method returns a promise that when it resolves successfully, 35 | delives a string `AUTHORIZED`, and when it rejects, it passes the error 36 | through. 37 | 38 | ## Tips 39 | 40 | We will use two methods from hyperdb API: 41 | 42 | - [`authorize`](/hyperdb#dbauthorizekey-callback), is used to authorize another peer. 43 | - [`authorized`](/hyperdb#dbauthorizedkey-callback), is used to check if some key was already authorized. 
44 | 45 | 46 | ## **Test** 47 | 48 | ``` 49 | $ npm test ./08 50 | ``` 51 | 52 | [test.js](./test.js ':include') 53 | 54 | ## **Solution** 55 | 56 | [solution.js](./solution.js ':include') 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /problems/08/index.js: -------------------------------------------------------------------------------- 1 | const EventEmitter = require('events') 2 | const hyperdb = require('hyperdb') 3 | const hyperid = require('hyperid') 4 | const pump = require('pump') 5 | const forEachChunk = require('../../lib/for-each-chunk') 6 | 7 | const uuid = hyperid() 8 | 9 | module.exports = class Saga extends EventEmitter { 10 | constructor (storage, key, username) { 11 | super() 12 | 13 | this.messages = new Map() 14 | this.users = new Map() 15 | this.username = username 16 | this.timestamp = Date.now() 17 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 18 | } 19 | 20 | async initialize () { 21 | await this._ready() 22 | 23 | this._updateHistory(this._watchForMessages.bind(this)) 24 | } 25 | 26 | writeMessage (message) { 27 | const key = `messages/${uuid()}` 28 | const data = { 29 | key, 30 | message, 31 | username: this.username, 32 | timestamp: Date.now() 33 | } 34 | 35 | return new Promise((resolve, reject) => { 36 | this.db.put(key, data, (err) => { 37 | if (err) return reject(err) 38 | resolve(key) 39 | }) 40 | }) 41 | } 42 | 43 | _updateHistory (onFinish) { 44 | const h = this.db.createHistoryStream({ reverse: true }) 45 | 46 | const ws = forEachChunk({ objectMode: true }, (data, enc, next) => { 47 | const { key, value } = data 48 | 49 | if (/messages/.test(key)) { 50 | if (this.messages.has(key)) { 51 | h.destroy() 52 | return 53 | } 54 | 55 | this.messages.set(key, value) 56 | this.emit('message', value, key) 57 | } 58 | 59 | next() 60 | }) 61 | 62 | pump(h, ws, onFinish) 63 | } 64 | 65 | _watchForMessages () { 66 | this.db.watch('messages', () => { 67 | this._updateHistory() 
68 | }) 69 | } 70 | 71 | _authorize (key) { 72 | // 1 - check if the key was already authorized. 73 | // Don't forget to handle errors! 74 | // 2 - if it was, resolve 75 | // 3 - if it was not, authorize it! 76 | // that's it :) 77 | } 78 | 79 | _ready () { 80 | return new Promise(resolve => this.db.ready(resolve)) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /problems/08/solution.js: -------------------------------------------------------------------------------- 1 | const EventEmitter = require('events') 2 | const hyperdb = require('hyperdb') 3 | const hyperid = require('hyperid') 4 | const pump = require('pump') 5 | const forEachChunk = require('../../lib/for-each-chunk') 6 | 7 | const uuid = hyperid() 8 | 9 | module.exports = class Saga extends EventEmitter { 10 | constructor (storage, key, username) { 11 | super() 12 | 13 | this.messages = new Map() 14 | this.users = new Map() 15 | this.username = username 16 | this.timestamp = Date.now() 17 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 18 | } 19 | 20 | async initialize () { 21 | await this._ready() 22 | 23 | this._updateHistory(this._watchForMessages.bind(this)) 24 | } 25 | 26 | writeMessage (message) { 27 | const key = `messages/${uuid()}` 28 | const data = { 29 | key, 30 | message, 31 | username: this.username, 32 | timestamp: Date.now() 33 | } 34 | 35 | return new Promise((resolve, reject) => { 36 | this.db.put(key, data, (err) => { 37 | if (err) return reject(err) 38 | resolve(key) 39 | }) 40 | }) 41 | } 42 | 43 | _updateHistory (onFinish) { 44 | const h = this.db.createHistoryStream({ reverse: true }) 45 | 46 | const ws = forEachChunk({ objectMode: true }, (data, enc, next) => { 47 | const { key, value } = data 48 | 49 | if (/messages/.test(key)) { 50 | if (this.messages.has(key)) { 51 | h.destroy() 52 | return 53 | } 54 | 55 | this.messages.set(key, value) 56 | this.emit('message', value, key) 57 | } 58 | 59 | next() 60 | }) 61 | 62 | 
pump(h, ws, onFinish) 63 | } 64 | 65 | _watchForMessages () { 66 | this.db.watch('messages', () => { 67 | this._updateHistory() 68 | }) 69 | } 70 | 71 | _authorize (key) { 72 | return new Promise((resolve, reject) => { 73 | this.db.authorized(key, (err, auth) => { 74 | if (err) return reject(err) 75 | 76 | if (auth) { 77 | return resolve('AUTHORIZED') 78 | } 79 | 80 | this.db.authorize(key, (err) => { 81 | if (err) return reject(err) 82 | resolve('AUTHORIZED') 83 | }) 84 | }) 85 | }) 86 | } 87 | 88 | _ready () { 89 | return new Promise(resolve => this.db.ready(resolve)) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /problems/08/test.js: -------------------------------------------------------------------------------- 1 | const ram = require('random-access-memory') 2 | const crypto = require('hypercore-crypto') 3 | const Saga = require('.') 4 | 5 | 6 | describe('problem 08', () => { 7 | const saga = new Saga(ram, null, 'peti') 8 | 9 | const keyPair = crypto.keyPair() 10 | const peerKey = keyPair.publicKey 11 | 12 | beforeAll(() => saga.initialize()) 13 | 14 | test('authorize a new key', async () => { 15 | expect.assertions(1) 16 | 17 | await expect(saga._authorize(peerKey)).resolves.toBe('AUTHORIZED') 18 | }) 19 | 20 | test('authorize a known key', async () => { 21 | expect.assertions(1) 22 | 23 | await expect(saga._authorize(peerKey)).resolves.toBe('AUTHORIZED') 24 | }) 25 | }) 26 | -------------------------------------------------------------------------------- /problems/09/README.es.md: -------------------------------------------------------------------------------- 1 | # 9 - Descubriendo el mundo :globe_with_meridians: 2 | 3 | :hatched_chick: Wow lo estas haciendo muy bien! Estamos muy cerca de entrar en la recta final del workshop, donde vamos a saltar a poner todos estos conocimientos en práctica en nuestro chat. 
4 | 5 | Pero antes, vamos a estudiar un ultimo feature de Dat que hemos pasado por alto hasta el momento. Se trata de la vieja pregunta; **¿cómo encontramos a otres peers?** 6 | 7 | Ultimamente venimos usando bastante hyperdb, pero el descubrimiento en Dat es resuelto por hypercore. 8 | 9 | Podemos dividir al descubrimiento en 2 partes: 10 | 11 | - A nivel de red, digamos mas de una forma "física", como se arma el _swarm_ (esto es, el conjunto de peers). 12 | - Luego tambien podemos pensar el descubrimiento "a nivel de datos", es decir, buscar información entre los pares. Para resolver esto, Dat utiliza algunas estructuras de datos comos [trie](https://en.wikipedia.org/wiki/Trie) y [merkle trees](https://en.wikipedia.org/wiki/Merkle_tree) que les permiten obtener datos de forma altamente eficiente (recordemos que Dat esta diseñado para compartir grandes volumenes de datos minimizando la cantidad de info que se necesita mover entre pares). 13 | 14 | Pero vamos a enfocarnos en como se arma el enjambre o _swarm_ :honeybee:. Para lograrlo, hypercore utiliza el modulo: [`discovery-swarm`](https://github.com/mafintosh/discovery-swarm) que a su vez utiliza a [`discovery-channel`](https://github.com/maxogden/discovery-channel). 15 | 16 | Dat tiene tres estrategias para armar el _swarm_ de peers: 17 | 18 | 1. Búsqueda de peers en la red local. `discovery-channel` hace uso de un modulo llamado ['dns-discovery'](https://github.com/mafintosh/dns-discovery) que tiene la capacidad de hacer consultas en la red local usando multicast dns (MDNS). 19 | 2. Si no encontramos localmente, vamos a internet. El siguiente paso es buscar usando dns, tambien haciendo uso de `dns-discovery`. 20 | 3. Si esos metodos fallan, vamos a buscar en un DHT o _hash distribuida_. Para esto `discovery-channel` hace uso de [bittorrent-dht](https://github.com/webtorrent/bittorrent-dht). 21 | 22 | Los dos primeros metodos son rapidos. 
En el metodo 2 y 3 aparece un punto a tener en cuenta, _son centralizados de alguna forma_. ¿Qué queremos decir con esto? ¿Acaso no estamos en un workshop P2P? 23 | 24 | ![the dude asking wtf](https://bukk.it/dudewtf.gif) 25 | 26 | ### bootstrap nodes 27 | 28 | La mayoria de las soluciones descentralizadas haran uso de bootstrap nodes. Los bootstrap nodes funcionan como nodos o _peers_ conocidos de antemano que usaremos para empezar a conocer a los demas y así formar nuestra propia idea de como es el _swarm_. 29 | 30 | En otra palabras, necesitamos conocer a alguien para entrar a la red. 31 | 32 | De hecho, Dat mantiene sus bootstrap nodes en un modulo aparte: [dat-swarm-defaults](https://github.com/datproject/dat-swarm-defaults). 33 | 34 | ## Qué ocurre en la web? 35 | 36 | Hemos visto conceptos especificos a Dat, que funcionan de maravillas en la línea de comandos. Pero en este workshop queremos crear una aplicación web. 37 | 38 | Cuando trabajamos con aplicaciones P2P, rompemos el concepto de `client <-- servidor` al que podemos estar acostumbradas. Las aplicaciones P2P son utiles en otras arquitecturas, un tip es que podemos pensar a cada participante tanto como cliente **y** servidor, donde la comunicación fluye entre pares: `C/S <--> C/S <---> C/S`. 39 | 40 | Para lograr este efecto en la web usaremos **webrtc**. webrtc es una tecnologia que nos permite, entre otras cosas, comunicación directa entre peers. 41 | 42 | Cabe mencionar, que webrtc tambien hace uso de bootstrap nodes. Un modulo interesante para crear un bootstrap node es [signalhub](https://github.com/mafintosh/signalhub). 43 | 44 | ## Ejercicio 45 | 46 | > Preambulo: Vamos a empezar a conectar las partes :electric_plug:. En este ejercicio tendremos que trabajar con dos archivos. Uno es `chat.js` y el otro es `index.js`, donde venimos trabajando con `Saga`. `chat.js` hará uso de `Saga` y es donde crearemos el _swarm_. 47 | 48 | 1. En el archivo `chat.js` vamos a crear un _swarm_. 
Usaremos un modulo de GEUT: [discovery-swarm-webrtc](/geut-discovery-swarm-webrtc) para hacerlo :cool:. 49 | Hay [dos parámetros](/geut-discovery-swarm-webrtc#const-sw-swarmopts) importantes que debemos tener en cuenta para pasarles al constructor: 50 | - id: `Saga` expone la instancia de hyperdb, de ahí podemos obtener el [feed local](/hyperdb#dblocal) y de ese feed la `key` (en hexadecimal). Esto es lo que debemos pasarle, un identificador único de nuestro feed. 51 | - stream: un stream para replicar con otros peers. Por suerte `Saga` expone un método `replicate`. :wink: 52 | 2. Luego vamos a crear una instancia de `signalhub`. El constructor recibe dos parametros, uno de ellos ya lo tenemos, `signalUrls`; el otro, que debemos completar, es usado como _namespace_, aquí usaremos la `discoveryKey` (en hex tambien) de hyperdb. 53 | 3. Solo nos falta conectar con `Saga`. En este punto **(3)**, tenemos el _swarm_ listo. _swarm_ emite eventos, uno de los mas importantes es `connection`. Esto indica cuando tenemos un nuevo _peer_ conectado :boom:. Cuando esto ocurre debemos informar a `Saga`. Agreguemos aquí el llamado a un **nuevo** método (ahora vamos a implementarlo :wink:) de `Saga`: `connect(peer)` que recibe un peer como parámetro. 54 | 4. Del lado de `Saga` solo nos falta implementar el nuevo método `connect`. Vamos al archivo `index.js`. `connect` recibe un `peer` como parámetro. `peer` es un objeto con una propiedad que nos interesa, `remoteUserData`. De ahí vamos a obtener los datos y la `key` (PK) del nuevo `peer`. Recuerdan el ejercicio anterior? debemos **autorizarlo** usando esta info. 55 | 56 | ## Tips 57 | 58 | - GEUT al rescate. Usaremos un modulo de GEUT: [discovery-swarm-webrtc](/geut-discovery-swarm-webrtc) para crear el swarm. Este modulo mantiene una API muy similar a `discovery-swarm`. 
59 | 60 | 61 | ## **Test** 62 | 63 | ``` 64 | $ npm test ./09 65 | ``` 66 | 67 | [test.js](./test.js ':include') 68 | 69 | ## **Solución** 70 | 71 | [solution.js](./solution.js ':include') 72 | 73 | 74 | 75 | ## Extra :books: 76 | 77 | - Les recomendamos este [artículo](https://rangermauve.hashbase.io/posts/how-dat-discovers-peers) de [Ranger Mauve](https://mobile.twitter.com/RangerMauve) 78 | 79 | ___ 80 | Woow eso fue bastante, pero ya estamos cerca de poner todo esto en acción :rocket: 81 | 82 | ![dancing buddies](https://bukk.it/lineup.gif) 83 | -------------------------------------------------------------------------------- /problems/09/README.md: -------------------------------------------------------------------------------- 1 | # 9 - Discovering the world :globe_with_meridians: 2 | 3 | :hatched_chick: Wow, now you have done really well! We are really close to entering the final round of the workshop, where we are going to jump to put all that we've learned in practice in our chat app. 4 | 5 | But first, we are going to study the last feature of Dat that we have skipped 6 | until now. It is about the old question: **how do we find other peers?** 7 | 8 | We have looked mostly at using hyperdb, but the discovery in Dat is resolved by 9 | hypercore. 10 | 11 | We can divide the discovery into two parts: 12 | 13 | - At the network level, let's say the "physical" form, is how we create the 14 | _swarm_ (this is the group of peers). 15 | - Then we can also think of the discovery at the level of data, that is to say, to 16 | search for information between the peers. To resolve this, Dat uses data 17 | structures like [tries](https://en.wikipedia.org/wiki/Trie) and [merkle trees](https://en.wikipedia.org/wiki/Merkle_tree) which let us get data in a highly efficient way. Remember that Dat is designed to share big volumes of data, minimizing the quantity of data that is moved between peers. 18 | 19 | But we are going to focus on how we create the _swarm_ :honeybee:.
To achieve 20 | this, hypercore uses the module [`discovery-swarm`](https://github.com/mafintosh/discovery-swarm) which in turn uses [`discovery-channel`](https://github.com/maxogden/discovery-channel). 21 | 22 | Dat has three strategies to connect the _swarm_ of peers: 23 | 24 | 1. Find the peers in the local network. `discovery-channel` uses a module 25 | called ['dns-discovery'](https://github.com/mafintosh/dns-discovery) that has the ability to make queries in the local network using multicast dns (MDNS). 26 | 2. If we don't find each other locally, we go to the internet. The next phase is 27 | to search using dns, also with `dns-discovery`. 28 | 3. If these methods fail, we are going to look in a DHT or _distributed hashtable_. For this, `discovery-channel` uses [bittorrent-dht](https://github.com/webtorrent/bittorrent-dht). 29 | 30 | The first two methods are fast. With the 2nd and 3rd methods there is a point 31 | to keep in mind: _they are centralized in some way_. What does that mean? 32 | Aren't we in a P2P workshop? 33 | 34 | ![the dude asking wtf](https://bukk.it/dudewtf.gif) 35 | 36 | ### bootstrap nodes 37 | 38 | Most of the decentralized solutions use bootstrap nodes. Bootstrap nodes 39 | function as _peers_ that are known beforehand, which we use to start meeting 40 | others and in this way form our own picture of the _swarm_. 41 | 42 | In other words, we need to know someone to enter the network. 43 | 44 | In fact, Dat maintains its bootstrap nodes in a separate module: [dat-swarm-defaults](https://github.com/datproject/dat-swarm-defaults). 45 | 46 | ## What happens on the web? 47 | 48 | We have seen concepts specific to Dat, which work wonders on the 49 | command line. But in this workshop we want to create a web application. 50 | 51 | When we work with P2P applications, we break the concept of `client <-- server` 52 | that we are accustomed to.
P2P applications are useful in other architectures, 53 | if we can think of each participant as a client **and server** where the 54 | communication flows between pairs `C/S <--> C/S <--> C/S`. 55 | 56 | To achieve this effect in the web we use **webrtc**. webrtc is a technology 57 | that allows us, among other things, communicate directly between peers. 58 | 59 | It's fit to mention that webrtc also is used by bootstrap nodes. An interesting 60 | module to create a bootstrap node is [signalhub](https://github.com/mafintosh/signalhub). 61 | 62 | ## Exercises 63 | 64 | > Introduction: Let's start by connecting the parts :electric_plug:. In this exercise, we will work with two files. One is `chat.js` and the other is `index.js`, where we were working with `Saga`. `chat.js` will use `Saga` and is where we will create the _swarm_. 65 | 66 | 67 | 1. In the `chat.js` file we are going to create a _swarm_. We will use a module 68 | by GEUT: [discovery-swarm-webrtc](/geut-discovery-swarm-webrtc) to do it :cool:. 69 | There are [two important parameters](/geut-discovery-swarm-webrtc#const-sw-swarmopts) that we have account for in the constructor: 70 | - id: `Saga` exposes the hyperdb instance, and from there we can obtain the expone [local feed](/hyperdb#dblocal) and from this feed the `key` (in hexadecimal). This is what we have to pass to it, a unique identifer of our feed. 71 | - stream: a stream to replicate with other peers. Luckly `Saga` exposes a `replicate` method. :wink: 72 | 2. Then we are going to create a `signalhub` instance. The constructor receives two parameters, one of them we already have, `signalUrls`; the other, which we have to complete, is used like a _namespace_, here we will use the `discoveryKey` (in hex too) in hyperdb. 73 | 3. We now only lack to connect to `Saga`. In point **(3)**, we have the _swarm_ ready. _swarm_ emits events, one of the most important parts is `connection`. 
This indicates when we have a new connected _peer_ :boom:. When this occurs we have to tell `Saga`. We will add here a **new** method (now we are going to implement it :wink:): `connect(peer)` which receives a peer as a parameter. 74 | 4. On the `Saga` side, we only have to implement the new `connect` method. Let's go to the `index.js` file. `connect` receives a `peer` as a parameter. `peer` is an object with a property that interests us, `remoteUserData`. From there we will get the data and the `key` (PK) of our new `peer`. Remember the earlier exercise? We will **authorize it** using this info. 75 | 76 | ## Tips 77 | 78 | - GEUT will rescue you. We will use a module from GEUT: [discovery-swarm-webrtc](/geut-discovery-swarm-webrtc) to create the swarm. This module maintains an API very similar to `discovery-swarm`. 79 | 80 | 81 | ## **Test** 82 | 83 | ``` 84 | $ npm test ./09 85 | ``` 86 | 87 | [test.js](./test.js ':include') 88 | 89 | ## **Solution** 90 | 91 | [solution.js](./solution.js ':include') 92 | 93 | 94 | 95 | ## Extra :books: 96 | 97 | - We recommend this [article](https://rangermauve.hashbase.io/posts/how-dat-discovers-peers) by [Ranger Mauve](https://mobile.twitter.com/RangerMauve) 98 | 99 | ___ 100 | Woow this was a lot, but now you're already close to putting everything in action :rocket: 101 | 102 | ![dancing buddies](https://bukk.it/lineup.gif) 103 | -------------------------------------------------------------------------------- /problems/09/chat.js: -------------------------------------------------------------------------------- 1 | const signalhub = require('signalhub') 2 | const ram = require('random-access-memory') 3 | const swarm = require('@geut/discovery-swarm-webrtc') 4 | const saga = require('./') 5 | 6 | const webrtcOpts = {} 7 | 8 | async function initChat (username, key) { 9 | const publicKey = key && key.length > 0 ?
key : null 10 | const chat = saga(ram, publicKey, username) 11 | 12 | await chat.initialize() 13 | 14 | // (1) 15 | const sw = swarm({ 16 | /* id: ... , */ 17 | /* stream: () => ... */ 18 | }) 19 | 20 | // (2) 21 | // creating a signalhub instance... 22 | const discoveryKey = /* ... */ 23 | const signalUrls = ['https://signalhub-jccqtwhdwc.now.sh/'] 24 | 25 | sw.join(signalhub(discoveryKey, signalUrls), webrtcOpts) 26 | 27 | sw.on('connection', async peer => { 28 | try { 29 | // (3) 30 | /* await ... */ 31 | } catch (err) { 32 | console.log(err) 33 | } 34 | }) 35 | 36 | return chat 37 | } 38 | 39 | module.exports = initChat 40 | -------------------------------------------------------------------------------- /problems/09/chat.solution.js: -------------------------------------------------------------------------------- 1 | const signalhub = require('signalhub') 2 | const ram = require('random-access-memory') 3 | const swarm = require('@geut/discovery-swarm-webrtc') 4 | const saga = require('./') 5 | 6 | const webrtcOpts = {} 7 | 8 | async function initChat (username, key) { 9 | const publicKey = key && key.length > 0 ? 
key : null 10 | const chat = saga(ram, publicKey, username) 11 | 12 | await chat.initialize() 13 | 14 | const sw = swarm({ 15 | id: chat.db.local.key.toString('hex'), 16 | stream: () => chat.replicate() 17 | }) 18 | 19 | const discoveryKey = chat.db.discoveryKey.toString('hex') 20 | const signalUrls = ['https://signalhub-jccqtwhdwc.now.sh/'] 21 | 22 | sw.join(signalhub(discoveryKey, signalUrls), webrtcOpts) 23 | 24 | sw.on('connection', async peer => { 25 | try { 26 | await chat.connect(peer) 27 | } catch (err) { 28 | console.log(err) 29 | } 30 | }) 31 | 32 | return chat 33 | } 34 | 35 | module.exports = initChat 36 | -------------------------------------------------------------------------------- /problems/09/index.js: -------------------------------------------------------------------------------- 1 | const EventEmitter = require('events') 2 | const hyperdb = require('hyperdb') 3 | const hyperid = require('hyperid') 4 | const pump = require('pump') 5 | const forEachChunk = require('../../lib/for-each-chunk') 6 | 7 | const uuid = hyperid() 8 | 9 | module.exports = class Saga extends EventEmitter { 10 | constructor (storage, key, username) { 11 | super() 12 | 13 | this.messages = new Map() 14 | this.users = new Map() 15 | this.username = username 16 | this.timestamp = Date.now() 17 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 18 | } 19 | 20 | async initialize () { 21 | await this._ready() 22 | 23 | this._updateHistory(this._watchForMessages.bind(this)) 24 | } 25 | 26 | replicate () { 27 | // NEW method !!! 28 | return this.db.replicate({ 29 | live: true, 30 | userData: JSON.stringify({ 31 | key: this.db.local.key, 32 | username: this.username, 33 | timestamp: this.timestamp 34 | }) 35 | }) 36 | } 37 | 38 | async connect (peer) { 39 | if (!peer.remoteUserData) { 40 | throw new Error('peer does not have userData') 41 | } 42 | 43 | // const data = JSON.parse(/* ... 
*/) 44 | // const username = data.username 45 | 46 | // secure copy the buffer 47 | // const key = Buffer.from(data.key) 48 | 49 | // (4) authorize peer 50 | // ... 51 | 52 | // This block will be helpful later 53 | if (!this.users.has(username)) { 54 | this.users.set(username, new Date()) 55 | this.emit('join', data) 56 | peer.on('close', () => { 57 | if (!this.users.has(username)) return 58 | this.users.delete(username) 59 | this.emit('leave', data) 60 | }) 61 | } 62 | } 63 | 64 | writeMessage (message) { 65 | const key = `messages/${uuid()}` 66 | const data = { 67 | key, 68 | message, 69 | username: this.username, 70 | timestamp: Date.now() 71 | } 72 | 73 | return new Promise((resolve, reject) => { 74 | this.db.put(key, data, (err) => { 75 | if (err) return reject(err) 76 | resolve(key) 77 | }) 78 | }) 79 | } 80 | 81 | _updateHistory (onFinish) { 82 | const h = this.db.createHistoryStream({ reverse: true }) 83 | 84 | const ws = forEachChunk({ objectMode: true }, (data, enc, next) => { 85 | const { key, value } = data 86 | 87 | if (/messages/.test(key)) { 88 | if (this.messages.has(key)) { 89 | h.destroy() 90 | return 91 | } 92 | 93 | this.messages.set(key, value) 94 | this.emit('message', value, key) 95 | } 96 | 97 | next() 98 | }) 99 | 100 | pump(h, ws, onFinish) 101 | } 102 | 103 | _watchForMessages () { 104 | this.db.watch('messages', () => { 105 | this._updateHistory() 106 | }) 107 | } 108 | 109 | _authorize (key) { 110 | return new Promise((resolve, reject) => { 111 | this.db.authorized(key, (err, auth) => { 112 | if (err) return reject(err) 113 | 114 | if (auth) { 115 | return resolve('AUTHORIZED') 116 | } 117 | 118 | this.db.authorize(key, (err) => { 119 | if (err) return reject(err) 120 | resolve('AUTHORIZED') 121 | }) 122 | }) 123 | }) 124 | } 125 | 126 | _ready () { 127 | return new Promise(resolve => this.db.ready(resolve)) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /problems/09/solution.js: 
-------------------------------------------------------------------------------- 1 | const EventEmitter = require('events') 2 | const hyperdb = require('hyperdb') 3 | const hyperid = require('hyperid') 4 | const pump = require('pump') 5 | const forEachChunk = require('../../lib/for-each-chunk') 6 | 7 | const uuid = hyperid() 8 | 9 | module.exports = class Saga extends EventEmitter { 10 | constructor (storage, key, username) { 11 | super() 12 | 13 | this.messages = new Map() 14 | this.users = new Map() 15 | this.username = username 16 | this.timestamp = Date.now() 17 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 18 | } 19 | 20 | async initialize () { 21 | await this._ready() 22 | 23 | this._updateHistory(this._watchForMessages.bind(this)) 24 | } 25 | 26 | replicate () { 27 | // NEW method !!! 28 | return this.db.replicate({ 29 | live: true, 30 | userData: JSON.stringify({ 31 | key: this.db.local.key, 32 | username: this.username, 33 | timestamp: this.timestamp 34 | }) 35 | }) 36 | } 37 | 38 | async connect (peer) { 39 | if (!peer.remoteUserData) { 40 | throw new Error('peer does not have userData') 41 | } 42 | 43 | const data = JSON.parse(peer.remoteUserData) 44 | const username = data.username 45 | 46 | // secure copy the buffer 47 | const key = Buffer.from(data.key) 48 | 49 | // (4) authorize peer 50 | await this._authorize(key) 51 | 52 | // This block will be helpful later 53 | if (!this.users.has(username)) { 54 | this.users.set(username, new Date()) 55 | this.emit('join', data) 56 | peer.on('close', () => { 57 | if (!this.users.has(username)) return 58 | this.users.delete(username) 59 | this.emit('leave', data) 60 | }) 61 | } 62 | } 63 | 64 | writeMessage (message) { 65 | const key = `messages/${uuid()}` 66 | const data = { 67 | key, 68 | message, 69 | username: this.username, 70 | timestamp: Date.now() 71 | } 72 | 73 | return new Promise((resolve, reject) => { 74 | this.db.put(key, data, (err) => { 75 | if (err) return reject(err) 76 | 
resolve(key) 77 | }) 78 | }) 79 | } 80 | 81 | _updateHistory (onFinish) { 82 | const h = this.db.createHistoryStream({ reverse: true }) 83 | 84 | const ws = forEachChunk({ objectMode: true }, (data, enc, next) => { 85 | const { key, value } = data 86 | 87 | if (/messages/.test(key)) { 88 | if (this.messages.has(key)) { 89 | h.destroy() 90 | return 91 | } 92 | 93 | this.messages.set(key, value) 94 | this.emit('message', value, key) 95 | } 96 | 97 | next() 98 | }) 99 | 100 | pump(h, ws, onFinish) 101 | } 102 | 103 | _watchForMessages () { 104 | this.db.watch('messages', () => { 105 | this._updateHistory() 106 | }) 107 | } 108 | 109 | _authorize (key) { 110 | return new Promise((resolve, reject) => { 111 | this.db.authorized(key, (err, auth) => { 112 | if (err) return reject(err) 113 | 114 | if (auth) { 115 | return resolve('AUTHORIZED') 116 | } 117 | 118 | this.db.authorize(key, (err) => { 119 | if (err) return reject(err) 120 | resolve('AUTHORIZED') 121 | }) 122 | }) 123 | }) 124 | } 125 | 126 | _ready () { 127 | return new Promise(resolve => this.db.ready(resolve)) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /problems/09/test.js: -------------------------------------------------------------------------------- 1 | const Emitter = require('events') 2 | const ram = require('random-access-memory') 3 | const crypto = require('hypercore-crypto') 4 | const Saga = require('.') 5 | 6 | class MockedPeer extends Emitter { 7 | constructor (data) { 8 | super() 9 | this.remoteUserData = JSON.stringify(data) 10 | } 11 | } 12 | 13 | describe('problem 09', () => { 14 | const saga = new Saga(ram, null, 'peti') 15 | 16 | const keyPair = crypto.keyPair() 17 | const peerKey = keyPair.publicKey 18 | const peer = new MockedPeer({ 19 | username: 'test', 20 | key: peerKey, 21 | timestamp: Date.now() 22 | }) 23 | 24 | beforeAll(() => saga.initialize()) 25 | 26 | test('call connect and authorize a new peer', async (done) => { 27 | 
expect.assertions(2) 28 | 29 | await saga.connect(peer) 30 | 31 | saga.db.authorized(peerKey, (err, auth) => { 32 | expect(err).toBeNull() 33 | expect(auth).toBeTruthy() 34 | done() 35 | }) 36 | }) 37 | }) 38 | -------------------------------------------------------------------------------- /problems/10/README.es.md: -------------------------------------------------------------------------------- 1 | # 10 - Conociendo a Olaf :smiley_cat: 2 | 3 | :checkered_flag: Estamos en la recta final de nuestra aplicación. Vamos a conectar todo lo que vimos con una interfaz de usuario que esperemos sea de su agrado :relaxed:. 4 | 5 | ![el gato olaf](/assets/theolaf.jpg) 6 | 7 | **Con todos nuestro nuevos conocimientos vamos a crear un chat P2P.** 8 | 9 | > Recuerden que la idea del workshop es conocer mas sobre Dat y todas las partes que lo componen. Luego podemos usar ese conocimiento para crear cualquier tipo de aplicación. Sin embargo pueden haber notado que las arquitecturas de estas apps P2P son un poco diferentes a las web apps tradicionales con las que podemos venir trabajando. De cualquier manera, tu imaginación es el limite. 10 | 11 | Pero para ponernos un poco mas prágmaticos, vamos a partir de una app que usaremos como cascarón para nuestro workshop. Esta app se llama `Olaf` y esta construida sobre un stack alternativo: 12 | 13 | - [choo](/choo) 14 | - [tachyons](https://tachyons.io/) 15 | - [parceljs](https://parceljs.org/) 16 | - babel7 17 | - webrtc 18 | 19 | A `Olaf` le agregaremos lo que venimos construyendo en `Saga`, nuestra propia API sobre hyperdb que nos permite replicar feeds y leer/escribir mensajes. :cool: 20 | 21 | ## Un vistazo rápido sobre Choo :steam_locomotive: 22 | 23 | Choo es un framework minimalista para construir aplicaciones web. Tiene todo lo necesario y nada mas. 24 | 25 | Algunos conceptos importantes en el mundo de Choo: 26 | - Tenemos vistas y componentes. Usualmente una vista levanta varios componentes. 
Una vista seria un [_statefull component_](/choo#components). 27 | - Los componentes son similares a las _stateless functions_ de React. 28 | - En el core de choo hay un sistema de eventos. Por ejemplo, un renderizado se dispara a emitiendo el evento `render`. 29 | - Choo cuenta con un router que mapea regex a vistas. 30 | - Hay algunos [eventos built-in](/choo#events) que que podemos usar cuando creamos apps, pero tambien esta todo bien con agregar nuestros [propios eventos](/choo#stateevents) de acuerdo a la lógica de nuestra app. 31 | - Finalmente, el [estado](/choo#state) es un objeto compartido que puede ser mutado. Un re-renderizar no esta atado a un cambio de estado directamente. Aunque suele ser lo usual, pero no olvidemos que tenemos que emitir el render. 32 | 33 | ## Ejercicio 34 | 35 | 1. Primero vamos a familiarizarnos con `Olaf` :cat2: 36 | - Revisemos las vistas (`main.js`). 37 | - Luego los componentes. 38 | - Y finalmente los `stores`. Aquí encontraremos el binding entre `Saga` y `Olaf`. Mas alla de esto, veamos como esta distribuida la lógica de eventos, cuales se disparan y tratemos de entender en que momento deberian ocurrir. En qué momento se conectan los _listeners_ de nuestra app? 39 | 1. Ahora pasemos a `chat/olaf/lib` donde vamos a encontrar un _placeholder_ para `Saga`. Copiemos el código de la clase de `Saga` del ejercicio anterior. Esto sumariza el trabajo que venimos haciendo. 40 | 1. Volvamos al store principal, `stores/chat.js`. En la función `initRoom` tenemos que terminar el binding entre `Saga` y `Olaf`. 41 | 42 | ## Test 43 | 44 | `npm run olaf` 45 | 46 | ## Tips 47 | 48 | En este ejercicio estamos conectando todo lo que veniamos viendo sobre Dat con una aplicación nueva. 
Entendemos que hay varias cosas nuevas, por eso no dudes en preguntarnos :smile_cat: 49 | 50 | -------------------------------------------------------------------------------- /problems/10/README.md: -------------------------------------------------------------------------------- 1 | # 10 - Getting to know Olaf :smiley_cat: 2 | 3 | :checkered_flag: We are in the final round of our app. We are going to connect all that we've seen with a user interface that we hope people like :relaxed:. 4 | 5 | ![el gato olaf](/assets/theolaf.jpg) 6 | 7 | **With all our new knowledge we are going to create a P2P chat.** 8 | 9 | > Remember that the workshop idea is to know more about Dat and all of the parts that make it up. Later, we can use that knowledge to create whatever type of application. Nevertheless, note that the architectures of these P2P apps are a little different than the traditional web apps we are accustomed to working with. Anyway, your imagination is the limit. 10 | 11 | But to be a bit pragmatic, we are going to start with an app that we will use like 12 | a shell for our workshop. This app is called `Olaf` and it's built on an 13 | alternative stack. 14 | 15 | - [choo](/choo) 16 | - [tachyons](https://tachyons.io/) 17 | - [parceljs](https://parceljs.org/) 18 | - babel7 19 | - webrtc 20 | 21 | We're going to add to `Olaf` what we have been building in `Saga`, our own API 22 | over hyperdb that allows us to replicate feeds and read/write messages :cool: 23 | 24 | ## A quick glance over Choo :steam_locomotive: 25 | 26 | Choo is a minimalist framework to construct web applications. It has everything 27 | you need and nothing more. 28 | 29 | Some important concepts in the Choo world: 30 | - We have views and components. Usually a view uses various components. A view 31 | would be a [_stateful component_](/choo#components). 32 | - Components are similar to _stateless functions_ in React. 33 | - In choo core, there is an event system.
For example, a rendering dispatches the `render` event. 34 | - Choo comes with a router that maps regex to views. 35 | - There are some [built-in events](/choo#events) that we can use when we create apps, but also it's ok to use our [own events](/choo#stateevents) that conforms to the logic of our own app. 36 | - Finally, the [state](/choo#state) is a shared object that can be mutated. 37 | A re-renderizer isn't tied to a change in the state directly. Even though 38 | usually it's usual, but don't forget we have to emit render. 39 | 40 | ## Exercise 41 | 42 | 1. First let's familiarize ourselves with `Olaf` :cat2: 43 | - Review the views (`main.js`). 44 | - Then the components. 45 | - And then the `stores`. Here we find the connection between `Saga` and `Olaf`. We can also see how the event logic is distributed and which ones will be called. Try to understand when they will occur. When do the _listeners_ connect to the app? 46 | 1. Now let's go over `chat/olaf/lib` where we will find a _placeholder_ for 47 | `Saga.` Let's copy the code of the `Saga` class from the last exercise. 48 | 1. Return to the store, `stores/chat.js`. In the `initRoom` function we have to finish the binding between `Saga` and `Olaf`. 49 | 50 | ## Test 51 | 52 | `npm run olaf` 53 | 54 | ## Tips 55 | 56 | In this exercise we are connecting all that we have learned about dat in 57 | a new application. 
We understand that there are many new things, so don't 58 | hesitate to ask questions :smile_cat: 59 | -------------------------------------------------------------------------------- /problems/10/olaf/.env.example: -------------------------------------------------------------------------------- 1 | #ICE_URLS=stun:localhost:3478 2 | #SIGNAL_URLS=wss://localhost:4000 3 | -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/android-icon-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/android-icon-144x144.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/android-icon-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/android-icon-192x192.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/android-icon-36x36.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/android-icon-36x36.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/android-icon-48x48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/android-icon-48x48.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/android-icon-72x72.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/android-icon-72x72.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/android-icon-96x96.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/android-icon-96x96.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-114x114.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-114x114.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-120x120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-120x120.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-144x144.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-152x152.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-152x152.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-180x180.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-180x180.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-57x57.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-57x57.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-60x60.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-60x60.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-72x72.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-72x72.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-76x76.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-76x76.png 
-------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon-precomposed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon-precomposed.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/apple-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/apple-icon.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/browserconfig.xml: -------------------------------------------------------------------------------- 1 | 2 | #ffffff -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/favicon-16x16.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/favicon-32x32.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/favicon-96x96.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/favicon-96x96.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/favicon.ico -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "App", 3 | "icons": [ 4 | { 5 | "src": "\/android-icon-36x36.png", 6 | "sizes": "36x36", 7 | "type": "image\/png", 8 | "density": "0.75" 9 | }, 10 | { 11 | "src": "\/android-icon-48x48.png", 12 | "sizes": "48x48", 13 | "type": "image\/png", 14 | "density": "1.0" 15 | }, 16 | { 17 | "src": "\/android-icon-72x72.png", 18 | "sizes": "72x72", 19 | "type": "image\/png", 20 | "density": "1.5" 21 | }, 22 | { 23 | "src": "\/android-icon-96x96.png", 24 | "sizes": "96x96", 25 | "type": "image\/png", 26 | "density": "2.0" 27 | }, 28 | { 29 | "src": "\/android-icon-144x144.png", 30 | "sizes": "144x144", 31 | "type": "image\/png", 32 | "density": "3.0" 33 | }, 34 | { 35 | "src": "\/android-icon-192x192.png", 36 | "sizes": "192x192", 37 | "type": "image\/png", 38 | "density": "4.0" 39 | } 40 | ] 41 | } -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/ms-icon-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/ms-icon-144x144.png -------------------------------------------------------------------------------- 
/problems/10/olaf/assets/favicon/ms-icon-150x150.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/ms-icon-150x150.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/ms-icon-310x310.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/ms-icon-310x310.png -------------------------------------------------------------------------------- /problems/10/olaf/assets/favicon/ms-icon-70x70.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/assets/favicon/ms-icon-70x70.png -------------------------------------------------------------------------------- /problems/10/olaf/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Olaf chat 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /problems/10/olaf/manifest.webmanifest: -------------------------------------------------------------------------------- 1 | { 2 | "name": "olaf", 3 | "short_name": "olaf", 4 | "description": "A P2P chat", 5 | "start_url": "/", 6 | "display": "standalone", 7 | "background_color": "#ffdfdf", 8 | "theme_color": "#ffdfdf", 9 | "icons": [ 10 | { 11 | "src": "/assets/favicon/android-icon-36x36.png", 12 | "sizes": "36x36", 13 | "type": "image/png", 14 | "density": "0.75" 15 | }, 16 | { 17 | "src": "/assets/favicon/android-icon-48x48.png", 18 | "sizes": "48x48", 19 | "type": 
"image/png", 20 | "density": "1.0" 21 | }, 22 | { 23 | "src": "/assets/favicon/android-icon-72x72.png", 24 | "sizes": "72x72", 25 | "type": "image/png", 26 | "density": "1.5" 27 | }, 28 | { 29 | "src": "/assets/favicon/android-icon-96x96.png", 30 | "sizes": "96x96", 31 | "type": "image/png", 32 | "density": "2.0" 33 | }, 34 | { 35 | "src": "/assets/favicon/android-icon-144x144.png", 36 | "sizes": "144x144", 37 | "type": "image/png", 38 | "density": "3.0" 39 | }, 40 | { 41 | "src": "/assets/favicon/android-icon-192x192.png", 42 | "sizes": "192x192", 43 | "type": "image/png", 44 | "density": "4.0" 45 | } 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /problems/10/olaf/src/assets/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geut/dat-workshop/de3da8cc58b77de3e72d06ccea201d0c1db6f49f/problems/10/olaf/src/assets/icon.png -------------------------------------------------------------------------------- /problems/10/olaf/src/components/header.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | const usersIcon = require('./icons/users') 4 | const keyIcon = require('./icons/key') 5 | const moonIcon = require('./icons/moon') 6 | const sunIcon = require('./icons/sun') 7 | 8 | const iconButton = (icon, onclick, classes = '') => { 9 | return html` 10 | 12 | ${icon} 13 | 14 | ` 15 | } 16 | 17 | module.exports = function header (state, emit) { 18 | function toggleFriends () { 19 | const { TOGGLE_FRIENDS } = state.events 20 | emit(TOGGLE_FRIENDS) 21 | } 22 | 23 | function showModalKey () { 24 | const { SHOW_MODAL_KEY } = state.events 25 | emit(SHOW_MODAL_KEY) 26 | } 27 | 28 | function toggleTheme () { 29 | const { TOGGLE_THEME } = state.events 30 | emit(TOGGLE_THEME) 31 | } 32 | 33 | return html`` 43 | } 44 | 
-------------------------------------------------------------------------------- /problems/10/olaf/src/components/icons/clipboard.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | module.exports = function clipboardIcon () { 4 | return html` 16 | 17 | 18 | ` 19 | } 20 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/icons/key.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | module.exports = function keyIcon () { 4 | return html` 5 | 17 | 18 | 19 | 20 | 21 | 22 | ` 23 | } 24 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/icons/moon.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | module.exports = function moonIcon () { 4 | return html` 5 | 6 | ` 7 | } 8 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/icons/sun.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | module.exports = function sunIcon () { 4 | return html` 5 | 6 | ` 7 | } 8 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/icons/users.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | module.exports = function usersIcon () { 4 | return html` 5 | 17 | 18 | 19 | 20 | 21 | 22 | ` 23 | } 24 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/init-modal.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | 
module.exports = function modal (props, emit, events) { 4 | const { username, key } = props 5 | 6 | return html` 7 | 52 | ` 53 | 54 | function join (e) { 55 | e.stopPropagation() 56 | e.preventDefault() 57 | if (username && username.length > 0 && key && key.length > 0) { 58 | emit(events.INIT_ROOM) 59 | } 60 | } 61 | 62 | function createRoom (e) { 63 | e.stopPropagation() 64 | e.preventDefault() 65 | if (username && username.length > 0) { 66 | emit(events.INIT_ROOM, true) 67 | } 68 | } 69 | 70 | function updateUsername (e) { 71 | emit(events.UPDATE_USERNAME, e.target.value) 72 | } 73 | 74 | function updateKey (e) { 75 | emit(events.UPDATE_KEY, e.target.value) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/input-msg.js: -------------------------------------------------------------------------------- 1 | const Component = require('choo/component') 2 | const html = require('choo/html') 3 | 4 | module.exports = class InputMsg extends Component { 5 | constructor (name, state, emit) { 6 | super(name) 7 | this.state = state 8 | this.emit = emit 9 | } 10 | 11 | update () { 12 | return false 13 | } 14 | 15 | sendMessage = (e) => { 16 | e.preventDefault() 17 | const { events } = this.state 18 | const input = this.element.querySelector('#input-msg') 19 | 20 | if (input.value.length === 0) { 21 | return 22 | } 23 | 24 | this.emit(events.WRITE_MESSAGE, input.value) 25 | input.value = '' 26 | }; 27 | 28 | createElement () { 29 | return html` 30 |
31 |
32 |
33 | 34 | 42 | 47 |
48 |
49 |
50 | ` 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/key-modal.js: -------------------------------------------------------------------------------- 1 | const Component = require('choo/component') 2 | const html = require('choo/html') 3 | const copy = require('copy-to-clipboard') 4 | const QRCode = require('qrcode') 5 | 6 | const clipboardIcon = require('./icons/clipboard') 7 | 8 | const url = window.location.protocol + '//' + window.location.host 9 | 10 | module.exports = class KeyModal extends Component { 11 | constructor (name, state, emit) { 12 | super(name) 13 | this.state = state 14 | this.emit = emit 15 | } 16 | 17 | update ({ key }) { 18 | if (this.key !== key) { 19 | return true 20 | } 21 | } 22 | 23 | load (el) { 24 | this.loadQRCode(el) 25 | } 26 | 27 | afterupdate (el) { 28 | this.loadQRCode(el) 29 | } 30 | 31 | loadQRCode (el) { 32 | QRCode.toCanvas(el.querySelector('#qrcode'), `${url}?key=${this.key}`) 33 | } 34 | 35 | hideModalKey = () => { 36 | const { events: { HIDE_MODAL_KEY } } = this.state 37 | this.emit(HIDE_MODAL_KEY) 38 | }; 39 | 40 | copyToClipboard = (e) => { 41 | e.preventDefault() 42 | copy(`${url}?key=${this.key}`) 43 | }; 44 | 45 | createElement ({ key }) { 46 | this.key = key 47 | 48 | return html` 49 | 61 | ` 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/message.js: -------------------------------------------------------------------------------- 1 | const Component = require('choo/component') 2 | const html = require('choo/html') 3 | const raw = require('choo/html/raw') 4 | const tinydate = require('tinydate').default 5 | const anchorme = require('anchorme').default 6 | const fileType = require('file-type') 7 | 8 | const stamp = tinydate('{HH}:{mm}:{ss}') 9 | 10 | const parseMessage = message => { 11 | const anchor = anchorme(message, { list: true }) 12 | if 
(anchor.length) { 13 | // detect file type 14 | return Promise.all(anchor.map(async anchorData => { 15 | const controller = new window.AbortController() 16 | const signal = controller.signal 17 | 18 | const fetchPromise = window.fetch(anchorData.raw, { signal }) 19 | 20 | // 5 second timeout: 21 | setTimeout(() => controller.abort(), 5000) 22 | const response = await fetchPromise 23 | 24 | if (!response) return '' 25 | const ab = await response.arrayBuffer() 26 | const ft = fileType(ab) 27 | if (ft && ft.mime.includes('image')) { 28 | return html`
` 29 | } else return '' 30 | })).then(out => { 31 | // prepare output 32 | var f = anchorme(message) 33 | console.log(f) 34 | return html` 35 |
36 | ${out.filter(img => img)} 37 |
38 | ` 39 | }) 40 | } else { 41 | return Promise.resolve() 42 | } 43 | } 44 | 45 | class Message extends Component { 46 | constructor (id, choo, f, opts) { 47 | super() 48 | this.local = { 49 | extra: '' 50 | } 51 | this.parent = {} 52 | this.parent.updateHeight = opts.updateHeight 53 | } 54 | 55 | update ({ message }) { 56 | if (this.local.message !== message) return true 57 | } 58 | 59 | load (el) { 60 | parseMessage(this.local.message) 61 | .then(msg => { 62 | if (msg) { 63 | this.local.extra = msg 64 | this.rerender() 65 | } 66 | }) 67 | .catch(console.log) 68 | } 69 | 70 | createElement (props, color = 'green') { 71 | const { username, message, timestamp } = props 72 | const { extra } = this.local 73 | 74 | this.local.message = message 75 | 76 | const date = stamp(new Date(timestamp)) 77 | 78 | const colorStyle = color ? `color: ${color}` : '' 79 | 80 | return html` 81 |
82 |
83 |
84 |
85 |
86 |
87 | ${username} 88 |
89 |
90 |
91 |

92 | ${raw(anchorme(message))} 93 |

94 |
95 |
96 |
97 | ${extra} 98 |
99 |
100 | ⌚️ 101 |
102 |
103 |
104 |
105 | ` 106 | } 107 | 108 | afterupdate () { 109 | if (this.parent.updateHeight) { 110 | this.parent.updateHeight(this.element.scrollHeight) 111 | } 112 | } 113 | } 114 | 115 | module.exports = Message 116 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/user.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | module.exports = ({ owner = false, username, timestamp, color = 'green' }) => { 4 | if (!username || !timestamp) { 5 | return '' 6 | } 7 | 8 | let connectionTime = 'right now' 9 | const difference = Math.abs(new Date() - new Date(timestamp)) 10 | let time = Math.floor(difference / 36e5) // hours 11 | if (time > 0) { 12 | connectionTime = `${time} hours ago` 13 | } else { 14 | time = Math.floor(difference / 6e4) // minutes 15 | if (time > 0) { 16 | connectionTime = `${time} minutes ago` 17 | } 18 | } 19 | 20 | const colorStyle = color ? `color: ${color}` : '' 21 | 22 | return html` 23 |
  • 25 |
    26 | ${username}${owner ? ' (you)' : ''} 27 |
    28 |
    29 | ${connectionTime} 30 |
    31 |
  • 32 | ` 33 | } 34 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/users.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | const user = require('./user') 4 | 5 | module.exports = function users (state, emit) { 6 | const { chat: { friends, username, userTimestamp }, ui: { showFriendsPanel } } = state 7 | const users = friends.slice() 8 | users.sort((a, b) => a.timestamp - b.timestamp) 9 | 10 | const displayOnMobile = showFriendsPanel ? 'db flex-grow-1 flex-shrink-0' : 'dn' 11 | 12 | return html` 13 | 22 | ` 23 | } 24 | -------------------------------------------------------------------------------- /problems/10/olaf/src/components/view-messages.js: -------------------------------------------------------------------------------- 1 | const Component = require('choo/component') 2 | const html = require('choo/html') 3 | 4 | const Message = require('./message') 5 | 6 | const customStyle = 'outline: none;overflow-x: hidden;overflow-y: auto;transform: translateZ(0);' 7 | 8 | module.exports = class ViewMessages extends Component { 9 | constructor (name, state, emit) { 10 | super(name) 11 | this.state = state 12 | this.emit = emit 13 | this.local = this.state.components[name] = {} 14 | this.setState() 15 | } 16 | 17 | setState () { 18 | this.local.messages = this.state.chat.messages.slice() 19 | this.local.messages.sort((a, b) => a.timestamp - b.timestamp) 20 | } 21 | 22 | update () { 23 | const { chat: { messages } } = this.state 24 | if (this.local.messages.length !== messages.length) { 25 | this.setState() 26 | return true 27 | } 28 | } 29 | 30 | createElement () { 31 | return html` 32 |
    37 | ${this.local.messages.map(m => this.state.cache(Message, `message_${m.key}`, { updateHeight: this.updateHeight }).render(m, this.state.chat.colors[m.username]))} 38 |
    39 | ` 40 | } 41 | 42 | updateHeight = h => { 43 | this.element.scrollTo(0, this.element.scrollHeight + h + 10) 44 | } 45 | 46 | afterupdate (el) { 47 | el.scrollTo(0, el.scrollHeight) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /problems/10/olaf/src/config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | SIGNAL_URLS: 'wss://signalhubws-olaf.glitch.me', 3 | ICE_URLS: 'stun:stun.l.google.com:19302;stun:stun1.l.google.com:19302;stun:stun2.l.google.com:19302;stun:stun3.l.google.com:19302;stun:stun4.l.google.com:19302;stun:stun.ekiga.net;turn:numb.viagenie.ca,muazkh,webrtc@live.com;turn:192.158.29.39:3478?transport=udp,JZEOEt2V3Qb0y27GRntt2u2PAYA=,28224511:1379330808;turn:192.158.29.39:3478?transport=tcp,JZEOEt2V3Qb0y27GRntt2u2PAYA=,28224511:1379330808;turn:turn.bistri.com:80,homeo,homeo;turn:turn.anyfirewall.com:443?transport=tcp,webrtc,webrtc' 4 | } 5 | -------------------------------------------------------------------------------- /problems/10/olaf/src/index.css: -------------------------------------------------------------------------------- 1 | @import 'tachyons'; 2 | @import 'balloon-css/balloon.min.css'; 3 | 4 | html, body { 5 | height: 100%; 6 | } 7 | 8 | .break-word { 9 | overflow-wrap: break-word; 10 | } 11 | 12 | .modal-overlay { 13 | position: fixed; 14 | top: 0; 15 | right: 0; 16 | bottom: 0; 17 | left: 0; 18 | background: rgba(17, 17, 17, 0.51); 19 | } 20 | 21 | .icon-button { 22 | width: 40px; 23 | padding: 10px; 24 | height: 40px; 25 | } 26 | -------------------------------------------------------------------------------- /problems/10/olaf/src/index.js: -------------------------------------------------------------------------------- 1 | var choo = require('choo') 2 | 3 | var app = choo() 4 | if (process.env.NODE_ENV !== 'production') { 5 | app.use(require('choo-devtools')()) 6 | } else { 7 | 
app.use(require('choo-service-worker')('/service-worker.js')) 8 | } 9 | 10 | if (module.hot) { 11 | module.hot.accept(function () { 12 | window.location.reload() 13 | }) 14 | } 15 | 16 | app.use(require('./stores/chat')) 17 | app.use(require('./stores/ui')) 18 | 19 | app.route('/', require('./views/main')) 20 | app.route('/*', require('./views/404')) 21 | 22 | module.exports = app.mount('body') 23 | -------------------------------------------------------------------------------- /problems/10/olaf/src/lib/db-names.js: -------------------------------------------------------------------------------- 1 | const dbs = JSON.parse(localStorage.getItem('olaf/dbs')) || [] 2 | 3 | export const addDB = (dbName, pubKey) => { 4 | dbs.push({ dbName, pubKey }) 5 | localStorage.setItem('olaf/dbs', JSON.stringify(dbs)) 6 | return dbName 7 | } 8 | 9 | export const getDB = pubKey => { 10 | const db = dbs.find(db => db.pubKey === pubKey) 11 | if (db) { 12 | return db.dbName 13 | } 14 | 15 | return addDB(`olaf-${Date.now()}`, pubKey) 16 | } 17 | 18 | export const updateDB = (dbName, pubKey) => { 19 | const db = dbs.find(db => db.dbName === dbName) 20 | db.pubKey = pubKey 21 | localStorage.setItem('olaf/dbs', JSON.stringify(dbs)) 22 | } 23 | -------------------------------------------------------------------------------- /problems/10/olaf/src/lib/saga.js: -------------------------------------------------------------------------------- 1 | const EventEmitter = require('events') 2 | const { Writable } = require('stream') 3 | const hyperdb = require('hyperdb') 4 | const pump = require('pump') 5 | const hyperid = require('hyperid') 6 | const uuid = hyperid() 7 | 8 | class ForEachChunk extends Writable { 9 | constructor (opts, cb) { 10 | if (!cb) { 11 | cb = opts 12 | opts = {} 13 | } 14 | super(opts) 15 | 16 | this.cb = cb 17 | } 18 | 19 | _write (chunk, enc, next) { 20 | this.cb(chunk, enc, next) 21 | } 22 | } 23 | 24 | const forEachChunk = (...args) => new ForEachChunk(...args) 25 | 26 | 
class Saga extends EventEmitter { 27 | constructor (storage, key, username) { 28 | super() 29 | 30 | this.messages = new Map() 31 | this.users = new Map() 32 | this.username = username 33 | this.timestamp = Date.now() 34 | this.db = hyperdb(storage, key, { valueEncoding: 'json' }) 35 | } 36 | 37 | async initialize () { 38 | await this._ready() 39 | 40 | this._updateHistory(this._watchForMessages.bind(this)) 41 | } 42 | 43 | writeMessage (message) { 44 | const key = `messages/${uuid()}` 45 | const data = { 46 | key, 47 | message, 48 | username: this.username, 49 | timestamp: Date.now() 50 | } 51 | 52 | return new Promise((resolve, reject) => { 53 | this.db.put(key, data, (err) => { 54 | if (err) { 55 | reject(err) 56 | } else { 57 | resolve(key) 58 | } 59 | }) 60 | }) 61 | } 62 | 63 | replicate () { 64 | return this.db.replicate({ 65 | live: true, 66 | userData: JSON.stringify({ 67 | key: this.db.local.key, 68 | username: this.username, 69 | timestamp: this.timestamp 70 | }) 71 | }) 72 | } 73 | 74 | async connect (peer) { 75 | if (!peer.remoteUserData) { 76 | throw new Error('peer does not have userData') 77 | } 78 | 79 | const data = JSON.parse(peer.remoteUserData) 80 | 81 | const key = Buffer.from(data.key) 82 | const username = data.username 83 | 84 | await this._authorize(key) 85 | 86 | if (!this.users.has(username)) { 87 | this.users.set(username, new Date()) 88 | this.emit('join', data) 89 | peer.on('close', () => { 90 | if (!this.users.has(username)) return 91 | this.users.delete(username) 92 | this.emit('leave', data) 93 | }) 94 | } 95 | } 96 | 97 | _authorize (key) { 98 | return new Promise((resolve, reject) => { 99 | this.db.authorized(key, (err, auth) => { 100 | if (err) return reject(err) 101 | 102 | if (auth) { 103 | return resolve() 104 | } 105 | 106 | this.db.authorize(key, (err) => { 107 | if (err) return reject(err) 108 | resolve() 109 | }) 110 | }) 111 | }) 112 | } 113 | 114 | _updateHistory (onFinish) { 115 | const h = 
this.db.createHistoryStream({ reverse: true }) 116 | 117 | const ws = forEachChunk({ objectMode: true }, (data, enc, next) => { 118 | const { key, value } = data 119 | 120 | if (/messages/.test(key)) { 121 | if (this.messages.has(key)) { 122 | h.destroy() 123 | return 124 | } 125 | 126 | this.messages.set(key, value) 127 | this.emit('message', value, key) 128 | } 129 | 130 | next() 131 | }) 132 | 133 | pump(h, ws, onFinish) 134 | } 135 | 136 | _watchForMessages () { 137 | this.db.watch('messages', () => { 138 | this._updateHistory() 139 | }) 140 | } 141 | 142 | _ready () { 143 | return new Promise(resolve => this.db.ready(resolve)) 144 | } 145 | } 146 | 147 | module.exports = (...args) => new Saga(...args) 148 | -------------------------------------------------------------------------------- /problems/10/olaf/src/lib/theme.js: -------------------------------------------------------------------------------- 1 | const COLORS = exports.COLORS = { 2 | light: { 3 | name: { 4 | bg: 'bg-washed-red', 5 | color: 'dark-gray' 6 | }, 7 | hex: { 8 | bg: '#FFDFDF', 9 | color: '#333333' 10 | } 11 | }, 12 | dark: { 13 | name: { 14 | bg: 'bg-navy', 15 | color: 'moon-gray' 16 | }, 17 | hex: { 18 | bg: '#001B44', 19 | color: '#CCCCCC' 20 | } 21 | } 22 | } 23 | 24 | exports.THEME = { 25 | light: `${COLORS.light.name.bg} ${COLORS.light.name.color}`, 26 | dark: `${COLORS.dark.name.bg} ${COLORS.dark.name.color}` 27 | } 28 | -------------------------------------------------------------------------------- /problems/10/olaf/src/stores/chat.js: -------------------------------------------------------------------------------- 1 | const signalhub = require('signalhubws') 2 | const rai = require('random-access-idb') 3 | const saga = require('../lib/saga') 4 | const { getDB, updateDB } = require('../lib/db-names') 5 | const { SIGNAL_URLS, ICE_URLS } = require('../config') 6 | const swarm = require('@geut/discovery-swarm-webrtc') 7 | const { COLORS } = require('../lib/theme') 8 | const rcolor = 
require('random-color') 9 | const contrast = require('color-contrast') 10 | 11 | const webrtcOpts = { 12 | config: { 13 | iceServers: (process.env.ICE_URLS || ICE_URLS).split(';').map(data => { 14 | const [urls, credential, username] = data.split(',') 15 | 16 | if (credential && username) { 17 | return { 18 | urls, 19 | credential, 20 | username 21 | } 22 | } 23 | 24 | return { urls } 25 | }) 26 | } 27 | } 28 | console.log('ICE Servers: ', webrtcOpts.config.iceServers) 29 | 30 | async function initChat (username, key) { 31 | const publicKey = key && key.length > 0 ? key : null 32 | const dbName = getDB(publicKey) 33 | const chat = saga(rai(dbName), publicKey, username) 34 | 35 | await chat.initialize() 36 | 37 | if (publicKey === null) { 38 | updateDB(dbName, chat.db.key.toString('hex')) 39 | } 40 | 41 | const sw = swarm({ 42 | id: username, 43 | stream: () => chat.replicate() 44 | }) 45 | 46 | const discoveryKey = chat.db.discoveryKey.toString('hex') 47 | const signalUrls = (process.env.SIGNAL_URLS || SIGNAL_URLS).split(';') 48 | 49 | sw.join(signalhub(discoveryKey, signalUrls), webrtcOpts) 50 | 51 | sw.on('connection', async peer => { 52 | try { 53 | await chat.connect(peer) 54 | } catch (err) { 55 | console.log(err) 56 | } 57 | }) 58 | 59 | return chat 60 | } 61 | 62 | const TIMEOUT_DISCONNECTION = 30000 63 | 64 | function store (state, emitter) { 65 | state.storeName = 'chat' 66 | 67 | // declare app events 68 | const { events } = state 69 | events.INIT_ROOM = 'chat:init_room' 70 | events.UPDATE_USERNAME = 'chat:update_username' 71 | events.UPDATE_KEY = 'chat:update_key' 72 | events.JOIN_FRIEND = 'chat:join_friend' 73 | events.LEAVE_FRIEND = 'chat:leave_friend' 74 | events.WRITE_MESSAGE = 'chat:write_message' 75 | events.ADD_MESSAGE = 'chat:add_message' 76 | 77 | let chat 78 | const timers = new Map() 79 | 80 | state.chat = { 81 | initRoom: false, 82 | key: null, 83 | username: null, 84 | userTimestamp: null, 85 | messages: [], 86 | friends: [], 87 | colors: {} 
88 | } 89 | 90 | emitter.on('DOMContentLoaded', function () { 91 | rehydrate() 92 | // Usually in Choo apps when the DOMContentLoaded is triggered 93 | // that is a good time to attach our listeners. 94 | emitter.on(events.INIT_ROOM, initRoom) 95 | emitter.on(events.UPDATE_USERNAME, updateUsername) 96 | emitter.on(events.UPDATE_KEY, updateKey) 97 | emitter.on(events.ADD_MESSAGE, addMessage) 98 | emitter.on(events.WRITE_MESSAGE, writeMessage) 99 | emitter.on(events.JOIN_FRIEND, joinFriend) 100 | emitter.on(events.LEAVE_FRIEND, leaveFriend) 101 | }) 102 | 103 | function rehydrate () { 104 | const data = JSON.parse(localStorage.getItem('olaf/last-room')) 105 | 106 | state.chat.username = data ? data.username : null 107 | 108 | if (state.query.key) { 109 | state.chat.key = state.query.key 110 | } else { 111 | state.chat.key = data ? data.key : null 112 | } 113 | 114 | render() 115 | } 116 | 117 | async function initRoom (isNew = false) { 118 | chat = await initChat(state.chat.username, isNew ? null : state.chat.key) 119 | 120 | state.chat.key = chat.db.key.toString('hex') 121 | state.chat.userTimestamp = chat.timestamp 122 | state.chat.init = true 123 | 124 | localStorage.setItem('olaf/last-room', JSON.stringify({ username: state.chat.username, key: state.chat.key })) 125 | 126 | // ********************************** // 127 | // Lets connect Saga with Olaf // 128 | chat.on('message', data => { 129 | // ... 130 | }) 131 | 132 | chat.on('join', user => { 133 | // ... 134 | }) 135 | 136 | chat.on('leave', user => { 137 | // ... 
138 | }) 139 | // ********************************** // 140 | 141 | render() 142 | } 143 | 144 | function updateUsername (username) { 145 | state.chat.username = username 146 | render() 147 | } 148 | 149 | function updateKey (key) { 150 | state.chat.key = key 151 | render() 152 | } 153 | 154 | function writeMessage (msg) { 155 | chat.writeMessage(msg) 156 | render() 157 | } 158 | 159 | function joinFriend (user) { 160 | const index = state.chat.friends.findIndex(u => u.username === user.username) 161 | 162 | // check if the user already exists 163 | if (index !== -1) { 164 | // check if it has a timer to disconnect 165 | if (timers.has(user.username)) { 166 | clearTimeout(timers.get(user.username)) 167 | timers.delete(user.username) 168 | } 169 | return 170 | } 171 | 172 | let newColor = rcolor(0.99, 0.99).hexString() 173 | const currentTheme = state.ui.toggleTheme ? 'light' : 'dark' 174 | while (contrast(COLORS[currentTheme].hex.bg, newColor) < 4) { 175 | newColor = rcolor(0.99, 0.99).hexString() 176 | } 177 | user.color = newColor 178 | state.chat.colors[user.username] = user.color 179 | state.chat.friends.push(user) 180 | render() 181 | } 182 | 183 | function leaveFriend (user) { 184 | const index = state.chat.friends.findIndex(u => u.username === user.username) 185 | if (index !== -1) { 186 | // the webrtc connection could be losted for a moment so it's better wait a couple of seconds 187 | timers.set(user.username, setTimeout(() => { 188 | state.chat.friends.splice(index, 1) 189 | timers.delete(user.username) 190 | render() 191 | }, TIMEOUT_DISCONNECTION)) 192 | } 193 | } 194 | 195 | function addMessage (data) { 196 | state.chat.messages.push(data) 197 | render() 198 | } 199 | 200 | function render () { 201 | emitter.emit('render') 202 | } 203 | } 204 | 205 | module.exports = store 206 | -------------------------------------------------------------------------------- /problems/10/olaf/src/stores/chat.solution.js: 
-------------------------------------------------------------------------------- 1 | const signalhub = require('signalhubws') 2 | const rai = require('random-access-idb') 3 | const saga = require('../lib/saga') 4 | const { getDB, updateDB } = require('../lib/db-names') 5 | const { SIGNAL_URLS, ICE_URLS } = require('../config') 6 | const swarm = require('@geut/discovery-swarm-webrtc') 7 | const { COLORS } = require('../lib/theme') 8 | const rcolor = require('random-color') 9 | const contrast = require('color-contrast') 10 | 11 | const webrtcOpts = { 12 | config: { 13 | iceServers: (process.env.ICE_URLS || ICE_URLS).split(';').map(data => { 14 | const [urls, credential, username] = data.split(',') 15 | 16 | if (credential && username) { 17 | return { 18 | urls, 19 | credential, 20 | username 21 | } 22 | } 23 | 24 | return { urls } 25 | }) 26 | } 27 | } 28 | console.log('ICE Servers: ', webrtcOpts.config.iceServers) 29 | 30 | async function initChat (username, key) { 31 | const publicKey = key && key.length > 0 ? 
key : null 32 | const dbName = getDB(publicKey) 33 | const chat = saga(rai(dbName), publicKey, username) 34 | 35 | await chat.initialize() 36 | 37 | if (publicKey === null) { 38 | updateDB(dbName, chat.db.key.toString('hex')) 39 | } 40 | 41 | const sw = swarm({ 42 | id: username, 43 | stream: () => chat.replicate() 44 | }) 45 | 46 | const discoveryKey = chat.db.discoveryKey.toString('hex') 47 | const signalUrls = (process.env.SIGNAL_URLS || SIGNAL_URLS).split(';') 48 | 49 | sw.join(signalhub(discoveryKey, signalUrls), webrtcOpts) 50 | 51 | sw.on('connection', async peer => { 52 | try { 53 | await chat.connect(peer) 54 | } catch (err) { 55 | console.log(err) 56 | } 57 | }) 58 | 59 | return chat 60 | } 61 | 62 | const TIMEOUT_DISCONNECTION = 30000 63 | 64 | function store (state, emitter) { 65 | state.storeName = 'chat' 66 | 67 | // declare app events 68 | const { events } = state 69 | events.INIT_ROOM = 'chat:init_room' 70 | events.UPDATE_USERNAME = 'chat:update_username' 71 | events.UPDATE_KEY = 'chat:update_key' 72 | events.JOIN_FRIEND = 'chat:join_friend' 73 | events.LEAVE_FRIEND = 'chat:leave_friend' 74 | events.WRITE_MESSAGE = 'chat:write_message' 75 | events.ADD_MESSAGE = 'chat:add_message' 76 | 77 | let chat 78 | const timers = new Map() 79 | 80 | state.chat = { 81 | initRoom: false, 82 | key: null, 83 | username: null, 84 | userTimestamp: null, 85 | messages: [], 86 | friends: [], 87 | colors: {} 88 | } 89 | 90 | emitter.on('DOMContentLoaded', function () { 91 | rehydrate() 92 | emitter.on(events.INIT_ROOM, initRoom) 93 | emitter.on(events.UPDATE_USERNAME, updateUsername) 94 | emitter.on(events.UPDATE_KEY, updateKey) 95 | emitter.on(events.ADD_MESSAGE, addMessage) 96 | emitter.on(events.WRITE_MESSAGE, writeMessage) 97 | emitter.on(events.JOIN_FRIEND, joinFriend) 98 | emitter.on(events.LEAVE_FRIEND, leaveFriend) 99 | }) 100 | 101 | function rehydrate () { 102 | const data = JSON.parse(localStorage.getItem('olaf/last-room')) 103 | 104 | state.chat.username = 
data ? data.username : null 105 | 106 | if (state.query.key) { 107 | state.chat.key = state.query.key 108 | } else { 109 | state.chat.key = data ? data.key : null 110 | } 111 | 112 | render() 113 | } 114 | 115 | async function initRoom (isNew = false) { 116 | chat = await initChat(state.chat.username, isNew ? null : state.chat.key) 117 | 118 | state.chat.key = chat.db.key.toString('hex') 119 | state.chat.userTimestamp = chat.timestamp 120 | state.chat.init = true 121 | 122 | localStorage.setItem('olaf/last-room', JSON.stringify({ username: state.chat.username, key: state.chat.key })) 123 | 124 | chat.on('message', data => { 125 | emitter.emit(events.ADD_MESSAGE, data) 126 | }) 127 | 128 | chat.on('join', user => { 129 | emitter.emit(events.JOIN_FRIEND, user) 130 | }) 131 | 132 | chat.on('leave', user => { 133 | emitter.emit(events.LEAVE_FRIEND, user) 134 | }) 135 | 136 | render() 137 | } 138 | 139 | function updateUsername (username) { 140 | state.chat.username = username 141 | render() 142 | } 143 | 144 | function updateKey (key) { 145 | state.chat.key = key 146 | render() 147 | } 148 | 149 | function writeMessage (msg) { 150 | chat.writeMessage(msg) 151 | render() 152 | } 153 | 154 | function joinFriend (user) { 155 | const index = state.chat.friends.findIndex(u => u.username === user.username) 156 | 157 | // check if the user already exists 158 | if (index !== -1) { 159 | // check if it has a timer to disconnect 160 | if (timers.has(user.username)) { 161 | clearTimeout(timers.get(user.username)) 162 | timers.delete(user.username) 163 | } 164 | return 165 | } 166 | 167 | let newColor = rcolor(0.99, 0.99).hexString() 168 | const currentTheme = state.ui.toggleTheme ? 
'light' : 'dark' 169 | while (contrast(COLORS[currentTheme].hex.bg, newColor) < 4) { 170 | newColor = rcolor(0.99, 0.99).hexString() 171 | } 172 | user.color = newColor 173 | state.chat.colors[user.username] = user.color 174 | state.chat.friends.push(user) 175 | render() 176 | } 177 | 178 | function leaveFriend (user) { 179 | const index = state.chat.friends.findIndex(u => u.username === user.username) 180 | if (index !== -1) { 181 | // the webrtc connection could be losted for a moment so it's better wait a couple of seconds 182 | timers.set(user.username, setTimeout(() => { 183 | state.chat.friends.splice(index, 1) 184 | timers.delete(user.username) 185 | render() 186 | }, TIMEOUT_DISCONNECTION)) 187 | } 188 | } 189 | 190 | function addMessage (data) { 191 | state.chat.messages.push(data) 192 | render() 193 | } 194 | 195 | function render () { 196 | emitter.emit('render') 197 | } 198 | } 199 | 200 | module.exports = store 201 | -------------------------------------------------------------------------------- /problems/10/olaf/src/stores/ui.js: -------------------------------------------------------------------------------- 1 | function Store (state, emitter) { 2 | state.storeName = 'ui' 3 | 4 | // declare app events 5 | const { events } = state 6 | events.SHOW_MODAL_KEY = 'ui:show_modal_key' 7 | events.HIDE_MODAL_KEY = 'ui:hide_modal_key' 8 | events.TOGGLE_FRIENDS = 'ui:toggle_friends' 9 | events.TOGGLE_THEME = 'ui:toggle_theme' 10 | 11 | state.ui = { 12 | showModalKey: false, 13 | showFriendsPanel: false, 14 | toggleTheme: true 15 | } 16 | 17 | emitter.on('DOMContentLoaded', function () { 18 | emitter.on(events.SHOW_MODAL_KEY, showModalKey) 19 | emitter.on(events.HIDE_MODAL_KEY, hideModalKey) 20 | emitter.on(events.TOGGLE_FRIENDS, toggleFriends) 21 | emitter.on(events.TOGGLE_THEME, toggleTheme) 22 | }) 23 | 24 | function showModalKey () { 25 | state.ui.showModalKey = true 26 | emitter.emit('render') 27 | } 28 | 29 | function hideModalKey () { 30 | 
state.ui.showModalKey = false 31 | emitter.emit('render') 32 | } 33 | 34 | function toggleFriends () { 35 | state.ui.showFriendsPanel = !state.ui.showFriendsPanel 36 | emitter.emit('render') 37 | } 38 | 39 | function toggleTheme () { 40 | state.ui.toggleTheme = !state.ui.toggleTheme 41 | emitter.emit('render') 42 | } 43 | } 44 | 45 | module.exports = Store 46 | -------------------------------------------------------------------------------- /problems/10/olaf/src/views/404.js: -------------------------------------------------------------------------------- 1 | var html = require('choo/html') 2 | 3 | var TITLE = 'olaf - route not found' 4 | 5 | module.exports = view 6 | 7 | function view (state, emit) { 8 | if (state.title !== TITLE) emit(state.events.DOMTITLECHANGE, TITLE) 9 | return html` 10 | 11 |

    Route not found.

    12 | Back to main. 13 | 14 | ` 15 | } 16 | -------------------------------------------------------------------------------- /problems/10/olaf/src/views/main.js: -------------------------------------------------------------------------------- 1 | const html = require('choo/html') 2 | 3 | const { THEME } = require('../lib/theme') 4 | const initModal = require('../components/init-modal') 5 | const header = require('../components/header') 6 | const users = require('../components/users') 7 | 8 | const InputMsg = require('../components/input-msg') 9 | const ViewMessages = require('../components/view-messages') 10 | const KeyModal = require('../components/key-modal') 11 | 12 | module.exports = view 13 | 14 | function view (state, emit) { 15 | const { username, key, init } = state.chat 16 | const { showModalKey, toggleTheme } = state.ui 17 | 18 | const theme = toggleTheme ? THEME.light : THEME.dark 19 | 20 | return html` 21 | 22 |
    23 | ${header(state, emit)} 24 |
    25 | ${state.cache(ViewMessages, 'viewMessages').render()} 26 | ${users(state, emit)} 27 |
    28 |
    29 | ${state.cache(InputMsg, 'inputMsg').render()} 30 |
    31 |
    32 | ${(!init) ? initModal({ username, key }, this.emit, this.state.events) : ''} 33 | ${showModalKey ? state.cache(KeyModal, 'keyModal').render({ key }) : ''} 34 | 35 | ` 36 | } 37 | -------------------------------------------------------------------------------- /problems/11/README.es.md: -------------------------------------------------------------------------------- 1 | # 11 - Lo lograste! 2 | 3 | Si llegaste hasta aquí significa que lo lograste (o simplemente fuiste saltando los links rápidamente) :stuck_out_tongue_closed_eyes: 4 | 5 | De cualquier forma, este documento esta aquí para felicitarte y nada va a cambiar eso: 6 | 7 | :tada: FELICITACIONES AMIGUE, 8 | 9 | BIENVENIDO AL MUNDO P2P! :tada: 10 | 11 | ![party god from adventure time](https://bukk.it/partygod.gif) 12 | ___ 13 | :clap: Gracias por acompañarnos en esta aventura, 14 | 15 | Tus amigues de GEUT. 16 | -------------------------------------------------------------------------------- /problems/11/README.md: -------------------------------------------------------------------------------- 1 | # 11 - Lo lograste! 2 | 3 | Si llegaste hasta aquí significa que lo lograste (o simplemente fuiste saltando los links rápidamente) :stuck_out_tongue_closed_eyes: 4 | 5 | De cualquier forma, este documento esta aquí para felicitarte y nada va a cambiar eso: 6 | 7 | :tada: FELICITACIONES AMIGUE, 8 | 9 | BIENVENIDO AL MUNDO P2P! :tada: 10 | 11 | ![party god from adventure time](https://bukk.it/partygod.gif) 12 | ___ 13 | :clap: Gracias por acompañarnos en esta aventura, 14 | 15 | Tus amigues de GEUT. 
16 | -------------------------------------------------------------------------------- /sw.js: -------------------------------------------------------------------------------- 1 | /* =========================================================== 2 | * docsify sw.js 3 | * =========================================================== 4 | * Copyright 2016 @huxpro 5 | * Licensed under Apache 2.0 6 | * Register service worker. 7 | * ========================================================== */ 8 | 9 | const RUNTIME = 'docsify' 10 | const HOSTNAME_WHITELIST = [ 11 | self.location.hostname, 12 | 'fonts.gstatic.com', 13 | 'fonts.googleapis.com', 14 | 'unpkg.com' 15 | ] 16 | 17 | // The Util Function to hack URLs of intercepted requests 18 | const getFixedUrl = (req) => { 19 | var now = Date.now() 20 | var url = new URL(req.url) 21 | 22 | // 1. fixed http URL 23 | // Just keep syncing with location.protocol 24 | // fetch(httpURL) belongs to active mixed content. 25 | // And fetch(httpRequest) is not supported yet. 26 | url.protocol = self.location.protocol 27 | 28 | // 2. add query for caching-busting. 29 | // Github Pages served with Cache-Control: max-age=600 30 | // max-age on mutable content is error-prone, with SW life of bugs can even extend. 31 | // Until cache mode of Fetch API landed, we have to workaround cache-busting with query string. 32 | // Cache-Control-Bug: https://bugs.chromium.org/p/chromium/issues/detail?id=453190 33 | if (url.hostname === self.location.hostname) { 34 | url.search += (url.search ? '&' : '?') + 'cache-bust=' + now 35 | } 36 | return url.href 37 | } 38 | 39 | /** 40 | * @Lifecycle Activate 41 | * New one activated when old isnt being used. 42 | * 43 | * waitUntil(): activating ====> activated 44 | */ 45 | self.addEventListener('activate', event => { 46 | event.waitUntil(self.clients.claim()) 47 | }) 48 | 49 | /** 50 | * @Functional Fetch 51 | * All network requests are being intercepted here. 
52 | * 53 | * void respondWith(Promise r) 54 | */ 55 | self.addEventListener('fetch', event => { 56 | // Skip some of cross-origin requests, like those for Google Analytics. 57 | if (HOSTNAME_WHITELIST.indexOf(new URL(event.request.url).hostname) > -1) { 58 | // Stale-while-revalidate 59 | // similar to HTTP's stale-while-revalidate: https://www.mnot.net/blog/2007/12/12/stale 60 | // Upgrade from Jake's to Surma's: https://gist.github.com/surma/eb441223daaedf880801ad80006389f1 61 | const cached = caches.match(event.request) 62 | const fixedUrl = getFixedUrl(event.request) 63 | const fetched = fetch(fixedUrl, { cache: 'no-store' }) 64 | const fetchedCopy = fetched.then(resp => resp.clone()) 65 | 66 | // Call respondWith() with whatever we get first. 67 | // If the fetch fails (e.g disconnected), wait for the cache. 68 | // If there’s nothing in cache, wait for the fetch. 69 | // If neither yields a response, return offline pages. 70 | event.respondWith( 71 | Promise.race([fetched.catch(_ => cached), cached]) 72 | .then(resp => resp || fetched) 73 | .catch(_ => { /* eat any errors */ }) 74 | ) 75 | 76 | // Update the cache with the version we fetched (only for ok status) 77 | event.waitUntil( 78 | Promise.all([fetchedCopy, caches.open(RUNTIME)]) 79 | .then(([response, cache]) => response.ok && cache.put(event.request, response)) 80 | .catch(_ => { /* eat any errors */ }) 81 | ) 82 | } 83 | }) 84 | -------------------------------------------------------------------------------- /terms.md: -------------------------------------------------------------------------------- 1 | # Terminologia 2 | 3 | ## feed 4 | Nuestra instancia de hypercore es un feed de datos. Los feeds son estructuras de datos que 5 | pueden ser compartidas y sincronizadas por medio de una red. 6 | 7 | ## owner 8 | Peer que tiene permisos de escritura en el feed. Es decir, que tiene una secret key para escribir en el feed 9 | o que su public key fue autorizada en una instancia de Hyperdb. 
10 | 11 | ## pipe 12 | Término que recibe la acción de conectar 2 o más streams. 13 | ``` 14 | a | b | c 15 | ``` 16 | 17 | ## replicar stream 18 | Los feeds pueden crear con `replicate()` un stream que puede ser conectado (piped) a un peer con un feed remoto. 19 | Es utilizado para sincronizar feeds. 20 | 21 | ## swarm 22 | Podemos definir a un swarm como un grupo de peers conectados para un propósito, servicio o recurso mutuo. 23 | 24 | ## peer 25 | Llamamos _peer_ a cualquier nodo con IP:PORT conectado a una red. 26 | --------------------------------------------------------------------------------