├── LICENSE
├── README.md
├── appendixB
│   ├── 1
│   │   └── cutlery_asyncio.py
│   ├── 2
│   │   └── index.html
│   ├── 3
│   │   └── charts.html
│   ├── 4
│   │   └── triggers.py
│   └── 5
│       └── perf.py
├── chapter2
│   ├── 1
│   │   └── threading_best_practice.py
│   ├── 2,3
│   │   └── cutlery_test.py
│   └── threadmem.py
├── chapter3
│   ├── 1
│   │   └── quickstart.py
│   ├── 2
│   │   └── quickstart.py
│   ├── 3
│   │   └── quickstart_exe.py
│   ├── 6
│   │   └── coro_send.py
│   ├── 7
│   │   └── using_await.py
│   ├── 8
│   │   └── inject_exception_into_coro.py
│   ├── 9
│   │   └── cancel_coro.py
│   ├── 10
│   │   └── absorb_cancel_and_move_on.py
│   ├── 11
│   │   └── exec_coro_w_event_loop.py
│   ├── 12
│   │   └── always_same_event_loop.py
│   ├── 13
│   │   └── create_tasks.py
│   ├── 14
│   │   └── create_tasks_the_modern_way.py
│   ├── 15
│   │   └── check_future_complete_status.py
│   ├── 16
│   │   └── interact_w_future_instance.py
│   ├── 17
│   │   └── set_result_on_task.py
│   ├── 18
│   │   └── ensure_future.py
│   ├── 19
│   │   └── listify.py
│   ├── 20
│   │   └── async_context_manager.py
│   ├── 21
│   │   └── contextlib_contextmanager.py
│   ├── 22
│   │   └── contextlib_asynccontextmanager.py
│   ├── 23
│   │   └── run_in_executor_example.py
│   ├── 24
│   │   └── nonasync_iterator.py
│   ├── 25
│   │   └── async_iterator_redis_example.py
│   ├── 26
│   │   └── async_generator_redis_example.py
│   ├── 27
│   │   └── async_comprehensions_example1.py
│   ├── 28
│   │   └── async_comprehensions_example2.py
│   ├── 29
│   │   └── taskwarning.py
│   ├── 30
│   │   └── telnetdemo.py
│   ├── 31
│   │   └── telnetdemo.py
│   ├── 32
│   │   └── alltaskscomplete.py
│   ├── 33
│   │   └── shell_signal01.py
│   ├── 34
│   │   └── shell_signal02.py
│   ├── 35
│   │   └── shell_signal02b.py
│   ├── 36
│   │   └── quickstart.py
│   ├── 37
│   │   └── quickstart.py
│   ├── 38
│   │   └── quickstart.py
│   ├── 39
│   │   └── quickstart.py
│   └── 4,5
│       └── async_func_are_func_not_coro.py
├── chapter4
│   ├── 10
│   │   └── twisted_defer_example.py
│   ├── 11
│   │   └── twisted_asyncio.py
│   ├── 12
│   │   └── janus_demo.py
│   ├── 13
│   │   └── aiohttp_example.py
│   ├── 14
│   │   └── news_scraper.py
│   ├── 15
│   │   └── poller.py
│   ├── 16
│   │   └── poller_srv.py
│   ├── 17
│   │   └── poller_aio.py
│   ├── 18
│   │   └── backend-app.py
│   ├── 19
│   │   └── metric-server.py
│   ├── 20
│   │   └── visualization_layer.snip.js
│   ├── 1-9
│   │   ├── __pycache__
│   │   │   └── msgproto.cpython-38.pyc
│   │   ├── mq_client_listen.py
│   │   ├── mq_client_sender.py
│   │   ├── mq_server.py
│   │   ├── mq_server_plus.py
│   │   └── msgproto.py
│   └── 21,22,23,24
│       ├── __pycache__
│       │   ├── model.cpython-38.pyc
│       │   ├── perf.cpython-38.pyc
│       │   ├── triggers.cpython-38.pyc
│       │   └── util.cpython-38.pyc
│       ├── asyncpg-basic.py
│       ├── model.py
│       ├── perf.py
│       ├── sanic_demo.py
│       ├── triggers.py
│       └── util.py
├── requirements.txt
└── resource
    └── book_cover.jpg
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year> <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program> Copyright (C) <year> <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Using Asyncio in Python
2 | This project contains the code examples from [Caleb Hattingh](https://github.com/cjrh)'s O'Reilly book [Using Asyncio in Python: Understanding Python's Asynchronous Programming Features](https://www.oreilly.com/library/view/using-asyncio-in/9781492075325/).
3 |
4 | ![Book cover](resource/book_cover.jpg)
5 |
6 | **Note**: Some of the examples have been slightly modified to be more easily runnable.
7 |
--------------------------------------------------------------------------------
/appendixB/1/cutlery_asyncio.py:
--------------------------------------------------------------------------------
1 | # Example B-1. Cutlery management using asyncio
2 | import sys
3 | import asyncio
4 | from attr import attrs, attrib
5 |
6 |
7 | # Instead of a ThreadBot, we now have a CoroBot. This code sample uses only
8 | # one thread, and that thread will be managing all 10 separate CoroBot object
9 | # instances—one for each table in the restaurant.
10 | class CoroBot():
11 | def __init__(self):
12 | self.cutlery = Cutlery(knives=0, forks=0)
13 |         # Instead of queue.Queue, we’re using the asyncio-enabled queue.
14 | self.tasks = asyncio.Queue()
15 |
16 | async def manage_table(self):
17 | while True:
18 |             # This is the main point: the only places at which execution can
19 |             # switch between different CoroBot instances are where the await
20 | # keyword appears. It is not possible to have a context switch
21 | # during the rest of this function, and this is why there is no
22 | # race condition during the modification of the kitchen cutlery
23 | # inventory.
24 | task = await self.tasks.get()
25 | if task == 'prepare table':
26 | kitchen.give(to=self.cutlery, knives=4, forks=4)
27 | elif task == 'clear table':
28 | self.cutlery.give(to=kitchen, knives=4, forks=4)
29 | elif task == 'shutdown':
30 | return
31 |
32 |
33 | @attrs
34 | class Cutlery:
35 | knives = attrib(default=0)
36 | forks = attrib(default=0)
37 |
38 | def give(self, to: 'Cutlery', knives=0, forks=0):
39 | self.change(-knives, -forks)
40 | to.change(knives, forks)
41 |
42 | def change(self, knives, forks):
43 | self.knives += knives
44 | self.forks += forks
45 |
46 |
47 | kitchen = Cutlery(knives=100, forks=100)
48 | bots = [CoroBot() for i in range(10)]
49 | for b in bots:
50 | for i in range(int(sys.argv[1])):
51 | b.tasks.put_nowait('prepare table')
52 | b.tasks.put_nowait('clear table')
53 | b.tasks.put_nowait('shutdown')
54 | print('Kitchen inventory before service:', kitchen)
55 | loop = asyncio.get_event_loop()
56 | tasks = []
57 | for b in bots:
58 | t = loop.create_task(b.manage_table())
59 | tasks.append(t)
60 | task_group = asyncio.gather(*tasks)
61 | loop.run_until_complete(task_group)
62 | print('Kitchen inventory after service:', kitchen)
63 |
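64 | # Usage note (not part of the book's listing): the script takes the number of
65 | # prepare/clear rounds per bot as its single command-line argument, e.g.
66 | #
67 | #     python cutlery_asyncio.py 100
68 | #
69 | # The before/after inventory counts always match, because a context switch can
70 | # happen only at an await, so the += updates in change() never interleave.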
--------------------------------------------------------------------------------
/appendixB/2/index.html:
--------------------------------------------------------------------------------
1 | <!-- The original HTML markup was lost when tags were stripped from this dump.
2 |      Recoverable content: a page titled "The News", a large inline style block
3 |      (original lines 6-38), an h1 heading "The News", and a "$body" template
4 |      placeholder that the news scraper example fills with rendered articles. -->
--------------------------------------------------------------------------------
/appendixB/3/charts.html:
--------------------------------------------------------------------------------
1 | <!-- The original HTML markup was lost when tags were stripped from this dump.
2 |      Recoverable content: a page titled "Server Performance", large inline
3 |      script blocks for the live charts, and two chart headings: "CPU (%)"
4 |      and "Memory usage (MB)". -->
--------------------------------------------------------------------------------
/appendixB/4/triggers.py:
--------------------------------------------------------------------------------
1 | # Example B-4. triggers.py
2 |
3 | # Defined in chapter4/21,22,23,24/
4 |
--------------------------------------------------------------------------------
/appendixB/5/perf.py:
--------------------------------------------------------------------------------
1 | # Example B-5. perf.py
2 |
3 | # Defined in chapter4/21,22,23,24/
4 |
--------------------------------------------------------------------------------
/chapter2/1/threading_best_practice.py:
--------------------------------------------------------------------------------
1 | # Example 2-1. Best practice for threading
2 | from concurrent.futures import ThreadPoolExecutor as Executor
3 |
4 |
5 | def worker(data):
6 |     print(f'Processing {data!r}')  # stand-in; the listing elides the real work
7 |
8 |
9 | with Executor(max_workers=10) as exe:
10 | future = exe.submit(worker, 'data')
11 |
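12 | # Note (not in the original listing): on exit, the with-block waits for all
13 | # submitted work to finish (it calls Executor.shutdown(wait=True)), so no
14 | # worker threads outlive the program.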
--------------------------------------------------------------------------------
/chapter2/2,3/cutlery_test.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import sys
3 |
4 | from queue import Queue
5 | from attr import attrs, attrib
6 |
7 |
8 | # Example 2-2. ThreadBot programming for table service
9 | class ThreadBot(threading.Thread):
10 | # A ThreadBot is a subclass of a thread
11 | def __init__(self):
12 | # The target function of the thread is the manage_table() method,
13 | # defined later in the file.
14 | super().__init__(target=self.manage_table)
15 | # This bot is going to be waiting tables and will need to be
16 | # responsible for some cutlery. Each bot keeps track of the cutlery
17 | # that it took from the kitchen here. (The Cutlery class will be
18 | # defined later.)
19 | self.cutlery = Cutlery(knives=0, forks=0)
20 | # The bot will also be assigned tasks. They will be added to this task
21 | # queue, and the bot will perform them during its main processing loop,
22 | # next.
23 | self.tasks = Queue()
24 |
25 | def manage_table(self):
26 |         # The primary routine of this bot is this infinite loop. If you need to
27 |         # shut down a bot, you have to give it the shutdown task.
28 | while True:
29 | task = self.tasks.get()
30 | if task == 'prepare table':
31 | # There are only three tasks defined for this bot. This one,
32 | # prepare table, is what the bot must do to get a new table
33 | # ready for service. For our test, the only requirement is to
34 | # get sets of cutlery from the kitchen and place them on the
35 | # table. clear table is used when a table is to be cleared:
36 | # the bot must return the used cutlery back to the kitchen.
37 | # shutdown just shuts down the bot.
38 | kitchen.give(to=self.cutlery, knives=4, forks=4)
39 | elif task == 'clear table':
40 | self.cutlery.give(to=kitchen, knives=4, forks=4)
41 | elif task == 'shutdown':
42 | return
43 |
44 |
45 | # Example 2-3. Definition of the Cutlery object
46 |
47 | # attrs, which is an open source Python library that has nothing to do with
48 | # threads or asyncio, is a really wonderful library for making class creation
49 | # easy. Here, the @attrs decorator will ensure that this Cutlery class will get
50 | # all the usual boilerplate code (like __init__()) automatically set up.
51 | @attrs
52 | class Cutlery:
53 | # The attrib() function provides an easy way to create attributes,
54 | # including defaults, which you might normally have handled as keyword
55 | # arguments in the __init__() method.
56 | knives = attrib(default=0)
57 | forks = attrib(default=0)
58 | # lock = attrib(threading.Lock())
59 |
60 | # This method is used to transfer knives and forks from one Cutlery object
61 | # to another. Typically, it will be used by bots to obtain cutlery from the
62 | # kitchen for new tables, and to return the cutlery back to the kitchen
63 | # after a table is cleared.
64 | def give(self, to: 'Cutlery', knives=0, forks=0):
65 | self.change(-knives, -forks)
66 | to.change(knives, forks)
67 |
68 | # This is a very simple utility function for altering the inventory data
69 | # in the object instance.
70 | def change(self, knives, forks):
71 | # with self.lock:
72 | self.knives += knives
73 | self.forks += forks
74 |
75 |
76 | # We’ve defined kitchen as the identifier for the kitchen inventory of
77 | # cutlery. Typically, each of the bots will obtain cutlery from this
78 | # location. It is also required that they return cutlery to this store when a
79 | # table is cleared.
80 | kitchen = Cutlery(knives=100, forks=100)
81 | # This script is executed when testing. For our test, we’ll be using 10
82 | # ThreadBots.
83 | bots = [ThreadBot() for i in range(10)]
84 |
85 | for bot in bots:
86 | # We get the number of tables as a command-line parameter, and then give
87 | # each bot that number of tasks for preparing and clearing tables in the
88 | # restaurant.
89 | for i in range(int(sys.argv[1])):
90 | bot.tasks.put('prepare table')
91 | bot.tasks.put('clear table')
92 | # The shutdown task will make the bots stop (so that bot.join() a bit
93 | # further down will return). The rest of the script prints diagnostic
94 | # messages and starts up the bots.
95 | bot.tasks.put('shutdown')
96 | print('Kitchen inventory before service:', kitchen)
97 | for bot in bots:
98 | bot.start()
99 | for bot in bots:
100 | bot.join()
101 | print('Kitchen inventory after service:', kitchen)
102 |
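103 | # A minimal sketch (not part of the book's listing) of the thread-safe variant
104 | # hinted at by the commented-out `lock` lines above: serializing the `+=`
105 | # updates with a threading.Lock removes the race condition this demo exhibits
106 | # for large table counts.
107 | @attrs
108 | class LockedCutlery:
109 |     knives = attrib(default=0)
110 |     forks = attrib(default=0)
111 |     # factory= gives each instance its own Lock.
112 |     lock = attrib(factory=threading.Lock)
113 |
114 |     def give(self, to: 'LockedCutlery', knives=0, forks=0):
115 |         self.change(-knives, -forks)
116 |         to.change(knives, forks)
117 |
118 |     def change(self, knives, forks):
119 |         # Only one thread at a time may mutate the inventory counts.
120 |         with self.lock:
121 |             self.knives += knives
122 |             self.forks += forks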
--------------------------------------------------------------------------------
/chapter2/threadmem.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | # Demonstrates the memory cost of threads: start 10,000 threads that each
3 | # sleep for a minute, print the PID, and inspect the process's memory usage
4 | # (e.g., with top or ps) while the threads are alive.
5 | import os
6 | from time import sleep
7 | from threading import Thread
8 |
9 | threads = [
10 |     Thread(target=lambda: sleep(60)) for i in range(10000)
11 | ]
12 | [t.start() for t in threads]
13 | print(f'PID = {os.getpid()}')
14 | [t.join() for t in threads]
15 |
--------------------------------------------------------------------------------
/chapter3/1/quickstart.py:
--------------------------------------------------------------------------------
1 | # Example 3-1. The “Hello World” of Asyncio
2 | import asyncio
3 | import time
4 |
5 |
6 | async def main():
7 | print(f'{time.ctime()} Hello!')
8 | await asyncio.sleep(1.0)
9 | print(f'{time.ctime()} Goodbye!')
10 |
11 | # asyncio provides a run() function to execute an async def function and all
12 | # other coroutines called from there, like sleep() in the main() function.
13 | asyncio.run(main())
14 |
--------------------------------------------------------------------------------
/chapter3/10/absorb_cancel_and_move_on.py:
--------------------------------------------------------------------------------
1 | # Example 3-10. For educational purposes only—don’t do this!
2 | import asyncio
3 |
4 |
5 | async def f():
6 | try:
7 | while True:
8 | await asyncio.sleep(0)
9 | except asyncio.CancelledError:
10 | print('Nope!')
11 | # Instead of printing a message, what happens if after cancellation,
12 | # we just go right back to awaiting another awaitable?
13 | while True:
14 | await asyncio.sleep(0)
15 | else:
16 | return 111
17 | coro = f()
18 | coro.send(None)
19 | # Unsurprisingly, our outer coroutine continues to live, and it immediately
20 | # suspends again inside the new coroutine.
21 | coro.throw(asyncio.CancelledError)
22 | # Everything proceeds normally, and our coroutine continues to suspend and
23 | # resume as expected.
24 | coro.send(None)
25 |
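26 | # Why this example is a bad idea: a task whose coroutine absorbs
27 | # CancelledError and keeps awaiting ignores cancellation, so cleanup code that
28 | # cancels tasks once and then waits for them (as asyncio.run() does) can hang.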
--------------------------------------------------------------------------------
/chapter3/11/exec_coro_w_event_loop.py:
--------------------------------------------------------------------------------
1 | # Example 3-11. Using the event loop to execute coroutines
2 | import asyncio
3 |
4 |
5 | async def f():
6 | await asyncio.sleep(0)
7 | return 111
8 | # Obtain a loop.
9 | loop = asyncio.get_event_loop()
10 | coro = f()
11 | # Run the coroutine to completion. Internally, this is doing all those
12 | # .send(None) method calls for us, and it detects completion of our coroutine
13 | # with the StopIteration exception, which also contains our return value.
14 | loop.run_until_complete(coro)
15 |
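16 | # Note (not in the original listing): run_until_complete() also returns the
17 | # coroutine's return value, 111 in this case.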
--------------------------------------------------------------------------------
/chapter3/12/always_same_event_loop.py:
--------------------------------------------------------------------------------
1 | # Example 3-12. Always getting the same event loop
2 | import asyncio
3 |
4 |
5 | loop = asyncio.get_event_loop()
6 | loop2 = asyncio.get_event_loop()
7 | # Both identifiers, loop and loop2, refer to the same instance.
8 | print(loop is loop2)
9 |
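10 | # Caveat (not in the original listing): inside an async def function, prefer
11 | # asyncio.get_running_loop(); newer Python versions deprecate calling
12 | # get_event_loop() when no loop is running.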
--------------------------------------------------------------------------------
/chapter3/13/create_tasks.py:
--------------------------------------------------------------------------------
1 | # Example 3-13. Creating tasks
2 | import asyncio
3 |
4 |
5 | # A stand-in coroutine: the original listing left the coroutine unspecified.
6 | async def some_coro(i):
7 |     await asyncio.sleep(0)
8 |
9 |
10 | async def f():
11 |     # Create some tasks!
12 |     loop = asyncio.get_event_loop()
13 |     for i in range(10):  # arbitrary count; the original listing left it blank
14 |         loop.create_task(some_coro(i))
15 |
--------------------------------------------------------------------------------
/chapter3/14/create_tasks_the_modern_way.py:
--------------------------------------------------------------------------------
1 | # Example 3-14. Creating tasks the modern way
2 | import asyncio
3 |
4 |
5 | # A stand-in coroutine: the original listing left the coroutine unspecified.
6 | async def some_coro(i):
7 |     await asyncio.sleep(0)
8 |
9 |
10 | async def f():
11 |     # Create some tasks!
12 |     for i in range(10):  # arbitrary count; the original listing left it blank
13 |         asyncio.create_task(some_coro(i))
14 |
--------------------------------------------------------------------------------
/chapter3/15/check_future_complete_status.py:
--------------------------------------------------------------------------------
1 | # Example 3-15. Checking completion status with done()
2 | from asyncio import Future
3 | f = Future()
4 | print(f.done())
5 |
--------------------------------------------------------------------------------
/chapter3/16/interact_w_future_instance.py:
--------------------------------------------------------------------------------
1 | # Example 3-16. Interaction with a Future instance
2 | import asyncio
3 |
4 |
5 | # Create a simple main function. We can run this, wait for a bit, and then set
6 | # a result on this Future, f.
7 | async def main(f: asyncio.Future):
8 | await asyncio.sleep(1)
9 | # Set the result.
10 | f.set_result('I have finished.')
11 | loop = asyncio.get_event_loop()
12 | # Manually create a Future instance. Note that this instance is (by default)
13 | # tied to our loop, but it is not and will not be attached to any coroutine
14 | # (that’s what Tasks are for).
15 | fut = asyncio.Future()
16 | # Before doing anything, verify that the future is not done yet.
17 | print(fut.done())
18 | # Schedule the main() coroutine, passing the future. Remember, all the main()
19 | # coroutine does is sleep and then toggle the Future instance. (Note that the
20 | # main() coroutine will not start running yet: coroutines run only when the
21 | # loop is running.)
22 | loop.create_task(main(fut))
23 | # Here we use run_until_complete() on a Future instance, rather than a Task
24 | # instance. This is different from what you’ve seen before. Now that the
25 | # loop is running, the main() coroutine will begin executing.
26 | loop.run_until_complete(fut)
27 | print(fut.done())
28 | # Eventually, the future completes when its result is set. After completion,
29 | # the result can be accessed.
30 | print(fut.result())
31 |
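32 | # Expected output:
33 | #   False
34 | #   True
35 | #   I have finished.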
--------------------------------------------------------------------------------
/chapter3/17/set_result_on_task.py:
--------------------------------------------------------------------------------
1 | # Example 3-17. Calling set_result() on a Task
2 | import asyncio
3 | from contextlib import suppress
4 |
5 |
6 | async def main(f: asyncio.Future):
7 | await asyncio.sleep(1)
8 | try:
9 | # A Task instance is being passed in. It satisfies the type signature
10 |         # of the function (because Task is a subclass of Future), but since
11 | # Python 3.8, we’re no longer allowed to call set_result() on a Task:
12 | # an attempt will raise RuntimeError. The idea is that a Task
13 | # represents a running coroutine, so the result should always come
14 | # only from that.
15 | f.set_result('I have finished.')
16 | except RuntimeError as e:
17 | print(f'No longer allowed: {e}')
18 | # We can, however, still cancel() a task, which will raise
19 | # CancelledError inside the underlying coroutine.
20 | f.cancel()
21 | loop = asyncio.get_event_loop()
22 | # The only difference is that we create a Task instance instead of a Future.
23 | # Of course, the Task API requires us to provide a coroutine; we just use
24 | # sleep() because it’s convenient.
25 | fut = asyncio.Task(asyncio.sleep(1_000_000))
26 | print(fut.done())
27 | loop.create_task(main(fut))
28 | with suppress(asyncio.CancelledError):
29 | loop.run_until_complete(fut)
30 | print(fut.done())
31 | print(fut.cancelled())
32 |
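33 | # Expected output:
34 | #   False
35 | #   No longer allowed: Task does not support set_result operation
36 | #   True
37 | #   True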
--------------------------------------------------------------------------------
/chapter3/18/ensure_future.py:
--------------------------------------------------------------------------------
1 | # Example 3-18. A closer look at what ensure_future() is doing
2 | import asyncio
3 |
4 |
5 | # A simple do-nothing coroutine function. We just need something that can make
6 | # a coroutine.
7 | async def f():
8 | pass
9 | # We make the coroutine object by calling the function directly. Your code will
10 | # rarely do this, but I want to be explicit here (a few lines down) that we’re
11 | # passing a coroutine object into each of create_task() and ensure_future().
12 | coro = f()
13 | # Obtain the loop.
14 | loop = asyncio.get_event_loop()
15 | # First off, we use loop.create_task() to schedule our coroutine on the loop,
16 | # and we get a new Task instance back.
17 | task = loop.create_task(coro)
18 | # We verify the type. So far, nothing interesting.
19 | assert isinstance(task, asyncio.Task)
20 | # We show that asyncio.ensure_future() can be used to perform the same act as
21 | # create_task() : we passed in a coroutine and we got back a Task instance (and
22 | # the coroutine has been scheduled to run on the loop)! If you’re passing in a
23 | # coroutine, there is no difference between loop.create_task() and
24 | # asyncio.ensure_future().
25 | new_task = asyncio.ensure_future(coro)
26 | assert isinstance(new_task, asyncio.Task)
27 | # But what happens if we pass a Task instance to ensure_future()? Note that
28 | # we’re passing in a Task instance that was already created by
29 | # loop.create_task() in step 4.
30 | mystery_meat = asyncio.ensure_future(task)
31 | # We get back exactly the same Task instance as we passed in: it passes through
32 | # unchanged.
33 | assert mystery_meat is task
34 |
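# One more sketch along the same lines: ensure_future() passes through not
# just Tasks but any Future instance unchanged, which is why it suits APIs
# that accept "either a coroutine or a future" (fut here is an illustrative
# extra variable):
fut = asyncio.Future()
assert asyncio.ensure_future(fut) is fut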
--------------------------------------------------------------------------------
/chapter3/19/listify.py:
--------------------------------------------------------------------------------
1 | # Example 3-19. A utility function for coercing input into a list
2 | from typing import Any, List
3 |
4 |
5 | def listify(x: Any) -> List:
6 | """ Try hard to convert x into a list """
7 | if isinstance(x, (str, bytes)):
8 | return [x]
9 | try:
10 | return [_ for _ in x]
11 | except TypeError:
12 | return [x]
13 |
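# A quick usage sketch covering the three branches of listify():
print(listify('hi'))      # ['hi']: str is iterable, but kept whole
print(listify(b'\x00'))   # [b'\x00']: likewise for bytes
print(listify(range(3)))  # [0, 1, 2]: other iterables are expanded
print(listify(123))       # [123]: non-iterables are wrapped in a list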
--------------------------------------------------------------------------------
/chapter3/2/quickstart.py:
--------------------------------------------------------------------------------
1 | # Example 3-2. The “Hello-ish World” of Asyncio
2 | import asyncio
3 | import time
4 |
5 |
6 | async def main():
7 | print(f"{time.ctime()} Hello!")
8 | await asyncio.sleep(1.0)
9 | print(f"{time.ctime()} Goodbye!")
10 |
11 | # You need a loop instance before you can run any coroutines, and this is how
12 | # you get one. In fact, anywhere you call it, get_event_loop() will give you
13 | # the same loop instance each time, as long as you’re using only a single
14 | # thread. If you’re inside an async def function, you should call
15 | # asyncio.get_running_loop() instead, which always gives you what you expect.
16 | # This is covered in much more detail later in the book.
17 | loop = asyncio.get_event_loop()
18 | # In this case, the specific call is loop.create_task(main()). Your coroutine
19 | # function will not be executed until you do this. We say that create_task()
20 | # schedules your coroutine to be run on the loop. The returned task object can
21 | # be used to monitor the status of the task (for example, whether it is still
22 | # running or has completed), and can also be used to obtain a result value from
23 | # your completed coroutine. You can cancel the task with task.cancel() .
24 | task = loop.create_task(main())
25 | # This call will block the current thread, which will usually be the main
26 | # thread. Note that run_until_complete() will keep the loop running only until
27 | # the given coro completes—but all other tasks scheduled on the loop will also
28 | # run while the loop is running. Internally, asyncio.run() calls
29 | # run_until_complete() for you and therefore blocks the main thread in the
30 | # same way.
31 | loop.run_until_complete(task)
32 | pending = asyncio.all_tasks(loop=loop)
33 | for task in pending:
34 | task.cancel()
35 | # When the “main” part of the program unblocks, either due to a process signal
36 | # being received or the loop being stopped by some code calling loop.stop(),
37 | # the code after run_until_complete() will run. The standard idiom as shown
38 | # here is to gather the still-pending tasks, cancel them, and then use
39 | # loop.run_until_complete() again until those tasks are done. gather() is the
40 | # method for doing the gathering. Note that asyncio.run() will do all of the
41 | # cancelling, gathering, and waiting for pending tasks to finish up for you.
42 | group = asyncio.gather(*pending, return_exceptions=True)
43 | loop.run_until_complete(group)
44 | # loop.close() is usually the final action: it must be called on a stopped
45 | # loop, and it will clear all queues and shut down the executor. A stopped
46 | # loop can be restarted, but a closed loop is gone for good. Internally,
47 | # asyncio.run() will close the loop before returning. This is fine because
48 | # run() creates a new event loop every time you call it.
49 | loop.close()
50 |
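# For comparison, a sketch of the asyncio.run() form of this program
# (essentially Example 3-1), which performs the create/run/cancel/gather/close
# sequence shown above internally:
import asyncio
import time

async def main():
    print(f'{time.ctime()} Hello!')
    await asyncio.sleep(1.0)
    print(f'{time.ctime()} Goodbye!')

asyncio.run(main())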
--------------------------------------------------------------------------------
/chapter3/20/async_context_manager.py:
--------------------------------------------------------------------------------
1 | # Example 3-20. Async context manager
2 | import asyncio
3 |
4 |
5 | async def get_conn(host, port):
6 | class Conn:
7 | async def close(self):
8 | await asyncio.sleep(0)
9 | await asyncio.sleep(0)
10 | return Conn()
11 |
12 |
13 | class Connection:
14 | def __init__(self, host, port):
15 | self.host = host
16 | self.port = port
17 |
18 | # Instead of the __enter__() special method for synchronous context
19 | # managers, the new __aenter__() special method is used. This special
20 | # method must be an async def method.
21 | async def __aenter__(self):
22 | self.conn = await get_conn(self.host, self.port)
23 | return self.conn
24 |
25 | # Likewise, instead of __exit__() , use __aexit__(). The parameters are
26 | # identical to those for __exit__() and are populated if an exception was
27 | # raised in the body of the context manager.
28 | async def __aexit__(self, exc_type, exc, tb):
29 | await self.conn.close()
30 |
31 |
32 | async def main():
33 | async with Connection('localhost', 9001) as conn:
34 |         # <do stuff with conn>
35 | pass
36 |
37 | asyncio.run(main())
38 |
--------------------------------------------------------------------------------
/chapter3/21/contextlib_contextmanager.py:
--------------------------------------------------------------------------------
1 | # Example 3-21. The blocking way
2 | from contextlib import contextmanager
3 |
4 |
5 | def download_webpage(url):
6 | class Data:
7 | pass
8 | return Data()
9 |
10 |
11 | def update_stats(url):
12 | pass
13 |
14 |
15 | def process(data):
16 | pass
17 |
18 |
19 | # The @contextmanager decorator transforms a generator function into a context
20 | # manager.
21 | @contextmanager
22 | def web_page(url):
23 | # This function call (which I made up for this example) looks suspiciously
24 | # like the sort of thing that will want to use a network interface, which
25 | # is many orders of magnitude slower than “normal” CPU-bound code. This
26 | # context manager must be used in a dedicated thread; otherwise, the whole
27 | # program will be paused while waiting for data.
28 | data = download_webpage(url)
29 | yield data
30 | # Imagine that we update some statistics every time we process data from a
31 | # URL, such as the number of times the URL has been downloaded. From a
32 | # concurrency perspective, we would need to know whether this function
33 | # involves I/O internally, such as writing to a database over a network.
34 | # If so, update_stats() is also a blocking call.
35 | update_stats(url)
36 |
37 |
38 | # Our context manager is being used. Note specifically how the network call
39 | # (to download_webpage() ) is hidden inside the construction of the context
40 | # manager.
41 | with web_page('google.com') as data:
42 | # This function call, process() , might also be blocking. We’d have to
43 | # look at what the function does, because the distinction between what is
44 | # blocking or nonblocking is not clear-cut. It might be:
45 | # • Innocuous and nonblocking (fast and CPU-bound)
46 | # • Mildly blocking (fast and I/O-bound, perhaps something like fast
47 | # disk access instead of network I/O)
48 | # • Blocking (slow and I/O-bound)
49 | # • Diabolical (slow and CPU-bound)
50 | # For the sake of simplicity in this example, let’s presume that the call
51 | # to process() is a fast, CPU-bound operation and therefore nonblocking.
52 | process(data)
53 |
--------------------------------------------------------------------------------
/chapter3/22/contextlib_asynccontextmanager.py:
--------------------------------------------------------------------------------
1 | # Example 3-22. The nonblocking way
2 | import asyncio
3 | from contextlib import asynccontextmanager
4 |
5 |
6 | async def download_webpage(url):
7 | class Data:
8 | pass
9 | await asyncio.sleep(0)
10 | return Data()
11 |
12 |
13 | async def update_stats(url):
14 | await asyncio.sleep(0)
15 |
16 |
17 | def process(data):
18 | pass
19 |
20 |
21 | # The new @asynccontextmanager decorator is used in exactly the same way.
22 | @asynccontextmanager
23 | # It does, however, require that the decorated generator function be declared
24 | # with async def.
25 | async def web_page(url):
26 | # As before, we fetch the data from the URL before making it available to
27 | # the body of the context manager. I have added the await keyword, which
28 | # tells us that this coroutine will allow the event loop to run other
29 | # tasks while we wait for the network call to complete.
30 | #
31 | # Note that we cannot simply tack on the await keyword to anything. This
32 | # change presupposes that we were also able to modify the
33 | # download_webpage() function itself, and convert it into a coroutine that
34 | # is compatible with the await keyword. For the times when it is not
35 | # possible to modify the function, a different approach is needed; we’ll
36 | # discuss that in the next example.
37 | data = await download_webpage(url)
38 | # As before, the data is made available to the body of the context
39 | # manager. I’m trying to keep the code simple, so I’ve omitted the usual
40 | # try/finally handler that you should normally write to deal with
41 | # exceptions raised in the body of the caller; see the sketch at the end of this file.
42 | #
43 | # Note that the presence of yield is what changes a function into a
44 | # generator function; the additional presence of the async def keywords in
45 | # point 1 makes this an asynchronous generator function. When called, it
46 | # will return an asynchronous generator. The inspect module has two
47 | # functions that can test for these: isasyncgenfunction() and
48 | # isasyncgen(), respectively.
49 | yield data
50 | # Here, assume that we’ve also converted the code inside the update
51 | # stats() function to allow it to produce coroutines. We can then use the
52 | # await keyword, which allows a context switch to the event loop while we
53 | # wait for the I/O-bound work to complete.
54 | await update_stats(url)
55 |
56 |
57 | async def main():
58 | # Another change was required in the usage of the context manager itself:
59 | # we needed to use async with instead of a plain with.
60 | async with web_page('google.com') as data:
61 | process(data)
62 | asyncio.run(main())
63 |
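# A sketch of the try/finally that the comments above say was omitted: it
# guarantees update_stats() still runs (and the generator is finalized) even
# if the body of the async with block raises.
@asynccontextmanager
async def web_page(url):
    data = await download_webpage(url)
    try:
        yield data
    finally:
        await update_stats(url)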
--------------------------------------------------------------------------------
/chapter3/23/run_in_executor_example.py:
--------------------------------------------------------------------------------
1 | # Example 3-23. The nonblocking-with-a-little-help-from-my-friends way
2 | import asyncio
3 | from contextlib import asynccontextmanager
4 |
5 |
6 | def download_webpage(url):
7 | class Data:
8 | pass
9 | return Data()
10 |
11 |
12 | def update_stats(url):
13 | pass
14 |
15 |
16 | def process(data):
17 | pass
18 |
19 |
20 | @asynccontextmanager
21 | # For this example, assume that we are unable to modify the code for our two
22 | # blocking calls, download_webpage() and update_stats() ; i.e., we can’t alter
23 | # them to be coroutine functions. That’s bad, because the most grave sin of
24 | # event-based programming is breaking the rule that you must never, under any
25 | # circumstances, prevent the event loop from processing events.
26 | # To get around the problem, we will use an executor to run the blocking calls
27 | # in a separate thread. The executor is made available to us as an attribute
28 | # of the event loop itself.
29 | async def web_page(url):
30 | loop = asyncio.get_event_loop()
31 | # We call the executor. The signature is
32 | # AbstractEventLoop.run_in_executor(executor, func, *args). If you want
33 | # to use the default executor (which is a ThreadPoolExecutor), you must
34 | # pass None as the value for the executor argument.
35 | data = await loop.run_in_executor(
36 | None, download_webpage, url)
37 | yield data
38 | # As with the call to download_webpage(), we also run the other blocking
39 | # call to update_stats() in an executor. Note that you must use the await
40 | # keyword in front. If you forget, the execution of the asynchronous
41 | # generator (i.e., your async context manager) will not wait for the call
42 | # to complete before proceeding.
43 | await loop.run_in_executor(None, update_stats, url)
44 |
45 |
46 | async def main():
47 | async with web_page('google.com') as data:
48 | process(data)
49 | asyncio.run(main())
50 |
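# run_in_executor() also accepts an explicit executor as its first argument.
# A minimal sketch, reusing download_webpage() from above (the name fetch is
# just for illustration):
import asyncio
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=4)

async def fetch(url):
    # Pass our own executor instead of None (the loop's default).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(executor, download_webpage, url)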
--------------------------------------------------------------------------------
/chapter3/24/nonasync_iterator.py:
--------------------------------------------------------------------------------
1 | # Example 3-24. A traditional, nonasync iterator
2 | class A:
3 | # An iterator must implement the __iter__() special method.
4 | def __iter__(self):
5 | # Initialize some state to the “starting” state.
6 | self.x = 0
7 |         # The __iter__() special method must return an iterator; i.e., an
8 | # object that implements the __next__() special method. In this case,
9 | # it’s the same instance, because A itself also implements the
10 | # __next__() special method.
11 | return self
12 |
13 | # The __next__() method is defined. This will be called for every step in
14 | # the iteration sequence until...
15 | def __next__(self):
16 | if self.x > 2:
17 | # ...StopIteration is raised.
18 | raise StopIteration
19 | else:
20 | self.x += 1
21 | # The returned values for each iteration are generated.
22 | return self.x
23 |
24 |
25 | for i in A():
26 | print(i)
27 |
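# For contrast, a sketch of the same sequence written as a generator
# function, which implements the iterator protocol for you (count_to_three is
# an illustrative name):
def count_to_three():
    x = 0
    while x <= 2:
        x += 1
        yield x

for i in count_to_three():
    print(i)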
--------------------------------------------------------------------------------
/chapter3/25/async_iterator_redis_example.py:
--------------------------------------------------------------------------------
1 | # Example 3-25. Async iterator for fetching data from Redis
2 | import asyncio
3 |
4 |
5 | # Mock Redis interface
6 | class Redis:
7 | async def get(self, key):
8 | await asyncio.sleep(0)
9 | return 'value'
10 |
11 |
12 | class OneAtATime:
13 | # The initializer of this class is quite ordinary: we store the Redis
14 | # connection instance and the list of keys to iterate over.
15 | def __init__(self, redis, keys):
16 | self.redis = redis
17 | self.keys = keys
18 |
19 | # Just as in the previous code example with __iter__() , we use
20 | # __aiter__() to set things up for iteration. We create a normal iterator
21 | # over the keys, self.ikeys, and return self because OneAtATime also
22 | # implements the __anext__() coroutine method.
23 | def __aiter__(self):
24 | self.ikeys = iter(self.keys)
25 | return self
26 |
27 | # Note that the __anext__() method is declared with async def , while the
28 | # __aiter__() method is declared only with def .
29 | async def __anext__(self):
30 | try:
31 | # For each key, we fetch the value from Redis: self.ikeys is a
32 | # regular iterator over the keys, so we use next() to move over
33 | # them.
34 | k = next(self.ikeys)
35 | # When self.ikeys is exhausted, we handle the StopIteration and simply
36 | # turn it into a StopAsyncIteration! This is how you signal stop from
37 | # inside an async iterator.
38 | except StopIteration:
39 | raise StopAsyncIteration
40 | # Finally—the entire point of this example—we can get the data from
41 | # Redis associated with this key. We can await the data, which means
42 | # that other code can run on the event loop while we wait on network
43 | # I/O.
44 | value = await self.redis.get(k)
45 | return value
46 |
47 |
48 | # Mock create_redis
49 | # Real one: aioredis.create_redis
50 | async def create_redis(socket):
51 | await asyncio.sleep(0)
52 | return Redis()
53 |
54 |
55 | async def do_something_with(value):
56 | await asyncio.sleep(0)
57 |
58 |
59 | # The main() function: we run it using asyncio.run() toward the bottom of the
60 | # code sample.
61 | async def main():
62 | # We use the high-level interface in aioredis to get a connection.
63 | redis = await create_redis(('localhost', 6379))
64 | # Imagine that each of the values associated with these keys is quite
65 | # large and stored in the Redis instance.
66 | keys = ['Americas', 'Africa', 'Europe', 'Asia']
67 | # We’re using async for : the point is that iteration is able to suspend
68 | # itself while waiting for the next datum to arrive.
69 | async for value in OneAtATime(redis, keys):
70 | # For completeness, imagine that we also perform some I/O-bound
71 | # activity on the fetched value—perhaps a simple data transformation—
72 | # and then it gets sent on to another destination.
73 | await do_something_with(value)
74 | asyncio.run(main())
75 |
--------------------------------------------------------------------------------
/chapter3/26/async_generator_redis_example.py:
--------------------------------------------------------------------------------
1 | # Example 3-26. Easier with an async generator
2 | import asyncio
3 |
4 |
5 | # Mock Redis interface
6 | class Redis:
7 | async def get(self, key):
8 | await asyncio.sleep(0)
9 | return 'value'
10 |
11 |
12 | # Mock create_redis
13 | # Real one: aioredis.create_redis
14 | async def create_redis(socket):
15 | await asyncio.sleep(0)
16 | return Redis()
17 |
18 |
19 | async def do_something_with(value):
20 | await asyncio.sleep(0)
21 |
22 |
23 | # Our function is now declared with async def , making it a coroutine
24 | # function, and since this function also contains the yield keyword, we refer
25 | # to it as an asynchronous generator function.
26 | async def one_at_a_time(redis, keys):
27 | for k in keys:
28 | # We don’t have to do the convoluted things necessary in the previous
29 | # example with self.ikeys: here, we just loop over the keys directly
30 | # and obtain the value...
31 | value = await redis.get(k)
32 | # ...and then yield it to the caller, just like a normal generator.
33 | yield value
34 |
35 |
36 | # The main() function is identical to the version in Example 3-25.
37 | async def main():
38 | redis = await create_redis(('localhost', 6379))
39 | keys = ['Americas', 'Africa', 'Europe', 'Asia']
40 | async for value in one_at_a_time(redis, keys):
41 | await do_something_with(value)
42 | asyncio.run(main())
43 |
--------------------------------------------------------------------------------
/chapter3/27/async_comprehensions_example1.py:
--------------------------------------------------------------------------------
1 | # Example 3-27. Async list, dict, and set comprehensions
2 | import asyncio
3 |
4 |
5 | async def doubler(n):
6 | for i in range(n):
7 | # doubler() is a very simple async generator: given an upper value,
8 | # it’ll iterate over a simple range, yielding a tuple of the value and
9 | # its double.
10 | yield i, i * 2
11 | # Sleep a little, just to emphasize that this is really an async
12 | # function.
13 | await asyncio.sleep(0.1)
14 |
15 |
16 | async def main():
17 | # An async list comprehension: note how async for is used instead of the
18 | # usual for.
19 | result = [x async for x in doubler(3)]
20 | print(result)
21 | # An async dict comprehension; all the usual tricks work, such as
22 | # unpacking the tuple into x and y so that they can feed the dict
23 | # comprehension syntax.
24 | result = {x: y async for x, y in doubler(3)}
25 | print(result)
26 | # The async set comprehension works exactly as you would expect.
27 | result = {x async for x in doubler(3)}
28 | print(result)
29 | asyncio.run(main())
30 |
--------------------------------------------------------------------------------
/chapter3/28/async_comprehensions_example2.py:
--------------------------------------------------------------------------------
1 | # Example 3-28. Putting it all together
2 | import asyncio
3 |
4 |
5 | # A simple coroutine function: sleep for a bit; then return the parameter plus
6 | # 100.
7 | async def f(x):
8 | await asyncio.sleep(0.1)
9 | return x + 100
10 |
11 |
12 | # This is an async generator, which we will call inside an async list
13 | # comprehension a bit farther down, using async for to drive the iteration.
14 | async def factory(n):
15 | for x in range(n):
16 | await asyncio.sleep(0.1)
17 | # The async generator will yield a tuple of f and the iteration var x.
18 |         # Note that f itself is a coroutine function, not yet a coroutine.
19 | yield f, x
20 |
21 |
22 | async def main():
23 | # Finally, the async comprehension. This example has been contrived to
24 | # demonstrate a comprehension that includes both async for and await.
25 | # Let’s break down what’s happening inside the comprehension. First, the
26 | # factory(3) call returns an async generator, which must be driven by
27 | # iteration. Because it’s an async generator, you can’t just use for;
28 | # you must use async for.
29 | results = [await f(x) async for f, x in factory(3)]
30 | print('results = ', results)
31 | asyncio.run(main())
32 |
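# The comprehension above desugars to roughly the following loop, which may
# make the interplay of async for and await easier to see (reusing factory()
# from above; main2 is just an illustrative name):
async def main2():
    results = []
    async for f, x in factory(3):
        results.append(await f(x))
    print('results = ', results)

asyncio.run(main2())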
--------------------------------------------------------------------------------
/chapter3/29/taskwarning.py:
--------------------------------------------------------------------------------
1 | # Example 3-29. Destroyer of pending tasks
2 | import asyncio
3 |
4 |
5 | async def f(delay):
6 | await asyncio.sleep(delay)
7 | loop = asyncio.get_event_loop()
8 | # Task 1 will run for 1 second.
9 | t1 = loop.create_task(f(1))
10 | # Task 2 will run for 2 seconds.
11 | t2 = loop.create_task(f(2))
12 | # Run only until task 1 is complete.
13 | loop.run_until_complete(t1)
14 | loop.close()
15 |
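# Running this typically triggers asyncio's "Task was destroyed but it is
# pending!" warning for t2, because the loop is closed while t2 is still
# pending. A sketch of the cleanup idiom from Example 3-2, applied here in
# place of the bare loop.close():
#
#     t2.cancel()
#     loop.run_until_complete(asyncio.gather(t2, return_exceptions=True))
#     loop.close()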
--------------------------------------------------------------------------------
/chapter3/3/quickstart_exe.py:
--------------------------------------------------------------------------------
1 | # Example 3-3. The basic executor interface
2 | import time
3 | import asyncio
4 |
5 |
6 | async def main():
7 | print(f'{time.ctime()} Hello!')
8 | await asyncio.sleep(1.0)
9 | print(f'{time.ctime()} Goodbye!')
10 |
11 |
12 | # blocking() calls the traditional time.sleep() internally, which would have
13 | # blocked the main thread and prevented your event loop from running. This
14 | # means that you must not make this function a coroutine—indeed, you cannot
15 | # even call this function from anywhere in the main thread, which is where the
16 | # asyncio loop is running. We solve this problem by running this function in an
17 | # executor.
18 | def blocking():
19 | # Unrelated to this section, but something to keep in mind for later in
20 | # the book: note that the blocking sleep time (0.5 seconds) is shorter
21 | # than the nonblocking sleep time (1 second) in the main() coroutine.
22 | # This makes the code sample neat and tidy. In “Waiting for the Executor
23 | # During Shutdown” on page 68 we’ll explore what happens if executor
24 | # functions outlive their async counterparts during the shutdown sequence.
25 | time.sleep(0.5)
26 | print(f'{time.ctime()} Hello from a thread!')
27 |
28 |
29 | loop = asyncio.get_event_loop()
30 | task = loop.create_task(main())
31 | # This is the last of our list of essential, must-know features of asyncio.
32 | # Sometimes you need to run things in a separate thread or even a separate
33 | # process: this method is used for exactly that. Here we pass our blocking
34 | # function to be run in the default executor. Note that run_in_executor()
35 | # does not block the main thread: it only schedules the executor task to run
36 | # (it returns a Future, which means you can await it if the method is called
37 | # within another coroutine function). The executor task will begin executing
38 | # only after run_until_complete() is called, which allows the event loop to start processing events.
39 | loop.run_in_executor(None, blocking)
40 | loop.run_until_complete(task)
41 | # Further to the note in callout 2: the set of tasks in pending does not include an
42 | # entry for the call to blocking() made in run_in_executor(). This will be true of
43 | # any call that returns a Future rather than a Task. The documentation is quite
44 | # good at specifying return types, so you’ll see the return type there; just remember
45 | # that all_tasks() really does return only Tasks, not Futures.
46 | pending = asyncio.all_tasks(loop=loop)
47 | for task in pending:
48 | task.cancel()
49 | group = asyncio.gather(*pending, return_exceptions=True)
50 | loop.run_until_complete(group)
51 | loop.close()
52 |
--------------------------------------------------------------------------------
/chapter3/30/telnetdemo.py:
--------------------------------------------------------------------------------
1 | # Example 3-30. Asyncio application life cycle (based on the TCP echo server
2 | # in the Python documentation)
3 | import asyncio
4 | from asyncio import StreamReader, StreamWriter
5 |
6 |
7 | # This echo() coroutine function will be used (by the server) to create a
8 | # coroutine for each connection made. The function is using the streams API
9 | # for networking with asyncio .
10 | async def echo(reader: StreamReader, writer: StreamWriter):
11 | print('New connection.')
12 | try:
13 | # To keep the connection alive, we’ll have an infinite loop to wait
14 | # for messages.
15 | while data := await reader.readline():
16 | # Return the data back to the sender, but in ALL CAPS.
17 | writer.write(data.upper())
18 | await writer.drain()
19 | print('Leaving Connection.')
20 | except asyncio.CancelledError:
21 | # If this task is cancelled, we’ll print a message.
22 | print('Connection dropped!')
23 |
24 |
25 | async def main(host='127.0.0.1', port=8888):
26 | # This code for starting a TCP server is taken directly from the
27 | # Python 3.8 documentation.
28 | server = await asyncio.start_server(echo, host, port)
29 | async with server:
30 | await server.serve_forever()
31 | try:
32 | asyncio.run(main())
33 | except KeyboardInterrupt:
34 | print('Bye!')
35 |
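# To try this out: run the file, then connect from another shell with a
# line-based TCP client, for example `telnet 127.0.0.1 8888`; each line you
# type should come back in ALL CAPS.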
--------------------------------------------------------------------------------
/chapter3/31/telnetdemo.py:
--------------------------------------------------------------------------------
1 | # Example 3-31. Creating a task inside a cancellation handler
2 | import asyncio
3 | from asyncio import StreamReader, StreamWriter
4 |
5 |
6 | # Pretend that this coroutine actually contacts an external server to submit
7 | # event notifications.
8 | async def send_event(msg: str):
9 | await asyncio.sleep(1)
10 |
11 |
12 | async def echo(reader: StreamReader, writer: StreamWriter):
13 | print('New connection.')
14 | try:
15 | while (data := await reader.readline()):
16 | writer.write(data.upper())
17 | await writer.drain()
18 | print('Leaving Connection.')
19 | except asyncio.CancelledError:
20 | msg = 'Connection dropped!'
21 | print(msg)
22 | # Because the event notifier involves network access, it is common for
23 | # such calls to be made in a separate async task; that’s why we’re
24 | # using the create_task() function here.
25 | asyncio.create_task(send_event(msg))
26 |
27 |
28 | async def main(host='127.0.0.1', port=8888):
29 | server = await asyncio.start_server(echo, host, port)
30 | async with server:
31 | await server.serve_forever()
32 | try:
33 | asyncio.run(main())
34 | except KeyboardInterrupt:
35 | print('Bye!')
36 |
--------------------------------------------------------------------------------
/chapter3/32/alltaskscomplete.py:
--------------------------------------------------------------------------------
1 | # Example 3-32. All the tasks will complete
2 | import asyncio
3 |
4 |
5 | async def f(delay):
6 | # It would be awful if someone were to pass in a zero...
7 | await asyncio.sleep(1 / delay)
8 | return delay
9 | loop = asyncio.get_event_loop()
10 | for i in range(10):
11 | loop.create_task(f(i))
12 | pending = asyncio.all_tasks(loop=loop)
13 | group = asyncio.gather(*pending, return_exceptions=True)
14 | results = loop.run_until_complete(group)
15 | print(f'Results: {results}')
16 | loop.close()
17 |
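# A note on the output: f(0) raises ZeroDivisionError from 1 / delay, and
# because gather() was called with return_exceptions=True, that exception
# object simply appears in results alongside the integers 1 through 9 (in
# arbitrary order, since all_tasks() returns a set) instead of propagating
# out of run_until_complete().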
--------------------------------------------------------------------------------
/chapter3/33/shell_signal01.py:
--------------------------------------------------------------------------------
1 | # Example 3-33. Refresher for using KeyboardInterrupt as a SIGINT handler
2 | import asyncio
3 |
4 |
5 | # This is the main part of our application. To keep things simple, we’re just
6 | # going to sleep in an infinite loop.
7 | async def main():
8 | while True:
9 |         print('<Your app is running>')
10 | await asyncio.sleep(1)
11 |
12 |
13 | if __name__ == '__main__':
14 | loop = asyncio.get_event_loop()
15 | # This startup and shutdown sequence will be familiar to you from the
16 |     # previous section. We schedule main(), call run_until_complete(), and wait for
17 | # something to stop the loop.
18 | task = loop.create_task(main())
19 | try:
20 | loop.run_until_complete(task)
21 | # In this case, only Ctrl-C will stop the loop. Then we handle
22 | # KeyboardInterrupt and do all the necessary cleanup bits, as covered in
23 | # the previous sections.
24 | except KeyboardInterrupt:
25 | print('Got signal: SIGINT, shutting down.')
26 | tasks = asyncio.all_tasks(loop=loop)
27 | for t in tasks:
28 | t.cancel()
29 | group = asyncio.gather(*tasks, return_exceptions=True)
30 | loop.run_until_complete(group)
31 | loop.close()
32 |
--------------------------------------------------------------------------------
/chapter3/34/shell_signal02.py:
--------------------------------------------------------------------------------
1 | # Example 3-34. Handle both SIGINT and SIGTERM, but stop the loop only once
2 | import asyncio
3 | # Import the signal values from the standard library signal module.
4 | from signal import SIGINT, SIGTERM
5 |
6 |
7 | async def main():
8 | try:
9 | while True:
10 |             print('<Your app is running>')
11 | await asyncio.sleep(1)
12 | # This time, our main() coroutine is going to do some cleanup internally.
13 | # When the cancellation signal is received (initiated by cancelling each
14 | # of the tasks), there will be a period of 3 seconds where main() will
15 | # continue running during the run_until_complete() phase of the shutdown
16 | # process. It’ll print, “Your app is shutting down...”.
17 | except asyncio.CancelledError:
18 | for i in range(3):
19 |             print('<Your app is shutting down...>')
20 | await asyncio.sleep(1)
21 |
22 |
23 | # This is a callback handler for when we receive a signal. It is configured on
24 | # the loop via the call to add_signal_handler() a bit farther down.
25 | def handler(sig):
26 | # The primary purpose of the handler is to stop the loop: this will
27 | # unblock the loop.run_forever() call and allow pending task collection
28 |     # and cancellation, and the run_until_complete() for shutdown.
29 | loop.stop()
30 | print(f'Got signal: {sig!s}, shutting down.')
31 | # Since we are now in shutdown mode, we don’t want another SIGINT or
32 | # SIGTERM to trigger this handler again: that would call loop.stop()
33 | # during the run_until_complete() phase, which would interfere with our
34 | # shutdown process. Therefore, we remove the signal handler for SIGTERM
35 | # from the loop.
36 | loop.remove_signal_handler(SIGTERM)
37 | # This is a “gotcha”: we can’t simply remove the handler for SIGINT,
38 | # because if we did that, KeyboardInterrupt would again become the handler
39 | # for SIGINT, the same as it was before we added our own handlers.
40 | # Instead, we set an empty lambda function as the handler. This means that
41 | # KeyboardInterrupt stays away, and SIGINT (and Ctrl-C) has no effect.
42 | loop.add_signal_handler(SIGINT, lambda: None)
43 |
44 |
45 | if __name__ == '__main__':
46 | loop = asyncio.get_event_loop()
47 | # Here the signal handlers are attached to the loop. Note that, as
48 | # discussed previously, setting a handler on SIGINT means a
49 | # KeyboardInterrupt will no longer be raised on SIGINT. The raising of a
50 | # KeyboardInterrupt is the “default” handler for SIGINT and is
51 | # preconfigured in Python until you do something to change the handler, as
52 | # we’re doing here.
53 | for sig in (SIGTERM, SIGINT):
54 | loop.add_signal_handler(sig, handler, sig)
55 | loop.create_task(main())
56 | # As usual, execution blocks on run_forever() until something stops the
57 | # loop. In this case, the loop will be stopped inside handler() if either
58 | # SIGINT or SIGTERM is sent to our process. The remainder of the code is
59 | # the same as before.
60 | loop.run_forever()
61 | tasks = asyncio.all_tasks(loop=loop)
62 | for t in tasks:
63 | t.cancel()
64 | group = asyncio.gather(*tasks, return_exceptions=True)
65 | loop.run_until_complete(group)
66 | loop.close()
67 |
--------------------------------------------------------------------------------
/chapter3/35/shell_signal02b.py:
--------------------------------------------------------------------------------
1 | # Example 3-35. Signal handling when using asyncio.run()
2 | import asyncio
3 | from signal import SIGINT, SIGTERM
4 |
5 |
6 | async def main():
7 | loop = asyncio.get_running_loop()
8 | for sig in (SIGTERM, SIGINT):
9 | # Because asyncio.run() takes control of the event loop startup, our
10 | # first opportunity to change signal handling behavior will be in the
11 | # main() function.
12 | loop.add_signal_handler(sig, handler, sig)
13 | try:
14 | while True:
15 |             print('<Your app is running>')
16 | await asyncio.sleep(1)
17 | except asyncio.CancelledError:
18 | for i in range(3):
19 |             print('<Your app is shutting down...>')
20 | await asyncio.sleep(1)
21 |
22 |
23 | def handler(sig):
24 | loop = asyncio.get_running_loop()
25 | # Inside the signal handler, we can’t stop the loop as in previous
26 | # examples, because we’ll get warnings about how the loop was stopped
27 | # before the task created for main() was completed. Instead, we can
28 | # initiate task cancellation here, which will ultimately result in the
29 | # main() task exiting; when that happens, the cleanup handling inside
30 | # asyncio.run() will take over.
31 | for task in asyncio.all_tasks(loop=loop):
32 | task.cancel()
33 | print(f'Got signal: {sig!s}, shutting down.')
34 | loop.remove_signal_handler(SIGTERM)
35 | loop.add_signal_handler(SIGINT, lambda: None)
36 |
37 |
38 | if __name__ == '__main__':
39 | asyncio.run(main())
40 |
--------------------------------------------------------------------------------
/chapter3/36/quickstart.py:
--------------------------------------------------------------------------------
1 | # Example 3-36. The executor takes too long to finish
2 | import time
3 | import asyncio
4 |
5 |
6 | async def main():
7 | loop = asyncio.get_running_loop()
8 | loop.run_in_executor(None, blocking)
9 | print(f'{time.ctime()} Hello!')
10 | await asyncio.sleep(1.0)
11 | print(f'{time.ctime()} Goodbye!')
12 |
13 |
14 | def blocking():
15 | # This code sample is exactly the same as the one in Example 3-3, except
16 | # that the sleep time in the blocking function is now longer than in the
17 | # async one.
18 | time.sleep(1.5)
19 | print(f"{time.ctime()} Hello from a thread!")
20 |
21 |
22 | asyncio.run(main())
23 |
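# What to expect under the Python 3.8 behavior this code targets:
# asyncio.run() finishes shutting down while the executor thread is still
# sleeping, so the thread's message prints after the loop has closed; this is
# the source of the "Event loop is closed" complaints that Example 3-39's
# comments refer to. (Since Python 3.9, asyncio.run() waits for the default
# executor via loop.shutdown_default_executor().) Examples 3-37 through 3-39
# show three ways to handle it.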
--------------------------------------------------------------------------------
/chapter3/37/quickstart.py:
--------------------------------------------------------------------------------
1 | # Example 3-37. Option A: wrap the executor call inside a coroutine
2 | import time
3 | import asyncio
4 |
5 |
6 | async def main():
7 | loop = asyncio.get_running_loop()
8 |     # The idea here is to fix the shortcoming that run_in_executor() returns
9 | # only a Future instance and not a task. We can’t capture the job in
10 | # all_tasks() (used within asyncio.run()), but we can use await on the
11 | # future. The first part of the plan is to create a future inside the
12 | # main() function.
13 | future = loop.run_in_executor(None, blocking)
14 | try:
15 | print(f'{time.ctime()} Hello!')
16 | await asyncio.sleep(1.0)
17 | print(f'{time.ctime()} Goodbye!')
18 | finally:
19 | # We can use the try/finally structure to ensure that we wait for the
20 | # future to be finished before the main() function returns.
21 | await future
22 |
23 |
24 | def blocking():
25 | time.sleep(2.0)
26 | print(f"{time.ctime()} Hello from a thread!")
27 |
28 |
29 | try:
30 | asyncio.run(main())
31 | except KeyboardInterrupt:
32 | print('Bye!')
33 |
--------------------------------------------------------------------------------
/chapter3/38/quickstart.py:
--------------------------------------------------------------------------------
1 | # Example 3-38. Option B: add the executor future to the gathered tasks
2 | import time
3 | import asyncio
4 |
5 |
6 | # This utility function make_coro() simply waits for the future to complete—
7 | # but crucially, it continues to wait for the future even inside the exception
8 | # handler for CancelledError.
9 | async def make_coro(future):
10 | try:
11 | return await future
12 | except asyncio.CancelledError:
13 | return await future
14 |
15 |
16 | async def main():
17 | loop = asyncio.get_running_loop()
18 | future = loop.run_in_executor(None, blocking)
19 | # We take the future returned from the run_in_executor() call and pass it
20 | # into a new utility function, make_coro(). The important point here is
21 | # that we’re using create_task(), which means that this task will appear
22 | # in the list of all_tasks() within the shutdown handling of
23 | # asyncio.run(), and will receive a cancellation during the shutdown
24 | # process.
25 | asyncio.create_task(make_coro(future))
26 | print(f'{time.ctime()} Hello!')
27 | await asyncio.sleep(1.0)
28 | print(f'{time.ctime()} Goodbye!')
29 |
30 |
31 | def blocking():
32 | time.sleep(2.0)
33 | print(f"{time.ctime()} Hello from a thread!")
34 |
35 |
36 | try:
37 | asyncio.run(main())
38 | except KeyboardInterrupt:
39 | print('Bye!')
40 |
--------------------------------------------------------------------------------
/chapter3/39/quickstart.py:
--------------------------------------------------------------------------------
1 | # Example 3-39. Option C: just like camping, bring your own loop and your own
2 | # executor
3 | import time
4 | import asyncio
5 | from concurrent.futures import ThreadPoolExecutor as Executor
6 |
7 |
8 | async def main():
9 | print(f'{time.ctime()} Hello!')
10 | await asyncio.sleep(1.0)
11 | print(f'{time.ctime()} Goodbye!')
12 | loop.stop()
13 |
14 |
15 | def blocking():
16 | time.sleep(2.0)
17 | print(f"{time.ctime()} Hello from a thread!")
18 |
19 |
20 | loop = asyncio.get_event_loop()
21 | # This time, we create our own executor instance.
22 | executor = Executor()
23 | # We have to set our custom executor as the default one for the loop. This
24 | # means that anywhere the code calls run_in_executor(), it’ll be using our
25 | # custom instance.
26 | loop.set_default_executor(executor)
27 | loop.create_task(main())
28 | # As before, we run the blocking function.
29 | future = loop.run_in_executor(None, blocking)
30 | try:
31 | loop.run_forever()
32 | except KeyboardInterrupt:
33 | print('Cancelled')
34 | tasks = asyncio.all_tasks(loop=loop)
35 | for t in tasks:
36 | t.cancel()
37 | group = asyncio.gather(*tasks, return_exceptions=True)
38 | loop.run_until_complete(group)
39 | # Finally, we can explicitly wait for all the executor jobs to finish before
40 | # closing the loop. This will avoid the “Event loop is closed” messages that
41 | # we saw before. We can do this because we have access to the executor object;
42 | # the default executor is not exposed in the asyncio API, which is why we
43 | # cannot call shutdown() on it and were forced to create our own executor
44 | # instance.
45 | executor.shutdown(wait=True)
46 | loop.close()
47 |
--------------------------------------------------------------------------------
/chapter3/4,5/async_func_are_func_not_coro.py:
--------------------------------------------------------------------------------
1 | # Example 3-4. Async functions are functions, not coroutines
2 | import inspect
3 |
4 | # This is the simplest possible declaration of a coroutine: it looks like a
5 | # regular function, except that it begins with the keywords async def.
6 |
7 |
8 | async def f():
9 | return 123
10 |
11 | # Surprise! The precise type of f is not “coroutine”; it’s just an ordinary
12 | # function. While it is common to refer to async def functions as coroutines,
13 | # strictly speaking they are considered by Python to be coroutine functions.
14 | # This behavior is identical to the way generator functions work in Python:
15 | # >>> def g():
16 | # ...     yield 123
17 | # ...
18 | #
19 | # >>> type(g)
20 | # <class 'function'>
21 | # >>> gen = g()
22 | # >>> type(gen)
23 | # <class 'generator'>
24 | # Even though g is sometimes incorrectly referred to as a “generator,” it
25 | # remains a function, and it is only when this function is evaluated that
26 | # the generator is returned. Coroutine functions work in exactly the same
27 | # way: you need to call the async def function to obtain the coroutine object.
28 | print(type(f))
29 | # The inspect module in the standard library can provide much better
30 | # introspective capabilities than the type() built-in function.
31 | # There is an iscoroutinefunction() function that lets you distinguish between
32 | # an ordinary function and a coroutine function.
33 | print(inspect.iscoroutinefunction(f))
34 |
35 | # Example 3-5. An async def function returns a coroutine object
36 | coro = f()
37 | print(type(coro))
38 | print(inspect.iscoroutine(coro))
39 |
--------------------------------------------------------------------------------
/chapter3/6/coro_send.py:
--------------------------------------------------------------------------------
1 | async def f():
2 | return 123
3 | coro = f()
4 | try:
5 | # A coroutine is initiated by “sending” it a None. Internally, this is
6 | # what the event loop is going to be doing to your precious coroutines;
7 | # you will never have to do this manually. All the coroutines you make
8 | # will be executed either with loop.create_task(coro) or await coro. It’s
9 | # the loop that does the .send(None) behind the scenes.
10 | coro.send(None)
11 | except StopIteration as e:
12 | # When the coroutine returns, a special kind of exception is raised,
13 | # called StopIteration . Note that we can access the return value of the
14 | # coroutine via the value attribute of the exception itself. Again, you
15 | # don’t need to know that it works like this: from your point of view,
16 | # async def functions will simply return a value with the return
17 | # statement, just like normal functions.
18 | print('The answer was:', e.value)
19 |
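# The event-loop equivalent of the manual send(None) dance above: the loop
# does the .send(None) calls for us and unwraps StopIteration into a plain
# return value.
import asyncio
print('The answer was:', asyncio.run(f()))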
--------------------------------------------------------------------------------
/chapter3/7/using_await.py:
--------------------------------------------------------------------------------
1 | # Example 3-7. Using await on a coroutine
2 | import asyncio
3 |
4 |
5 | async def f():
6 | await asyncio.sleep(1.0)
7 | return 123
8 |
9 |
10 | async def main():
11 | # Calling f() produces a coroutine; this means we are allowed to await it.
12 | # The value of the result variable will be 123 when f() completes.
13 | result = await f()
14 | return result
15 |
16 | asyncio.run(main())
17 |
--------------------------------------------------------------------------------
/chapter3/8/inject_exception_into_coro.py:
--------------------------------------------------------------------------------
1 | # Example 3-8. Using coro.throw() to inject exceptions into a coroutine
2 | import asyncio
3 |
4 |
5 | async def f():
6 | await asyncio.sleep(0)
7 | # As before, a new coroutine is created from the coroutine function f()
8 | coro = f()
9 | coro.send(None)
10 | # Instead of doing another send(), we call throw() and provide an exception
11 | # class and a value. This raises an exception inside our coroutine, at the
12 | # await point.
13 | coro.throw(Exception, 'blah')
14 |
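# A compatibility note: the separate exception-class-and-value form of
# throw() used above is deprecated as of Python 3.12. On newer interpreters,
# pass an exception instance instead:
#
#     coro.throw(Exception('blah'))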
--------------------------------------------------------------------------------
/chapter3/9/cancel_coro.py:
--------------------------------------------------------------------------------
1 | # Example 3-9. Coroutine cancellation with CancelledError
2 | import asyncio
3 |
4 |
5 | async def f():
6 | try:
7 | while True:
8 | await asyncio.sleep(0)
9 | # Our coroutine function now handles an exception. In fact, it handles the
10 | # specific exception type used throughout the asyncio library for task
11 | # cancellation: asyncio.CancelledError. Note that the exception is being
12 | # injected into the coroutine from outside; i.e., by the event loop, which
13 | # we’re still simulating with manual send() and throw() commands. In real
14 | # code, which you’ll see later, CancelledError is raised inside the
15 | # task-wrapped coroutine when tasks are cancelled.
16 | except asyncio.CancelledError:
17 | # A simple message to say that the task got cancelled. Note that by
18 | # handling the exception, we ensure it will no longer propagate and
19 | # our coroutine will return .
20 | print('I was cancelled!')
21 | else:
22 | return 111
23 | coro = f()
24 | coro.send(None)
25 | coro.send(None)
26 | # Here we throw() the CancelledError exception.
27 | coro.throw(asyncio.CancelledError)
28 |
29 | # After we run, we should notice:
30 | # - As expected, we see our cancellation message being printed.
31 | # - Our coroutine exits normally. (Recall that the StopIteration exception is
32 | # the normal way that coroutines exit.)
33 |
--------------------------------------------------------------------------------
/chapter4/1-9/__pycache__/msgproto.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ckaraneen/usingaio/7facd6ebbf045053e4955151e88e8e6f43299458/chapter4/1-9/__pycache__/msgproto.cpython-38.pyc
--------------------------------------------------------------------------------
/chapter4/1-9/mq_client_listen.py:
--------------------------------------------------------------------------------
1 | # Example 4-3. Listener: a toolkit for listening for messages on our message
2 | # broker
3 | import asyncio
4 | import argparse
5 | import uuid
6 | from msgproto import read_msg, send_msg
7 |
8 |
9 | async def main(args):
10 | # The uuid standard library module is a convenient way of creating an
11 | # “identity” for this listener. If you start up multiple instances, each
12 | # will have its own identity, and you’ll be able to track what is
13 | # happening in the logs.
14 | me = uuid.uuid4().hex[:8]
15 | print(f'Starting up {me}')
16 | # Open a connection to the server.
17 | reader, writer = await asyncio.open_connection(
18 | args.host, args.port)
19 | print(f'I am {writer.get_extra_info("sockname")}')
20 | # The channel to subscribe to is an input parameter, captured in
21 | # args.listen. Encode it into bytes before sending.
22 | channel = args.listen.encode()
23 | # By our protocol rules (as discussed in the broker code analysis
24 | # previously), the first thing to do after connecting is to send the
25 | # channel name to subscribe to.
26 | await send_msg(writer, channel)
27 | try:
28 | # This loop does nothing else but wait for data to appear on the
29 | # socket.
30 | while data := await read_msg(reader):
31 | print(f'Received by {me}: {data[:20]}')
32 | print('Connection ended.')
33 | except asyncio.IncompleteReadError:
34 | print('Server closed.')
35 | finally:
36 | writer.close()
37 | await writer.wait_closed()
38 |
39 | if __name__ == '__main__':
40 | # The command-line arguments for this program make it easy to point to a
41 | # host, a port, and a channel name to listen to.
42 | parser = argparse.ArgumentParser()
43 | parser.add_argument('--host', default='localhost')
44 |     parser.add_argument('--port', default=25000, type=int)
45 | parser.add_argument('--listen', default='/topic/foo')
46 | try:
47 | asyncio.run(main(parser.parse_args()))
48 | except KeyboardInterrupt:
49 | print('Bye!')
50 |
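# A possible invocation, using the defaults defined above (start the broker
# in mq_server.py first):
#
#     python mq_client_listen.py --host localhost --port 25000 --listen /topic/foo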
--------------------------------------------------------------------------------
/chapter4/1-9/mq_client_sender.py:
--------------------------------------------------------------------------------
1 | # Example 4-4. Sender: a toolkit for sending data to our message broker
2 | import asyncio
3 | import argparse
4 | import uuid
5 | from itertools import count
6 | from msgproto import send_msg
7 |
8 |
9 | async def main(args):
10 | # As with the listener, claim an identity.
11 | me = uuid.uuid4().hex[:8]
12 | print(f'Starting up {me}')
13 | # Reach out and make a connection.
14 | reader, writer = await asyncio.open_connection(
15 | host=args.host, port=args.port)
16 | print(f'I am {writer.get_extra_info("sockname")}')
17 | # According to our protocol rules, the first thing to do after connecting
18 | # to the server is to give the name of the channel to subscribe to;
19 | # however, since we are a sender, we don’t really care about subscribing
20 | # to any channels. Nevertheless, the protocol requires it, so just provide
21 | # a null channel to subscribe to (we won’t actually listen for anything).
22 | channel = b'/null'
23 | # Send the channel to subscribe to.
24 | await send_msg(writer, channel)
25 | # The command-line parameter args.channel provides the channel to which we
26 | # want to send messages. It must be converted to bytes first before
27 | # sending.
28 | chan = args.channel.encode()
29 | try:
30 | # Using itertools.count() is like a while True loop, except that we
31 | # get an iteration variable to use. We use this in the debugging
32 | # messages since it makes it a bit easier to track which message got
33 | # sent from where.
34 | for i in count():
35 | # The delay between sent messages is an input parameter,
36 | # args.interval. The next line generates the message payload. It’s
37 | # either a bytestring of specified size (args.size) or a
38 | # descriptive message. This flexibility is just for testing.
39 | await asyncio.sleep(args.interval)
40 | data = b'X' * args.size or f'Msg {i} from {me}'.encode()
41 | try:
42 | await send_msg(writer, chan)
43 | # Note that two messages are sent here: the first is the
44 | # destination channel name, and the second is the payload.
45 | await send_msg(writer, data)
46 | except OSError:
47 | print('Connection ended.')
48 | break
49 | except asyncio.CancelledError:
50 | writer.close()
51 | await writer.wait_closed()
52 |
53 |
54 | if __name__ == '__main__':
55 | # As with the listener, there are a bunch of command-line options for
56 | # tweaking the sender: channel determines the target channel to send to,
57 | # while interval controls the delay between sends. The size parameter
58 | # controls the size of each message payload.
59 | parser = argparse.ArgumentParser()
60 | parser.add_argument('--host', default='localhost')
61 | parser.add_argument('--port', default=25000, type=int)
62 | parser.add_argument('--channel', default='/topic/foo')
63 | parser.add_argument('--interval', default=1, type=float)
64 | parser.add_argument('--size', default=0, type=int)
65 | try:
66 | asyncio.run(main(parser.parse_args()))
67 | except KeyboardInterrupt:
68 | print('Bye!')
69 |
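# A matching invocation for the sender; with --size 0 (the default), each
# payload is the descriptive "Msg {i} from {me}" message:
#
#     python mq_client_sender.py --channel /topic/foo --interval 1 --size 0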
--------------------------------------------------------------------------------
/chapter4/1-9/mq_server.py:
--------------------------------------------------------------------------------
1 | # Example 4-2. A 40-line prototype server
2 | import asyncio
3 | from asyncio import StreamReader, StreamWriter, gather
4 | from collections import deque, defaultdict
5 | from typing import Deque, DefaultDict
6 | # Imports from our msgproto.py module.
7 | from msgproto import read_msg, send_msg
8 |
9 | # A global collection of currently active subscribers. Every time a client
10 | # connects, they must first send a channel name they’re subscribing to. A
11 | # deque will hold all the subscribers for a particular channel.
12 | SUBSCRIBERS: DefaultDict[bytes, Deque] = defaultdict(deque)
13 |
14 |
15 | async def client(reader: StreamReader, writer: StreamWriter):
16 | # The client() coroutine function will produce a long-lived coroutine for
17 | # each new connection. Think of it as a callback for the TCP server
18 | # started in main(). On this line, I’ve shown how the host and port of the
19 | # remote peer can be obtained, for example, for logging.
20 | peername = writer.get_extra_info('peername')
21 | # Our protocol for clients is the following:
22 | # • On first connect, a client must send a message containing the
23 | # channel to subscribe to (here, subscribe_chan).
24 | # • Thereafter, for the life of the connection, a client sends a message
25 | # to a channel by first sending a message containing the destination
26 | # channel name, followed by a message containing the data. Our broker
27 | # will send such data messages to every client subscribed to that
28 | # channel name.
29 | subscribe_chan = await read_msg(reader)
30 | # Add the StreamWriter instance to the global collection of subscribers.
31 | SUBSCRIBERS[subscribe_chan].append(writer)
32 | print(f'Remote {peername} subscribed to {subscribe_chan}')
33 | try:
34 | # An infinite loop, waiting for data from this client. The first
35 | # message from a client must be the destination channel name.
36 | while channel_name := await read_msg(reader):
37 | # Next comes the actual data to distribute to the channel.
38 | data = await read_msg(reader)
39 | print(f'Sending to {channel_name}: {data[:19]}...')
40 | # Get the deque of subscribers on the target channel.
41 | conns = SUBSCRIBERS[channel_name]
42 | # Some special handling if the channel name begins with the magic
43 | # word /queue: in this case, we send the data to only one of the
44 | # subscribers, not all of them. This can be used for sharing work
45 | # between a bunch of workers, rather than the usual pub-sub
46 | # notification scheme, where all subscribers on a channel get all
47 | # the messages.
48 | if conns and channel_name.startswith(b'/queue'):
49 | # Here is why we use a deque and not a list: rotation of the
50 | # deque is how we keep track of which client is next in line
51 | # for /queue distribution. This seems expensive until you
52 | # realize that a single deque rotation is an O(1) operation.
53 | conns.rotate()
54 | # Target only whichever client is first; this changes after
55 | # every rotation.
56 | conns = [conns[0]]
57 | # Create a list of coroutines for sending the message to each
58 | # writer, and then unpack these into gather() so we can wait for
59 |             # all of the sending to complete. This line hides a serious flaw in our
60 | # program, but it may not be obvious why: though it may be true
61 | # that all of the sending to each subscriber will happen
62 | # concurrently, what happens if we have one very slow client? In
63 | # this case, the gather() will finish only when the slowest
64 | # subscriber has received its data. We can’t receive any more data
65 | # from the sending client until all these send_msg() coroutines
66 | # finish. This slows all message distribution to the speed of the
67 | # slowest subscriber.
68 | await gather(*[send_msg(c, data) for c in conns])
69 | except asyncio.CancelledError:
70 | print(f'Remote {peername} closing connection.')
71 | writer.close()
72 | await writer.wait_closed()
73 | except asyncio.IncompleteReadError:
74 | print(f'Remote {peername} disconnected')
75 | finally:
76 | print(f'Remote {peername} closed')
77 | # When leaving the client() coroutine, we make sure to remove
78 | # ourselves from the global SUBSCRIBERS collection. Unfortunately,
79 | # this is an O(n) operation, which can be a little expensive for very
80 | # large n. A different data structure would fix this, but for now we
81 | # console ourselves with the knowledge that connections are intended
82 | # to be long-lived—thus, there should be few disconnection events—and
83 | # n is unlikely to be very large (say ~10,000 as a rough
84 | # order-of-magnitude estimate), and this code is at least easy to
85 | # understand.
86 | SUBSCRIBERS[subscribe_chan].remove(writer)
87 |
88 |
89 | async def main(*args, **kwargs):
90 | server = await asyncio.start_server(*args, **kwargs)
91 | async with server:
92 | await server.serve_forever()
93 | try:
94 | asyncio.run(main(client, host='127.0.0.1', port=25000))
95 | except KeyboardInterrupt:
96 | print('Bye!')
97 |
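# Aside: a minimal sketch (not part of the repo) showing why deque rotation
# gives round-robin delivery for /queue channels: after each rotate(), a
# different subscriber sits at index 0.
from collections import deque

subscribers = deque(['alice', 'bob', 'carol'])
for _ in range(6):
    subscribers.rotate()
    # Prints: carol, bob, alice, carol, bob, alice
    print('deliver to', subscribers[0])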
--------------------------------------------------------------------------------
/chapter4/1-9/mq_server_plus.py:
--------------------------------------------------------------------------------
1 | # Example 4-9. Message broker: improved design
2 | import asyncio
3 | from asyncio import StreamReader, StreamWriter, Queue
4 | from collections import deque, defaultdict
5 | from contextlib import suppress
6 | from typing import Deque, DefaultDict, Dict
7 | from msgproto import read_msg, send_msg
8 |
9 |
10 | SUBSCRIBERS: DefaultDict[bytes, Deque] = defaultdict(deque)
11 | SEND_QUEUES: DefaultDict[StreamWriter, Queue] = defaultdict(Queue)
12 | # In the previous implementation, there was only SUBSCRIBERS; now there are
13 | # SEND_QUEUES and CHAN_QUEUES as global collections. This is a consequence of
14 | # completely decoupling the receiving and sending of data. SEND_QUEUES has one
15 | # queue entry for each client connection: all data that must be sent to that
16 | # client must be placed onto that queue. (If you peek ahead, the send_client()
17 | # coroutine will pull data off SEND_QUEUES and send it.)
18 | CHAN_QUEUES: Dict[bytes, Queue] = {}
19 |
20 |
21 | async def client(reader: StreamReader, writer: StreamWriter):
22 | peername = writer.get_extra_info('peername')
23 | subscribe_chan = await read_msg(reader)
24 | # Up until this point in the client() coroutine function, the code is the
25 | # same as in the simple server: the subscribed channel name is received,
26 | # and we add the StreamWriter instance for the new client to the global
27 | # SUBSCRIBERS collection.
28 | SUBSCRIBERS[subscribe_chan].append(writer)
29 | # This is new: we create a long-lived task that will do all the sending of
30 | # data to this client. The task will run independently as a separate
31 | # coroutine and will pull messages off the supplied queue,
32 | # SEND_QUEUES[writer], for sending.
33 | send_task = asyncio.create_task(
34 | send_client(writer, SEND_QUEUES[writer]))
35 | print(f'Remote {peername} subscribed to {subscribe_chan}')
36 | try:
37 | while channel_name := await read_msg(reader):
38 | data = await read_msg(reader)
39 | # Now we’re inside the loop where we receive data. Remember that
40 | # we always receive two messages: one for the destination channel
41 | # name, and one for the data. We’re going to create a new,
42 | # dedicated Queue for every destination channel, and that’s what
43 | # CHAN_QUEUES is for: when any client wants to push data to a
44 | # channel, we’re going to put that data onto the appropriate queue
45 | # and then go immediately back to listening for more data. This
46 | # approach decouples the distribution of messages from the
47 | # receiving of messages from this client.
48 | if channel_name not in CHAN_QUEUES:
49 | # If there isn’t already a queue for the target channel, make
50 | # one.
51 | CHAN_QUEUES[channel_name] = Queue(maxsize=10)
52 | # Create a dedicated and long-lived task for that channel. The
53 | # coroutine chan_sender() will be responsible for taking data
54 | # off the channel queue and distributing that data to
55 | # subscribers.
56 | asyncio.create_task(chan_sender(channel_name))
57 | # Place the newly received data onto the specific channel’s queue.
58 | # If the queue fills up, we’ll wait here until there is space for
59 | # the new data. Waiting here means we won’t be reading any new
60 | # data off the socket, which means that the client will have to
61 | # wait on sending new data into the socket on its side. This isn’t
62 | # necessarily a bad thing, since it communicates so-called
63 | # back-pressure to this client. (Alternatively, you could choose
64 | # to drop messages here if the use case is OK with that.)
65 | await CHAN_QUEUES[channel_name].put(data)
66 | except asyncio.CancelledError:
67 | print(f'Remote {peername} connection cancelled.')
68 | except asyncio.IncompleteReadError:
69 | print(f'Remote {peername} disconnected')
70 | finally:
71 | print(f'Remote {peername} closed')
72 | # When the connection is closed, it’s time to clean up. The long-lived
73 | # task we created for sending data to this client, send_task, can be
74 | # shut down by placing None onto its queue, SEND_QUEUES[writer] (check
75 | # the code for send_client()). It’s important to use a value on the
76 | # queue, rather than outright cancellation, because there may already
77 | # be data on that queue and we want that data to be sent out before
78 | # send_client() is ended.
79 | await SEND_QUEUES[writer].put(None)
80 | # Wait for that sender task to finish...
81 | await send_task
82 | # ...then remove the entry in the SEND_QUEUES collection (and on the
83 | # next line, we also remove the writer from the SUBSCRIBERS collection,
84 | # as before).
85 | del SEND_QUEUES[writer]
86 | SUBSCRIBERS[subscribe_chan].remove(writer)
87 |
88 |
89 | # The send_client() coroutine function is very nearly a textbook example of
90 | # pulling work off a queue. Note how the coroutine will exit only if None is
91 | # placed onto the queue. Note also how we suppress CancelledError inside the
92 | # loop: this is because we want this task to be closed only by receiving a
93 | # None on the queue. This way, all pending data on the queue can be sent out
94 | # before shutdown. If cancellation lands mid-send, the send is simply
95 | async def send_client(writer: StreamWriter, queue: Queue):
96 | while True:
97 | try:
98 | data = await queue.get()
99 | except asyncio.CancelledError:
100 | continue
101 | if not data:
102 | break
103 | try:
104 | await send_msg(writer, data)
105 | except asyncio.CancelledError:
106 | await send_msg(writer, data)
107 | writer.close()
108 | await writer.wait_closed()
109 |
110 |
111 | async def chan_sender(name: bytes):
112 | with suppress(asyncio.CancelledError):
113 | while True:
114 | writers = SUBSCRIBERS[name]
115 | if not writers:
116 | await asyncio.sleep(1)
117 | # chan_sender() is the distribution logic for a channel: it
118 | # sends data from a dedicated channel Queue instance to all
119 | # the subscribers on that channel. But what happens if there
120 | # are no subscribers for this channel yet? We’ll just wait a
121 | # bit and try again. (Note, though, that the queue for this
122 | # channel, CHAN_QUEUES[name], will keep filling up.)
123 | continue
124 | # As in our previous broker implementation, we do something
125 | # special for channels whose name begins with /queue: we rotate
126 | # the deque and send only to the first entry. This acts like a
127 | # crude load-balancing system because each subscriber gets
128 | # different messages off the same queue. For all other channels,
129 | # all subscribers get all the messages.
130 | if name.startswith(b'/queue'):
131 | writers.rotate()
132 | writers = [writers[0]]
133 | # We’ll wait here for data on the queue, and exit if None is
134 | # received. Currently, this isn’t triggered anywhere (so these
135 | # chan_sender() coroutines live forever), but if logic were added
136 | # to clean up these channel tasks after, say, some period of
137 | # inactivity, that’s how it would be done.
138 | if not (msg := await CHAN_QUEUES[name].get()):
139 | break
140 | for writer in writers:
141 | if not SEND_QUEUES[writer].full():
142 | print(f'Sending to {name}: {msg[:19]}...')
143 | # Data has been received, so it’s time to send to
144 | # subscribers. We do not do the sending here: instead, we
145 | # place the data onto each subscriber’s own send queue.
146 | # This decoupling is necessary to make sure that a slow
147 | # subscriber doesn’t slow down anyone else receiving data.
148 | # And furthermore, if the subscriber is so slow that their
149 | # send queue fills up, we don’t put that data on their
150 | # queue; i.e., it is lost.
151 | await SEND_QUEUES[writer].put(msg)
152 |
153 |
154 | async def main(*args, **kwargs):
155 | server = await asyncio.start_server(*args, **kwargs)
156 | async with server:
157 | await server.serve_forever()
158 | try:
159 | asyncio.run(main(client, host='127.0.0.1', port=25000))
160 | except KeyboardInterrupt:
161 | print('Bye!')
162 |
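# Aside: the shutdown-by-sentinel idea used for send_task above, shown in
# isolation (a sketch using only the standard library): everything queued
# before the sentinel is still processed before the worker exits.
import asyncio

async def drainer(q: asyncio.Queue):
    # Exit only when the None sentinel arrives; pending items are drained.
    while (item := await q.get()) is not None:
        print('processed', item)

async def demo():
    q = asyncio.Queue()
    task = asyncio.create_task(drainer(q))
    for i in range(3):
        await q.put(i)
    await q.put(None)  # request shutdown *after* the pending items
    await task         # returns once the queue has been drained

asyncio.run(demo())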
--------------------------------------------------------------------------------
/chapter4/1-9/msgproto.py:
--------------------------------------------------------------------------------
1 | # Example 4-1. Message protocol: read and write
2 | from asyncio import StreamReader, StreamWriter
3 |
4 |
5 | async def read_msg(stream: StreamReader) -> bytes:
6 | # Get the first 4 bytes. This is the size prefix.
7 | size_bytes = await stream.readexactly(4)
8 | # Those 4 bytes must be converted into an integer.
9 | size = int.from_bytes(size_bytes, byteorder='big')
10 | # Now we know the payload size, so we read that off the stream.
11 | data = await stream.readexactly(size)
12 | return data
13 |
14 |
15 | async def send_msg(stream: StreamWriter, data: bytes):
16 | size_bytes = len(data).to_bytes(4, byteorder='big')
17 | # Write is the inverse of read: first we send the length of the data,
18 | # encoded as 4 bytes, and thereafter the data.
19 | stream.writelines([size_bytes, data])
20 | await stream.drain()
21 |
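# Aside: a simplified sender sketch against this framing (a stand-in for the
# repo's mq_client_sender.py, which is not reproduced in this listing). Run it
# with mq_server.py up; the channel names below are illustrative.
import asyncio
from msgproto import send_msg

async def demo():
    reader, writer = await asyncio.open_connection('127.0.0.1', 25000)
    # Broker protocol: the first message names the channel to subscribe to,
    # then (destination channel, payload) pairs follow.
    await send_msg(writer, b'/null')
    await send_msg(writer, b'/topic/demo')
    await send_msg(writer, b'hello subscribers')
    writer.close()
    await writer.wait_closed()

asyncio.run(demo())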
--------------------------------------------------------------------------------
/chapter4/10/twisted_defer_example.py:
--------------------------------------------------------------------------------
1 | # Example 4-10. Even more Twisted with inlined callbacks
2 | from twisted.internet import defer
3 |
4 |
5 | # Ordinarily, Twisted requires creating instances of Deferred and adding
6 | # callbacks to those instances as the method of constructing async programs.
7 | # A few years ago, the @inlineCallbacks decorator was added, which repurposes
8 | # generators as coroutines.
9 | @defer.inlineCallbacks
10 | def f():
11 | yield
12 | # While @inlineCallbacks did allow you to write code that was linear in
13 | # appearance (unlike callbacks), some hacks were required, such as this
14 | # call to defer.returnValue(), which is how you have to return values from
15 | # @inlineCallbacks coroutines.
16 | defer.returnValue(123)
17 |
18 |
19 | @defer.inlineCallbacks
20 | def my_coro_func():
21 | # Here we can see the yield that makes this function a generator. For
22 | # @inlineCallbacks to work, there must be at least one yield present in
23 | # the function being decorated.
24 | value = yield f()
25 | assert value == 123
26 |
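# Aside: a hedged sketch of actually driving my_coro_func() to completion.
# task.react() starts a reactor, waits on the Deferred returned by the given
# function, then stops the reactor and exits the process.
from twisted.internet import task

task.react(lambda reactor: my_coro_func())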
--------------------------------------------------------------------------------
/chapter4/11/twisted_asyncio.py:
--------------------------------------------------------------------------------
1 | # Example 4-11. Support for asyncio in Twisted
2 | from time import ctime
3 | # This is how you tell Twisted to use the asyncio event loop as its main
4 | # reactor. Note that this line must come before the reactor is imported from
5 | # twisted.internet below (these install lines are left commented out here).
6 |
7 | # from twisted.internet import asyncioreactor
8 | # asyncioreactor.install()
9 |
10 | # Anyone familiar with Twisted programming will recognize these imports. We
11 | # don’t have space to cover them in depth here, but in a nutshell, the reactor
12 | # is the Twisted version of the asyncio loop, and defer and task are
13 | # namespaces for tools to work with scheduling coroutines.
14 | from twisted.internet import reactor, defer, task
15 |
16 |
17 | # Seeing async def here, in a Twisted program, looks odd, but this is indeed
18 | # what the new support for async/await gives us: the ability to use native
19 | # coroutines directly in Twisted programs.
20 | async def main():
21 | for i in range(5):
22 | print(f'{ctime()} Hello {i}')
23 | # In the older @inlineCallbacks world, you would have used yield from
24 | # here, but now we can use await, the same as in asyncio code. The
25 | # other part of this line, deferLater(), is an alternative way to do
26 | # the same thing as asyncio.sleep(1). We await a future where, after
27 | # one second, a do-nothing callback will fire.
28 | await task.deferLater(reactor, 1, lambda: None)
29 | # ensureDeferred() is a Twisted version of scheduling a coroutine. This would
30 | # be analogous to loop.create_task() or asyncio.ensure_future().
31 | defer.ensureDeferred(main())
32 | # Running the reactor is the same as loop.run_forever() in asyncio.
33 | reactor.run()
34 |
--------------------------------------------------------------------------------
/chapter4/12/janus_demo.py:
--------------------------------------------------------------------------------
1 | # Example 4-12. Connecting coroutines and threads with a Janus queue
2 | import asyncio
3 | import random
4 | import time
5 | import janus
6 |
7 |
8 | async def main():
9 | loop = asyncio.get_running_loop()
10 | # Create a Janus queue. Note that just like an asyncio.Queue, the Janus
11 | # queue will be associated with a specific event loop. Older versions of
12 | # janus accepted an explicit loop parameter; current versions bind to the
13 | # running loop automatically, so create the queue inside a coroutine.
14 | queue = janus.Queue() # older version: janus.Queue(loop=loop)
15 | loop.run_in_executor(None, data_source, queue)
16 | # Our main() coroutine function simply waits for data on a queue. This
17 | # line will suspend until there is data, exactly like calling get()
18 | # on an asyncio.Queue instance. The queue
19 | # object has two faces: this one is called async_q and provides the
20 | # async-compatible queue API.
21 | while (data := await queue.async_q.get()) is not None:
22 | # Print a message.
23 | print(f'Got {data} off queue')
24 | print('Done.')
25 |
26 |
27 | def data_source(queue):
28 | for i in range(10):
29 | r = random.randint(0, 4)
30 | # Inside the data_source() function, a random int is generated, which
31 | # is used both as a sleep duration and a data value. Note that the
32 | # time.sleep() call is blocking, so this function must be executed in
33 | # a thread.
34 | time.sleep(r)
35 | # Place the data onto the Janus queue. This shows the other face of
36 | # the Janus queue: sync_q, which provides the standard, blocking Queue
37 | # API.
38 | queue.sync_q.put(r)
39 | queue.sync_q.put(None)
40 |
41 |
42 | asyncio.run(main())
43 |
--------------------------------------------------------------------------------
/chapter4/13/aiohttp_example.py:
--------------------------------------------------------------------------------
1 | # Example 4-13. Minimal aiohttp example
2 | from aiohttp import web
3 |
4 |
5 | async def hello(request):
6 | return web.Response(text="Hello, world")
7 | # An Application instance is created.
8 | app = web.Application()
9 | # A route is created, with the target coroutine hello() given as the handler.
10 | app.router.add_get('/', hello)
11 | # The web application is run.
12 | web.run_app(app, port=8080)
13 |
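# Aside: a client-side sketch for exercising the endpoint above, using the
# same library (run it in a separate process while the server is up; the URL
# assumes the defaults from this example).
import asyncio
from aiohttp import ClientSession

async def demo():
    async with ClientSession() as session:
        async with session.get('http://localhost:8080/') as resp:
            print(resp.status, await resp.text())  # 200 Hello, world

asyncio.run(demo())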
--------------------------------------------------------------------------------
/chapter4/14/news_scraper.py:
--------------------------------------------------------------------------------
1 | # Example 4-14. Code for the news scraper
2 |
3 | # To obtain and run the Splash container, run these commands in
4 | # your shell:
5 | # $ docker pull scrapinghub/splash
6 | # $ docker run --rm -p 8050:8050 scrapinghub/splash
7 | # Our server backend will call the Splash API at http://localhost:8050.
8 |
9 | # Run: python news_scraper.py
10 | # and go to http://0.0.0.0:8080/news
11 |
12 | import os
13 |
14 | from asyncio import gather, create_task
15 | from string import Template
16 | from aiohttp import web, ClientSession
17 | from bs4 import BeautifulSoup
18 |
19 | INDEX_HTML_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(
20 | __file__)), '../../appendixB/2/index.html')
21 |
22 |
23 | # The news() function is the handler for the /news URL on our server. It
24 | # returns the HTML page showing all the headlines.
25 | async def news(request):
26 | sites = [
27 | # Here, we have only two news websites to be scraped: CNN and
28 | # Al Jazeera. More could easily be added, but then additional
29 | # postprocessors would also have to be added, just like the
30 | # cnn_articles() and aljazeera_articles() functions that are
31 | # customized to extract headline data.
32 | ('http://edition.cnn.com', cnn_articles),
33 | ('http://www.aljazeera.com', aljazeera_articles),
34 | ]
35 | # For each news site, we create a task to fetch and process the HTML page
36 | # data for its front page. Note that we unpack each tuple (*s) since
37 | # the news_fetch() coroutine function takes both the URL and the
38 | # postprocessing function as parameters. Each news_fetch() call will
39 | # return a list of tuples as headline results, in the form
40 | # (article URL, article title, kind).
41 | tasks = [create_task(news_fetch(*s)) for s in sites]
42 | # All the tasks are gathered together into a single Future (gather()
43 | # returns a future representing the state of all the tasks being
44 | # gathered), and then we immediately await the completion of that future.
45 | # This line will suspend until the future completes.
46 | await gather(*tasks)
47 |
48 | # Since all the news_fetch() tasks are now complete, we collect all of the
49 | # results into a dictionary. Note how nested comprehensions are used to
50 | # iterate over tasks, and then over the list of tuples returned by each
51 | # task. We also use f-strings to substitute data directly, including even
52 | # the kind of page, which will be used in CSS to color the div background.
53 | items = {
54 | # In this dictionary, the key is the headline title, and the value is
55 | # an HTML string for a div that will be displayed in our result page.
56 |         text: (
57 |             f'<div class="box {kind}">'
58 |             f'<span>'
59 |             f'<a href="{href}">{text}</a>'
60 |             f'</span>'
61 |             f'</div>'
62 |         )
63 | for task in tasks for href, text, kind in task.result()
64 | }
65 | content = ''.join(items[x] for x in sorted(items))
66 | # Our web server is going to return HTML. We’re loading HTML data from a
67 | # local file called index.html. This file is presented in Example B-2 if
68 | # you want to recreate the case study yourself.
69 | page = Template(open(INDEX_HTML_FILE_PATH).read())
70 | return web.Response(
71 | # We substitute the collected headline div into the template and
72 | # return the page to the browser client.
73 | body=page.safe_substitute(body=content),
74 | content_type='text/html',
75 | )
76 |
77 |
78 | async def news_fetch(url, postprocess):
79 | # Here, inside the news_fetch() coroutine function, we have a tiny
80 | # template for hitting the Splash API (which, for me, is running in a
81 | # local Docker container on port 8050). This demonstrates how aiohttp can
82 | # be used as an HTTP client.
83 | proxy_url = (
84 | f'http://localhost:8050/render.html?'
85 | f'url={url}&timeout=60&wait=1'
86 | )
87 | async with ClientSession() as session:
88 | # The standard way is to create a ClientSession() instance, and then
89 | # use the get() method on the session instance to perform the REST
90 | # call. In the next line, the response data is obtained. Note that
91 | # because we’re always operating on coroutines, with async with and
92 | # await, this coroutine will never block: we’ll be able to handle many
93 | # thousands of these requests, even though this operation
94 | # (news_fetch()) might be relatively slow since we’re doing web calls
95 | # internally.
96 | async with session.get(proxy_url) as resp:
97 | data = await resp.read()
98 | data = data.decode('utf-8')
99 | # After the data is obtained, we call the postprocessing function. For
100 | # CNN, it’ll be cnn_articles(), and for Al Jazeera it’ll be
101 | # aljazeera_articles().
102 | return postprocess(url, data)
103 |
104 |
105 | # We have space only for a brief look at the postprocessing. After getting the
106 | # page data, we use the Beautiful Soup 4 library for extracting headlines.
107 | def cnn_articles(url, page_data):
108 | soup = BeautifulSoup(page_data, 'lxml')
109 |
110 | def match(tag):
111 | return (
112 | tag.text and tag.has_attr('href')
113 | and tag['href'].startswith('/')
114 | and tag['href'].endswith('.html')
115 | # and tag.find(class_='cd__headline-text')
116 | )
117 | # The match() function will return all matching tags (I’ve manually
118 | # checked the HTML source of these news websites to figure out which
119 | # combination of filters extracts the best tags), and then we return
120 | # a list of tuples matching the format (article URL, article title, kind).
121 | headlines = soup.find_all(match)
122 | return [(url + hl['href'], hl.text, 'cnn')
123 | for hl in headlines]
124 |
125 |
126 | # This is the analogous postprocessor for Al Jazeera. The match() condition is
127 | # slightly different, but it is otherwise the same as the CNN one.
128 | def aljazeera_articles(url, page_data):
129 | soup = BeautifulSoup(page_data, 'lxml')
130 |
131 | def match(tag):
132 | return (
133 | tag.text and tag.has_attr('href')
134 | and tag['href'].startswith('/news')
135 | and tag['href'].endswith('.html')
136 | )
137 | headlines = soup.find_all(match)
138 | return [(url + hl['href'], hl.text, 'aljazeera')
139 | for hl in headlines]
140 |
141 |
142 | app = web.Application()
143 | app.router.add_get('/news', news)
144 | web.run_app(app, port=8080)
145 |
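# Aside: the templating step is plain stdlib string.Template. A tiny sketch of
# safe_substitute(), which (unlike substitute()) leaves unknown placeholders
# intact instead of raising KeyError:
from string import Template

page = Template('<html><body>$body $unknown</body></html>')
# Only $body is supplied; $unknown survives untouched in the output.
print(page.safe_substitute(body='<div class="box cnn">headline</div>'))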
--------------------------------------------------------------------------------
/chapter4/15/poller.py:
--------------------------------------------------------------------------------
1 | # Example 4-15. The traditional ØMQ approach
2 | import zmq
3 | context = zmq.Context()
4 | # ØMQ sockets have types. This is a PULL socket. You can think of it as a
5 | # receive-only kind of socket that will be fed by some other send-only socket,
6 | # which will be a PUSH type.
7 | receiver = context.socket(zmq.PULL)
8 | receiver.connect("tcp://localhost:5557")
9 | # The SUB socket is another kind of receive-only socket, and it will be fed a
10 | # PUB socket which is send-only.
11 | subscriber = context.socket(zmq.SUB)
12 | subscriber.connect("tcp://localhost:5556")
13 | subscriber.setsockopt_string(zmq.SUBSCRIBE, '')
14 | # If you need to move data between multiple sockets in a threaded ØMQ
15 | # application, you’re going to need a poller. This is because these sockets
16 | # are not thread-safe, so you cannot recv() on different sockets in different
17 | # threads.
18 | poller = zmq.Poller()
19 | poller.register(receiver, zmq.POLLIN)
20 | poller.register(subscriber, zmq.POLLIN)
21 | while True:
22 | try:
23 | # It works similarly to the select() system call. The poller will
24 | # unblock when there is data ready to be received on one of the
25 | # registered sockets, and then it’s up to you to pull the data off and
26 | # do something with it. The big if block is how you detect the correct
27 | # socket.
28 | socks = dict(poller.poll())
29 | except KeyboardInterrupt:
30 | break
31 | if receiver in socks:
32 | message = receiver.recv_json()
33 | print(f'Via PULL: {message}')
34 | if subscriber in socks:
35 | message = subscriber.recv_json()
36 | print(f'Via SUB: {message}')
37 |
--------------------------------------------------------------------------------
/chapter4/16/poller_srv.py:
--------------------------------------------------------------------------------
1 | # Example 4-16. Server code
2 | import zmq
3 | import itertools
4 | import time
5 |
6 |
7 | context = zmq.Context()
8 | pusher = context.socket(zmq.PUSH)
9 | pusher.bind("tcp://*:5557")
10 | publisher = context.socket(zmq.PUB)
11 | publisher.bind("tcp://*:5556")
12 |
13 | for i in itertools.count():
14 | time.sleep(1)
15 | pusher.send_json(i)
16 | publisher.send_json(i)
17 |
--------------------------------------------------------------------------------
/chapter4/17/poller_aio.py:
--------------------------------------------------------------------------------
1 | # Example 4-17. Clean separation with asyncio
2 | import asyncio
3 | import zmq
4 | from zmq.asyncio import Context
5 |
6 | context = Context()
7 |
8 |
9 | async def do_receiver():
10 | # This code sample does the same as Example 4-15, except that now we’re
11 | # taking advantage of coroutines to restructure everything. Now we can
12 | # deal with each socket in isolation. I’ve created two coroutine
13 | # functions, one for each socket; this one is for the PULL socket.
14 | receiver = context.socket(zmq.PULL)
15 | receiver.connect("tcp://localhost:5557")
16 | # I’m using the asyncio support in pyzmq, which means that all send() and
17 | # recv() calls must use the await keyword. The Poller no longer appears
18 | # anywhere, because it’s been integrated into the asyncio event loop
19 | # itself.
20 | while message := await receiver.recv_json():
21 | print(f'Via PULL: {message}')
22 |
23 |
24 | async def do_subscriber():
25 | # This is the handler for the SUB socket. The structure is very similar to
26 | # the PULL socket’s handler, but that need not have been the case. If more
27 | # complex logic had been required, I’d have been able to easily add it
28 | # here, fully encapsulated within the SUB-handler code only.
29 | subscriber = context.socket(zmq.SUB)
30 | subscriber.connect("tcp://localhost:5556")
31 | subscriber.setsockopt_string(zmq.SUBSCRIBE, '')
32 | # Again, the asyncio-compatible sockets require the await keyword to send
33 | # and receive.
34 | while message := await subscriber.recv_json():
35 | print(f'Via SUB: {message}')
36 |
37 |
38 | async def main():
39 | await asyncio.gather(
40 | do_receiver(),
41 | do_subscriber(),
42 | )
43 | asyncio.run(main())
44 |
--------------------------------------------------------------------------------
/chapter4/18/backend-app.py:
--------------------------------------------------------------------------------
1 | # Example 4-18. The application layer: producing metrics
2 | import argparse
3 | import asyncio
4 | from random import randint, uniform
5 | from datetime import datetime as dt
6 | from datetime import timezone as tz
7 | from contextlib import suppress
8 | import zmq
9 | import zmq.asyncio
10 | import psutil
11 |
12 | ctx = zmq.asyncio.Context()
13 |
14 |
15 | # This coroutine function will run as a long-lived coroutine, continually
16 | # sending out data to the server process.
17 | async def stats_reporter(color: str):
18 | p = psutil.Process()
19 | # Create a ØMQ socket. As you know, there are different flavors of socket;
20 | # this one is a PUB type, which allows one-way messages to be sent to
21 | # another ØMQ socket. This socket has—as the ØMQ guide says—superpowers.
22 | # It will automatically handle all reconnection and buffering logic for
23 | # us.
24 | sock = ctx.socket(zmq.PUB)
25 | sock.setsockopt(zmq.LINGER, 1)
26 | # Connect to the server.
27 | sock.connect('tcp://localhost:5555')
28 | # Our shutdown sequence is driven by KeyboardInterrupt, farther down. When
29 | # that signal is received, all the tasks will be cancelled. Here I handle
30 | # the raised CancelledError with the handy suppress() context manager from
31 | # the contextlib standard library module.
32 | with suppress(asyncio.CancelledError):
33 | # Iterate forever, sending out data to the server.
34 | while True:
35 | # Since ØMQ knows how to work with complete messages, and not just
36 | # chunks off a bytestream, it opens the door to a bunch of useful
37 | # wrappers around the usual sock.send() idiom: here, I use one of
38 | # those helper methods, send_json(), which will automatically
39 | # serialize the argument into JSON. This allows us to use a dict()
40 | # directly.
41 | await sock.send_json(dict(
42 | color=color,
43 | # A reliable way to transmit datetime information is via the
44 | # ISO 8601 format. This is especially true if you have to pass
45 | # datetime data between software written in different
46 | # languages, since the vast majority of language
47 | # implementations will be able to work with this standard.
48 | timestamp=dt.now(tz=tz.utc).isoformat(),
49 | cpu=p.cpu_percent(),
50 | mem=p.memory_full_info().rss / 1024 / 1024
51 | ))
52 | await asyncio.sleep(1)
53 | # To end up here, we must have received the CancelledError exception
54 | # resulting from task cancellation. The ØMQ socket must be closed to allow
55 | # program shutdown.
56 | sock.close()
57 |
58 |
59 | async def main(args):
60 | asyncio.create_task(stats_reporter(args.color))
61 | leak = []
62 | with suppress(asyncio.CancelledError):
63 | while True:
64 | # The main() function symbolizes the actual microservice
65 | # application. Fake work is produced with this sum over random
66 | # numbers, just to give us some nonzero data to view in the
67 | # visualization layer a bit later.
68 | sum(range(randint(1_000, 10_000_000)))
69 | await asyncio.sleep(uniform(0, 1))
70 | leak += [0] * args.leak
71 |
72 | if __name__ == '__main__':
73 | parser = argparse.ArgumentParser()
74 | # I’m going to create multiple instances of this application, so it will
75 | # be convenient to be able to distinguish between them (later, in the
76 | # graphs) with a --color parameter.
77 | parser.add_argument('--color', type=str)
78 | parser.add_argument('--leak', type=int, default=0)
79 | args = parser.parse_args()
80 | try:
81 | asyncio.run(main(args))
82 | except KeyboardInterrupt:
83 | print('Leaving...')
84 | # Finally, the ØMQ context can be terminated.
85 | ctx.term()
86 |
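# Aside: what goes over the wire is plain JSON. A sketch (the values here are
# illustrative) of roughly what send_json() serializes for us:
import json
from datetime import datetime, timezone

payload = dict(
    color='red',  # illustrative
    timestamp=datetime.now(tz=timezone.utc).isoformat(),
    cpu=12.5,     # illustrative
    mem=42.0,     # illustrative
)
# pyzmq's send_json() is roughly json.dumps(...) plus a normal send().
print(json.dumps(payload).encode('utf-8'))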
--------------------------------------------------------------------------------
/chapter4/19/metric-server.py:
--------------------------------------------------------------------------------
1 | # Example 4-19. The collection layer: this server collects process stats
2 | import os
3 | import asyncio
4 | import zmq
5 | import zmq.asyncio
6 | import aiohttp
7 | import json
8 | from contextlib import suppress
9 | from aiohttp import web
10 | from aiohttp_sse import sse_response
11 | from weakref import WeakSet
12 |
13 |
14 | CHARTS_HTML_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(
15 | __file__)), '../../appendixB/3/charts.html')
16 |
17 | # zmq.asyncio.install()
18 | ctx = zmq.asyncio.Context()
19 | # One half of this program will receive data from other applications, and the
20 | # other half will provide data to browser clients via server-sent events
21 | # (SSEs). I use a WeakSet() to keep track of all the currently connected web
22 | # clients. Each connected client will have an associated Queue() instance, so
23 | # this connections identifier is really a set of queues.
24 | connections = WeakSet()
25 |
26 |
27 | async def collector():
28 | # Recall that in the application layer, I used a zmq.PUB socket; here in
29 | # the collection layer, I use its partner, the zmq.SUB socket type. This
30 | # ØMQ socket can only receive, not send.
31 | sock = ctx.socket(zmq.SUB)
32 | # For the zmq.SUB socket type, providing a subscription name is required,
33 | # but for our purposes, we’ll just take everything that comes in—hence the
34 | # empty topic name.
35 | sock.setsockopt_string(zmq.SUBSCRIBE, '')
36 | # I bind the zmq.SUB socket. Think about that for a second. In pub-sub
37 | # configurations, you usually have to make the pub end the server (bind())
38 | # and the sub end the client (connect()). ØMQ is different: either end can
39 | # be the server. For our use case, this is important, because each of our
40 | # application-layer instances will be connecting to the same collection
41 | # server domain name, and not the other way around.
42 | sock.bind('tcp://*:5555')
43 | with suppress(asyncio.CancelledError):
44 | # The support for asyncio in pyzmq allows us to await data from our
45 | # connected apps. And not only that, but the incoming data will be
46 | # automatically deserialized from JSON (yes, this means data is a
47 | # dict()).
48 | while data := await sock.recv_json():
49 | print(data)
50 | for q in connections:
51 | # Recall that our connections set holds a queue for every
52 | # connected web client. Now that data has been received, it’s
53 | # time to send it to all the clients: the data is placed onto
54 | # each queue.
55 | await q.put(data)
56 | sock.close()
57 |
58 |
59 | # The feed() coroutine function will create coroutines for each connected web
60 | # client. Internally, server-sent events are used to push data to the web
61 | # clients.
62 | async def feed(request):
63 | queue = asyncio.Queue()
64 | # As described earlier, each web client will have its own queue instance,
65 | # in order to receive data from the collector() coroutine. The queue
66 | # instance is added to the connections set, but because connections is a
67 | # weak set, the entry will automatically be removed from connections when
68 | # the queue goes out of scope—i.e., when a web client disconnects.
69 | # Weakrefs are great for simplifying these kinds of bookkeeping tasks.
70 | connections.add(queue)
71 | with suppress(asyncio.CancelledError):
72 | # The aiohttp_sse package provides the sse_response() context manager.
73 | # This gives us a scope inside which to push data to the web client.
74 | async with sse_response(request) as resp:
75 | # We remain connected to the web client, and wait for data on this
76 | # specific client’s queue.
77 | while data := await queue.get():
78 | print('sending data:', data)
79 | # As soon as the data comes in (inside collector()), it will
80 | # be sent to the connected web client. Note that I reserialize
81 | # the data dict here. An optimization to this code would be to
82 | # avoid deserializing JSON in collector(), and instead use
83 | # sock.recv_string() to avoid the serialization round trip. Of
84 | # course, in a real scenario, you might want to deserialize in
85 | # the collector, and perform some validation on the data
86 | # before sending it to the browser client. So many choices!
87 | await resp.send(json.dumps(data))
88 | return resp
89 |
90 |
91 | # The index() endpoint is the primary page load, and here we serve a static
92 | # file called charts.html.
93 | async def index(request):
94 | return aiohttp.web.FileResponse(CHARTS_HTML_FILE_PATH)
95 |
96 |
97 | # The aiohttp library provides facilities for us to hook in additional
98 | # long-lived coroutines we might need. With the collector() coroutine, we have
99 | # exactly that situation, so I create a startup coroutine, start_collector(),
100 | # and a shutdown coroutine. These will be called during specific phases of
101 | # aiohttp’s startup and shutdown sequence. Note that I add the collector task
102 | # to the app itself, which implements a mapping protocol so that you can use
103 | # it like a dict.
104 | async def start_collector(app):
105 | loop = asyncio.get_event_loop()
106 | app['collector'] = loop.create_task(collector())
107 |
108 |
109 | async def stop_collector(app):
110 | print('Stopping collector...')
111 | # I obtain our collector() coroutine off the app identifier and call
112 | # cancel() on that.
113 | app['collector'].cancel()
114 | await app['collector']
115 | ctx.term()
116 |
117 | if __name__ == '__main__':
118 | app = web.Application()
119 | app.router.add_route('GET', '/', index)
120 | app.router.add_route('GET', '/feed', feed)
121 | # Finally, you can see where the custom startup and shutdown coroutines
122 | # are hooked in: the app instance provides hooks to which our custom
123 | # coroutines may be appended.
124 | app.on_startup.append(start_collector)
125 | app.on_cleanup.append(stop_collector)
126 | web.run_app(app, host='127.0.0.1', port=8088)
127 |
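# Aside: the WeakSet bookkeeping claim is easy to verify in isolation. In
# CPython, the entry disappears as soon as the last strong reference goes
# away (a minimal sketch, using a stand-in class):
from weakref import WeakSet

class Client:
    """Stands in for a per-connection object such as an asyncio.Queue."""

connections = WeakSet()
c = Client()
connections.add(c)
print(len(connections))  # 1
del c                    # drop the last strong reference
print(len(connections))  # 0 in CPython: the entry is removed automatically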
--------------------------------------------------------------------------------
/chapter4/20/visualization_layer.snip.js:
--------------------------------------------------------------------------------
1 | // Example 4-20. The visualization layer, which is a fancy way of saying “the browser”
2 |
3 | /* This is a snippet of the appendixB/3/charts.html JavaScript
4 | that visualizes the server-sent information */
5 |
6 |
7 | /* Create a new EventSource() instance on the /feed URL. The browser will
8 | connect to /feed on our server (metric-server.py). Note that the browser
9 | will automatically try to reconnect if the connection is lost. Server-sent
10 | events are often overlooked, but in many situations their simplicity makes
11 | them preferable to WebSockets. */
12 | var evtSource = new EventSource("/feed");
13 | evtSource.onmessage = function(e) {
14 | // The onmessage event will fire every time the server sends data. Here the data is parsed as JSON.
15 | var obj = JSON.parse(e.data);
16 | if (!(obj.color in cpu)) {
17 | add_timeseries(cpu, cpu_chart, obj.color);
18 | }
19 | if (!(obj.color in mem)) {
20 | add_timeseries(mem, mem_chart, obj.color);
21 | }
22 | /* The cpu identifier is a mapping of a color to a TimeSeries() instance (for more
23 | on this, see Example B-3). Here, we obtain that time series and append data to
24 | it. We also obtain the timestamp and parse it to get the correct format required by the chart. */
25 | cpu[obj.color].append(
26 | Date.parse(obj.timestamp), obj.cpu);
27 | mem[obj.color].append(
28 | Date.parse(obj.timestamp), obj.mem);
29 | };
30 |
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/__pycache__/model.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ckaraneen/usingaio/7facd6ebbf045053e4955151e88e8e6f43299458/chapter4/21,22,23,24/__pycache__/model.cpython-38.pyc
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/__pycache__/perf.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ckaraneen/usingaio/7facd6ebbf045053e4955151e88e8e6f43299458/chapter4/21,22,23,24/__pycache__/perf.cpython-38.pyc
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/__pycache__/triggers.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ckaraneen/usingaio/7facd6ebbf045053e4955151e88e8e6f43299458/chapter4/21,22,23,24/__pycache__/triggers.cpython-38.pyc
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/__pycache__/util.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ckaraneen/usingaio/7facd6ebbf045053e4955151e88e8e6f43299458/chapter4/21,22,23,24/__pycache__/util.cpython-38.pyc
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/asyncpg-basic.py:
--------------------------------------------------------------------------------
1 | # Example 4-21. Basic demo of asyncpg
2 |
3 | # For all of the code in this section, we’ll need a running instance of
4 | # PostgreSQL. This is most easily done with Docker, using the following
5 | # command:
6 | # docker run -d --rm -p 55432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust postgres
7 |
8 | import asyncio
9 | import asyncpg
10 | import datetime
11 | from util import Database
12 |
13 |
14 | async def main():
15 | async with Database('test', owner=True) as conn:
16 | await demo(conn)
17 |
18 |
19 | async def demo(conn: asyncpg.Connection):
20 | await conn.execute('''
21 | CREATE TABLE users(
22 | id serial PRIMARY KEY,
23 | name text,
24 | dob date
25 | )'''
26 | )
27 | pk = await conn.fetchval(
28 | 'INSERT INTO users(name, dob) VALUES($1, $2) '
29 | 'RETURNING id', 'Bob', datetime.date(1984, 3, 1)
30 | )
31 |
32 | async def get_row():
33 | return await conn.fetchrow(
34 | 'SELECT * FROM users WHERE name = $1',
35 | 'Bob'
36 | )
37 | print('After INSERT:', await get_row())
38 | await conn.execute(
39 | 'UPDATE users SET dob = $1 WHERE id=1',
40 | datetime.date(1985, 3, 1)
41 | )
42 | print('After UPDATE:', await get_row())
43 | await conn.execute(
44 | 'DELETE FROM users WHERE id=1'
45 | )
46 | print('After DELETE:', await get_row())
47 |
48 | if __name__ == '__main__':
49 | asyncio.run(main())
50 |
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/model.py:
--------------------------------------------------------------------------------
1 | # Example 4-24. DB model for the “patron” table
2 | import logging
3 | from json import loads, dumps
4 | # You have to add triggers to the database in order to get notifications when
5 | # data changes. I’ve created these handy helpers to create the trigger
6 | # function itself (with create_notify_trigger) and to add the trigger to a
7 | # specific table (with add_table_triggers). The SQL required to do this is
8 | # somewhat out of scope for this book, but it’s still crucial to understanding
9 | # how this case study works.
10 | from triggers import (
11 | create_notify_trigger, add_table_triggers)
12 | # The third-party boltons package provides a bunch of useful tools, not the
13 | # least of which is the LRU cache, a more versatile option than the @lru_cache
14 | # decorator in the functools standard library module.
15 | from boltons.cacheutils import LRU
16 |
17 | logger = logging.getLogger('perf')
18 |
19 | # This block of text holds all the SQL for the standard CRUD operations. Note
20 | # that I’m using native PostgreSQL syntax for the parameters: $1, $2, and so
21 | # on. There is nothing novel here, and it won’t be discussed further.
22 | CREATE_TABLE = ('CREATE TABLE IF NOT EXISTS patron('
23 | 'id serial PRIMARY KEY, name text, '
24 | 'fav_dish text)')
25 | INSERT = ('INSERT INTO patron(name, fav_dish) '
26 | 'VALUES ($1, $2) RETURNING id')
27 | SELECT = 'SELECT * FROM patron WHERE id = $1'
28 | UPDATE = 'UPDATE patron SET name=$1, fav_dish=$2 WHERE id=$3'
29 | DELETE = 'DELETE FROM patron WHERE id=$1'
30 | EXISTS = "SELECT to_regclass('patron')"
31 |
32 | # Create the cache for this app instance.
33 | CACHE = LRU(max_size=65536)
34 |
35 |
36 | # I called this function from the Sanic module inside the new_patron()
37 | # endpoint for adding new patrons. Inside the function, I use the fetchval()
38 | # method to insert new data. Why fetchval() and not execute()? Because
39 | # fetchval() returns the primary key of the newly inserted record!
40 | async def add_patron(conn, data: dict) -> int:
41 | return await conn.fetchval(
42 | INSERT, data['name'], data['fav_dish'])
43 |
44 |
45 | async def update_patron(conn, id: int, data: dict) -> bool:
46 | # Update an existing record. When this succeeds, PostgreSQL will return
47 | # UPDATE 1, so I use that as a check to verify that the update succeeded.
48 | result = await conn.execute(
49 | UPDATE, data['name'], data['fav_dish'], id)
50 | return result == 'UPDATE 1'
51 |
52 |
53 | # Deletion is very similar to updating.
54 | async def delete_patron(conn, id: int):
55 | result = await conn.execute(DELETE, id)
56 | return result == 'DELETE 1'
57 |
58 |
59 | # This is the read operation. This is the only part of our CRUD interface that
60 | # cares about the cache. Think about that for a second: we don’t update the
61 | # cache when doing an insert, update, or delete. This is because we rely on
62 | # the async notification from the database (via the installed triggers) to
63 | # update the cache if any data is changed.
64 | async def get_patron(conn, id: int) -> dict:
65 | if id not in CACHE:
66 | logger.info(f'id={id} Cache miss')
67 | # Of course, we do still want to use the cache after the first GET.
68 | record = await conn.fetchrow(SELECT, id)
69 | CACHE[id] = record and dict(record.items())
70 | return CACHE[id]
71 |
72 |
73 | # The db_event() function is the callback that asyncpg will invoke when there are
74 | # events on our DB notification channel, chan_patron. This specific parameter
75 | # list is required by asyncpg. conn is the connection on which the event was
76 | # sent, pid is the process ID of the PostgreSQL instance that sent the event,
77 | # channel is the name of the channel (which in this case will be chan_patron),
78 | # and the payload is the data being sent on the channel.
79 | def db_event(conn, pid, channel, payload):
80 | # Deserialize the JSON data to a dict.
81 | event = loads(payload)
82 | logger.info('Got DB event:\n' + dumps(event, indent=4))
83 | id = event['id']
84 | if event['type'] == 'INSERT':
85 | CACHE[id] = event['data']
86 | elif event['type'] == 'UPDATE':
87 | # The cache population is generally quite straightforward, but note
88 | # that update events contain both new and old data, so we need to make
89 | # sure to cache the new data only.
90 | CACHE[id] = event['data']['new']
91 | elif event['type'] == 'DELETE':
92 | CACHE[id] = None
93 |
94 |
95 | # This is a small utility function I’ve made to easily re-create a table if
96 | # it’s missing. This is really useful if you need to do this frequently—such
97 | # as when writing the code samples for this book!
98 | # This is also where the database notification triggers are created and added
99 | # to our patron table.
100 | async def create_table_if_missing(conn):
101 | if not await conn.fetchval(EXISTS):
102 | await conn.fetchval(CREATE_TABLE)
103 | await create_notify_trigger(
104 | conn, channel='chan_patron')
105 | await add_table_triggers(
106 | conn, table='patron')
107 |
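# Aside: a quick sketch of the eviction behavior CACHE relies on, assuming
# boltons' documented dict-like LRU API:
from boltons.cacheutils import LRU

cache = LRU(max_size=2)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3        # exceeds max_size: the least-recently-used key goes
print('a' in cache)   # False: 'a' was evicted
print(cache['b'], cache['c'])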
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/perf.py:
--------------------------------------------------------------------------------
1 | # Example B-5. perf.py
2 | import logging
3 | from time import perf_counter
4 | from inspect import iscoroutinefunction
5 |
6 | logger = logging.getLogger('perf')
7 | logging.basicConfig(level=logging.INFO)
8 |
9 |
10 | # The aelapsed() decorator will record the time taken to execute the wrapped
11 | # coroutine.
12 | def aelapsed(corofn, caption=''):
13 | async def wrapper(*args, **kwargs):
14 | t0 = perf_counter()
15 | result = await corofn(*args, **kwargs)
16 | delta = (perf_counter() - t0) * 1e3
17 | logger.info(
18 | f'{caption} Elapsed: {delta:.2f} ms')
19 | return result
20 | return wrapper
21 |
22 |
23 | # The aprofiler() metaclass will make sure that every member of the class that
24 | # is a coroutine function will get wrapped in the aelapsed() decorator.
25 | def aprofiler(cls, bases, members):
26 | for k, v in members.items():
27 | if iscoroutinefunction(v):
28 | members[k] = aelapsed(v, k)
29 | return type.__new__(type, cls, bases, members)
30 |
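# Aside: a usage sketch (mine, not from the book text). aelapsed() wraps a
# single coroutine function; aprofiler as a metaclass wraps every coroutine
# method of a class:
import asyncio
from perf import aelapsed, aprofiler

@aelapsed
async def work():
    await asyncio.sleep(0.1)

class Jobs(metaclass=aprofiler):
    async def run(self):
        await asyncio.sleep(0.1)

async def demo():
    await work()        # logs ' Elapsed: ~100 ms' (empty caption)
    await Jobs().run()  # logs 'run Elapsed: ~100 ms'

asyncio.run(demo())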
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/sanic_demo.py:
--------------------------------------------------------------------------------
1 | # Example 4-23. API server with Sanic
2 | import argparse
3 | from sanic import Sanic
4 | from sanic.views import HTTPMethodView
5 | from sanic.response import json
6 | # The Database utility helper, as described earlier. This will provide the
7 | # methods required to connect to the database.
8 | from util import Database
9 | # Two more tools I’ve cobbled together to log the elapsed time of each API
10 | # endpoint. I used this in the previous discussion to detect when a GET was
11 | # being returned from the cache. The implementations for aelapsed() and
12 | # aprofiler() are not important for this case study, but you can obtain them
13 | # in Example B-5.
14 | from perf import aelapsed, aprofiler
15 | import model
16 |
17 | # We create the main Sanic app instance.
18 | app = Sanic()
19 |
20 |
21 | # This coroutine function is for creating new patron entries. In an
22 | # add_route() call toward the bottom of the code, new_patron() is associated
23 | # with the endpoint /patron, only for the POST HTTP method. The @aelapsed
24 | # decorator is not part of the Sanic API: it’s my own invention, merely to log
25 | # out timings for each call.
26 | @aelapsed
27 | async def new_patron(request):
28 | # Sanic provides immediate deserialization of received JSON data by using
29 | # the .json attribute on the request object.
30 | data = request.json
31 | # The model module, which I imported, is the model for our patron table in
32 | # the database. I’ll go through that in more detail in the next code
33 | # listing; for now, just understand that all the database queries and SQL
34 | # are in this model module. Here I’m passing the connection pool for the
35 | # database, and the same pattern is used for all the interaction with the
36 | # database model in this function and in the PatronAPI class further down.
37 | id = await model.add_patron(app.pool, data)
38 | # A new primary key, id, will be created, and this is returned to
39 | # the caller as JSON.
40 | return json(dict(msg='ok', id=id))
41 |
42 |
43 | # While creation is handled in the new_patron() function, all other
44 | # interactions are handled in this class-based view, which is a convenience
45 | # provided by Sanic. All the methods in this class are associated with the
46 | # same URL, /patron/<id>, which you can see in the add_route() function
47 | # near the bottom. Note that the id URL parameter will be passed to each of
48 | # the methods, and this parameter is required for all three endpoints.
49 | # You can safely ignore the metaclass argument: all it does is wrap each
50 | # method with the @aelapsed decorator so that timings will be printed in the
51 | # logs. Again, this is not part of the Sanic API; it’s my own invention for
52 | # logging timing data.
53 | class PatronAPI(HTTPMethodView, metaclass=aprofiler):
54 | async def get(self, request, id):
55 | # As before, model interaction is performed inside the model module.
56 | data = await model.get_patron(app.pool, id)
57 | return json(data)
58 |
59 | async def put(self, request, id):
60 | data = request.json
61 | ok = await model.update_patron(app.pool, id, data)
62 | # If the model reports failure for doing the update, I modify the
63 | # response data. I’ve included this for readers who have not yet seen
64 | # Python’s version of the ternary operator.
65 | return json(dict(msg='ok' if ok else 'bad'))
66 |
67 | async def delete(self, request, id):
68 | ok = await model.delete_patron(app.pool, id)
69 | return json(dict(msg='ok' if ok else 'bad'))
70 |
71 |
72 | # The @app.listener decorators are hooks provided by Sanic to give you a place
73 | # to add extra actions during the startup and shutdown sequence. This one,
74 | # before_server_start, is invoked before the API server is started up. This
75 | # seems like a good place to initialize our database connection.
76 | @app.listener('before_server_start')
77 | async def db_connect(app, loop):
78 | # Use the Database helper to create a connection to our PostgreSQL
79 | # instance. The DB we’re connecting to is test.
80 | app.db = Database('test', owner=False)
81 | # Obtain a connection pool to our database.
82 | app.pool = await app.db.connect()
83 | # Use our model (for the patron table) to create the table if it’s missing.
84 | await model.create_table_if_missing(app.pool)
85 | # Use our Database helper to add a dedicated listener for database events,
86 | # listening on the channel chan_patron. The callback function for these
87 | # events is model.db_event(), which I’ll go through in the next listing.
88 | # The callback will be called every time the database updates the channel.
89 | await app.db.add_listener('chan_patron', model.db_event)
90 |
91 |
92 | # after_server_stop is the hook for tasks that must happen during shutdown.
93 | # Here we disconnect from the database.
94 | @app.listener('after_server_stop')
95 | async def db_disconnect(app, loop):
96 | await app.db.disconnect()
97 |
98 | if __name__ == "__main__":
99 | parser = argparse.ArgumentParser()
100 | parser.add_argument('--port', type=int, default=8000)
101 | args = parser.parse_args()
102 | # This add_route() call sends POST requests for the /patron URL to the
103 | # new_patron() coroutine function.
104 | app.add_route(
105 | new_patron, '/patron', methods=['POST'])
106 | # This add_route() call sends all requests for the /patron/<id> URL to
107 | # the PatronAPI class-based view. The method names in that class determine
108 | # which one is called: a GET HTTP request will call the PatronAPI.get()
109 | # method, and so on.
110 | app.add_route(
111 |     PatronAPI.as_view(), '/patron/<id>')
112 | app.run(host="0.0.0.0", port=args.port)
113 |
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/triggers.py:
--------------------------------------------------------------------------------
1 | # Example B-4. triggers.py
2 |
3 | # These functions require asyncpg, although this import is used only to allow
4 | # Connection to be used in type annotations.
5 | from asyncpg.connection import Connection
6 |
7 |
8 | # The create_notify_trigger() coroutine function will create the trigger
9 | # function itself in the database. The trigger function will contain the name
10 | # of the channel that updates will be sent to. The code for the function
11 | # itself is in the SQL_CREATE_TRIGGER identifier, and it is set up as a format
12 | # string.
13 | async def create_notify_trigger(
14 | conn: Connection,
15 | trigger_name: str = 'table_update_notify',
16 | channel: str = 'table_change') -> None:
17 | # Recall from the case study example that update notifications included a
18 | # “diff” section in which the difference between old and new data was
19 | # shown. We use the hstore feature of PostgreSQL to calculate that diff.
20 | # It provides something close to the semantics of sets. The hstore
21 | # extension is not enabled by default, so we enable it here.
22 | await conn.execute(
23 | 'CREATE EXTENSION IF NOT EXISTS hstore')
24 | # The desired trigger name and channel are substituted into the template
25 | # and then executed.
26 | await conn.execute(
27 | SQL_CREATE_TRIGGER.format(
28 | trigger_name=trigger_name,
29 | channel=channel))
30 |
31 |
32 | # The second function, add_table_triggers(), connects the trigger function to
33 | # table events like insert, update, and delete.
34 | async def add_table_triggers(
35 | conn: Connection,
36 | table: str,
37 | trigger_name: str = 'table_update_notify',
38 | schema: str = 'public') -> None:
39 | # There is one format string for each of the three operations.
40 | templates = (SQL_TABLE_INSERT, SQL_TABLE_UPDATE,
41 | SQL_TABLE_DELETE)
42 | for template in templates:
43 | # The desired variables are substituted into the templates and then
44 | # executed.
45 | await conn.execute(
46 | template.format(
47 | table=table,
48 | trigger_name=trigger_name,
49 | schema=schema))
50 |
51 | # This SQL code took me a lot longer than expected to get exactly right! This
52 | # PostgreSQL procedure is called for insert, update, and delete events; the
53 | # way to know which is to check the TG_OP variable. If the operation is
54 | # INSERT, then NEW will be defined (and OLD will not be defined). For DELETE,
55 | # OLD will be defined but not NEW. For UPDATE, both are defined, which
56 | # allows us to calculate the diff. We also make use of PostgreSQL’s built-in
57 | # support for JSON with the row_to_json() and hstore_to_json() functions:
58 | # these mean that our callback handler will receive valid JSON.
59 | #
60 | # Finally, the call to the pg_notify() function is what actually sends the
61 | # event. All subscribers on {channel} will receive the notification.
62 | SQL_CREATE_TRIGGER = """\
63 | CREATE OR REPLACE FUNCTION {trigger_name}()
64 | RETURNS trigger AS $$
65 | DECLARE
66 | id integer; -- or uuid
67 | data json;
68 | BEGIN
69 | data = json 'null';
70 | IF TG_OP = 'INSERT' THEN
71 | id = NEW.id;
72 | data = row_to_json(NEW);
73 | ELSIF TG_OP = 'UPDATE' THEN
74 | id = NEW.id;
75 | data = json_build_object(
76 | 'old', row_to_json(OLD),
77 | 'new', row_to_json(NEW),
78 | 'diff', hstore_to_json(hstore(NEW) - hstore(OLD))
79 | );
80 | ELSE
81 | id = OLD.id;
82 | data = row_to_json(OLD);
83 | END IF;
84 | PERFORM
85 | pg_notify(
86 | '{channel}',
87 | json_build_object(
88 | 'table', TG_TABLE_NAME,
89 | 'id', id,
90 | 'type', TG_OP,
91 | 'data', data
92 | )::text
93 | );
94 | RETURN NEW;
95 | END;
96 | $$ LANGUAGE plpgsql;
97 | """
98 |
99 | # This is standard trigger code: it sets up a trigger to call a specific
100 | # procedure {trigger_name}() when a specific event occurs, like an INSERT or
101 | # an UPDATE.
102 | SQL_TABLE_UPDATE = """\
103 | DROP TRIGGER IF EXISTS
104 | {table}_notify_update ON {schema}.{table};
105 | CREATE TRIGGER {table}_notify_update
106 | AFTER UPDATE ON {schema}.{table}
107 | FOR EACH ROW
108 | EXECUTE PROCEDURE {trigger_name}();
109 | """
110 | SQL_TABLE_INSERT = """\
111 | DROP TRIGGER IF EXISTS
112 | {table}_notify_insert ON {schema}.{table};
113 | CREATE TRIGGER {table}_notify_insert
114 | AFTER INSERT ON {schema}.{table}
115 | FOR EACH ROW
116 | EXECUTE PROCEDURE {trigger_name}();
117 | """
118 | SQL_TABLE_DELETE = """\
119 | DROP TRIGGER IF EXISTS
120 | {table}_notify_delete ON {schema}.{table};
121 | CREATE TRIGGER {table}_notify_delete
122 | AFTER DELETE ON {schema}.{table}
123 | FOR EACH ROW
124 | EXECUTE PROCEDURE {trigger_name}();
125 | """
126 |
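# Aside: a hedged end-to-end sketch of these triggers, assuming the Docker
# PostgreSQL from Example 4-21 (port 55432, user postgres) and an existing
# patron table in a database named test:
import asyncio
import asyncpg
from triggers import create_notify_trigger, add_table_triggers

async def demo():
    conn = await asyncpg.connect(
        'postgresql://postgres@localhost:55432/test')
    await create_notify_trigger(conn, channel='chan_patron')
    await add_table_triggers(conn, table='patron')

    def on_event(conn, pid, channel, payload):
        print(f'{channel}: {payload}')

    # A listening session also receives notifications it caused itself.
    await conn.add_listener('chan_patron', on_event)
    await conn.execute(
        'INSERT INTO patron(name, fav_dish) VALUES($1, $2)',
        'Sam', 'pie')
    await asyncio.sleep(1)  # allow the NOTIFY payload to be delivered
    await conn.close()

asyncio.run(demo())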
--------------------------------------------------------------------------------
/chapter4/21,22,23,24/util.py:
--------------------------------------------------------------------------------
1 | # Example 4-22. Useful tooling for your asyncpg experiments
2 | import argparse
3 | import asyncio
4 | import asyncpg
5 | from asyncpg.pool import Pool
6 |
7 | DSN = 'postgresql://{user}@{host}:{port}'
8 | DSN_DB = DSN + '/{name}'
9 | CREATE_DB = 'CREATE DATABASE {name}'
10 | DROP_DB = 'DROP DATABASE {name}'
11 |
12 |
13 | class Database:
14 | def __init__(self, name, owner=False, **kwargs):
15 | self.params = dict(
16 | user='postgres', host='localhost',
17 | port=55432, name=name)
18 | self.params.update(kwargs)
19 | self.pool: Pool = None
20 | self.owner = owner
21 | self.listeners = []
22 |
23 | async def connect(self) -> Pool:
24 | if self.owner:
25 | await self.server_command(
26 | CREATE_DB.format(**self.params))
27 | self.pool = await asyncpg.create_pool(
28 | DSN_DB.format(**self.params))
29 | return self.pool
30 |
31 | async def disconnect(self):
32 | """Destroy the database"""
33 | if self.pool:
34 | releases = [self.pool.release(conn)
35 | for conn in self.listeners]
36 | await asyncio.gather(*releases)
37 | await self.pool.close()
38 | if self.owner:
39 | await self.server_command(
40 | DROP_DB.format(**self.params))
41 |
42 | async def __aenter__(self) -> Pool:
43 | return await self.connect()
44 |
45 | async def __aexit__(self, *exc):
46 | await self.disconnect()
47 |
48 | async def server_command(self, cmd):
49 | conn = await asyncpg.connect(
50 | DSN.format(**self.params))
51 | await conn.execute(cmd)
52 | await conn.close()
53 |
54 | async def add_listener(self, channel, callback):
55 | conn: asyncpg.Connection = await self.pool.acquire()
56 | await conn.add_listener(channel, callback)
57 | self.listeners.append(conn)
58 |
59 |
60 | if __name__ == '__main__':
61 | parser = argparse.ArgumentParser()
62 | parser.add_argument('--cmd', choices=['create', 'drop'])
63 | parser.add_argument('--name', type=str)
64 | args = parser.parse_args()
65 | d = Database(args.name, owner=True)
66 | if args.cmd == 'create':
67 | asyncio.run(d.connect())
68 | elif args.cmd == 'drop':
69 | asyncio.run(d.disconnect())
70 | else:
71 | parser.print_help()
72 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp==3.6.2
2 | aiohttp-sse==2.0.0
3 | asyncpg==0.21.0
4 | attrs==20.1.0
5 | boltons>=19.0.0,<=21.0.0
6 | bs4==0.0.1
7 | janus==0.5.0
8 | lxml==4.5.2
9 | psutil==5.7.2
10 | pyzmq==19.0.2
11 | sanic==20.6.3
12 | twisted==20.3.0
13 | zmq==0.0.0
14 |
--------------------------------------------------------------------------------
/resource/book_cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ckaraneen/usingaio/7facd6ebbf045053e4955151e88e8e6f43299458/resource/book_cover.jpg
--------------------------------------------------------------------------------