├── LICENSE ├── README.md ├── README.old.md ├── e5_refresh.sh ├── redis-3.5.3.dist-info ├── INSTALLER ├── LICENSE ├── METADATA ├── RECORD ├── REQUESTED ├── WHEEL └── top_level.txt ├── redis ├── __init__.py ├── __init__.pyc ├── _compat.py ├── _compat.pyc ├── client.py ├── client.pyc ├── connection.py ├── connection.pyc ├── exceptions.py ├── exceptions.pyc ├── lock.py ├── lock.pyc ├── sentinel.py ├── sentinel.pyc ├── utils.py └── utils.pyc ├── run.py ├── token.txt └── urls.py /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Office E5 Refresh SCF 2 | 修改自github action版AutoApiSecret-加密版。 3 | 4 | 由于github的TOS,禁止使用action调用相关程序。 5 | 6 | 因此在 [ZYong9908/AutoApiSecret-1](https://github.com/ZYong9908/AutoApiSecret-1)基础上修改为腾讯云SCF版本。 7 | 8 | 9 | 10 | ## Requirement 11 | 12 | ### 云函数 13 | 14 | - 需要Redis保存refresh_token(可以注册Redislab获取30MB的免费redis服务器) 15 | 16 | - 腾讯云开启云函数功能(拥有免费配额) 17 | 18 | ### Linux 19 | 20 | - 支持`cURL` 21 | 22 | 23 | ## 脚本配置 24 | 25 | ### 云函数 26 | 27 | #### 配置教程 28 | 29 | 1. 下载代码zip包:[main.zip](https://github.com/LittleJake/office-e5-refresh-SCF/archive/refs/heads/main.zip) 30 | 31 | 2. 解压`main.zip`,找到`token.txt`,填入refresh_token 32 | 33 | 3. 
获取refresh_token参考:[获取微软Office 365应用APPID、secret、access_token、refresh_token等](https://blog.littlejake.net/archives/481/),视频教程:[AutoApi教程](https://www.bilibili.com/video/av95688306/),打开权限: 34 | 35 | ``` 36 | #先注册azure应用,确保应用有以下权限: 37 | #files: Files.Read.All、Files.ReadWrite.All、Sites.Read.All、Sites.ReadWrite.All 38 | #user: User.Read.All、User.ReadWrite.All、Directory.Read.All、Directory.ReadWrite.All 39 | #mail: Mail.Read、Mail.ReadWrite、MailboxSettings.Read、MailboxSettings.ReadWrite 40 | #注册后一定要再点代表xxx授予管理员同意,否则outlook api无法调用 41 | ``` 42 | 43 | 4. 打开云函数页面:[https://console.cloud.tencent.com/scf/list-create](https://console.cloud.tencent.com/scf/list-create) ,新建函数。 44 | > **为了提高访问稳定性,地域选择中国香港** 45 | 46 | ![模板1](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/20210828210618.png) 47 | 48 | 打开高级配置: 49 | 50 | ![高级配置](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/20210828210929.png) 51 | 52 | 打开触发配置: 53 | 54 | ![模板3](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/20210828211214.png) 55 | 56 | #### RedisLab相关参数 57 | 58 | * RedisLab服务器信息(域名和端口) 59 | ![RedisLab](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/202111300019368.png) 60 | * RedisLab服务器信息(连接密码) 61 | ![RedisLab](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/202111300021201.png) 62 | 63 | 64 | 65 | #### 云函数运行截图 66 | 67 | ![运行](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/20210828211457.png) 68 | 69 | 70 | ### Linux主机 71 | 72 | #### 使用shell脚本运行 73 | 74 | > 可以加入到crontab定时执行。 75 | 76 | ```bash 77 | git clone https://github.com/LittleJake/office-e5-refresh-SCF/ 78 | cd office-e5-refresh-SCF 79 | bash e5_refresh.sh 80 | ``` 81 | 82 | ![shell配置](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/202205262243679.png) 83 | 84 | ##### shell脚本运行截图 85 | 86 | ![运行](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/20210928221336.png) 87 | 88 | #### 使用青龙面板 89 | 90 | > 
拉取后会自动定时执行:每日 12:15 91 | 92 | ```bash 93 | docker exec -it qinglong ql raw https://raw.fastgit.org/LittleJake/office-e5-refresh-SCF/main/e5_refresh.sh 94 | ``` 95 | 96 | ##### 配置 97 | 98 | 登录到青龙面板 - 脚本管理,找到对应脚本编辑,填入信息后保存。 99 | 100 | ![青龙配置](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/202205262248082.png) 101 | 102 | ![配置随机延迟](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/202205262300305.png) 103 | 104 | ##### 青龙脚本运行截图 105 | 106 | ![运行](https://cdn.jsdelivr.net/gh/LittleJake/blog-static-files@imgs/imgs/202205262252116.png) 107 | 108 | ## 感谢 109 | 110 | @ZYong9908 111 | 112 | 113 | ## 开源协议 114 | 115 | [Apache2.0](LICENSE) 116 | 117 | 118 | ## PS 119 | 120 | [原作者说明](README.old.md) 121 | -------------------------------------------------------------------------------- /README.old.md: -------------------------------------------------------------------------------- 1 | # AutoApiSecret-加密版 2 | AutoApi系列:AutoApi、AutoApiSecret、AutoApiSR、AutoApiS 3 | 4 | # 置顶 # 5 | * 本项目是建立在[原教程](https://blog.432100.xyz/index.php/archives/50/)可以正确调用api的**假设**上的,核心是paran/黑幕大佬的py脚本。 6 | * 本项目只是提供一个自动、免费、无需额外设备的脚本运行方式,换句话说,**借用github的电脑/服务器来干活**。(因为原教程需要服务器/超长时间运转的设备,大部分人都不具备,本项目应运而生) 7 | * 本项目运行依赖**github action**服务,此功能github固有而**非私人提供**的服务器,且整个运行过程只涉及你与github。 8 | * 请务必先阅读理解[原教程](https://blog.432100.xyz/index.php/archives/50/)的**原理说明、设计理念**。 9 | * **不保证一定能续期!不保证一定能续期!不保证一定能续期**!或者说,**只是增大续订可能性**。过期前、后30天都可能续期!!! 
10 | * 若理解并接受上述说明,请接着操作;**若否,请点击浏览器右上角 X 。** 11 | 12 | ### 项目说明 ### 13 | * 利用github action实现**定时自动调用api**,保持E5开发活跃。 14 | * **免费,不需要额外设备/服务器**,部署完不用管啦。 15 | * 加密版,隐藏应用id+机密,保护账号安全。 16 | 17 | ### 特别说明/Thanks ### 18 | * 原教程博主-黑幕(酷安id-Paran):https://blog.432100.xyz/index.php/archives/50/ 19 | * 普通版地址:https://github.com/wangziyingwen/AutoApi 20 | * 加密版地址(推荐):https://github.com/wangziyingwen/AutoApiSecret 21 | * 模仿人为应用开发版(包含升级步骤):https://github.com/wangziyingwen/AutoApiSR 22 | * 超级版地址: https://github.com/wangziyingwen/AutoApiS 23 | * **常见错误及解决办法/更新日志**:https://github.com/wangziyingwen/Autoapi-test 24 | * 网页获取refresh_token小工具(不建议使用):https://github.com/wangziyingwen/GetAutoApiToken 25 | * 视频教程:(我操作很慢,自行倍速/快进) 26 | * 在线/下载地址:https://kino-onemanager.herokuapp.com/Video/AutoApi%E6%95%99%E7%A8%8B.mp4?preview 27 | * B站:https://www.bilibili.com/video/av95688306/ 28 | 29 | 30 | 31 | ### 区别 ### 32 | [普通版(弃用)](https://github.com/wangziyingwen/AutoApi):密钥暴露,不在乎的话可以使用 33 | 34 | [加密版(推荐)](https://github.com/wangziyingwen/AutoApiSecret):应用id机密加密隐藏,提高安全性 35 | 36 | [模仿人为应用开发版(半弃用)](https://github.com/wangziyingwen/AutoApiSR):顾名思义,加密版的升级版。由于超级版兼容模拟版的功能,此版本处于一种尴尬位置。(当然也可以正常使用) 37 | 38 | [超级版(不建议)](https://github.com/wangziyingwen/AutoApiS):进一步升级版,增加自定义参数、模式。按目前情况,微软续订要求很低,暂时不需要使用此项目。 39 | 40 | **以上推荐/不建议等只是个人意见,请自行选择版本,可同时使用**。 41 | 42 | -------------------------------------------------------------- 43 | 44 | ## 步骤 ## 45 | 46 | >:blush: :blush: :blush: :blush: :blush: :blush: 47 | 48 | >请注意!请注意!请注意! 49 | 50 | >*** **有错误/问题请看**: [常见错误及解决办法/更新日志](https://github.com/wangziyingwen/Autoapi-test) 51 | 52 | >*** 原教程/博客好像也坏了,看[视频教程](https://www.bilibili.com/video/av95688306/)吧,懒得补充,ORZ. 
(或者去网上搜一下,有一堆转载,关键词:github action e5 续期) 53 | 54 | >*** 注册应用时用到的[azure管理页面](https://portal.azure.com/),或者直接到[仪表板](https://aad.portal.azure.com/),找到注册应用选项 55 | 56 | >***【重定向URI】填写的内容:http://localhost:53682/ 57 | 58 | >*** rclone下载,请自行百度谷歌,官网好像是[rclone.org](https://rclone.org) 59 | 60 | >:anguished: :anguished: :anguished: :anguished: :anguished: :anguished: 61 | 62 | * 第一步,先大致浏览[原教程](https://blog.432100.xyz/index.php/archives/50/),了解如何获取应用id、机密、refresh_token 3样东西,以方便接下来的操作。 63 | 64 | * 第二步,登陆/新建github账号,回到本项目页面,点击右上角fork本项目的代码到你自己的账号,然后你账号下会出现一个一模一样的项目,接下来的操作均在你的这个项目下进行。(看不到图片/图裂请科学上网) 65 | 66 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/fork.png) 67 | 68 | * 根据[原教程](https://blog.432100.xyz/index.php/archives/50/)获取应用id、机密、refresh_token(自己复制保存,注意区分id机密,别弄混了) 69 | 70 | 然后在线编辑你项目里的1.txt,将整个refresh_token覆盖粘贴进去(里面是我的数据,先删掉或者覆盖掉)。(千万不要改1.py) 71 | 72 | > refresh_token位置如图下。复制refresh_token紧接着的双引号里的内容(红竖线框起来的),不要把双引号复制进去。复制进1.txt后,留意结尾不要留空格或者空行 73 | 74 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/token地方.png) 75 | 76 | * 第三步,依次点击上栏Setting > Secrets > Add a new secret,新建两个secret如图:CONFIG_ID、CONFIG_KEY。 77 | 78 | 内容分别如下: ( 把你的应用id改成你的应用id , 你的应用机密改成你的机密,单引号不要动 ) 79 | 80 | CONFIG_ID 81 | ```shell 82 | id=r'你的应用id' 83 | ``` 84 | CONFIG_KEY 85 | ```shell 86 | secret=r'你的应用机密' 87 | ``` 88 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/机密.png) 89 | 90 | 最终格式应该是类似这样的: 91 | 92 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/格式.png) 93 | 94 | * 第四步,进入你的个人设置页面(右上角头像 Settings,不是仓库里的 Settings),选择 Developer settings > Personal access tokens > Generate new token, 95 | 96 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/Settings.png) 97 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/token.png) 98 | 99 | 设置名字为GITHUB_TOKEN , 然后勾选 repo , admin:repo_hook , workflow 等选项,最后点击Generate token即可。 100 | 101 | 
![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/repo.png) 102 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/adminrepo.png) 103 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/workflow.png) 104 | 105 | * 第五步,点击右上角星星/star立马调用一次,再点击上面的Action就能看到每次的运行日志,看看运行状况 106 | 107 | (必需点进去Test Api看下,api有没有调用到位,有没有出错。外面的Auto Api打勾只能说明运行是正常的,我们还需要确认10个api调用成功了,就像图里的一样。如果少了几个api,要么是注册应用的时候赋予api权限没弄好;要么是没登录激活onedrive,登录激活一下) 108 | 109 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/日志.png) 110 | 111 | * 第六步,没出错的话,就搞定啦!!再看看下面的定时次数要不要修改,不打算改就忽略。 112 | 113 | **然后第二天回来确认下是否自动运行了(ation里是否多出来几个)**,是的话就不用管了,完结。 114 | 115 | 我设定的每6小时自动运行一次,每次调用3轮(点击右上角星星/star也可以立马调用一次),你们自行斟酌修改(我也不知道保持活跃要调用多少次、多久): 116 | 117 | * 定时自动启动修改地方:(在.github/workflow/AutoApiSecret.yml文件里,自行百度cron定时任务格式,最短每5分钟一次) 118 | 119 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/定时.png) 120 | 121 | * 每次轮数修改地方:(在1.py最后面) 122 | 123 | ![image](https://github.com/wangziyingwen/ImageHosting/blob/master/AutoApi/次数.png) 124 | 125 | ------------------------------------------------------------ 126 | ### 题外话 ### 127 | > Api调用 128 | 你们可以自己去graph浏览器看一下,学着自己修改要调用什么api(最重要的是调用outlook、onedrive) 129 | https://developer.microsoft.com/zh-CN/graph/graph-explorer/preview 130 | 131 | ### GithubAction介绍 ### 132 | 提供的虚拟环境: 133 | 134 | · 2-core CPU 135 | · 7 GB RAM 内存 136 | · 14 GB SSD 硬盘空间 137 | 138 | 使用限制: 139 | * 每个仓库只能同时支持20个 workflow 并行。 140 | * 每小时可以调用1000次 GitHub API 。 141 | * 每个 job 最多可以执行6个小时。 142 | * 免费版的用户最大支持20个 job 并发执行,macOS 最大只支持5个。 143 | * 私有仓库每月累计使用时间为2000分钟,超过后$ 0.008/分钟,公共仓库则无限制。 144 | 145 | (我们这里用的公共仓库,按理,你们可以设定无限循环调用,然后6小时启动一次,保证24小时全天候调用) 146 | 147 | ### 最后 ### 148 | 教程很直白了,应该都会弄吧! 
#!/bin/bash
# Keep a Microsoft Office E5 developer subscription active by periodically
# calling a set of Graph API endpoints. Suitable for cron, e.g.:
#   15 12 * * * /path/to/e5_refresh.sh
#
# Register an Azure AD application first and make sure it has these permissions:
#   files: Files.Read.All, Files.ReadWrite.All, Sites.Read.All, Sites.ReadWrite.All
#   user:  User.Read.All, User.ReadWrite.All, Directory.Read.All, Directory.ReadWrite.All
#   mail:  Mail.Read, Mail.ReadWrite, MailboxSettings.Read, MailboxSettings.ReadWrite
# After registering, click "Grant admin consent for ..." — otherwise the
# Outlook APIs cannot be called.
#
# Fill in CLIENT_ID, CLIENT_SECRET and REFRESH_TOKEN below.
# --- configuration start ---

CLIENT_ID='CLIENT_ID'
CLIENT_SECRET='CLIENT_SECRET'
REFRESH_TOKEN='REFRESH_TOKEN'

# --- configuration end ---

# Graph endpoints to hit, one per line; iterated via word splitting below,
# so none of them may contain whitespace.
URLS='https://graph.microsoft.com/v1.0/me/drive/root
https://graph.microsoft.com/v1.0/me/drive
https://graph.microsoft.com/v1.0/me/drives
https://graph.microsoft.com/v1.0/drive/root
https://graph.microsoft.com/v1.0/users
https://graph.microsoft.com/v1.0/me/messages
https://graph.microsoft.com/v1.0/me/mailFolders/inbox/messageRules
https://graph.microsoft.com/v1.0/me/mailFolders/Inbox/messages/delta
https://graph.microsoft.com/v1.0/me/drive/root/children
https://graph.microsoft.com/v1.0/me/mailFolders
https://graph.microsoft.com/v1.0/sites/root
https://graph.microsoft.com/v1.0/me/outlook/masterCategories'

# Path of this script itself; get_token rewrites the REFRESH_TOKEN line
# in place so the rotated token survives across runs.
SCRIPT_PATH="$0"

# Exchange the stored refresh token for a fresh access token at the
# Microsoft identity platform v2.0 token endpoint. On success, sets the
# globals $access_token and $refresh_token and persists the new refresh
# token back into this file. Exits the script with status 1 on failure.
function get_token(){
    resp=`curl -s -X POST \
        --data-urlencode grant_type=refresh_token \
        --data-urlencode "refresh_token=$REFRESH_TOKEN" \
        --data-urlencode "client_id=$CLIENT_ID" \
        --data-urlencode "client_secret=$CLIENT_SECRET" \
        --data-urlencode redirect_uri=http://localhost:53682/ \
        https://login.microsoftonline.com/common/oauth2/v2.0/token`

    # Quoted right-hand side of =~ is a literal substring match, which is
    # exactly what we want here. Only parse the error text when one exists.
    if [[ $resp =~ '"error"' ]]
    then
        error=${resp#*error\":\"}
        error=${error%%\"*}
        echo "获取access_token失败,错误信息:$error"
        exit 1
    fi

    # Crude JSON field extraction via parameter expansion: strip everything
    # up to and including '<field>":"', then keep text before the next quote.
    refresh_token=${resp#*refresh_token\":\"}
    refresh_token=${refresh_token%%\"*}
    access_token=${resp#*access_token\":\"}
    access_token=${access_token%%\"*}
    # Persist the rotated refresh token; path quoted so a script location
    # containing spaces does not break sed.
    sed -i "s/^REFRESH_TOKEN.*/REFRESH_TOKEN='${refresh_token}'/g" "$SCRIPT_PATH"
}

# Print a message prefixed with the current timestamp.
function print_log(){
    echo "["`date`"] $1"
}

print_log "任务开始"
t=`date +%s`
get_token

for url in $URLS
do
    print_log "任务$url"
    result=`curl --connect-timeout 5 -m 5 -s -H "Content-Type: application/json" \
        -H "Authorization: ${access_token}" "$url"`

    if [[ $result =~ '"error"' ]]
    then
        error=${result#*error\":\"}
        error=${error%%\"*}
        print_log "调用API失败,错误信息:$error"
    else
        print_log "调用成功"
    fi
    # Small pause between calls to avoid hammering the Graph API.
    sleep 5
done
print_log "任务结束,用时"$((`date +%s`-t))"秒"
notice and this permission notice shall be 13 | included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /redis-3.5.3.dist-info/METADATA: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: redis 3 | Version: 3.5.3 4 | Summary: Python client for Redis key-value store 5 | Home-page: https://github.com/andymccurdy/redis-py 6 | Author: Andy McCurdy 7 | Author-email: sedrik@gmail.com 8 | Maintainer: Andy McCurdy 9 | Maintainer-email: sedrik@gmail.com 10 | License: MIT 11 | Keywords: Redis,key-value store 12 | Platform: UNKNOWN 13 | Classifier: Development Status :: 5 - Production/Stable 14 | Classifier: Environment :: Console 15 | Classifier: Intended Audience :: Developers 16 | Classifier: License :: OSI Approved :: MIT License 17 | Classifier: Operating System :: OS Independent 18 | Classifier: Programming Language :: Python 19 | Classifier: Programming Language :: Python :: 2 20 | Classifier: Programming Language :: Python :: 2.7 21 | Classifier: Programming Language :: Python :: 3 22 | Classifier: Programming Language :: Python :: 3.5 23 | Classifier: Programming Language :: Python :: 3.6 24 | Classifier: Programming Language :: Python :: 3.7 25 | Classifier: Programming Language :: Python :: 3.8 26 | Classifier: Programming Language :: Python :: Implementation :: CPython 27 | Classifier: Programming Language :: 
Python :: Implementation :: PyPy 28 | Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* 29 | Provides-Extra: hiredis 30 | Requires-Dist: hiredis (>=0.1.3) ; extra == 'hiredis' 31 | 32 | redis-py 33 | ======== 34 | 35 | The Python interface to the Redis key-value store. 36 | 37 | .. image:: https://secure.travis-ci.org/andymccurdy/redis-py.svg?branch=master 38 | :target: https://travis-ci.org/andymccurdy/redis-py 39 | .. image:: https://readthedocs.org/projects/redis-py/badge/?version=stable&style=flat 40 | :target: https://redis-py.readthedocs.io/en/stable/ 41 | .. image:: https://badge.fury.io/py/redis.svg 42 | :target: https://pypi.org/project/redis/ 43 | .. image:: https://codecov.io/gh/andymccurdy/redis-py/branch/master/graph/badge.svg 44 | :target: https://codecov.io/gh/andymccurdy/redis-py 45 | 46 | 47 | Python 2 Compatibility Note 48 | --------------------------- 49 | 50 | redis-py 3.5.x will be the last version of redis-py that supports Python 2. 51 | The 3.5.x line will continue to get bug fixes and security patches that 52 | support Python 2 until August 1, 2020. redis-py 4.0 will be the next major 53 | version and will require Python 3.5+. 54 | 55 | 56 | Installation 57 | ------------ 58 | 59 | redis-py requires a running Redis server. See `Redis's quickstart 60 | `_ for installation instructions. 61 | 62 | redis-py can be installed using `pip` similar to other Python packages. Do not use `sudo` 63 | with `pip`. It is usually good to work in a 64 | `virtualenv `_ or 65 | `venv `_ to avoid conflicts with other package 66 | managers and Python projects. For a quick introduction see 67 | `Python Virtual Environments in Five Minutes `_. 68 | 69 | To install redis-py, simply: 70 | 71 | .. code-block:: bash 72 | 73 | $ pip install redis 74 | 75 | or from source: 76 | 77 | .. code-block:: bash 78 | 79 | $ python setup.py install 80 | 81 | 82 | Getting Started 83 | --------------- 84 | 85 | .. 
code-block:: pycon 86 | 87 | >>> import redis 88 | >>> r = redis.Redis(host='localhost', port=6379, db=0) 89 | >>> r.set('foo', 'bar') 90 | True 91 | >>> r.get('foo') 92 | b'bar' 93 | 94 | By default, all responses are returned as `bytes` in Python 3 and `str` in 95 | Python 2. The user is responsible for decoding to Python 3 strings or Python 2 96 | unicode objects. 97 | 98 | If **all** string responses from a client should be decoded, the user can 99 | specify `decode_responses=True` to `Redis.__init__`. In this case, any 100 | Redis command that returns a string type will be decoded with the `encoding` 101 | specified. 102 | 103 | 104 | Upgrading from redis-py 2.X to 3.0 105 | ---------------------------------- 106 | 107 | redis-py 3.0 introduces many new features but required a number of backwards 108 | incompatible changes to be made in the process. This section attempts to 109 | provide an upgrade path for users migrating from 2.X to 3.0. 110 | 111 | 112 | Python Version Support 113 | ^^^^^^^^^^^^^^^^^^^^^^ 114 | 115 | redis-py 3.0 supports Python 2.7 and Python 3.5+. 116 | 117 | 118 | Client Classes: Redis and StrictRedis 119 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 120 | 121 | redis-py 3.0 drops support for the legacy "Redis" client class. "StrictRedis" 122 | has been renamed to "Redis" and an alias named "StrictRedis" is provided so 123 | that users previously using "StrictRedis" can continue to run unchanged. 124 | 125 | The 2.X "Redis" class provided alternative implementations of a few commands. 126 | This confused users (rightfully so) and caused a number of support issues. To 127 | make things easier going forward, it was decided to drop support for these 128 | alternate implementations and instead focus on a single client class. 129 | 130 | 2.X users that are already using StrictRedis don't have to change the class 131 | name. StrictRedis will continue to work for the foreseeable future. 
132 | 133 | 2.X users that are using the Redis class will have to make changes if they 134 | use any of the following commands: 135 | 136 | * SETEX: The argument order has changed. The new order is (name, time, value). 137 | * LREM: The argument order has changed. The new order is (name, num, value). 138 | * TTL and PTTL: The return value is now always an int and matches the 139 | official Redis command (>0 indicates the timeout, -1 indicates that the key 140 | exists but that it has no expire time set, -2 indicates that the key does 141 | not exist) 142 | 143 | 144 | SSL Connections 145 | ^^^^^^^^^^^^^^^ 146 | 147 | redis-py 3.0 changes the default value of the `ssl_cert_reqs` option from 148 | `None` to `'required'`. See 149 | `Issue 1016 `_. This 150 | change enforces hostname validation when accepting a cert from a remote SSL 151 | terminator. If the terminator doesn't properly set the hostname on the cert 152 | this will cause redis-py 3.0 to raise a ConnectionError. 153 | 154 | This check can be disabled by setting `ssl_cert_reqs` to `None`. Note that 155 | doing so removes the security check. Do so at your own risk. 156 | 157 | It has been reported that SSL certs received from AWS ElastiCache do not have 158 | proper hostnames and turning off hostname verification is currently required. 159 | 160 | 161 | MSET, MSETNX and ZADD 162 | ^^^^^^^^^^^^^^^^^^^^^ 163 | 164 | These commands all accept a mapping of key/value pairs. In redis-py 2.X 165 | this mapping could be specified as ``*args`` or as ``**kwargs``. Both of these 166 | styles caused issues when Redis introduced optional flags to ZADD. Relying on 167 | ``*args`` caused issues with the optional argument order, especially in Python 168 | 2.7. Relying on ``**kwargs`` caused potential collision issues of user keys with 169 | the argument names in the method signature. 
170 | 171 | To resolve this, redis-py 3.0 has changed these three commands to all accept 172 | a single positional argument named mapping that is expected to be a dict. For 173 | MSET and MSETNX, the dict is a mapping of key-names -> values. For ZADD, the 174 | dict is a mapping of element-names -> score. 175 | 176 | MSET, MSETNX and ZADD now look like: 177 | 178 | .. code-block:: python 179 | 180 | def mset(self, mapping): 181 | def msetnx(self, mapping): 182 | def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False): 183 | 184 | All 2.X users that use these commands must modify their code to supply 185 | keys and values as a dict to these commands. 186 | 187 | 188 | ZINCRBY 189 | ^^^^^^^ 190 | 191 | redis-py 2.X accidentally modified the argument order of ZINCRBY, swapping the 192 | order of value and amount. ZINCRBY now looks like: 193 | 194 | .. code-block:: python 195 | 196 | def zincrby(self, name, amount, value): 197 | 198 | All 2.X users that rely on ZINCRBY must swap the order of amount and value 199 | for the command to continue to work as intended. 200 | 201 | 202 | Encoding of User Input 203 | ^^^^^^^^^^^^^^^^^^^^^^ 204 | 205 | redis-py 3.0 only accepts user data as bytes, strings or numbers (ints, longs 206 | and floats). Attempting to specify a key or a value as any other type will 207 | raise a DataError exception. 208 | 209 | redis-py 2.X attempted to coerce any type of input into a string. While 210 | occasionally convenient, this caused all sorts of hidden errors when users 211 | passed boolean values (which were coerced to 'True' or 'False'), a None 212 | value (which was coerced to 'None') or other values, such as user defined 213 | types. 214 | 215 | All 2.X users should make sure that the keys and values they pass into 216 | redis-py are either bytes, strings or numbers. 217 | 218 | 219 | Locks 220 | ^^^^^ 221 | 222 | redis-py 3.0 drops support for the pipeline-based Lock and now only supports 223 | the Lua-based lock. 
In doing so, LuaLock has been renamed to Lock. This also 224 | means that redis-py Lock objects require Redis server 2.6 or greater. 225 | 226 | 2.X users that were explicitly referring to "LuaLock" will have to now refer 227 | to "Lock" instead. 228 | 229 | 230 | Locks as Context Managers 231 | ^^^^^^^^^^^^^^^^^^^^^^^^^ 232 | 233 | redis-py 3.0 now raises a LockError when using a lock as a context manager and 234 | the lock cannot be acquired within the specified timeout. This is more of a 235 | bug fix than a backwards incompatible change. However, given an error is now 236 | raised where none was before, this might alarm some users. 237 | 238 | 2.X users should make sure they're wrapping their lock code in a try/catch 239 | like this: 240 | 241 | .. code-block:: python 242 | 243 | try: 244 | with r.lock('my-lock-key', blocking_timeout=5) as lock: 245 | # code you want executed only after the lock has been acquired 246 | except LockError: 247 | # the lock wasn't acquired 248 | 249 | 250 | API Reference 251 | ------------- 252 | 253 | The `official Redis command documentation `_ does a 254 | great job of explaining each command in detail. redis-py attempts to adhere 255 | to the official command syntax. There are a few exceptions: 256 | 257 | * **SELECT**: Not implemented. See the explanation in the Thread Safety section 258 | below. 259 | * **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py 260 | uses 'delete' instead. 261 | * **MULTI/EXEC**: These are implemented as part of the Pipeline class. The 262 | pipeline is wrapped with the MULTI and EXEC statements by default when it 263 | is executed, which can be disabled by specifying transaction=False. 264 | See more about Pipelines below. 265 | * **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate 266 | class as it places the underlying connection in a state where it can't 267 | execute non-pubsub commands. 
Calling the pubsub method from the Redis client 268 | will return a PubSub instance where you can subscribe to channels and listen 269 | for messages. You can only call PUBLISH from the Redis client (see 270 | `this comment on issue #151 271 | `_ 272 | for details). 273 | * **SCAN/SSCAN/HSCAN/ZSCAN**: The \*SCAN commands are implemented as they 274 | exist in the Redis documentation. In addition, each command has an equivalent 275 | iterator method. These are purely for convenience so the user doesn't have 276 | to keep track of the cursor while iterating. Use the 277 | scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior. 278 | 279 | 280 | More Detail 281 | ----------- 282 | 283 | Connection Pools 284 | ^^^^^^^^^^^^^^^^ 285 | 286 | Behind the scenes, redis-py uses a connection pool to manage connections to 287 | a Redis server. By default, each Redis instance you create will in turn create 288 | its own connection pool. You can override this behavior and use an existing 289 | connection pool by passing an already created connection pool instance to the 290 | connection_pool argument of the Redis class. You may choose to do this in order 291 | to implement client side sharding or have fine-grain control of how 292 | connections are managed. 293 | 294 | .. code-block:: pycon 295 | 296 | >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) 297 | >>> r = redis.Redis(connection_pool=pool) 298 | 299 | Connections 300 | ^^^^^^^^^^^ 301 | 302 | ConnectionPools manage a set of Connection instances. redis-py ships with two 303 | types of Connections. The default, Connection, is a normal TCP socket based 304 | connection. The UnixDomainSocketConnection allows for clients running on the 305 | same device as the server to connect via a unix domain socket. To use a 306 | UnixDomainSocketConnection connection, simply pass the unix_socket_path 307 | argument, which is a string to the unix domain socket file. 
Additionally, make 308 | sure the unixsocket parameter is defined in your redis.conf file. It's 309 | commented out by default. 310 | 311 | .. code-block:: pycon 312 | 313 | >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock') 314 | 315 | You can create your own Connection subclasses as well. This may be useful if 316 | you want to control the socket behavior within an async framework. To 317 | instantiate a client class using your own connection, you need to create 318 | a connection pool, passing your class to the connection_class argument. 319 | Other keyword parameters you pass to the pool will be passed to the class 320 | specified during initialization. 321 | 322 | .. code-block:: pycon 323 | 324 | >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass, 325 | your_arg='...', ...) 326 | 327 | Connections maintain an open socket to the Redis server. Sometimes these 328 | sockets are interrupted or disconnected for a variety of reasons. For example, 329 | network appliances, load balancers and other services that sit between clients 330 | and servers are often configured to kill connections that remain idle for a 331 | given threshold. 332 | 333 | When a connection becomes disconnected, the next command issued on that 334 | connection will fail and redis-py will raise a ConnectionError to the caller. 335 | This allows each application that uses redis-py to handle errors in a way 336 | that's fitting for that specific application. However, constant error 337 | handling can be verbose and cumbersome, especially when socket disconnections 338 | happen frequently in many production environments. 339 | 340 | To combat this, redis-py can issue regular health checks to assess the 341 | liveliness of a connection just before issuing a command. Users can pass 342 | ``health_check_interval=N`` to the Redis or ConnectionPool classes or 343 | as a query argument within a Redis URL. The value of ``health_check_interval`` 344 | must be an integer. 
A value of ``0``, the default, disables health checks. 345 | Any positive integer will enable health checks. Health checks are performed 346 | just before a command is executed if the underlying connection has been idle 347 | for more than ``health_check_interval`` seconds. For example, 348 | ``health_check_interval=30`` will ensure that a health check is run on any 349 | connection that has been idle for 30 or more seconds just before a command 350 | is executed on that connection. 351 | 352 | If your application is running in an environment that disconnects idle 353 | connections after 30 seconds you should set the ``health_check_interval`` 354 | option to a value less than 30. 355 | 356 | This option also works on any PubSub connection that is created from a 357 | client with ``health_check_interval`` enabled. PubSub users need to ensure 358 | that ``get_message()`` or ``listen()`` are called more frequently than 359 | ``health_check_interval`` seconds. It is assumed that most workloads already 360 | do this. 361 | 362 | If your PubSub use case doesn't call ``get_message()`` or ``listen()`` 363 | frequently, you should call ``pubsub.check_health()`` explicitly on a 364 | regular basis. 365 | 366 | Parsers 367 | ^^^^^^^ 368 | 369 | Parser classes provide a way to control how responses from the Redis server 370 | are parsed. redis-py ships with two parser classes, the PythonParser and the 371 | HiredisParser. By default, redis-py will attempt to use the HiredisParser if 372 | you have the hiredis module installed and will fallback to the PythonParser 373 | otherwise. 374 | 375 | Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was 376 | kind enough to create Python bindings. Using Hiredis can provide up to a 377 | 10x speed improvement in parsing responses from the Redis server. The 378 | performance increase is most noticeable when retrieving many pieces of data, 379 | such as from LRANGE or SMEMBERS operations. 
380 | 381 | Hiredis is available on PyPI, and can be installed via pip just like redis-py. 382 | 383 | .. code-block:: bash 384 | 385 | $ pip install hiredis 386 | 387 | Response Callbacks 388 | ^^^^^^^^^^^^^^^^^^ 389 | 390 | The client class uses a set of callbacks to cast Redis responses to the 391 | appropriate Python type. There are a number of these callbacks defined on 392 | the Redis client class in a dictionary called RESPONSE_CALLBACKS. 393 | 394 | Custom callbacks can be added on a per-instance basis using the 395 | set_response_callback method. This method accepts two arguments: a command 396 | name and the callback. Callbacks added in this manner are only valid on the 397 | instance the callback is added to. If you want to define or override a callback 398 | globally, you should make a subclass of the Redis client and add your callback 399 | to its RESPONSE_CALLBACKS class dictionary. 400 | 401 | Response callbacks take at least one parameter: the response from the Redis 402 | server. Keyword arguments may also be accepted in order to further control 403 | how to interpret the response. These keyword arguments are specified during the 404 | command's call to execute_command. The ZRANGE implementation demonstrates the 405 | use of response callback keyword arguments with its "withscores" argument. 406 | 407 | Thread Safety 408 | ^^^^^^^^^^^^^ 409 | 410 | Redis client instances can safely be shared between threads. Internally, 411 | connection instances are only retrieved from the connection pool during 412 | command execution, and returned to the pool directly after. Command execution 413 | never modifies state on the client instance. 414 | 415 | However, there is one caveat: the Redis SELECT command. The SELECT command 416 | allows you to switch the database currently in use by the connection. That 417 | database remains selected until another is selected or until the connection is 418 | closed. 
This creates an issue in that connections could be returned to the pool 419 | that are connected to a different database. 420 | 421 | As a result, redis-py does not implement the SELECT command on client 422 | instances. If you use multiple Redis databases within the same application, you 423 | should create a separate client instance (and possibly a separate connection 424 | pool) for each database. 425 | 426 | It is not safe to pass PubSub or Pipeline objects between threads. 427 | 428 | Pipelines 429 | ^^^^^^^^^ 430 | 431 | Pipelines are a subclass of the base Redis class that provide support for 432 | buffering multiple commands to the server in a single request. They can be used 433 | to dramatically increase the performance of groups of commands by reducing the 434 | number of back-and-forth TCP packets between the client and server. 435 | 436 | Pipelines are quite simple to use: 437 | 438 | .. code-block:: pycon 439 | 440 | >>> r = redis.Redis(...) 441 | >>> r.set('bing', 'baz') 442 | >>> # Use the pipeline() method to create a pipeline instance 443 | >>> pipe = r.pipeline() 444 | >>> # The following SET commands are buffered 445 | >>> pipe.set('foo', 'bar') 446 | >>> pipe.get('bing') 447 | >>> # the EXECUTE call sends all buffered commands to the server, returning 448 | >>> # a list of responses, one for each command. 449 | >>> pipe.execute() 450 | [True, b'baz'] 451 | 452 | For ease of use, all commands being buffered into the pipeline return the 453 | pipeline object itself. Therefore calls can be chained like: 454 | 455 | .. code-block:: pycon 456 | 457 | >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute() 458 | [True, True, 6] 459 | 460 | In addition, pipelines can also ensure the buffered commands are executed 461 | atomically as a group. This happens by default. If you want to disable the 462 | atomic nature of a pipeline but still want to buffer commands, you can turn 463 | off transactions. 464 | 465 | .. 
code-block:: pycon 466 | 467 | >>> pipe = r.pipeline(transaction=False) 468 | 469 | A common issue occurs when requiring atomic transactions but needing to 470 | retrieve values in Redis prior for use within the transaction. For instance, 471 | let's assume that the INCR command didn't exist and we need to build an atomic 472 | version of INCR in Python. 473 | 474 | The completely naive implementation could GET the value, increment it in 475 | Python, and SET the new value back. However, this is not atomic because 476 | multiple clients could be doing this at the same time, each getting the same 477 | value from GET. 478 | 479 | Enter the WATCH command. WATCH provides the ability to monitor one or more keys 480 | prior to starting a transaction. If any of those keys change prior the 481 | execution of that transaction, the entire transaction will be canceled and a 482 | WatchError will be raised. To implement our own client-side INCR command, we 483 | could do something like this: 484 | 485 | .. code-block:: pycon 486 | 487 | >>> with r.pipeline() as pipe: 488 | ... while True: 489 | ... try: 490 | ... # put a WATCH on the key that holds our sequence value 491 | ... pipe.watch('OUR-SEQUENCE-KEY') 492 | ... # after WATCHing, the pipeline is put into immediate execution 493 | ... # mode until we tell it to start buffering commands again. 494 | ... # this allows us to get the current value of our sequence 495 | ... current_value = pipe.get('OUR-SEQUENCE-KEY') 496 | ... next_value = int(current_value) + 1 497 | ... # now we can put the pipeline back into buffered mode with MULTI 498 | ... pipe.multi() 499 | ... pipe.set('OUR-SEQUENCE-KEY', next_value) 500 | ... # and finally, execute the pipeline (the set command) 501 | ... pipe.execute() 502 | ... # if a WatchError wasn't raised during execution, everything 503 | ... # we just did happened atomically. 504 | ... break 505 | ... except WatchError: 506 | ... 
# another client must have changed 'OUR-SEQUENCE-KEY' between 507 | ... # the time we started WATCHing it and the pipeline's execution. 508 | ... # our best bet is to just retry. 509 | ... continue 510 | 511 | Note that, because the Pipeline must bind to a single connection for the 512 | duration of a WATCH, care must be taken to ensure that the connection is 513 | returned to the connection pool by calling the reset() method. If the 514 | Pipeline is used as a context manager (as in the example above) reset() 515 | will be called automatically. Of course you can do this the manual way by 516 | explicitly calling reset(): 517 | 518 | .. code-block:: pycon 519 | 520 | >>> pipe = r.pipeline() 521 | >>> while True: 522 | ... try: 523 | ... pipe.watch('OUR-SEQUENCE-KEY') 524 | ... ... 525 | ... pipe.execute() 526 | ... break 527 | ... except WatchError: 528 | ... continue 529 | ... finally: 530 | ... pipe.reset() 531 | 532 | A convenience method named "transaction" exists for handling all the 533 | boilerplate of handling and retrying watch errors. It takes a callable that 534 | should expect a single parameter, a pipeline object, and any number of keys to 535 | be WATCHed. Our client-side INCR command above can be written like this, 536 | which is much easier to read: 537 | 538 | .. code-block:: pycon 539 | 540 | >>> def client_side_incr(pipe): 541 | ... current_value = pipe.get('OUR-SEQUENCE-KEY') 542 | ... next_value = int(current_value) + 1 543 | ... pipe.multi() 544 | ... pipe.set('OUR-SEQUENCE-KEY', next_value) 545 | >>> 546 | >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') 547 | [True] 548 | 549 | Be sure to call `pipe.multi()` in the callable passed to `Redis.transaction` 550 | prior to any write commands. 551 | 552 | Publish / Subscribe 553 | ^^^^^^^^^^^^^^^^^^^ 554 | 555 | redis-py includes a `PubSub` object that subscribes to channels and listens 556 | for new messages. Creating a `PubSub` object is easy. 557 | 558 | .. 
code-block:: pycon 559 | 560 | >>> r = redis.Redis(...) 561 | >>> p = r.pubsub() 562 | 563 | Once a `PubSub` instance is created, channels and patterns can be subscribed 564 | to. 565 | 566 | .. code-block:: pycon 567 | 568 | >>> p.subscribe('my-first-channel', 'my-second-channel', ...) 569 | >>> p.psubscribe('my-*', ...) 570 | 571 | The `PubSub` instance is now subscribed to those channels/patterns. The 572 | subscription confirmations can be seen by reading messages from the `PubSub` 573 | instance. 574 | 575 | .. code-block:: pycon 576 | 577 | >>> p.get_message() 578 | {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1} 579 | >>> p.get_message() 580 | {'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2} 581 | >>> p.get_message() 582 | {'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3} 583 | 584 | Every message read from a `PubSub` instance will be a dictionary with the 585 | following keys. 586 | 587 | * **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe', 588 | 'punsubscribe', 'message', 'pmessage' 589 | * **channel**: The channel [un]subscribed to or the channel a message was 590 | published to 591 | * **pattern**: The pattern that matched a published message's channel. Will be 592 | `None` in all cases except for 'pmessage' types. 593 | * **data**: The message data. With [un]subscribe messages, this value will be 594 | the number of channels and patterns the connection is currently subscribed 595 | to. With [p]message messages, this value will be the actual published 596 | message. 597 | 598 | Let's send a message now. 599 | 600 | .. code-block:: pycon 601 | 602 | # the publish method returns the number of matching channel and pattern 603 | # subscriptions. 
'my-first-channel' matches both the 'my-first-channel' 604 | # subscription and the 'my-*' pattern subscription, so this message will 605 | # be delivered to 2 channels/patterns 606 | >>> r.publish('my-first-channel', 'some data') 607 | 2 608 | >>> p.get_message() 609 | {'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'} 610 | >>> p.get_message() 611 | {'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'} 612 | 613 | Unsubscribing works just like subscribing. If no arguments are passed to 614 | [p]unsubscribe, all channels or patterns will be unsubscribed from. 615 | 616 | .. code-block:: pycon 617 | 618 | >>> p.unsubscribe() 619 | >>> p.punsubscribe('my-*') 620 | >>> p.get_message() 621 | {'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'} 622 | >>> p.get_message() 623 | {'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'} 624 | >>> p.get_message() 625 | {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'} 626 | 627 | redis-py also allows you to register callback functions to handle published 628 | messages. Message handlers take a single argument, the message, which is a 629 | dictionary just like the examples above. To subscribe to a channel or pattern 630 | with a message handler, pass the channel or pattern name as a keyword argument 631 | with its value being the callback function. 632 | 633 | When a message is read on a channel or pattern with a message handler, the 634 | message dictionary is created and passed to the message handler. In this case, 635 | a `None` value is returned from get_message() since the message was already 636 | handled. 637 | 638 | .. code-block:: pycon 639 | 640 | >>> def my_handler(message): 641 | ... 
print('MY HANDLER: ', message['data']) 642 | >>> p.subscribe(**{'my-channel': my_handler}) 643 | # read the subscribe confirmation message 644 | >>> p.get_message() 645 | {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1} 646 | >>> r.publish('my-channel', 'awesome data') 647 | 1 648 | # for the message handler to work, we need to tell the instance to read data. 649 | # this can be done in several ways (read more below). we'll just use 650 | # the familiar get_message() function for now 651 | >>> message = p.get_message() 652 | MY HANDLER: awesome data 653 | # note here that the my_handler callback printed the string above. 654 | # `message` is None because the message was handled by our handler. 655 | >>> print(message) 656 | None 657 | 658 | If your application is not interested in the (sometimes noisy) 659 | subscribe/unsubscribe confirmation messages, you can ignore them by passing 660 | `ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all 661 | subscribe/unsubscribe messages to be read, but they won't bubble up to your 662 | application. 663 | 664 | .. code-block:: pycon 665 | 666 | >>> p = r.pubsub(ignore_subscribe_messages=True) 667 | >>> p.subscribe('my-channel') 668 | >>> p.get_message() # hides the subscribe message and returns None 669 | >>> r.publish('my-channel', 'my data') 670 | 1 671 | >>> p.get_message() 672 | {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'} 673 | 674 | There are three different strategies for reading messages. 675 | 676 | The examples above have been using `pubsub.get_message()`. Behind the scenes, 677 | `get_message()` uses the system's 'select' module to quickly poll the 678 | connection's socket. If there's data available to be read, `get_message()` will 679 | read it, format the message and return it or pass it to a message handler. If 680 | there's no data to be read, `get_message()` will immediately return None. 
This 681 | makes it trivial to integrate into an existing event loop inside your 682 | application. 683 | 684 | .. code-block:: pycon 685 | 686 | >>> while True: 687 | >>> message = p.get_message() 688 | >>> if message: 689 | >>> # do something with the message 690 | >>> time.sleep(0.001) # be nice to the system :) 691 | 692 | Older versions of redis-py only read messages with `pubsub.listen()`. listen() 693 | is a generator that blocks until a message is available. If your application 694 | doesn't need to do anything else but receive and act on messages received from 695 | redis, listen() is an easy way to get up and running. 696 | 697 | .. code-block:: pycon 698 | 699 | >>> for message in p.listen(): 700 | ... # do something with the message 701 | 702 | The third option runs an event loop in a separate thread. 703 | `pubsub.run_in_thread()` creates a new thread and starts the event loop. The 704 | thread object is returned to the caller of `run_in_thread()`. The caller can 705 | use the `thread.stop()` method to shut down the event loop and thread. Behind 706 | the scenes, this is simply a wrapper around `get_message()` that runs in a 707 | separate thread, essentially creating a tiny non-blocking event loop for you. 708 | `run_in_thread()` takes an optional `sleep_time` argument. If specified, the 709 | event loop will call `time.sleep()` with the value in each iteration of the 710 | loop. 711 | 712 | Note: Since we're running in a separate thread, there's no way to handle 713 | messages that aren't automatically handled with registered message handlers. 714 | Therefore, redis-py prevents you from calling `run_in_thread()` if you're 715 | subscribed to patterns or channels that don't have message handlers attached. 716 | 717 | .. 
code-block:: pycon 718 | 719 | >>> p.subscribe(**{'my-channel': my_handler}) 720 | >>> thread = p.run_in_thread(sleep_time=0.001) 721 | # the event loop is now running in the background processing messages 722 | # when it's time to shut it down... 723 | >>> thread.stop() 724 | 725 | A PubSub object adheres to the same encoding semantics as the client instance 726 | it was created from. Any channel or pattern that's unicode will be encoded 727 | using the `charset` specified on the client before being sent to Redis. If the 728 | client's `decode_responses` flag is set to False (the default), the 729 | 'channel', 'pattern' and 'data' values in message dictionaries will be byte 730 | strings (str on Python 2, bytes on Python 3). If the client's 731 | `decode_responses` is True, then the 'channel', 'pattern' and 'data' values 732 | will be automatically decoded to unicode strings using the client's `charset`. 733 | 734 | PubSub objects remember what channels and patterns they are subscribed to. In 735 | the event of a disconnection such as a network error or timeout, the 736 | PubSub object will re-subscribe to all prior channels and patterns when 737 | reconnecting. Messages that were published while the client was disconnected 738 | cannot be delivered. When you're finished with a PubSub object, call its 739 | `.close()` method to shut down the connection. 740 | 741 | .. code-block:: pycon 742 | 743 | >>> p = r.pubsub() 744 | >>> ... 745 | >>> p.close() 746 | 747 | 748 | The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also 749 | supported: 750 | 751 | .. code-block:: pycon 752 | 753 | >>> r.pubsub_channels() 754 | [b'foo', b'bar'] 755 | >>> r.pubsub_numsub('foo', 'bar') 756 | [(b'foo', 9001), (b'bar', 42)] 757 | >>> r.pubsub_numsub('baz') 758 | [(b'baz', 0)] 759 | >>> r.pubsub_numpat() 760 | 1204 761 | 762 | Monitor 763 | ^^^^^^^ 764 | redis-py includes a `Monitor` object that streams every command processed 765 | by the Redis server. 
Use `listen()` on the `Monitor` object to block 766 | until a command is received. 767 | 768 | .. code-block:: pycon 769 | 770 | >>> r = redis.Redis(...) 771 | >>> with r.monitor() as m: 772 | >>> for command in m.listen(): 773 | >>> print(command) 774 | 775 | Lua Scripting 776 | ^^^^^^^^^^^^^ 777 | 778 | redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are 779 | a number of edge cases that make these commands tedious to use in real world 780 | scenarios. Therefore, redis-py exposes a Script object that makes scripting 781 | much easier to use. 782 | 783 | To create a Script instance, use the `register_script` function on a client 784 | instance passing the Lua code as the first argument. `register_script` returns 785 | a Script instance that you can use throughout your code. 786 | 787 | The following trivial Lua script accepts two parameters: the name of a key and 788 | a multiplier value. The script fetches the value stored in the key, multiplies 789 | it with the multiplier value and returns the result. 790 | 791 | .. code-block:: pycon 792 | 793 | >>> r = redis.Redis() 794 | >>> lua = """ 795 | ... local value = redis.call('GET', KEYS[1]) 796 | ... value = tonumber(value) 797 | ... return value * ARGV[1]""" 798 | >>> multiply = r.register_script(lua) 799 | 800 | `multiply` is now a Script instance that is invoked by calling it like a 801 | function. Script instances accept the following optional arguments: 802 | 803 | * **keys**: A list of key names that the script will access. This becomes the 804 | KEYS list in Lua. 805 | * **args**: A list of argument values. This becomes the ARGV list in Lua. 806 | * **client**: A redis-py Client or Pipeline instance that will invoke the 807 | script. If client isn't specified, the client that initially 808 | created the Script instance (the one that `register_script` was 809 | invoked from) will be used. 810 | 811 | Continuing the example from above: 812 | 813 | .. 
code-block:: pycon 814 | 815 | >>> r.set('foo', 2) 816 | >>> multiply(keys=['foo'], args=[5]) 817 | 10 818 | 819 | The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is 820 | passed to the script along with the multiplier value of 5. Lua executes the 821 | script and returns the result, 10. 822 | 823 | Script instances can be executed using a different client instance, even one 824 | that points to a completely different Redis server. 825 | 826 | .. code-block:: pycon 827 | 828 | >>> r2 = redis.Redis('redis2.example.com') 829 | >>> r2.set('foo', 3) 830 | >>> multiply(keys=['foo'], args=[5], client=r2) 831 | 15 832 | 833 | The Script object ensures that the Lua script is loaded into Redis's script 834 | cache. In the event of a NOSCRIPT error, it will load the script and retry 835 | executing it. 836 | 837 | Script objects can also be used in pipelines. The pipeline instance should be 838 | passed as the client argument when calling the script. Care is taken to ensure 839 | that the script is registered in Redis's script cache just prior to pipeline 840 | execution. 841 | 842 | .. code-block:: pycon 843 | 844 | >>> pipe = r.pipeline() 845 | >>> pipe.set('foo', 5) 846 | >>> multiply(keys=['foo'], args=[5], client=pipe) 847 | >>> pipe.execute() 848 | [True, 25] 849 | 850 | Sentinel support 851 | ^^^^^^^^^^^^^^^^ 852 | 853 | redis-py can be used together with `Redis Sentinel `_ 854 | to discover Redis nodes. You need to have at least one Sentinel daemon running 855 | in order to use redis-py's Sentinel support. 856 | 857 | Connecting redis-py to the Sentinel instance(s) is easy. You can use a 858 | Sentinel connection to discover the master and slaves network addresses: 859 | 860 | .. 
code-block:: pycon 861 | 862 | >>> from redis.sentinel import Sentinel 863 | >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) 864 | >>> sentinel.discover_master('mymaster') 865 | ('127.0.0.1', 6379) 866 | >>> sentinel.discover_slaves('mymaster') 867 | [('127.0.0.1', 6380)] 868 | 869 | You can also create Redis client connections from a Sentinel instance. You can 870 | connect to either the master (for write operations) or a slave (for read-only 871 | operations). 872 | 873 | .. code-block:: pycon 874 | 875 | >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) 876 | >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) 877 | >>> master.set('foo', 'bar') 878 | >>> slave.get('foo') 879 | b'bar' 880 | 881 | The master and slave objects are normal Redis instances with their 882 | connection pool bound to the Sentinel instance. When a Sentinel backed client 883 | attempts to establish a connection, it first queries the Sentinel servers to 884 | determine an appropriate host to connect to. If no server is found, 885 | a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are 886 | subclasses of ConnectionError. 887 | 888 | When trying to connect to a slave client, the Sentinel connection pool will 889 | iterate over the list of slaves until it finds one that can be connected to. 890 | If no slaves can be connected to, a connection will be established with the 891 | master. 892 | 893 | See `Guidelines for Redis clients with support for Redis Sentinel 894 | `_ to learn more about Redis Sentinel. 895 | 896 | Scan Iterators 897 | ^^^^^^^^^^^^^^ 898 | 899 | The \*SCAN commands introduced in Redis 2.8 can be cumbersome to use. While 900 | these commands are fully supported, redis-py also exposes the following methods 901 | that return Python iterators for convenience: `scan_iter`, `hscan_iter`, 902 | `sscan_iter` and `zscan_iter`. 903 | 904 | .. 
code-block:: pycon 905 | 906 | >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')): 907 | ... r.set(key, value) 908 | >>> for key in r.scan_iter(): 909 | ... print(key, r.get(key)) 910 | A 1 911 | B 2 912 | C 3 913 | 914 | Author 915 | ^^^^^^ 916 | 917 | redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). 918 | It can be found here: https://github.com/andymccurdy/redis-py 919 | 920 | Special thanks to: 921 | 922 | * Ludovico Magnocavallo, author of the original Python Redis client, from 923 | which some of the socket code is still used. 924 | * Alexander Solovyov for ideas on the generic response callback system. 925 | * Paul Hubbard for initial packaging support. 926 | 927 | 928 | -------------------------------------------------------------------------------- /redis-3.5.3.dist-info/RECORD: -------------------------------------------------------------------------------- 1 | redis-3.5.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 2 | redis-3.5.3.dist-info/LICENSE,sha256=eQFI2MEvijiycHp0viNDMWutEmmV_1SAGhgbiyMboSQ,1074 3 | redis-3.5.3.dist-info/METADATA,sha256=55ufgygbtE8nqMl0UVKD90EZ01zKyemMdxFFOwpubC4,36674 4 | redis-3.5.3.dist-info/RECORD,, 5 | redis-3.5.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 6 | redis-3.5.3.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 7 | redis-3.5.3.dist-info/top_level.txt,sha256=OMAefszlde6ZoOtlM35AWzpRIrwtcqAMHGlRit-w2-4,6 8 | redis/__init__.py,sha256=xHeEs2e5uiAwaV0oxJ_DgdOVr8U8Y5OlSCJ9rRbiLkE,1209 9 | redis/__init__.pyc,, 10 | redis/_compat.py,sha256=opM78DdCy4D86p9cpN_O81yNgjVDUwOJGLtMS4LL9-0,5698 11 | redis/_compat.pyc,, 12 | redis/client.py,sha256=hs1gxHDN9AcpPy1Cpf6yHq4ICtDYofW9XijXpSDeSG0,159611 13 | redis/client.pyc,, 14 | redis/connection.py,sha256=B5n2unyz5YhSzhsyK9Wa_AXZjT6guxnqHdZcfbe3jqs,55954 15 | redis/connection.pyc,, 16 | redis/exceptions.py,sha256=phjjyJjnebrM82XDzfjtreGnkWIoSNfDZiyoWs3_zQE,1341 17 | 
def int_or_str(value):
    """Coerce ``value`` to an int when possible; otherwise return it as-is.

    Used to split a version string such as '3.5.3' into numeric
    components while leaving non-numeric parts (e.g. 'rc1') untouched.
    """
    try:
        converted = int(value)
    except ValueError:
        return value
    return converted


__version__ = '3.5.3'
# e.g. (3, 5, 3) -- non-numeric components stay as strings
VERSION = tuple(map(int_or_str, __version__.split('.')))
# For Python older than 3.5, retry EINTR.
if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and
                               sys.version_info[1] < 5):
    # Adapted from https://bugs.python.org/review/23863/patch/14532/54418
    import time

    def _retryable_call(s, func, *args, **kwargs):
        """Run ``func``, retrying EINTR until the socket timeout elapses.

        Some modules (SSL) use the _fileobject wrapper directly and
        implement a smaller portion of the socket interface; such objects
        have no gettimeout(), in which case no deadline is enforced.
        """
        timeout, deadline = None, 0.0
        attempted = False
        try:
            timeout = s.gettimeout()
        except AttributeError:
            pass

        if timeout:
            deadline = time.time() + timeout

        try:
            while True:
                if attempted and timeout:
                    now = time.time()
                    if now >= deadline:
                        raise socket.error(errno.EWOULDBLOCK, "timed out")
                    # shrink the socket timeout to the time remaining
                    s.settimeout(deadline - now)
                try:
                    attempted = True
                    return func(*args, **kwargs)
                except socket.error as e:
                    if e.args[0] != errno.EINTR:
                        raise
                    # interrupted by a signal -- loop and retry the call
        finally:
            # restore the caller's original timeout for future calls
            if timeout:
                s.settimeout(timeout)

    def recv(sock, *args, **kwargs):
        # EINTR-retrying recv (Python < 3.5)
        return _retryable_call(sock, sock.recv, *args, **kwargs)

    def recv_into(sock, *args, **kwargs):
        # EINTR-retrying recv_into (Python < 3.5)
        return _retryable_call(sock, sock.recv_into, *args, **kwargs)

else:
    # Python 3.5+ retries EINTR internally (PEP 475); plain passthroughs.
    def recv(sock, *args, **kwargs):
        return sock.recv(*args, **kwargs)

    def recv_into(sock, *args, **kwargs):
        return sock.recv_into(*args, **kwargs)

if sys.version_info[0] < 3:
    # Python 3's ssl module raises socket.timeout where Python 2 raises
    # SSLError; normalise both versions to socket.timeout.
    import functools

    try:
        from ssl import SSLError as _SSLError
    except ImportError:
        class _SSLError(Exception):
            """Stand-in when ssl.SSLError is unavailable."""
            pass

    # ssl error strings that actually indicate a timeout
    _EXPECTED_SSL_TIMEOUT_MESSAGES = (
        "The handshake operation timed out",
        "The read operation timed out",
        "The write operation timed out",
    )

    def _handle_ssl_timeout(func):
        """Decorator mapping timeout-flavoured SSLError to socket.timeout."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except _SSLError as e:
                message = len(e.args) == 1 and unicode(e.args[0]) or ''
                if any(x in message for x in _EXPECTED_SSL_TIMEOUT_MESSAGES):
                    # Raise socket.timeout for compatibility with Python 3.
                    raise socket.timeout(*e.args)
                raise
        return wrapper

    recv = _handle_ssl_timeout(recv)
    recv_into = _handle_ssl_timeout(recv_into)
    sendall = _handle_ssl_timeout(sendall)
    shutdown = _handle_ssl_timeout(shutdown)
    ssl_wrap_socket = _handle_ssl_timeout(ssl_wrap_socket)

if sys.version_info[0] < 3:
    from urllib import unquote
    from urlparse import parse_qs, urlparse
    from itertools import imap, izip
    from string import letters as ascii_letters
    from Queue import Queue

    # special unicode handling for python2 to avoid UnicodeDecodeError
    def safe_unicode(obj, *args):
        """ return the unicode representation of obj """
        try:
            return unicode(obj, *args)
        except UnicodeDecodeError:
            # obj is byte string
            ascii_text = str(obj).encode('string_escape')
            return unicode(ascii_text)

    def iteritems(x):
        return x.iteritems()

    def iterkeys(x):
        return x.iterkeys()

    def itervalues(x):
        return x.itervalues()

    def nativestr(x):
        # Python 2's native str is bytes; encode unicode values
        return x if isinstance(x, str) else x.encode('utf-8', 'replace')

    def next(x):
        return x.next()

    unichr = unichr
    xrange = xrange
    basestring = basestring
    unicode = unicode
    long = long
    BlockingIOError = socket.error
else:
    from urllib.parse import parse_qs, unquote, urlparse
    from string import ascii_letters
    from queue import Queue

    def iteritems(x):
        return iter(x.items())

    def iterkeys(x):
        return iter(x.keys())

    def itervalues(x):
        return iter(x.values())

    def nativestr(x):
        # Python 3's native str is unicode; decode byte values
        return x if isinstance(x, str) else x.decode('utf-8', 'replace')

    def safe_unicode(value):
        """Return ``value`` as str, decoding bytes with 'replace'."""
        if isinstance(value, bytes):
            value = value.decode('utf-8', 'replace')
        return str(value)

    next = next
    unichr = chr
    imap = map
    izip = zip
    xrange = range
    basestring = str
    unicode = str
    long = int
    BlockingIOError = BlockingIOError

try:  # Python 3
    from queue import LifoQueue, Empty, Full
except ImportError:  # Python 2
    from Queue import LifoQueue, Empty, Full
from time import time 5 | import errno 6 | import io 7 | import os 8 | import socket 9 | import threading 10 | import warnings 11 | 12 | from redis._compat import (xrange, imap, unicode, long, 13 | nativestr, basestring, iteritems, 14 | LifoQueue, Empty, Full, urlparse, parse_qs, 15 | recv, recv_into, unquote, BlockingIOError, 16 | sendall, shutdown, ssl_wrap_socket) 17 | from redis.exceptions import ( 18 | AuthenticationError, 19 | AuthenticationWrongNumberOfArgsError, 20 | BusyLoadingError, 21 | ChildDeadlockedError, 22 | ConnectionError, 23 | DataError, 24 | ExecAbortError, 25 | InvalidResponse, 26 | NoPermissionError, 27 | NoScriptError, 28 | ReadOnlyError, 29 | RedisError, 30 | ResponseError, 31 | TimeoutError, 32 | ) 33 | from redis.utils import HIREDIS_AVAILABLE 34 | 35 | try: 36 | import ssl 37 | ssl_available = True 38 | except ImportError: 39 | ssl_available = False 40 | 41 | NONBLOCKING_EXCEPTION_ERROR_NUMBERS = { 42 | BlockingIOError: errno.EWOULDBLOCK, 43 | } 44 | 45 | if ssl_available: 46 | if hasattr(ssl, 'SSLWantReadError'): 47 | NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantReadError] = 2 48 | NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLWantWriteError] = 2 49 | else: 50 | NONBLOCKING_EXCEPTION_ERROR_NUMBERS[ssl.SSLError] = 2 51 | 52 | # In Python 2.7 a socket.error is raised for a nonblocking read. 53 | # The _compat module aliases BlockingIOError to socket.error to be 54 | # Python 2/3 compatible. 55 | # However this means that all socket.error exceptions need to be handled 56 | # properly within these exception handlers. 57 | # We need to make sure socket.error is included in these handlers and 58 | # provide a dummy error number that will never match a real exception. 
class Encoder(object):
    """Translate values into bytes for the wire, and bytes back to text."""

    def __init__(self, encoding, encoding_errors, decode_responses):
        self.encoding = encoding                  # codec name, e.g. 'utf-8'
        self.encoding_errors = encoding_errors    # codec error policy
        self.decode_responses = decode_responses  # auto-decode replies?

    def encode(self, value):
        "Return a bytestring or bytes-like representation of the value"
        if isinstance(value, (bytes, memoryview)):
            # already bytes-like; pass straight through
            return value
        if isinstance(value, bool):
            # special case bool since it is a subclass of int
            raise DataError("Invalid input of type: 'bool'. Convert to a "
                            "bytes, string, int or float first.")
        if isinstance(value, float):
            value = repr(value).encode()
        elif isinstance(value, (int, long)):
            # python 2 repr() on longs is '123L', so use str() instead
            value = str(value).encode()
        elif not isinstance(value, basestring):
            # a value we don't know how to deal with. throw an error
            typename = type(value).__name__
            raise DataError("Invalid input of type: '%s'. Convert to a "
                            "bytes, string, int or float first." % typename)
        if isinstance(value, unicode):
            value = value.encode(self.encoding, self.encoding_errors)
        return value

    def decode(self, value, force=False):
        "Return a unicode string from the bytes-like representation"
        if not (self.decode_responses or force):
            # decoding disabled and not forced: hand back unchanged
            return value
        if isinstance(value, memoryview):
            value = value.tobytes()
        if isinstance(value, bytes):
            value = value.decode(self.encoding, self.encoding_errors)
        return value
class BaseParser(object):
    # Shared error-translation logic for both parser implementations.
    # Maps Redis error-reply prefixes (and, for 'ERR', specific messages)
    # to the exception class that should represent them.
    EXCEPTION_CLASSES = {
        'ERR': {
            'max number of clients reached': ConnectionError,
            'Client sent AUTH, but no password is set': AuthenticationError,
            'invalid password': AuthenticationError,
            # some Redis server versions report invalid command syntax
            # in lowercase
            'wrong number of arguments for \'auth\' command':
                AuthenticationWrongNumberOfArgsError,
            # some Redis server versions report invalid command syntax
            # in uppercase
            'wrong number of arguments for \'AUTH\' command':
                AuthenticationWrongNumberOfArgsError,
        },
        'EXECABORT': ExecAbortError,
        'LOADING': BusyLoadingError,
        'NOSCRIPT': NoScriptError,
        'READONLY': ReadOnlyError,
        'NOAUTH': AuthenticationError,
        'NOPERM': NoPermissionError,
    }

    def parse_error(self, response):
        "Parse an error response"
        # first token is the error code; the remainder is the message
        error_code = response.split(' ')[0]
        if error_code in self.EXCEPTION_CLASSES:
            response = response[len(error_code) + 1:]
            exception_class = self.EXCEPTION_CLASSES[error_code]
            if isinstance(exception_class, dict):
                # 'ERR' maps to a nested message -> class table
                exception_class = exception_class.get(response, ResponseError)
            return exception_class(response)
        return ResponseError(response)


class SocketBuffer(object):
    # Buffers raw bytes read from a socket so the parser can consume the
    # RESP stream line-by-line or in exact-length chunks.
    def __init__(self, socket, socket_read_size, socket_timeout):
        self._sock = socket
        self.socket_read_size = socket_read_size
        self.socket_timeout = socket_timeout
        self._buffer = io.BytesIO()
        # number of bytes written to the buffer from the socket
        self.bytes_written = 0
        # number of bytes read from the buffer
        self.bytes_read = 0

    @property
    def length(self):
        # bytes buffered but not yet consumed
        return self.bytes_written - self.bytes_read

    def _read_from_socket(self, length=None, timeout=SENTINEL,
                          raise_on_timeout=True):
        """Pull data from the socket into the internal buffer.

        When ``length`` is given, keep reading until at least that many new
        bytes arrive. ``timeout`` (when not SENTINEL) temporarily overrides
        the socket timeout. Returns True when data was read, False on a
        tolerated timeout / non-blocking miss (``raise_on_timeout=False``).
        """
        sock = self._sock
        socket_read_size = self.socket_read_size
        buf = self._buffer
        buf.seek(self.bytes_written)
        marker = 0
        custom_timeout = timeout is not SENTINEL

        try:
            if custom_timeout:
                sock.settimeout(timeout)
            while True:
                data = recv(self._sock, socket_read_size)
                # an empty string indicates the server shutdown the socket
                if isinstance(data, bytes) and len(data) == 0:
                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
                buf.write(data)
                data_length = len(data)
                self.bytes_written += data_length
                marker += data_length

                if length is not None and length > marker:
                    continue
                return True
        except socket.timeout:
            if raise_on_timeout:
                raise TimeoutError("Timeout reading from socket")
            return False
        except NONBLOCKING_EXCEPTIONS as ex:
            # if we're in nonblocking mode and the recv raises a
            # blocking error, simply return False indicating that
            # there's no data to be read. otherwise raise the
            # original exception.
            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
            if not raise_on_timeout and ex.errno == allowed:
                return False
            raise ConnectionError("Error while reading from socket: %s" %
                                  (ex.args,))
        finally:
            # always restore the connection's configured timeout
            if custom_timeout:
                sock.settimeout(self.socket_timeout)

    def can_read(self, timeout):
        # True if buffered data exists or the socket yields some within
        # ``timeout`` seconds (non-raising probe).
        return bool(self.length) or \
            self._read_from_socket(timeout=timeout,
                                   raise_on_timeout=False)

    def read(self, length):
        """Read exactly ``length`` payload bytes; the CRLF is stripped."""
        length = length + 2  # make sure to read the \r\n terminator
        # make sure we've read enough data from the socket
        if length > self.length:
            self._read_from_socket(length - self.length)

        self._buffer.seek(self.bytes_read)
        data = self._buffer.read(length)
        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def readline(self):
        """Read one CRLF-terminated line; the terminator is stripped."""
        buf = self._buffer
        buf.seek(self.bytes_read)
        data = buf.readline()
        while not data.endswith(SYM_CRLF):
            # there's more data in the socket that we need
            self._read_from_socket()
            buf.seek(self.bytes_read)
            data = buf.readline()

        self.bytes_read += len(data)

        # purge the buffer when we've consumed it all so it doesn't
        # grow forever
        if self.bytes_read == self.bytes_written:
            self.purge()

        return data[:-2]

    def purge(self):
        # reset buffer and counters; does not touch the socket
        self._buffer.seek(0)
        self._buffer.truncate()
        self.bytes_written = 0
        self.bytes_read = 0

    def close(self):
        try:
            self.purge()
            self._buffer.close()
        except Exception:
            # issue #633 suggests the purge/close somehow raised a
            # BadFileDescriptor error. Perhaps the client ran out of
            # memory or something else? It's probably OK to ignore
            # any error being raised from purge/close since we're
            # removing the reference to the instance below.
            pass
        self._buffer = None
        self._sock = None


class PythonParser(BaseParser):
    "Plain Python parsing class"
    def __init__(self, socket_read_size):
        self.socket_read_size = socket_read_size
        self.encoder = None
        self._sock = None
        self._buffer = None

    def __del__(self):
        try:
            self.on_disconnect()
        except Exception:
            # never let cleanup errors escape __del__
            pass

    def on_connect(self, connection):
        "Called when the socket connects"
        self._sock = connection._sock
        self._buffer = SocketBuffer(self._sock,
                                    self.socket_read_size,
                                    connection.socket_timeout)
        self.encoder = connection.encoder

    def on_disconnect(self):
        "Called when the socket disconnects"
        self._sock = None
        if self._buffer is not None:
            self._buffer.close()
            self._buffer = None
        self.encoder = None

    def can_read(self, timeout):
        return self._buffer and self._buffer.can_read(timeout)

    def read_response(self):
        """Read and decode one RESP reply from the buffered socket."""
        raw = self._buffer.readline()
        if not raw:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # the first byte selects the RESP reply type
        byte, response = raw[:1], raw[1:]

        if byte not in (b'-', b'+', b':', b'$', b'*'):
            raise InvalidResponse("Protocol Error: %r" % raw)

        # server returned an error
        if byte == b'-':
            response = nativestr(response)
            error = self.parse_error(response)
            # if the error is a ConnectionError, raise immediately so the user
            # is notified
            if isinstance(error, ConnectionError):
                raise error
            # otherwise, we're dealing with a ResponseError that might belong
            # inside a pipeline response. the connection's read_response()
            # and/or the pipeline's execute() will raise this error if
            # necessary, so just return the exception instance here.
            return error
        # single value
        elif byte == b'+':
            pass
        # int value
        elif byte == b':':
            response = long(response)
        # bulk response
        elif byte == b'$':
            length = int(response)
            if length == -1:
                # $-1 is the RESP encoding of a null bulk string
                return None
            response = self._buffer.read(length)
        # multi-bulk response
        elif byte == b'*':
            length = int(response)
            if length == -1:
                # *-1 is the RESP encoding of a null array
                return None
            response = [self.read_response() for i in xrange(length)]
        if isinstance(response, bytes):
            response = self.encoder.decode(response)
        return response
class HiredisParser(BaseParser):
    "Parser class for connections using Hiredis"
    def __init__(self, socket_read_size):
        if not HIREDIS_AVAILABLE:
            raise RedisError("Hiredis is not installed")
        self.socket_read_size = socket_read_size

        if HIREDIS_USE_BYTE_BUFFER:
            # reusable buffer for recv_into() to avoid per-read allocations
            self._buffer = bytearray(socket_read_size)

    def __del__(self):
        try:
            self.on_disconnect()
        except Exception:
            # never let cleanup errors escape __del__
            pass

    def on_connect(self, connection):
        "Wire this parser to a freshly connected socket."
        self._sock = connection._sock
        self._socket_timeout = connection.socket_timeout
        kwargs = {
            'protocolError': InvalidResponse,
            'replyError': self.parse_error,
        }

        # hiredis < 0.1.3 doesn't support functions that create exceptions
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            kwargs['replyError'] = ResponseError

        if connection.encoder.decode_responses:
            kwargs['encoding'] = connection.encoder.encoding
            if HIREDIS_SUPPORTS_ENCODING_ERRORS:
                kwargs['errors'] = connection.encoder.encoding_errors
        self._reader = hiredis.Reader(**kwargs)
        self._next_response = False

    def on_disconnect(self):
        self._sock = None
        self._reader = None
        # False is the sentinel hiredis uses for "no complete reply yet"
        self._next_response = False

    def can_read(self, timeout):
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        if self._next_response is False:
            # cache a parsed reply so read_response() doesn't lose it
            self._next_response = self._reader.gets()
            if self._next_response is False:
                return self.read_from_socket(timeout=timeout,
                                             raise_on_timeout=False)
        return True

    def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):
        """Feed the hiredis reader from the socket.

        ``timeout`` (when not SENTINEL) temporarily overrides the socket
        timeout. Returns True when bytes were fed; returns False on a
        tolerated timeout / non-blocking miss (``raise_on_timeout=False``).
        """
        sock = self._sock
        custom_timeout = timeout is not SENTINEL
        try:
            if custom_timeout:
                sock.settimeout(timeout)
            if HIREDIS_USE_BYTE_BUFFER:
                bufflen = recv_into(self._sock, self._buffer)
                if bufflen == 0:
                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
                self._reader.feed(self._buffer, 0, bufflen)
            else:
                buffer = recv(self._sock, self.socket_read_size)
                # an empty string indicates the server shutdown the socket
                if not isinstance(buffer, bytes) or len(buffer) == 0:
                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
                self._reader.feed(buffer)
            # data was read from the socket and added to the buffer.
            # return True to indicate that data was read.
            return True
        except socket.timeout:
            if raise_on_timeout:
                raise TimeoutError("Timeout reading from socket")
            return False
        except NONBLOCKING_EXCEPTIONS as ex:
            # if we're in nonblocking mode and the recv raises a
            # blocking error, simply return False indicating that
            # there's no data to be read. otherwise raise the
            # original exception.
            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
            if not raise_on_timeout and ex.errno == allowed:
                return False
            raise ConnectionError("Error while reading from socket: %s" %
                                  (ex.args,))
        finally:
            # always restore the connection's configured timeout
            if custom_timeout:
                sock.settimeout(self._socket_timeout)

    def read_response(self):
        """Return the next complete reply, reading from the socket as needed."""
        if not self._reader:
            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

        # _next_response might be cached from a can_read() call
        if self._next_response is not False:
            response = self._next_response
            self._next_response = False
            return response

        response = self._reader.gets()
        while response is False:
            self.read_from_socket()
            response = self._reader.gets()
        # if an older version of hiredis is installed, we need to attempt
        # to convert ResponseErrors to their appropriate types.
        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
            if isinstance(response, ResponseError):
                response = self.parse_error(response.args[0])
            elif isinstance(response, list) and response and \
                    isinstance(response[0], ResponseError):
                response[0] = self.parse_error(response[0].args[0])
        # if the response is a ConnectionError or the response is a list and
        # the first item is a ConnectionError, raise it as something bad
        # happened
        if isinstance(response, ConnectionError):
            raise response
        elif isinstance(response, list) and response and \
                isinstance(response[0], ConnectionError):
            raise response[0]
        return response


# Prefer the C-accelerated hiredis parser when the package is importable.
if HIREDIS_AVAILABLE:
    DefaultParser = HiredisParser
else:
    DefaultParser = PythonParser
socket_keepalive=False, socket_keepalive_options=None, 503 | socket_type=0, retry_on_timeout=False, encoding='utf-8', 504 | encoding_errors='strict', decode_responses=False, 505 | parser_class=DefaultParser, socket_read_size=65536, 506 | health_check_interval=0, client_name=None, username=None): 507 | self.pid = os.getpid() 508 | self.host = host 509 | self.port = int(port) 510 | self.db = db 511 | self.username = username 512 | self.client_name = client_name 513 | self.password = password 514 | self.socket_timeout = socket_timeout 515 | self.socket_connect_timeout = socket_connect_timeout or socket_timeout 516 | self.socket_keepalive = socket_keepalive 517 | self.socket_keepalive_options = socket_keepalive_options or {} 518 | self.socket_type = socket_type 519 | self.retry_on_timeout = retry_on_timeout 520 | self.health_check_interval = health_check_interval 521 | self.next_health_check = 0 522 | self.encoder = Encoder(encoding, encoding_errors, decode_responses) 523 | self._sock = None 524 | self._parser = parser_class(socket_read_size=socket_read_size) 525 | self._connect_callbacks = [] 526 | self._buffer_cutoff = 6000 527 | 528 | def __repr__(self): 529 | repr_args = ','.join(['%s=%s' % (k, v) for k, v in self.repr_pieces()]) 530 | return '%s<%s>' % (self.__class__.__name__, repr_args) 531 | 532 | def repr_pieces(self): 533 | pieces = [ 534 | ('host', self.host), 535 | ('port', self.port), 536 | ('db', self.db) 537 | ] 538 | if self.client_name: 539 | pieces.append(('client_name', self.client_name)) 540 | return pieces 541 | 542 | def __del__(self): 543 | try: 544 | self.disconnect() 545 | except Exception: 546 | pass 547 | 548 | def register_connect_callback(self, callback): 549 | self._connect_callbacks.append(callback) 550 | 551 | def clear_connect_callbacks(self): 552 | self._connect_callbacks = [] 553 | 554 | def connect(self): 555 | "Connects to the Redis server if not already connected" 556 | if self._sock: 557 | return 558 | try: 559 | sock = 
self._connect() 560 | except socket.timeout: 561 | raise TimeoutError("Timeout connecting to server") 562 | except socket.error as e: 563 | raise ConnectionError(self._error_message(e)) 564 | 565 | self._sock = sock 566 | try: 567 | self.on_connect() 568 | except RedisError: 569 | # clean up after any error in on_connect 570 | self.disconnect() 571 | raise 572 | 573 | # run any user callbacks. right now the only internal callback 574 | # is for pubsub channel/pattern resubscription 575 | for callback in self._connect_callbacks: 576 | callback(self) 577 | 578 | def _connect(self): 579 | "Create a TCP socket connection" 580 | # we want to mimic what socket.create_connection does to support 581 | # ipv4/ipv6, but we want to set options prior to calling 582 | # socket.connect() 583 | err = None 584 | for res in socket.getaddrinfo(self.host, self.port, self.socket_type, 585 | socket.SOCK_STREAM): 586 | family, socktype, proto, canonname, socket_address = res 587 | sock = None 588 | try: 589 | sock = socket.socket(family, socktype, proto) 590 | # TCP_NODELAY 591 | sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 592 | 593 | # TCP_KEEPALIVE 594 | if self.socket_keepalive: 595 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) 596 | for k, v in iteritems(self.socket_keepalive_options): 597 | sock.setsockopt(socket.IPPROTO_TCP, k, v) 598 | 599 | # set the socket_connect_timeout before we connect 600 | sock.settimeout(self.socket_connect_timeout) 601 | 602 | # connect 603 | sock.connect(socket_address) 604 | 605 | # set the socket_timeout now that we're connected 606 | sock.settimeout(self.socket_timeout) 607 | return sock 608 | 609 | except socket.error as _: 610 | err = _ 611 | if sock is not None: 612 | sock.close() 613 | 614 | if err is not None: 615 | raise err 616 | raise socket.error("socket.getaddrinfo returned an empty list") 617 | 618 | def _error_message(self, exception): 619 | # args for socket.error can either be (errno, "message") 620 | # or 
just "message" 621 | if len(exception.args) == 1: 622 | return "Error connecting to %s:%s. %s." % \ 623 | (self.host, self.port, exception.args[0]) 624 | else: 625 | return "Error %s connecting to %s:%s. %s." % \ 626 | (exception.args[0], self.host, self.port, exception.args[1]) 627 | 628 | def on_connect(self): 629 | "Initialize the connection, authenticate and select a database" 630 | self._parser.on_connect(self) 631 | 632 | # if username and/or password are set, authenticate 633 | if self.username or self.password: 634 | if self.username: 635 | auth_args = (self.username, self.password or '') 636 | else: 637 | auth_args = (self.password,) 638 | # avoid checking health here -- PING will fail if we try 639 | # to check the health prior to the AUTH 640 | self.send_command('AUTH', *auth_args, check_health=False) 641 | 642 | try: 643 | auth_response = self.read_response() 644 | except AuthenticationWrongNumberOfArgsError: 645 | # a username and password were specified but the Redis 646 | # server seems to be < 6.0.0 which expects a single password 647 | # arg. retry auth with just the password. 
648 | # https://github.com/andymccurdy/redis-py/issues/1274 649 | self.send_command('AUTH', self.password, check_health=False) 650 | auth_response = self.read_response() 651 | 652 | if nativestr(auth_response) != 'OK': 653 | raise AuthenticationError('Invalid Username or Password') 654 | 655 | # if a client_name is given, set it 656 | if self.client_name: 657 | self.send_command('CLIENT', 'SETNAME', self.client_name) 658 | if nativestr(self.read_response()) != 'OK': 659 | raise ConnectionError('Error setting client name') 660 | 661 | # if a database is specified, switch to it 662 | if self.db: 663 | self.send_command('SELECT', self.db) 664 | if nativestr(self.read_response()) != 'OK': 665 | raise ConnectionError('Invalid Database') 666 | 667 | def disconnect(self): 668 | "Disconnects from the Redis server" 669 | self._parser.on_disconnect() 670 | if self._sock is None: 671 | return 672 | try: 673 | if os.getpid() == self.pid: 674 | shutdown(self._sock, socket.SHUT_RDWR) 675 | self._sock.close() 676 | except socket.error: 677 | pass 678 | self._sock = None 679 | 680 | def check_health(self): 681 | "Check the health of the connection with a PING/PONG" 682 | if self.health_check_interval and time() > self.next_health_check: 683 | try: 684 | self.send_command('PING', check_health=False) 685 | if nativestr(self.read_response()) != 'PONG': 686 | raise ConnectionError( 687 | 'Bad response from PING health check') 688 | except (ConnectionError, TimeoutError): 689 | self.disconnect() 690 | self.send_command('PING', check_health=False) 691 | if nativestr(self.read_response()) != 'PONG': 692 | raise ConnectionError( 693 | 'Bad response from PING health check') 694 | 695 | def send_packed_command(self, command, check_health=True): 696 | "Send an already packed command to the Redis server" 697 | if not self._sock: 698 | self.connect() 699 | # guard against health check recursion 700 | if check_health: 701 | self.check_health() 702 | try: 703 | if isinstance(command, str): 704 | 
command = [command] 705 | for item in command: 706 | sendall(self._sock, item) 707 | except socket.timeout: 708 | self.disconnect() 709 | raise TimeoutError("Timeout writing to socket") 710 | except socket.error as e: 711 | self.disconnect() 712 | if len(e.args) == 1: 713 | errno, errmsg = 'UNKNOWN', e.args[0] 714 | else: 715 | errno = e.args[0] 716 | errmsg = e.args[1] 717 | raise ConnectionError("Error %s while writing to socket. %s." % 718 | (errno, errmsg)) 719 | except BaseException: 720 | self.disconnect() 721 | raise 722 | 723 | def send_command(self, *args, **kwargs): 724 | "Pack and send a command to the Redis server" 725 | self.send_packed_command(self.pack_command(*args), 726 | check_health=kwargs.get('check_health', True)) 727 | 728 | def can_read(self, timeout=0): 729 | "Poll the socket to see if there's data that can be read." 730 | sock = self._sock 731 | if not sock: 732 | self.connect() 733 | sock = self._sock 734 | return self._parser.can_read(timeout) 735 | 736 | def read_response(self): 737 | "Read the response from a previously sent command" 738 | try: 739 | response = self._parser.read_response() 740 | except socket.timeout: 741 | self.disconnect() 742 | raise TimeoutError("Timeout reading from %s:%s" % 743 | (self.host, self.port)) 744 | except socket.error as e: 745 | self.disconnect() 746 | raise ConnectionError("Error while reading from %s:%s : %s" % 747 | (self.host, self.port, e.args)) 748 | except BaseException: 749 | self.disconnect() 750 | raise 751 | 752 | if self.health_check_interval: 753 | self.next_health_check = time() + self.health_check_interval 754 | 755 | if isinstance(response, ResponseError): 756 | raise response 757 | return response 758 | 759 | def pack_command(self, *args): 760 | "Pack a series of arguments into the Redis protocol" 761 | output = [] 762 | # the client might have included 1 or more literal arguments in 763 | # the command name, e.g., 'CONFIG GET'. 
The Redis server expects these 764 | # arguments to be sent separately, so split the first argument 765 | # manually. These arguments should be bytestrings so that they are 766 | # not encoded. 767 | if isinstance(args[0], unicode): 768 | args = tuple(args[0].encode().split()) + args[1:] 769 | elif b' ' in args[0]: 770 | args = tuple(args[0].split()) + args[1:] 771 | 772 | buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF)) 773 | 774 | buffer_cutoff = self._buffer_cutoff 775 | for arg in imap(self.encoder.encode, args): 776 | # to avoid large string mallocs, chunk the command into the 777 | # output list if we're sending large values or memoryviews 778 | arg_length = len(arg) 779 | if (len(buff) > buffer_cutoff or arg_length > buffer_cutoff 780 | or isinstance(arg, memoryview)): 781 | buff = SYM_EMPTY.join( 782 | (buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)) 783 | output.append(buff) 784 | output.append(arg) 785 | buff = SYM_CRLF 786 | else: 787 | buff = SYM_EMPTY.join( 788 | (buff, SYM_DOLLAR, str(arg_length).encode(), 789 | SYM_CRLF, arg, SYM_CRLF)) 790 | output.append(buff) 791 | return output 792 | 793 | def pack_commands(self, commands): 794 | "Pack multiple commands into the Redis protocol" 795 | output = [] 796 | pieces = [] 797 | buffer_length = 0 798 | buffer_cutoff = self._buffer_cutoff 799 | 800 | for cmd in commands: 801 | for chunk in self.pack_command(*cmd): 802 | chunklen = len(chunk) 803 | if (buffer_length > buffer_cutoff or chunklen > buffer_cutoff 804 | or isinstance(chunk, memoryview)): 805 | output.append(SYM_EMPTY.join(pieces)) 806 | buffer_length = 0 807 | pieces = [] 808 | 809 | if chunklen > buffer_cutoff or isinstance(chunk, memoryview): 810 | output.append(chunk) 811 | else: 812 | pieces.append(chunk) 813 | buffer_length += chunklen 814 | 815 | if pieces: 816 | output.append(SYM_EMPTY.join(pieces)) 817 | return output 818 | 819 | 820 | class SSLConnection(Connection): 821 | 822 | def __init__(self, 
ssl_keyfile=None, ssl_certfile=None, 823 | ssl_cert_reqs='required', ssl_ca_certs=None, 824 | ssl_check_hostname=False, **kwargs): 825 | if not ssl_available: 826 | raise RedisError("Python wasn't built with SSL support") 827 | 828 | super(SSLConnection, self).__init__(**kwargs) 829 | 830 | self.keyfile = ssl_keyfile 831 | self.certfile = ssl_certfile 832 | if ssl_cert_reqs is None: 833 | ssl_cert_reqs = ssl.CERT_NONE 834 | elif isinstance(ssl_cert_reqs, basestring): 835 | CERT_REQS = { 836 | 'none': ssl.CERT_NONE, 837 | 'optional': ssl.CERT_OPTIONAL, 838 | 'required': ssl.CERT_REQUIRED 839 | } 840 | if ssl_cert_reqs not in CERT_REQS: 841 | raise RedisError( 842 | "Invalid SSL Certificate Requirements Flag: %s" % 843 | ssl_cert_reqs) 844 | ssl_cert_reqs = CERT_REQS[ssl_cert_reqs] 845 | self.cert_reqs = ssl_cert_reqs 846 | self.ca_certs = ssl_ca_certs 847 | self.check_hostname = ssl_check_hostname 848 | 849 | def _connect(self): 850 | "Wrap the socket with SSL support" 851 | sock = super(SSLConnection, self)._connect() 852 | if hasattr(ssl, "create_default_context"): 853 | context = ssl.create_default_context() 854 | context.check_hostname = self.check_hostname 855 | context.verify_mode = self.cert_reqs 856 | if self.certfile and self.keyfile: 857 | context.load_cert_chain(certfile=self.certfile, 858 | keyfile=self.keyfile) 859 | if self.ca_certs: 860 | context.load_verify_locations(self.ca_certs) 861 | sock = ssl_wrap_socket(context, sock, server_hostname=self.host) 862 | else: 863 | # In case this code runs in a version which is older than 2.7.9, 864 | # we want to fall back to old code 865 | sock = ssl_wrap_socket(ssl, 866 | sock, 867 | cert_reqs=self.cert_reqs, 868 | keyfile=self.keyfile, 869 | certfile=self.certfile, 870 | ca_certs=self.ca_certs) 871 | return sock 872 | 873 | 874 | class UnixDomainSocketConnection(Connection): 875 | 876 | def __init__(self, path='', db=0, username=None, password=None, 877 | socket_timeout=None, encoding='utf-8', 878 | 
encoding_errors='strict', decode_responses=False, 879 | retry_on_timeout=False, 880 | parser_class=DefaultParser, socket_read_size=65536, 881 | health_check_interval=0, client_name=None): 882 | self.pid = os.getpid() 883 | self.path = path 884 | self.db = db 885 | self.username = username 886 | self.client_name = client_name 887 | self.password = password 888 | self.socket_timeout = socket_timeout 889 | self.retry_on_timeout = retry_on_timeout 890 | self.health_check_interval = health_check_interval 891 | self.next_health_check = 0 892 | self.encoder = Encoder(encoding, encoding_errors, decode_responses) 893 | self._sock = None 894 | self._parser = parser_class(socket_read_size=socket_read_size) 895 | self._connect_callbacks = [] 896 | self._buffer_cutoff = 6000 897 | 898 | def repr_pieces(self): 899 | pieces = [ 900 | ('path', self.path), 901 | ('db', self.db), 902 | ] 903 | if self.client_name: 904 | pieces.append(('client_name', self.client_name)) 905 | return pieces 906 | 907 | def _connect(self): 908 | "Create a Unix domain socket connection" 909 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 910 | sock.settimeout(self.socket_timeout) 911 | sock.connect(self.path) 912 | return sock 913 | 914 | def _error_message(self, exception): 915 | # args for socket.error can either be (errno, "message") 916 | # or just "message" 917 | if len(exception.args) == 1: 918 | return "Error connecting to unix socket: %s. %s." % \ 919 | (self.path, exception.args[0]) 920 | else: 921 | return "Error %s connecting to unix socket: %s. %s." 
# upper-cased querystring values that parse as boolean False
FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')


def to_bool(value):
    "Coerce a URL querystring value to a boolean (None for empty values)."
    if value is None or value == '':
        return None
    if isinstance(value, basestring) and value.upper() in FALSE_STRINGS:
        return False
    return bool(value)


# querystring arguments that need coercion from their raw string form
URL_QUERY_ARGUMENT_PARSERS = {
    'socket_timeout': float,
    'socket_connect_timeout': float,
    'socket_keepalive': to_bool,
    'retry_on_timeout': to_bool,
    'max_connections': int,
    'health_check_interval': int,
    'ssl_check_hostname': to_bool,
}


class ConnectionPool(object):
    "Generic connection pool"
    @classmethod
    def from_url(cls, url, db=None, decode_components=False, **kwargs):
        """
        Return a connection pool configured from the given URL.

        For example::

            redis://[[username]:[password]]@localhost:6379/0
            rediss://[[username]:[password]]@localhost:6379/0
            unix://[[username]:[password]]@/path/to/socket.sock?db=0

        Three URL schemes are supported:

        - ``redis://`` creates a normal TCP socket connection
        - ``rediss://`` creates a SSL wrapped TCP socket connection
        - ``unix://`` creates a Unix Domain Socket connection

        There are several ways to specify a database number. The parse
        function will return the first specified option:
        1. A ``db`` querystring option, e.g. redis://localhost?db=0
        2. If using the redis:// scheme, the path argument of the url, e.g.
           redis://localhost/0
        3. The ``db`` argument to this function.

        If none of these options are specified, db=0 is used.

        The ``decode_components`` argument allows this function to work with
        percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
        escapes will be replaced by their single-character equivalents after
        the URL has been parsed. This only applies to the ``hostname``,
        ``path``, ``username`` and ``password`` components.

        Any additional querystring arguments and keyword arguments will be
        passed along to the ConnectionPool class's initializer. The
        querystring arguments ``socket_connect_timeout`` and
        ``socket_timeout`` if supplied are parsed as float values. The
        arguments ``socket_keepalive`` and ``retry_on_timeout`` are parsed
        to boolean values that accept True/False, Yes/No values to indicate
        state. Invalid types cause a ``UserWarning`` to be raised. In the
        case of conflicting arguments, querystring arguments always win.
        """
        parsed = urlparse(url)
        url_options = {}

        for name, values in iteritems(parse_qs(parsed.query)):
            # parse_qs maps each name to a (possibly empty) list of values;
            # only the first value is used
            if not values:
                continue
            parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
            if parser is None:
                url_options[name] = values[0]
            else:
                try:
                    url_options[name] = parser(values[0])
                except (TypeError, ValueError):
                    warnings.warn(UserWarning(
                        "Invalid value for `%s` in connection URL." % name
                    ))

        if decode_components:
            username = unquote(parsed.username) if parsed.username else None
            password = unquote(parsed.password) if parsed.password else None
            path = unquote(parsed.path) if parsed.path else None
            hostname = unquote(parsed.hostname) if parsed.hostname else None
        else:
            username = parsed.username or None
            password = parsed.password or None
            path = parsed.path
            hostname = parsed.hostname

        # We only support redis://, rediss:// and unix:// schemes.
        if parsed.scheme == 'unix':
            url_options.update({
                'username': username,
                'password': password,
                'path': path,
                'connection_class': UnixDomainSocketConnection,
            })

        elif parsed.scheme in ('redis', 'rediss'):
            url_options.update({
                'host': hostname,
                'port': int(parsed.port or 6379),
                'username': username,
                'password': password,
            })

            # If there's a path argument, use it as the db argument if a
            # querystring value wasn't specified
            if 'db' not in url_options and path:
                try:
                    url_options['db'] = int(path.replace('/', ''))
                except (AttributeError, ValueError):
                    pass

            if parsed.scheme == 'rediss':
                url_options['connection_class'] = SSLConnection
        else:
            valid_schemes = ', '.join(('redis://', 'rediss://', 'unix://'))
            raise ValueError('Redis URL must specify one of the following '
                             'schemes (%s)' % valid_schemes)

        # last shot at the db value
        url_options['db'] = int(url_options.get('db', db or 0))

        # update the arguments from the URL values
        kwargs.update(url_options)

        # backwards compatability
        if 'charset' in kwargs:
            warnings.warn(DeprecationWarning(
                '"charset" is deprecated. Use "encoding" instead'))
            kwargs['encoding'] = kwargs.pop('charset')
        if 'errors' in kwargs:
            warnings.warn(DeprecationWarning(
                '"errors" is deprecated. Use "encoding_errors" instead'))
            kwargs['encoding_errors'] = kwargs.pop('errors')

        return cls(**kwargs)
1077 | 1078 | By default, TCP connections are created unless connection_class is 1079 | specified. Use redis.UnixDomainSocketConnection for unix sockets. 1080 | 1081 | Any additional keyword arguments are passed to the constructor of 1082 | connection_class. 1083 | """ 1084 | max_connections = max_connections or 2 ** 31 1085 | if not isinstance(max_connections, (int, long)) or max_connections < 0: 1086 | raise ValueError('"max_connections" must be a positive integer') 1087 | 1088 | self.connection_class = connection_class 1089 | self.connection_kwargs = connection_kwargs 1090 | self.max_connections = max_connections 1091 | 1092 | # a lock to protect the critical section in _checkpid(). 1093 | # this lock is acquired when the process id changes, such as 1094 | # after a fork. during this time, multiple threads in the child 1095 | # process could attempt to acquire this lock. the first thread 1096 | # to acquire the lock will reset the data structures and lock 1097 | # object of this pool. subsequent threads acquiring this lock 1098 | # will notice the first thread already did the work and simply 1099 | # release the lock. 1100 | self._fork_lock = threading.Lock() 1101 | self.reset() 1102 | 1103 | def __repr__(self): 1104 | return "%s<%s>" % ( 1105 | type(self).__name__, 1106 | repr(self.connection_class(**self.connection_kwargs)), 1107 | ) 1108 | 1109 | def reset(self): 1110 | self._lock = threading.Lock() 1111 | self._created_connections = 0 1112 | self._available_connections = [] 1113 | self._in_use_connections = set() 1114 | 1115 | # this must be the last operation in this method. while reset() is 1116 | # called when holding _fork_lock, other threads in this process 1117 | # can call _checkpid() which compares self.pid and os.getpid() without 1118 | # holding any lock (for performance reasons). 
keeping this assignment 1119 | # as the last operation ensures that those other threads will also 1120 | # notice a pid difference and block waiting for the first thread to 1121 | # release _fork_lock. when each of these threads eventually acquire 1122 | # _fork_lock, they will notice that another thread already called 1123 | # reset() and they will immediately release _fork_lock and continue on. 1124 | self.pid = os.getpid() 1125 | 1126 | def _checkpid(self): 1127 | # _checkpid() attempts to keep ConnectionPool fork-safe on modern 1128 | # systems. this is called by all ConnectionPool methods that 1129 | # manipulate the pool's state such as get_connection() and release(). 1130 | # 1131 | # _checkpid() determines whether the process has forked by comparing 1132 | # the current process id to the process id saved on the ConnectionPool 1133 | # instance. if these values are the same, _checkpid() simply returns. 1134 | # 1135 | # when the process ids differ, _checkpid() assumes that the process 1136 | # has forked and that we're now running in the child process. the child 1137 | # process cannot use the parent's file descriptors (e.g., sockets). 1138 | # therefore, when _checkpid() sees the process id change, it calls 1139 | # reset() in order to reinitialize the child's ConnectionPool. this 1140 | # will cause the child to make all new connection objects. 1141 | # 1142 | # _checkpid() is protected by self._fork_lock to ensure that multiple 1143 | # threads in the child process do not call reset() multiple times. 1144 | # 1145 | # there is an extremely small chance this could fail in the following 1146 | # scenario: 1147 | # 1. process A calls _checkpid() for the first time and acquires 1148 | # self._fork_lock. 1149 | # 2. while holding self._fork_lock, process A forks (the fork() 1150 | # could happen in a different thread owned by process A) 1151 | # 3. process B (the forked child process) inherits the 1152 | # ConnectionPool's state from the parent. 
that state includes 1153 | # a locked _fork_lock. process B will not be notified when 1154 | # process A releases the _fork_lock and will thus never be 1155 | # able to acquire the _fork_lock. 1156 | # 1157 | # to mitigate this possible deadlock, _checkpid() will only wait 5 1158 | # seconds to acquire _fork_lock. if _fork_lock cannot be acquired in 1159 | # that time it is assumed that the child is deadlocked and a 1160 | # redis.ChildDeadlockedError error is raised. 1161 | if self.pid != os.getpid(): 1162 | # python 2.7 doesn't support a timeout option to lock.acquire() 1163 | # we have to mimic lock timeouts ourselves. 1164 | timeout_at = time() + 5 1165 | acquired = False 1166 | while time() < timeout_at: 1167 | acquired = self._fork_lock.acquire(False) 1168 | if acquired: 1169 | break 1170 | if not acquired: 1171 | raise ChildDeadlockedError 1172 | # reset() the instance for the new process if another thread 1173 | # hasn't already done so 1174 | try: 1175 | if self.pid != os.getpid(): 1176 | self.reset() 1177 | finally: 1178 | self._fork_lock.release() 1179 | 1180 | def get_connection(self, command_name, *keys, **options): 1181 | "Get a connection from the pool" 1182 | self._checkpid() 1183 | with self._lock: 1184 | try: 1185 | connection = self._available_connections.pop() 1186 | except IndexError: 1187 | connection = self.make_connection() 1188 | self._in_use_connections.add(connection) 1189 | 1190 | try: 1191 | # ensure this connection is connected to Redis 1192 | connection.connect() 1193 | # connections that the pool provides should be ready to send 1194 | # a command. if not, the connection was either returned to the 1195 | # pool before all data has been read or the socket has been 1196 | # closed. either way, reconnect and verify everything is good. 
1197 | try: 1198 | if connection.can_read(): 1199 | raise ConnectionError('Connection has data') 1200 | except ConnectionError: 1201 | connection.disconnect() 1202 | connection.connect() 1203 | if connection.can_read(): 1204 | raise ConnectionError('Connection not ready') 1205 | except BaseException: 1206 | # release the connection back to the pool so that we don't 1207 | # leak it 1208 | self.release(connection) 1209 | raise 1210 | 1211 | return connection 1212 | 1213 | def get_encoder(self): 1214 | "Return an encoder based on encoding settings" 1215 | kwargs = self.connection_kwargs 1216 | return Encoder( 1217 | encoding=kwargs.get('encoding', 'utf-8'), 1218 | encoding_errors=kwargs.get('encoding_errors', 'strict'), 1219 | decode_responses=kwargs.get('decode_responses', False) 1220 | ) 1221 | 1222 | def make_connection(self): 1223 | "Create a new connection" 1224 | if self._created_connections >= self.max_connections: 1225 | raise ConnectionError("Too many connections") 1226 | self._created_connections += 1 1227 | return self.connection_class(**self.connection_kwargs) 1228 | 1229 | def release(self, connection): 1230 | "Releases the connection back to the pool" 1231 | self._checkpid() 1232 | with self._lock: 1233 | try: 1234 | self._in_use_connections.remove(connection) 1235 | except KeyError: 1236 | # Gracefully fail when a connection is returned to this pool 1237 | # that the pool doesn't actually own 1238 | pass 1239 | 1240 | if self.owns_connection(connection): 1241 | self._available_connections.append(connection) 1242 | else: 1243 | # pool doesn't own this connection. 
do not add it back 1244 | # to the pool and decrement the count so that another 1245 | # connection can take its place if needed 1246 | self._created_connections -= 1 1247 | connection.disconnect() 1248 | return 1249 | 1250 | def owns_connection(self, connection): 1251 | return connection.pid == self.pid 1252 | 1253 | def disconnect(self, inuse_connections=True): 1254 | """ 1255 | Disconnects connections in the pool 1256 | 1257 | If ``inuse_connections`` is True, disconnect connections that are 1258 | current in use, potentially by other threads. Otherwise only disconnect 1259 | connections that are idle in the pool. 1260 | """ 1261 | self._checkpid() 1262 | with self._lock: 1263 | if inuse_connections: 1264 | connections = chain(self._available_connections, 1265 | self._in_use_connections) 1266 | else: 1267 | connections = self._available_connections 1268 | 1269 | for connection in connections: 1270 | connection.disconnect() 1271 | 1272 | 1273 | class BlockingConnectionPool(ConnectionPool): 1274 | """ 1275 | Thread-safe blocking connection pool:: 1276 | 1277 | >>> from redis.client import Redis 1278 | >>> client = Redis(connection_pool=BlockingConnectionPool()) 1279 | 1280 | It performs the same function as the default 1281 | ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that, 1282 | it maintains a pool of reusable connections that can be shared by 1283 | multiple redis clients (safely across threads if required). 1284 | 1285 | The difference is that, in the event that a client tries to get a 1286 | connection from the pool when all of connections are in use, rather than 1287 | raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default 1288 | ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it 1289 | makes the client wait ("blocks") for a specified number of seconds until 1290 | a connection becomes available. 
1291 | 1292 | Use ``max_connections`` to increase / decrease the pool size:: 1293 | 1294 | >>> pool = BlockingConnectionPool(max_connections=10) 1295 | 1296 | Use ``timeout`` to tell it either how many seconds to wait for a connection 1297 | to become available, or to block forever: 1298 | 1299 | # Block forever. 1300 | >>> pool = BlockingConnectionPool(timeout=None) 1301 | 1302 | # Raise a ``ConnectionError`` after five seconds if a connection is 1303 | # not available. 1304 | >>> pool = BlockingConnectionPool(timeout=5) 1305 | """ 1306 | def __init__(self, max_connections=50, timeout=20, 1307 | connection_class=Connection, queue_class=LifoQueue, 1308 | **connection_kwargs): 1309 | 1310 | self.queue_class = queue_class 1311 | self.timeout = timeout 1312 | super(BlockingConnectionPool, self).__init__( 1313 | connection_class=connection_class, 1314 | max_connections=max_connections, 1315 | **connection_kwargs) 1316 | 1317 | def reset(self): 1318 | # Create and fill up a thread safe queue with ``None`` values. 1319 | self.pool = self.queue_class(self.max_connections) 1320 | while True: 1321 | try: 1322 | self.pool.put_nowait(None) 1323 | except Full: 1324 | break 1325 | 1326 | # Keep a list of actual connection instances so that we can 1327 | # disconnect them later. 1328 | self._connections = [] 1329 | 1330 | # this must be the last operation in this method. while reset() is 1331 | # called when holding _fork_lock, other threads in this process 1332 | # can call _checkpid() which compares self.pid and os.getpid() without 1333 | # holding any lock (for performance reasons). keeping this assignment 1334 | # as the last operation ensures that those other threads will also 1335 | # notice a pid difference and block waiting for the first thread to 1336 | # release _fork_lock. when each of these threads eventually acquire 1337 | # _fork_lock, they will notice that another thread already called 1338 | # reset() and they will immediately release _fork_lock and continue on. 
1339 | self.pid = os.getpid() 1340 | 1341 | def make_connection(self): 1342 | "Make a fresh connection." 1343 | connection = self.connection_class(**self.connection_kwargs) 1344 | self._connections.append(connection) 1345 | return connection 1346 | 1347 | def get_connection(self, command_name, *keys, **options): 1348 | """ 1349 | Get a connection, blocking for ``self.timeout`` until a connection 1350 | is available from the pool. 1351 | 1352 | If the connection returned is ``None`` then creates a new connection. 1353 | Because we use a last-in first-out queue, the existing connections 1354 | (having been returned to the pool after the initial ``None`` values 1355 | were added) will be returned before ``None`` values. This means we only 1356 | create new connections when we need to, i.e.: the actual number of 1357 | connections will only increase in response to demand. 1358 | """ 1359 | # Make sure we haven't changed process. 1360 | self._checkpid() 1361 | 1362 | # Try and get a connection from the pool. If one isn't available within 1363 | # self.timeout then raise a ``ConnectionError``. 1364 | connection = None 1365 | try: 1366 | connection = self.pool.get(block=True, timeout=self.timeout) 1367 | except Empty: 1368 | # Note that this is not caught by the redis client and will be 1369 | # raised unless handled by application code. If you want never to 1370 | raise ConnectionError("No connection available.") 1371 | 1372 | # If the ``connection`` is actually ``None`` then that's a cue to make 1373 | # a new connection to add to the pool. 1374 | if connection is None: 1375 | connection = self.make_connection() 1376 | 1377 | try: 1378 | # ensure this connection is connected to Redis 1379 | connection.connect() 1380 | # connections that the pool provides should be ready to send 1381 | # a command. if not, the connection was either returned to the 1382 | # pool before all data has been read or the socket has been 1383 | # closed. 
either way, reconnect and verify everything is good. 1384 | try: 1385 | if connection.can_read(): 1386 | raise ConnectionError('Connection has data') 1387 | except ConnectionError: 1388 | connection.disconnect() 1389 | connection.connect() 1390 | if connection.can_read(): 1391 | raise ConnectionError('Connection not ready') 1392 | except BaseException: 1393 | # release the connection back to the pool so that we don't leak it 1394 | self.release(connection) 1395 | raise 1396 | 1397 | return connection 1398 | 1399 | def release(self, connection): 1400 | "Releases the connection back to the pool." 1401 | # Make sure we haven't changed process. 1402 | self._checkpid() 1403 | if not self.owns_connection(connection): 1404 | # pool doesn't own this connection. do not add it back 1405 | # to the pool. instead add a None value which is a placeholder 1406 | # that will cause the pool to recreate the connection if 1407 | # its needed. 1408 | connection.disconnect() 1409 | self.pool.put_nowait(None) 1410 | return 1411 | 1412 | # Put the connection back into the pool. 1413 | try: 1414 | self.pool.put_nowait(connection) 1415 | except Full: 1416 | # perhaps the pool has been reset() after a fork? regardless, 1417 | # we don't want this connection 1418 | pass 1419 | 1420 | def disconnect(self): 1421 | "Disconnects all connections in the pool." 
"Core exceptions raised by the Redis client"


class RedisError(Exception):
    "Base class for every error raised by redis-py"


class ConnectionError(RedisError):
    "Error establishing or using a connection to the server"


class TimeoutError(RedisError):
    "A blocking operation exceeded its time limit"


class AuthenticationError(ConnectionError):
    "The server rejected the supplied credentials"


class BusyLoadingError(ConnectionError):
    "The server is still loading its dataset into memory"


class InvalidResponse(RedisError):
    "The server sent data that violates the protocol"


class ResponseError(RedisError):
    "The server replied with an error to a command"


class DataError(RedisError):
    "Invalid data was supplied to the client"


class PubSubError(RedisError):
    "Error in publish/subscribe handling"


class WatchError(RedisError):
    "A watched key changed before the transaction executed"


class NoScriptError(ResponseError):
    "The requested Lua script does not exist on the server"


class ExecAbortError(ResponseError):
    "A transaction was aborted by the server"


class ReadOnlyError(ResponseError):
    "A write command was sent to a read-only replica"


class NoPermissionError(ResponseError):
    "The current user lacks permission for the command"


class LockError(RedisError, ValueError):
    "Errors acquiring or releasing a lock"
    # NOTE: For backwards compatability, this class derives from ValueError.
    # This was originally chosen to behave like threading.Lock.


class LockNotOwnedError(LockError):
    "Error trying to extend or release a lock that is (no longer) owned"


class ChildDeadlockedError(Exception):
    "Error indicating that a child process is deadlocked after a fork()"


class AuthenticationWrongNumberOfArgsError(ResponseError):
    """
    An error to indicate that the wrong number of args
    were sent to the AUTH command
    """
15 | """ 16 | 17 | lua_release = None 18 | lua_extend = None 19 | lua_reacquire = None 20 | 21 | # KEYS[1] - lock name 22 | # ARGV[1] - token 23 | # return 1 if the lock was released, otherwise 0 24 | LUA_RELEASE_SCRIPT = """ 25 | local token = redis.call('get', KEYS[1]) 26 | if not token or token ~= ARGV[1] then 27 | return 0 28 | end 29 | redis.call('del', KEYS[1]) 30 | return 1 31 | """ 32 | 33 | # KEYS[1] - lock name 34 | # ARGV[1] - token 35 | # ARGV[2] - additional milliseconds 36 | # ARGV[3] - "0" if the additional time should be added to the lock's 37 | # existing ttl or "1" if the existing ttl should be replaced 38 | # return 1 if the locks time was extended, otherwise 0 39 | LUA_EXTEND_SCRIPT = """ 40 | local token = redis.call('get', KEYS[1]) 41 | if not token or token ~= ARGV[1] then 42 | return 0 43 | end 44 | local expiration = redis.call('pttl', KEYS[1]) 45 | if not expiration then 46 | expiration = 0 47 | end 48 | if expiration < 0 then 49 | return 0 50 | end 51 | 52 | local newttl = ARGV[2] 53 | if ARGV[3] == "0" then 54 | newttl = ARGV[2] + expiration 55 | end 56 | redis.call('pexpire', KEYS[1], newttl) 57 | return 1 58 | """ 59 | 60 | # KEYS[1] - lock name 61 | # ARGV[1] - token 62 | # ARGV[2] - milliseconds 63 | # return 1 if the locks time was reacquired, otherwise 0 64 | LUA_REACQUIRE_SCRIPT = """ 65 | local token = redis.call('get', KEYS[1]) 66 | if not token or token ~= ARGV[1] then 67 | return 0 68 | end 69 | redis.call('pexpire', KEYS[1], ARGV[2]) 70 | return 1 71 | """ 72 | 73 | def __init__(self, redis, name, timeout=None, sleep=0.1, 74 | blocking=True, blocking_timeout=None, thread_local=True): 75 | """ 76 | Create a new Lock instance named ``name`` using the Redis client 77 | supplied by ``redis``. 78 | 79 | ``timeout`` indicates a maximum life for the lock. 80 | By default, it will remain locked until release() is called. 81 | ``timeout`` can be specified as a float or integer, both representing 82 | the number of seconds to wait. 
83 | 84 | ``sleep`` indicates the amount of time to sleep per loop iteration 85 | when the lock is in blocking mode and another client is currently 86 | holding the lock. 87 | 88 | ``blocking`` indicates whether calling ``acquire`` should block until 89 | the lock has been acquired or to fail immediately, causing ``acquire`` 90 | to return False and the lock not being acquired. Defaults to True. 91 | Note this value can be overridden by passing a ``blocking`` 92 | argument to ``acquire``. 93 | 94 | ``blocking_timeout`` indicates the maximum amount of time in seconds to 95 | spend trying to acquire the lock. A value of ``None`` indicates 96 | continue trying forever. ``blocking_timeout`` can be specified as a 97 | float or integer, both representing the number of seconds to wait. 98 | 99 | ``thread_local`` indicates whether the lock token is placed in 100 | thread-local storage. By default, the token is placed in thread local 101 | storage so that a thread only sees its token, not a token set by 102 | another thread. Consider the following timeline: 103 | 104 | time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. 105 | thread-1 sets the token to "abc" 106 | time: 1, thread-2 blocks trying to acquire `my-lock` using the 107 | Lock instance. 108 | time: 5, thread-1 has not yet completed. redis expires the lock 109 | key. 110 | time: 5, thread-2 acquired `my-lock` now that it's available. 111 | thread-2 sets the token to "xyz" 112 | time: 6, thread-1 finishes its work and calls release(). if the 113 | token is *not* stored in thread local storage, then 114 | thread-1 would see the token value as "xyz" and would be 115 | able to successfully release the thread-2's lock. 116 | 117 | In some use cases it's necessary to disable thread local storage. For 118 | example, if you have code where one thread acquires a lock and passes 119 | that lock instance to a worker thread to release later. 
If thread 120 | local storage isn't disabled in this case, the worker thread won't see 121 | the token set by the thread that acquired the lock. Our assumption 122 | is that these cases aren't common and as such default to using 123 | thread local storage. 124 | """ 125 | self.redis = redis 126 | self.name = name 127 | self.timeout = timeout 128 | self.sleep = sleep 129 | self.blocking = blocking 130 | self.blocking_timeout = blocking_timeout 131 | self.thread_local = bool(thread_local) 132 | self.local = threading.local() if self.thread_local else dummy() 133 | self.local.token = None 134 | self.register_scripts() 135 | 136 | def register_scripts(self): 137 | cls = self.__class__ 138 | client = self.redis 139 | if cls.lua_release is None: 140 | cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT) 141 | if cls.lua_extend is None: 142 | cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT) 143 | if cls.lua_reacquire is None: 144 | cls.lua_reacquire = \ 145 | client.register_script(cls.LUA_REACQUIRE_SCRIPT) 146 | 147 | def __enter__(self): 148 | # force blocking, as otherwise the user would have to check whether 149 | # the lock was actually acquired or not. 150 | if self.acquire(blocking=True): 151 | return self 152 | raise LockError("Unable to acquire lock within the time specified") 153 | 154 | def __exit__(self, exc_type, exc_value, traceback): 155 | self.release() 156 | 157 | def acquire(self, blocking=None, blocking_timeout=None, token=None): 158 | """ 159 | Use Redis to hold a shared, distributed lock named ``name``. 160 | Returns True once the lock is acquired. 161 | 162 | If ``blocking`` is False, always return immediately. If the lock 163 | was acquired, return True, otherwise return False. 164 | 165 | ``blocking_timeout`` specifies the maximum number of seconds to 166 | wait trying to acquire the lock. 167 | 168 | ``token`` specifies the token value to be used. 
If provided, token 169 | must be a bytes object or a string that can be encoded to a bytes 170 | object with the default encoding. If a token isn't specified, a UUID 171 | will be generated. 172 | """ 173 | sleep = self.sleep 174 | if token is None: 175 | token = uuid.uuid1().hex.encode() 176 | else: 177 | encoder = self.redis.connection_pool.get_encoder() 178 | token = encoder.encode(token) 179 | if blocking is None: 180 | blocking = self.blocking 181 | if blocking_timeout is None: 182 | blocking_timeout = self.blocking_timeout 183 | stop_trying_at = None 184 | if blocking_timeout is not None: 185 | stop_trying_at = mod_time.time() + blocking_timeout 186 | while True: 187 | if self.do_acquire(token): 188 | self.local.token = token 189 | return True 190 | if not blocking: 191 | return False 192 | next_try_at = mod_time.time() + sleep 193 | if stop_trying_at is not None and next_try_at > stop_trying_at: 194 | return False 195 | mod_time.sleep(sleep) 196 | 197 | def do_acquire(self, token): 198 | if self.timeout: 199 | # convert to milliseconds 200 | timeout = int(self.timeout * 1000) 201 | else: 202 | timeout = None 203 | if self.redis.set(self.name, token, nx=True, px=timeout): 204 | return True 205 | return False 206 | 207 | def locked(self): 208 | """ 209 | Returns True if this key is locked by any process, otherwise False. 210 | """ 211 | return self.redis.get(self.name) is not None 212 | 213 | def owned(self): 214 | """ 215 | Returns True if this key is locked by this lock, otherwise False. 
216 | """ 217 | stored_token = self.redis.get(self.name) 218 | # need to always compare bytes to bytes 219 | # TODO: this can be simplified when the context manager is finished 220 | if stored_token and not isinstance(stored_token, bytes): 221 | encoder = self.redis.connection_pool.get_encoder() 222 | stored_token = encoder.encode(stored_token) 223 | return self.local.token is not None and \ 224 | stored_token == self.local.token 225 | 226 | def release(self): 227 | "Releases the already acquired lock" 228 | expected_token = self.local.token 229 | if expected_token is None: 230 | raise LockError("Cannot release an unlocked lock") 231 | self.local.token = None 232 | self.do_release(expected_token) 233 | 234 | def do_release(self, expected_token): 235 | if not bool(self.lua_release(keys=[self.name], 236 | args=[expected_token], 237 | client=self.redis)): 238 | raise LockNotOwnedError("Cannot release a lock" 239 | " that's no longer owned") 240 | 241 | def extend(self, additional_time, replace_ttl=False): 242 | """ 243 | Adds more time to an already acquired lock. 244 | 245 | ``additional_time`` can be specified as an integer or a float, both 246 | representing the number of seconds to add. 247 | 248 | ``replace_ttl`` if False (the default), add `additional_time` to 249 | the lock's existing ttl. If True, replace the lock's ttl with 250 | `additional_time`. 
251 | """ 252 | if self.local.token is None: 253 | raise LockError("Cannot extend an unlocked lock") 254 | if self.timeout is None: 255 | raise LockError("Cannot extend a lock with no timeout") 256 | return self.do_extend(additional_time, replace_ttl) 257 | 258 | def do_extend(self, additional_time, replace_ttl): 259 | additional_time = int(additional_time * 1000) 260 | if not bool( 261 | self.lua_extend( 262 | keys=[self.name], 263 | args=[ 264 | self.local.token, 265 | additional_time, 266 | replace_ttl and "1" or "0" 267 | ], 268 | client=self.redis, 269 | ) 270 | ): 271 | raise LockNotOwnedError( 272 | "Cannot extend a lock that's" " no longer owned" 273 | ) 274 | return True 275 | 276 | def reacquire(self): 277 | """ 278 | Resets a TTL of an already acquired lock back to a timeout value. 279 | """ 280 | if self.local.token is None: 281 | raise LockError("Cannot reacquire an unlocked lock") 282 | if self.timeout is None: 283 | raise LockError("Cannot reacquire a lock with no timeout") 284 | return self.do_reacquire() 285 | 286 | def do_reacquire(self): 287 | timeout = int(self.timeout * 1000) 288 | if not bool(self.lua_reacquire(keys=[self.name], 289 | args=[self.local.token, timeout], 290 | client=self.redis)): 291 | raise LockNotOwnedError("Cannot reacquire a lock that's" 292 | " no longer owned") 293 | return True 294 | -------------------------------------------------------------------------------- /redis/lock.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LittleJake/office-e5-refresh-SCF/16f359768d576de77bd4d17741350d43fef40ccb/redis/lock.pyc -------------------------------------------------------------------------------- /redis/sentinel.py: -------------------------------------------------------------------------------- 1 | import random 2 | import weakref 3 | 4 | from redis.client import Redis 5 | from redis.connection import ConnectionPool, Connection 6 | from redis.exceptions 
class SentinelManagedConnection(Connection):
    """A Connection whose target address is resolved through Sentinel.

    The owning SentinelConnectionPool decides whether this connection
    should point at the current master or at one of the slaves.
    """

    def __init__(self, **kwargs):
        # The pool is popped out of kwargs because the base Connection
        # constructor does not accept it.
        self.connection_pool = kwargs.pop('connection_pool')
        super(SentinelManagedConnection, self).__init__(**kwargs)

    def __repr__(self):
        pool = self.connection_pool
        # BUG FIX: the format string lost its '<service=%s%%s>' portion,
        # so '%s' was applied to a two-element tuple and raised TypeError.
        # '%%s' leaves a literal '%s' for the second formatting pass below.
        s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)
        if self.host:
            host_info = ',host=%s,port=%s' % (self.host, self.port)
            s = s % host_info
        return s

    def connect_to(self, address):
        """Connect to a concrete (host, port) pair, optionally verifying
        the link with a PING when the pool's check_connection flag is set."""
        self.host, self.port = address
        super(SentinelManagedConnection, self).connect()
        if self.connection_pool.check_connection:
            self.send_command('PING')
            if nativestr(self.read_response()) != 'PONG':
                raise ConnectionError('PING failed')

    def connect(self):
        """Resolve the current master (or a reachable slave) via the pool
        and connect to it."""
        if self._sock:
            return  # already connected
        if self.connection_pool.is_master:
            self.connect_to(self.connection_pool.get_master_address())
        else:
            for slave in self.connection_pool.rotate_slaves():
                try:
                    return self.connect_to(slave)
                except ConnectionError:
                    continue
            raise SlaveNotFoundError  # Never be here

    def read_response(self):
        try:
            return super(SentinelManagedConnection, self).read_response()
        except ReadOnlyError:
            if self.connection_pool.is_master:
                # When talking to a master, a ReadOnlyError likely
                # indicates that the previous master we're still connected
                # to has been demoted to a slave and there's a new master.
                # Disconnecting forces the connection to re-query sentinel
                # during the next connect() attempt.
                self.disconnect()
                raise ConnectionError('The previous master is now a slave')
            raise
class Sentinel(object):
    """
    Redis Sentinel cluster client.

    >>> from redis.sentinel import Sentinel
    >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
    >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
    >>> master.set('foo', 'bar')
    >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
    >>> slave.get('foo')
    b'bar'

    ``sentinels`` is a list of sentinel nodes. Each node is represented by
    a pair (hostname, port).

    ``min_other_sentinels`` defines a minimum number of peers for a sentinel.
    When querying a sentinel, if it doesn't meet this threshold, responses
    from that sentinel won't be considered valid.

    ``sentinel_kwargs`` is a dictionary of connection arguments used when
    connecting to sentinel instances. Any argument that can be passed to
    a normal Redis connection can be specified here. If ``sentinel_kwargs``
    is not specified, any socket_timeout and socket_keepalive options
    specified in ``connection_kwargs`` will be used.

    ``connection_kwargs`` are keyword arguments that will be used when
    establishing a connection to a Redis server.
    """

    def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
                 **connection_kwargs):
        # If sentinel_kwargs isn't defined, inherit the socket_* options
        # from connection_kwargs.
        if sentinel_kwargs is None:
            sentinel_kwargs = {
                k: v
                for k, v in iteritems(connection_kwargs)
                if k.startswith('socket_')
            }
        self.sentinel_kwargs = sentinel_kwargs

        self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs)
                          for hostname, port in sentinels]
        self.min_other_sentinels = min_other_sentinels
        self.connection_kwargs = connection_kwargs

    def __repr__(self):
        sentinel_addresses = []
        for sentinel in self.sentinels:
            sentinel_addresses.append('%s:%s' % (
                sentinel.connection_pool.connection_kwargs['host'],
                sentinel.connection_pool.connection_kwargs['port'],
            ))
        # BUG FIX: the '<sentinels=[%s]>' placeholders were lost, so a
        # bare '%s' was applied to a two-element tuple, raising TypeError.
        return '%s<sentinels=[%s]>' % (
            type(self).__name__,
            ','.join(sentinel_addresses))

    def check_master_state(self, state, service_name):
        """Return True when ``state`` describes a usable, agreed-on master."""
        if not state['is_master'] or state['is_sdown'] or state['is_odown']:
            return False
        # Check if our sentinel doesn't see other nodes.
        if state['num-other-sentinels'] < self.min_other_sentinels:
            return False
        return True

    def discover_master(self, service_name):
        """
        Asks sentinel servers for the Redis master's address corresponding
        to the service labeled ``service_name``.

        Returns a pair (address, port) or raises MasterNotFoundError if no
        master is found.
        """
        for sentinel_no, sentinel in enumerate(self.sentinels):
            try:
                masters = sentinel.sentinel_masters()
            except (ConnectionError, TimeoutError):
                continue
            state = masters.get(service_name)
            if state and self.check_master_state(state, service_name):
                # Put this responsive sentinel at the top of the list so it
                # is queried first next time.
                self.sentinels[0], self.sentinels[sentinel_no] = (
                    sentinel, self.sentinels[0])
                return state['ip'], state['port']
        raise MasterNotFoundError("No master found for %r" % (service_name,))

    def filter_slaves(self, slaves):
        """Remove slaves that are in an ODOWN or SDOWN state."""
        slaves_alive = []
        for slave in slaves:
            if slave['is_odown'] or slave['is_sdown']:
                continue
            slaves_alive.append((slave['ip'], slave['port']))
        return slaves_alive

    def discover_slaves(self, service_name):
        """Return a list of alive slaves for service ``service_name``."""
        for sentinel in self.sentinels:
            try:
                slaves = sentinel.sentinel_slaves(service_name)
            except (ConnectionError, ResponseError, TimeoutError):
                continue
            slaves = self.filter_slaves(slaves)
            if slaves:
                return slaves
        return []

    def master_for(self, service_name, redis_class=Redis,
                   connection_pool_class=SentinelConnectionPool, **kwargs):
        """
        Returns a redis client instance for the ``service_name`` master.

        A SentinelConnectionPool class is used to retrieve the master's
        address before establishing a new connection.

        NOTE: If the master's address has changed, any cached connections to
        the old master are closed.

        By default clients will be a redis.Redis instance. Specify a
        different class to the ``redis_class`` argument if you desire
        something different.

        The ``connection_pool_class`` specifies the connection pool to use.
        The SentinelConnectionPool will be used by default.

        All other keyword arguments are merged with any connection_kwargs
        passed to this class and passed to the connection pool as keyword
        arguments to be used to initialize Redis connections.
        """
        kwargs['is_master'] = True
        connection_kwargs = dict(self.connection_kwargs)
        connection_kwargs.update(kwargs)
        return redis_class(connection_pool=connection_pool_class(
            service_name, self, **connection_kwargs))

    def slave_for(self, service_name, redis_class=Redis,
                  connection_pool_class=SentinelConnectionPool, **kwargs):
        """
        Returns redis client instance for the ``service_name`` slave(s).

        A SentinelConnectionPool class is used to retrieve the slave's
        address before establishing a new connection.

        By default clients will be a redis.Redis instance. Specify a
        different class to the ``redis_class`` argument if you desire
        something different.

        The ``connection_pool_class`` specifies the connection pool to use.
        The SentinelConnectionPool will be used by default.

        All other keyword arguments are merged with any connection_kwargs
        passed to this class and passed to the connection pool as keyword
        arguments to be used to initialize Redis connections.
        """
        kwargs['is_master'] = False
        connection_kwargs = dict(self.connection_kwargs)
        connection_kwargs.update(kwargs)
        return redis_class(connection_pool=connection_pool_class(
            service_name, self, **connection_kwargs))
from contextlib import contextmanager


try:
    # hiredis supplies a faster C reply parser when it is installed.
    import hiredis  # noqa
    HIREDIS_AVAILABLE = True
except ImportError:
    HIREDIS_AVAILABLE = False


def from_url(url, db=None, **kwargs):
    """Return an active Redis client generated from the given database URL.

    The database id is extracted from the URL path fragment when ``db``
    is not given explicitly.
    """
    # Imported lazily to avoid a circular import with redis.client.
    from redis.client import Redis
    return Redis.from_url(url, db, **kwargs)


@contextmanager
def pipeline(redis_obj):
    """Yield a pipeline from ``redis_obj`` and execute it on exit."""
    pipe = redis_obj.pipeline()
    yield pipe
    pipe.execute()


class dummy(object):
    """Plain attribute container (used by Lock when thread-local token
    storage is disabled)."""
# -*- coding: UTF-8 -*-
# Register an Azure AD application first and make sure it has been granted
# the following Microsoft Graph permissions:
#   files: Files.Read.All, Files.ReadWrite.All, Sites.Read.All, Sites.ReadWrite.All
#   user:  User.Read.All, User.ReadWrite.All, Directory.Read.All, Directory.ReadWrite.All
#   mail:  Mail.Read, Mail.ReadWrite, MailboxSettings.Read, MailboxSettings.ReadWrite
# After registering, click "Grant admin consent for <tenant>" — otherwise
# the Outlook APIs cannot be called.
import requests as req
import json, sys, time, os, random
from urls import *

# Number of successful Graph API calls made so far (process-wide counter).
num = 0
# Local fallback storage for the refresh token.
path = os.getcwd() + r'/token.txt'
# Seconds to wait for any single HTTP request (previously unbounded).
REQUEST_TIMEOUT = 30


def update_token(refresh_token):
    """Exchange ``refresh_token`` for a new (access_token, refresh_token) pair.

    Raises RuntimeError when the token endpoint does not return both tokens
    (e.g. expired/revoked refresh token or bad client credentials) instead
    of the previous opaque KeyError.
    """
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    data = {'grant_type': 'refresh_token',
            'refresh_token': refresh_token,
            'client_id': os.environ.get('client_id'),
            'client_secret': os.environ.get('client_secret'),
            'redirect_uri': 'http://localhost:53682/'
            }
    html = req.post('https://login.microsoftonline.com/common/oauth2/v2.0/token',
                    data=data, headers=headers, timeout=REQUEST_TIMEOUT)
    jsontxt = json.loads(html.text)
    try:
        return jsontxt['access_token'], jsontxt['refresh_token']
    except KeyError:
        # Surface the OAuth error description rather than a bare KeyError.
        raise RuntimeError('Token refresh failed: %s'
                           % jsontxt.get('error_description', html.text))


def load_token():
    """Return a valid access token.

    Prefers the refresh token stored in Redis (keyed by client_id) and
    falls back to the local token.txt file when Redis is unavailable. The
    rotated refresh token is written back to whichever store was used.
    """
    with open(path, "r") as fo:
        # strip() guards against a trailing newline corrupting the POST body.
        refresh_token = fo.read().strip()

    try:
        REDIS_HOST = os.environ.get('redis_host', '')
        REDIS_PORT = int(os.environ.get('redis_port', 0))
        REDIS_PASSWORD = os.environ.get('redis_password', '')
        import redis
        with redis.Redis(host=REDIS_HOST, port=REDIS_PORT,
                         password=REDIS_PASSWORD) as red:
            if red.exists(os.environ.get('client_id')):
                refresh_token = red.get(os.environ.get('client_id'))
            else:
                red.set(os.environ.get('client_id'), refresh_token)

            access_token, refresh_token = update_token(refresh_token)
            red.set(os.environ.get('client_id'), refresh_token)
            return access_token
    except Exception as e:
        print(e)
        print("连接Redis出错,尝试使用本地refresh_token")
    access_token, refresh_token = update_token(refresh_token)
    try:
        with open(path, "w") as fo:
            fo.write(refresh_token)
        print("尝试保存refresh_token到文件")
    except Exception as e:
        print(e)
        print("保存refresh_token到文件错误")
    return access_token


def add_count(type=""):
    """Record one successful API call and sleep 0-10s to jitter the load."""
    global num
    num += 1
    print(type + "调用成功" + str(num) + '次')
    time.sleep(random.randint(0, 10))


def main():
    """Refresh the access token and hit every configured Graph endpoint once."""
    time.sleep(random.randint(0, 30))
    access_token = load_token()
    headers = {'Authorization': access_token,
               'Content-Type': 'application/json'}
    # Best effort per endpoint: the original bare ``except: pass`` around
    # the whole loop silently aborted all remaining endpoints on the first
    # failure and also swallowed KeyboardInterrupt/programming errors.
    for name, url in urls.items():
        try:
            if req.get(url, headers=headers,
                       timeout=REQUEST_TIMEOUT).status_code == 200:
                add_count(name)
        except req.RequestException as e:
            print(e)


def main_handler(event, context):
    """Serverless (SCF) entry point: run main() one to three times."""
    print("任务开始")
    for _ in range(random.randint(1, 3)):
        main()
    localtime = time.asctime(time.localtime(time.time()))
    print('此次运行结束时间为 :', localtime)
# -*- coding: UTF-8 -*-
# Microsoft Graph endpoints polled by run.py. Keys are human-readable
# labels; values are the GET URLs. Insertion order is the call order.
urls = {
    'me/drive/root': 'https://graph.microsoft.com/v1.0/me/drive/root',
    'me/drive': 'https://graph.microsoft.com/v1.0/me/drive',
    'me/drives': 'https://graph.microsoft.com/v1.0/me/drives',
    'drive/root': 'https://graph.microsoft.com/v1.0/drive/root',
    'users': 'https://graph.microsoft.com/v1.0/users',
    'me/messages': 'https://graph.microsoft.com/v1.0/me/messages',
    'me/mailFolders/inbox/messageRules':
        'https://graph.microsoft.com/v1.0/me/mailFolders/inbox/messageRules',
    'me/mailFolders/Inbox/messages/delta':
        'https://graph.microsoft.com/v1.0/me/mailFolders/Inbox/messages/delta',
    'me/drive/root/children':
        'https://graph.microsoft.com/v1.0/me/drive/root/children',
    'me/mailFolders': 'https://graph.microsoft.com/v1.0/me/mailFolders',
    'sites/root': 'https://graph.microsoft.com/v1.0/sites/root',
    'me/outlook/masterCategories':
        'https://graph.microsoft.com/v1.0/me/outlook/masterCategories',
}