├── .gitignore ├── .travis.yml ├── CONTRIBUTING.rst ├── LICENSE ├── README.md ├── acd_cli.py ├── acdcli ├── __init__.py ├── acd_fuse.py ├── api │ ├── __init__.py │ ├── account.py │ ├── backoff_req.py │ ├── client.py │ ├── common.py │ ├── content.py │ ├── metadata.py │ ├── oauth.py │ └── trash.py ├── cache │ ├── __init__.py │ ├── cursors.py │ ├── db.py │ ├── format.py │ ├── query.py │ ├── schema.py │ └── sync.py ├── plugins │ ├── __init__.py │ └── template.py └── utils │ ├── __init__.py │ ├── conf.py │ ├── hashing.py │ ├── progress.py │ ├── threading.py │ └── time.py ├── assets ├── Makefile ├── amazon-cloud-drive.service └── win_codepage.reg ├── docs ├── FAQ.rst ├── FUSE.rst ├── Makefile ├── TODO.rst ├── authorization.rst ├── conf.py ├── configuration.rst ├── contributors.rst ├── dev.rst ├── dev │ └── db_schema_v1.svg ├── find.rst ├── history.rst ├── hoist.py ├── index.rst ├── make.bat ├── setup.rst ├── sync.rst ├── transfer.rst └── usage.rst ├── setup.py └── tests ├── __init__.py ├── cache_files └── README ├── dummy_files ├── endpoint_data └── oauth.json ├── test_actions.py ├── test_api.py ├── test_api_live.py ├── test_cache.py └── test_helper.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.py[cod] 3 | 4 | *.egg-info/ 5 | dist/ 6 | .idea/ 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.2" 4 | - "3.3" 5 | - "3.4" 6 | - "3.5" 7 | - "3.6" 8 | 9 | addons: 10 | apt: 11 | packages: 12 | - fuse 13 | 14 | install: "pip install ." 15 | # tests 16 | script: python setup.py test -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | Contributing guidelines 2 | ======================= 3 | 4 | Using the Issue Tracker 5 | ----------------------- 6 | 7 | The issue tracker is not a forum! This does not mean there is no need for good etiquette, but 8 | that you should not post unnecessary information. Each reply will cause a notification to be 9 | sent to all of the issue's participants and some of them might consider it spam. 10 | 11 | For minor corrections or additions, try to update your posts rather than writing a new reply. 12 | Use strike-through markdown for corrections and put updates at the bottom of your original post. 13 | 14 | Please use the reaction button to "vote" on issues rather than commenting "+1" or similar. 15 | 16 | Adding Issues 17 | +++++++++++++ 18 | 19 | If you have a question, please read the documentation and search the issue tracker. 20 | If you still have a question, please consider using the `Gitter chat 21 | `_ or sending an e-mail to 22 | `acd_cli@mail.com `_ instead of opening an issue. 23 | 24 | If you absolutely must open an issue, check that you are using the latest master commit and 25 | there is no existing issue that fits your problem (including closed and unresolved issues). 26 | Try to reproduce the issue on another machine or ideally on another operating system, if possible. 27 | 28 | Please provide as much possibly relevant information as you can. This should at least contain: 29 | 30 | - your operating system and Python version, e.g. as determined by 31 | :code:`python3 -c 'import platform as p; print("%s\n%s" % (p.python_version(), p.platform()))'` 32 | - the command/s you used 33 | - what happened 34 | - what you think should have happened instead (and maybe give a reason) 35 | 36 | You might find the ``--verbose`` and, to a lesser extent, ``--debug`` flags helpful. 37 | 38 | **Caution:** Be sure not to include authorization tokens from the log output in your comments. 39 | 40 | Use `code block markup `_ for console 41 | output, log messages, etc. 42 | 43 | Code 44 | ---- 45 | 46 | There are no real programming guidelines as of yet. Please use function annotations for typing 47 | like specified in PEP 3107 and, to stay 3.2-compliant, stringified `PEP 484 type hints 48 | `_ where appropriate. 49 | The limit on line length is 100 characters. 50 | 51 | It is a generally a good idea to explicitly announce that you are working on a feature or 52 | an issue. 53 | 54 | Please squash your commits and add yourself to the `contributors list `_ 55 | before making a pull request. 56 | 57 | Have a look at `Github's general guide how to contribute 58 | `_. 59 | It is not necessary to create a feature branch, i.e. you may commit to the master branch. 60 | 61 | If you do not know how to contribute, look for issues tagged with "help wanted" and read the 62 | `TODO list `_ of some of the open tasks. 63 | 64 | Donations 65 | --------- 66 | 67 | You might also want to consider `making a donation 68 | `_ 69 | to further the development of acd\_cli. 70 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | acd_cli 2 | Copyright (C) 2015 yadayada et al. 3 | 4 | This program is free software; you can redistribute it and/or modify 5 | it under the terms of the GNU General Public License as published by 6 | the Free Software Foundation; either version 2 of the License, or 7 | (at your option) any later version. 8 | 9 | This program is distributed in the hope that it will be useful, 10 | but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | GNU General Public License for more details. 13 | 14 | You should have received a copy of the GNU General Public License along 15 | with this program; if not, write to the Free Software Foundation, Inc., 16 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 17 | 18 | 19 | GNU GENERAL PUBLIC LICENSE 20 | Version 2, June 1991 21 | 22 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 23 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 24 | Everyone is permitted to copy and distribute verbatim copies 25 | of this license document, but changing it is not allowed. 26 | 27 | Preamble 28 | 29 | The licenses for most software are designed to take away your 30 | freedom to share and change it. By contrast, the GNU General Public 31 | License is intended to guarantee your freedom to share and change free 32 | software--to make sure the software is free for all its users. This 33 | General Public License applies to most of the Free Software 34 | Foundation's software and to any other program whose authors commit to 35 | using it. (Some other Free Software Foundation software is covered by 36 | the GNU Lesser General Public License instead.) You can apply it to 37 | your programs, too. 38 | 39 | When we speak of free software, we are referring to freedom, not 40 | price. Our General Public Licenses are designed to make sure that you 41 | have the freedom to distribute copies of free software (and charge for 42 | this service if you wish), that you receive source code or can get it 43 | if you want it, that you can change the software or use pieces of it 44 | in new free programs; and that you know you can do these things. 45 | 46 | To protect your rights, we need to make restrictions that forbid 47 | anyone to deny you these rights or to ask you to surrender the rights. 48 | These restrictions translate to certain responsibilities for you if you 49 | distribute copies of the software, or if you modify it. 50 | 51 | For example, if you distribute copies of such a program, whether 52 | gratis or for a fee, you must give the recipients all the rights that 53 | you have. You must make sure that they, too, receive or can get the 54 | source code. And you must show them these terms so they know their 55 | rights. 56 | 57 | We protect your rights with two steps: (1) copyright the software, and 58 | (2) offer you this license which gives you legal permission to copy, 59 | distribute and/or modify the software. 60 | 61 | Also, for each author's protection and ours, we want to make certain 62 | that everyone understands that there is no warranty for this free 63 | software. If the software is modified by someone else and passed on, we 64 | want its recipients to know that what they have is not the original, so 65 | that any problems introduced by others will not reflect on the original 66 | authors' reputations. 67 | 68 | Finally, any free program is threatened constantly by software 69 | patents. We wish to avoid the danger that redistributors of a free 70 | program will individually obtain patent licenses, in effect making the 71 | program proprietary. To prevent this, we have made it clear that any 72 | patent must be licensed for everyone's free use or not licensed at all. 73 | 74 | The precise terms and conditions for copying, distribution and 75 | modification follow. 76 | 77 | GNU GENERAL PUBLIC LICENSE 78 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 79 | 80 | 0. This License applies to any program or other work which contains 81 | a notice placed by the copyright holder saying it may be distributed 82 | under the terms of this General Public License. The "Program", below, 83 | refers to any such program or work, and a "work based on the Program" 84 | means either the Program or any derivative work under copyright law: 85 | that is to say, a work containing the Program or a portion of it, 86 | either verbatim or with modifications and/or translated into another 87 | language. (Hereinafter, translation is included without limitation in 88 | the term "modification".) Each licensee is addressed as "you". 89 | 90 | Activities other than copying, distribution and modification are not 91 | covered by this License; they are outside its scope. The act of 92 | running the Program is not restricted, and the output from the Program 93 | is covered only if its contents constitute a work based on the 94 | Program (independent of having been made by running the Program). 95 | Whether that is true depends on what the Program does. 96 | 97 | 1. You may copy and distribute verbatim copies of the Program's 98 | source code as you receive it, in any medium, provided that you 99 | conspicuously and appropriately publish on each copy an appropriate 100 | copyright notice and disclaimer of warranty; keep intact all the 101 | notices that refer to this License and to the absence of any warranty; 102 | and give any other recipients of the Program a copy of this License 103 | along with the Program. 104 | 105 | You may charge a fee for the physical act of transferring a copy, and 106 | you may at your option offer warranty protection in exchange for a fee. 107 | 108 | 2. You may modify your copy or copies of the Program or any portion 109 | of it, thus forming a work based on the Program, and copy and 110 | distribute such modifications or work under the terms of Section 1 111 | above, provided that you also meet all of these conditions: 112 | 113 | a) You must cause the modified files to carry prominent notices 114 | stating that you changed the files and the date of any change. 115 | 116 | b) You must cause any work that you distribute or publish, that in 117 | whole or in part contains or is derived from the Program or any 118 | part thereof, to be licensed as a whole at no charge to all third 119 | parties under the terms of this License. 120 | 121 | c) If the modified program normally reads commands interactively 122 | when run, you must cause it, when started running for such 123 | interactive use in the most ordinary way, to print or display an 124 | announcement including an appropriate copyright notice and a 125 | notice that there is no warranty (or else, saying that you provide 126 | a warranty) and that users may redistribute the program under 127 | these conditions, and telling the user how to view a copy of this 128 | License. (Exception: if the Program itself is interactive but 129 | does not normally print such an announcement, your work based on 130 | the Program is not required to print an announcement.) 131 | 132 | These requirements apply to the modified work as a whole. If 133 | identifiable sections of that work are not derived from the Program, 134 | and can be reasonably considered independent and separate works in 135 | themselves, then this License, and its terms, do not apply to those 136 | sections when you distribute them as separate works. But when you 137 | distribute the same sections as part of a whole which is a work based 138 | on the Program, the distribution of the whole must be on the terms of 139 | this License, whose permissions for other licensees extend to the 140 | entire whole, and thus to each and every part regardless of who wrote it. 141 | 142 | Thus, it is not the intent of this section to claim rights or contest 143 | your rights to work written entirely by you; rather, the intent is to 144 | exercise the right to control the distribution of derivative or 145 | collective works based on the Program. 146 | 147 | In addition, mere aggregation of another work not based on the Program 148 | with the Program (or with a work based on the Program) on a volume of 149 | a storage or distribution medium does not bring the other work under 150 | the scope of this License. 151 | 152 | 3. You may copy and distribute the Program (or a work based on it, 153 | under Section 2) in object code or executable form under the terms of 154 | Sections 1 and 2 above provided that you also do one of the following: 155 | 156 | a) Accompany it with the complete corresponding machine-readable 157 | source code, which must be distributed under the terms of Sections 158 | 1 and 2 above on a medium customarily used for software interchange; or, 159 | 160 | b) Accompany it with a written offer, valid for at least three 161 | years, to give any third party, for a charge no more than your 162 | cost of physically performing source distribution, a complete 163 | machine-readable copy of the corresponding source code, to be 164 | distributed under the terms of Sections 1 and 2 above on a medium 165 | customarily used for software interchange; or, 166 | 167 | c) Accompany it with the information you received as to the offer 168 | to distribute corresponding source code. (This alternative is 169 | allowed only for noncommercial distribution and only if you 170 | received the program in object code or executable form with such 171 | an offer, in accord with Subsection b above.) 172 | 173 | The source code for a work means the preferred form of the work for 174 | making modifications to it. For an executable work, complete source 175 | code means all the source code for all modules it contains, plus any 176 | associated interface definition files, plus the scripts used to 177 | control compilation and installation of the executable. However, as a 178 | special exception, the source code distributed need not include 179 | anything that is normally distributed (in either source or binary 180 | form) with the major components (compiler, kernel, and so on) of the 181 | operating system on which the executable runs, unless that component 182 | itself accompanies the executable. 183 | 184 | If distribution of executable or object code is made by offering 185 | access to copy from a designated place, then offering equivalent 186 | access to copy the source code from the same place counts as 187 | distribution of the source code, even though third parties are not 188 | compelled to copy the source along with the object code. 189 | 190 | 4. You may not copy, modify, sublicense, or distribute the Program 191 | except as expressly provided under this License. Any attempt 192 | otherwise to copy, modify, sublicense or distribute the Program is 193 | void, and will automatically terminate your rights under this License. 194 | However, parties who have received copies, or rights, from you under 195 | this License will not have their licenses terminated so long as such 196 | parties remain in full compliance. 197 | 198 | 5. You are not required to accept this License, since you have not 199 | signed it. However, nothing else grants you permission to modify or 200 | distribute the Program or its derivative works. These actions are 201 | prohibited by law if you do not accept this License. Therefore, by 202 | modifying or distributing the Program (or any work based on the 203 | Program), you indicate your acceptance of this License to do so, and 204 | all its terms and conditions for copying, distributing or modifying 205 | the Program or works based on it. 206 | 207 | 6. Each time you redistribute the Program (or any work based on the 208 | Program), the recipient automatically receives a license from the 209 | original licensor to copy, distribute or modify the Program subject to 210 | these terms and conditions. You may not impose any further 211 | restrictions on the recipients' exercise of the rights granted herein. 212 | You are not responsible for enforcing compliance by third parties to 213 | this License. 214 | 215 | 7. If, as a consequence of a court judgment or allegation of patent 216 | infringement or for any other reason (not limited to patent issues), 217 | conditions are imposed on you (whether by court order, agreement or 218 | otherwise) that contradict the conditions of this License, they do not 219 | excuse you from the conditions of this License. If you cannot 220 | distribute so as to satisfy simultaneously your obligations under this 221 | License and any other pertinent obligations, then as a consequence you 222 | may not distribute the Program at all. For example, if a patent 223 | license would not permit royalty-free redistribution of the Program by 224 | all those who receive copies directly or indirectly through you, then 225 | the only way you could satisfy both it and this License would be to 226 | refrain entirely from distribution of the Program. 227 | 228 | If any portion of this section is held invalid or unenforceable under 229 | any particular circumstance, the balance of the section is intended to 230 | apply and the section as a whole is intended to apply in other 231 | circumstances. 232 | 233 | It is not the purpose of this section to induce you to infringe any 234 | patents or other property right claims or to contest validity of any 235 | such claims; this section has the sole purpose of protecting the 236 | integrity of the free software distribution system, which is 237 | implemented by public license practices. Many people have made 238 | generous contributions to the wide range of software distributed 239 | through that system in reliance on consistent application of that 240 | system; it is up to the author/donor to decide if he or she is willing 241 | to distribute software through any other system and a licensee cannot 242 | impose that choice. 243 | 244 | This section is intended to make thoroughly clear what is believed to 245 | be a consequence of the rest of this License. 246 | 247 | 8. If the distribution and/or use of the Program is restricted in 248 | certain countries either by patents or by copyrighted interfaces, the 249 | original copyright holder who places the Program under this License 250 | may add an explicit geographical distribution limitation excluding 251 | those countries, so that distribution is permitted only in or among 252 | countries not thus excluded. In such case, this License incorporates 253 | the limitation as if written in the body of this License. 254 | 255 | 9. The Free Software Foundation may publish revised and/or new versions 256 | of the General Public License from time to time. Such new versions will 257 | be similar in spirit to the present version, but may differ in detail to 258 | address new problems or concerns. 259 | 260 | Each version is given a distinguishing version number. If the Program 261 | specifies a version number of this License which applies to it and "any 262 | later version", you have the option of following the terms and conditions 263 | either of that version or of any later version published by the Free 264 | Software Foundation. If the Program does not specify a version number of 265 | this License, you may choose any version ever published by the Free Software 266 | Foundation. 267 | 268 | 10. If you wish to incorporate parts of the Program into other free 269 | programs whose distribution conditions are different, write to the author 270 | to ask for permission. For software which is copyrighted by the Free 271 | Software Foundation, write to the Free Software Foundation; we sometimes 272 | make exceptions for this. Our decision will be guided by the two goals 273 | of preserving the free status of all derivatives of our free software and 274 | of promoting the sharing and reuse of software generally. 275 | 276 | NO WARRANTY 277 | 278 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 279 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 280 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 281 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 282 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 283 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 284 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 285 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 286 | REPAIR OR CORRECTION. 287 | 288 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 289 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 290 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 291 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 292 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 293 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 294 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 295 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 296 | POSSIBILITY OF SUCH DAMAGES. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # What is this? 2 | This is a fork of [acd\_cli](https://github.com/yadayada/acd_cli) that works around Amazon's bullshit by using the OAuth endpoint for the ACD desktop apps. 3 | 4 | # Will Amazon ban me if I use this? 5 | Maybe. This fork makes some cursory attempts to appear as though it's the real app, but it's quite possible they can detect that it's not. I take no responsibility if they ban you because you used this. In fact I would recommend your primary use for it be getting your data off ACD and on to a less user-hostile service. 6 | 7 | # Will Amazon break this? 8 | Inevitably. I have no intention of perpetuating a game of whac-a-mole. When it breaks, it breaks. 9 | 10 | # How do I use this? 11 | You will need access to a Windows machine to obtain the credentials needed to use this fork (Mac should also work, although I haven't tested it). 12 | 13 | 1. Remove your existing `~/.cache/acd_cli/` folder. 14 | 2. Remove your existing acd\_cli installation with `pip uninstall acdcli`. 15 | 3. Obtain your refresh token from the Drive for Windows app. You can do this in many ways including extracting the credentials from the memory of the app while it's running or sniffing them when they are sent to Amazon. I will describe the memory dump method below. 16 | 1. Make sure you're logged in to Drive. 17 | 2. Right click the drive process in Task Manager and choose "Create dump file". 18 | 3. Open the dump in your favorite hex editor of choice. 19 | 4. Find the string starting "Atnr|". This is your refresh token. 20 | 4. Create a file at `~/.cache/acd_cli/oauth.json` with this format: 21 | ```json 22 | { 23 | "access_token": "Put whatever here - it won't be used.", 24 | "exp_time": 0.0, 25 | "expires_in": 3600, 26 | "refresh_token": "Your refresh token here.", 27 | "token_type": "bearer" 28 | } 29 | ``` 30 | 31 | 5. Install this fork with `pip install "git+https://github.com/chrisgavin/cheeky_acd_cli.git"`. 32 | 6. Run `acdcli sync` twice (for me it fails the first time due to a missing root node but I think this is an existing bug). 33 | 7. Done. acd\_cli should be working again. Praise Bezos. 34 | 35 | # Why does this exist? 36 | When I signed up for ACD both acd\_cli and rclone were fully functional. I signed up entirely on the basis of the existance of these apps; I'm a Linux user, even if I wanted to use their official apps I can't and their web interface is absolute shit. 37 | 38 | When Amazon banned acd\_cli and rclone they not only made their own service worthless to me, they practially removed my access to data already stored on the service. This is totally unacceptable. 39 | 40 | Amazon only need to do one thing to make me happy: allow non-whitelisted apps to access the Drive data for the user that created the security profile. If you don't want other people using using your service in their projects that's fine but I deserve unrestricted access to my own damn data! 41 | -------------------------------------------------------------------------------- /acdcli/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.3.2' 2 | -------------------------------------------------------------------------------- /acdcli/api/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ******* 3 | ACD API 4 | ******* 5 | 6 | Usage 7 | ===== 8 | :: 9 | 10 | from api import client 11 | acd_client = client.ACDClient() 12 | root = acd_client.get_root_id() 13 | children = acd_client.list_children(root) 14 | for child in children: 15 | print(child['name']) 16 | # ... 17 | 18 | Node JSON Format 19 | ================ 20 | 21 | This is the usual node JSON format for a file:: 22 | 23 | { 24 | 'contentProperties': {'contentType': 'text/plain', 25 | 'extension': 'txt', 26 | 'md5': 'd41d8cd98f00b204e9800998ecf8427e', 27 | 'size': 0, 28 | 'version': 1}, 29 | 'createdBy': '-', 30 | 'createdDate': '2015-01-01T00:00:00.00Z', 31 | 'description': '', 32 | 'eTagResponse': 'AbCdEfGhI01', 33 | 'id': 'AbCdEfGhIjKlMnOpQr0123', 34 | 'isShared': False, 35 | 'kind': 'FILE', 36 | 'labels': [], 37 | 'modifiedDate': '2015-01-01T00:00:00.000Z', 38 | 'name': 'empty.txt', 39 | 'parents': ['0123AbCdEfGhIjKlMnOpQr'], 40 | 'restricted': False, 41 | 'status': 'AVAILABLE', 42 | 'version': 1 43 | } 44 | 45 | The ``modifiedDate`` and ``version`` keys get updated each time the content or metadata is updated. 46 | ``contentProperties['version']`` gets updated on overwrite. 47 | 48 | A folder's JSON looks similar, but it lacks the ``contentProperties`` dictionary. 49 | 50 | ``isShared`` is set to ``False`` even when a node is actually shared. 51 | 52 | .. CAUTION:: 53 | ACD allows hard links for folders! 54 | 55 | """ 56 | 57 | __version__ = '0.9.3' 58 | 59 | # monkey patch the user agent 60 | try: 61 | import requests.utils 62 | 63 | if 'old_dau' not in dir(requests.utils): 64 | requests.utils.old_dau = requests.utils.default_user_agent 65 | 66 | def new_dau(): 67 | return "CloudDriveWin/4.0.13.d2a5aec4" 68 | 69 | requests.utils.default_user_agent = new_dau 70 | except: 71 | pass 72 | -------------------------------------------------------------------------------- /acdcli/api/account.py: -------------------------------------------------------------------------------- 1 | """ACD account information""" 2 | 3 | import logging 4 | import collections 5 | from .common import * 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class _Usage(object): 11 | dict_ = {} 12 | 13 | def __init__(self, dict_): 14 | self.dict_ = dict_ 15 | 16 | @staticmethod 17 | def format_line(type_, count, size): 18 | return '{0:10} {1:7}, {2:>6} {3:3}\n'.format(type_ + ':', count, *size) 19 | 20 | def __str__(self): 21 | str_ = '' 22 | try: 23 | sum_count = 0 24 | sum_bytes = 0 25 | for key in self.dict_.keys(): 26 | if not isinstance(self.dict_[key], dict): 27 | continue 28 | sum_count += self.dict_[key]['total']['count'] 29 | sum_bytes += self.dict_[key]['total']['bytes'] 30 | types = collections.OrderedDict([('Documents', 'doc'), 31 | ('Other', 'other'), 32 | ('Photos', 'photo'), 33 | ('Videos', 'video')]) 34 | total_count = 0 35 | total_bytes = 0 36 | for desc in types: 37 | t = types[desc] 38 | type_usage = self.dict_[t]['total'] 39 | type_count = type_usage['count'] 40 | type_bytes = type_usage['bytes'] 41 | total_count += type_count 42 | total_bytes += type_bytes 43 | str_ += _Usage.format_line(desc, type_count, _Usage.file_size_pair(type_bytes)) 44 | str_ += _Usage.format_line('Total', total_count, _Usage.file_size_pair(total_bytes)) 45 | except KeyError: 46 | logger.warning('Invalid usage JSON string.') 47 | return str_ 48 | 49 | @staticmethod 50 | def file_size_pair(num: int, suffix='B') -> str: 51 | for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: 52 | if abs(num) < 1024.0: 53 | return '%3.1f' % num, '%s%s' % (unit, suffix) 54 | num /= 1024.0 55 | return '%.1f' % num, '%s%s' % ('Yi', suffix) 56 | 57 | 58 | class AccountMixin(object): 59 | def get_account_info(self) -> dict: 60 | """Gets account status [ACTIVE, ...?] and terms of use version.""" 61 | r = self.BOReq.get(self.metadata_url + 'account/info') 62 | return r.json() 63 | 64 | def get_account_usage(self) -> str: 65 | r = self.BOReq.get(self.metadata_url + 'account/usage') 66 | if r.status_code not in OK_CODES: 67 | raise RequestError(r.status_code, r.text) 68 | return _Usage(r.json()) 69 | 70 | def get_quota(self) -> dict: 71 | r = self.BOReq.get(self.metadata_url + 'account/quota') 72 | if r.status_code not in OK_CODES: 73 | raise RequestError(r.status_code, r.text) 74 | return r.json() 75 | 76 | def fs_sizes(self) -> tuple: 77 | """:returns tuple: total and free space""" 78 | q = self.get_quota() 79 | return q.get('quota', 0), q.get('available', 0) 80 | -------------------------------------------------------------------------------- /acdcli/api/backoff_req.py: -------------------------------------------------------------------------------- 1 | import time 2 | from time import sleep 3 | import random 4 | import logging 5 | from threading import Lock, local 6 | 7 | from requests.exceptions import RequestException 8 | 9 | from .common import * 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class BackOffRequest(object): 15 | """Wrapper for requests that implements timed back-off algorithm 16 | https://developer.amazon.com/public/apis/experience/cloud-drive/content/best-practices 17 | Caution: this catches all connection errors and may stall for a long time. 18 | It is necessary to init this module before use.""" 19 | 20 | def __init__(self, auth_callback: 'requests.auth.AuthBase', timeout: 'Tuple[int, int]', proxies: dict={}): 21 | """:arg auth_callback: callable object that attaches auth info to a request 22 | :arg timeout: tuple of connection timeout and idle timeout \ 23 | (http://docs.python-requests.org/en/latest/user/advanced/#timeouts) 24 | :arg proxies: dict of protocol to proxy, \ 25 | see http://docs.python-requests.org/en/master/user/advanced/#proxies 26 | """ 27 | 28 | self.auth_callback = auth_callback 29 | self.timeout = timeout if requests.__version__ >= '2.4.0' else timeout[1] 30 | self.proxies = proxies 31 | 32 | self.__session = requests.session() 33 | self.__thr_local = local() 34 | self.__lock = Lock() 35 | self.__retries = 0 36 | self.__next_req = time.time() 37 | 38 | random.seed() 39 | 40 | def _succeeded(self): 41 | with self.__lock: 42 | self.__retries = 0 43 | self.__calc_next() 44 | 45 | def _failed(self): 46 | with self.__lock: 47 | self.__retries += 1 48 | self.__calc_next() 49 | 50 | def __calc_next(self): 51 | """Calculates minimal acceptable time for next request. 52 | Back-off time is in a range of seconds, depending on number of failed previous tries (r): 53 | [0,2^r], maximum interval [0,256]""" 54 | with self.__lock: 55 | duration = random.random() * 2 ** min(self.__retries, 8) 56 | self.__next_req = time.time() + duration 57 | 58 | def _wait(self): 59 | with self.__lock: 60 | duration = self.__next_req - time.time() 61 | if duration > 5: 62 | logger.warning('Waiting %fs because of error(s).' % duration) 63 | logger.debug('Retry %i, waiting %fs' % (self.__retries, duration)) 64 | if duration > 0: 65 | sleep(duration) 66 | 67 | @catch_conn_exception 68 | def _request(self, type_: str, url: str, acc_codes: 'List[int]', **kwargs) -> requests.Response: 69 | """Performs a HTTP request 70 | 71 | :param type_: the type of HTTP request to perform 72 | :param acc_codes: list of HTTP status codes that indicate a successful request 73 | :param kwargs: may include additional header: dict and timeout: int""" 74 | 75 | self._wait() 76 | 77 | headers = {} 78 | if 'headers' in kwargs: 79 | headers = dict(**(kwargs['headers'])) 80 | del kwargs['headers'] 81 | 82 | last_url = getattr(self.__thr_local, 'last_req_url', None) 83 | if url == last_url: 84 | logger.debug('%s "%s"' % (type_, url)) 85 | else: 86 | logger.info('%s "%s"' % (type_, url)) 87 | if 'data' in kwargs.keys(): 88 | logger.debug(kwargs['data']) 89 | 90 | self.__thr_local.last_req_url = url 91 | 92 | if 'timeout' in kwargs: 93 | timeout = kwargs['timeout'] 94 | del kwargs['timeout'] 95 | else: 96 | timeout = self.timeout 97 | 98 | r = None 99 | exc = False 100 | try: 101 | try: 102 | r = self.__session.request(type_, url, auth=self.auth_callback, 103 | proxies=self.proxies, headers=headers, timeout=timeout, 104 | **kwargs) 105 | except RequestException as e: 106 | r = e.request 107 | raise 108 | except: 109 | exc = True 110 | self._failed() 111 | raise 112 | finally: 113 | if r and 'x-amzn-RequestId' in r.headers: 114 | if (exc or r.status_code not in acc_codes): 115 | logger.info('Failed x-amzn-RequestId: %s' % r.headers['x-amzn-RequestId']) 116 | else: 117 | logger.debug('x-amzn-RequestId: %s' % r.headers['x-amzn-RequestId']) 118 | 119 | self._succeeded() if r.status_code in acc_codes else self._failed() 120 | return r 121 | 122 | # HTTP verbs 123 | 124 | def get(self, url, acc_codes=OK_CODES, **kwargs) -> requests.Response: 125 | return self._request('GET', url, acc_codes, **kwargs) 126 | 127 | def post(self, url, acc_codes=OK_CODES, **kwargs) -> requests.Response: 128 | return self._request('POST', url, acc_codes, **kwargs) 129 | 130 | def patch(self, url, acc_codes=OK_CODES, **kwargs) -> requests.Response: 131 | return self._request('PATCH', url, acc_codes, **kwargs) 132 | 133 | def put(self, url, acc_codes=OK_CODES, **kwargs) -> requests.Response: 134 | return self._request('PUT', url, acc_codes, **kwargs) 135 | 136 | def delete(self, url, acc_codes=OK_CODES, **kwargs) -> requests.Response: 137 | return self._request('DELETE', url, acc_codes, **kwargs) 138 | 139 | def paginated_get(self, url: str, params: dict = None) -> 'List[dict]': 140 | """Gets node list in segments of 200.""" 141 | if params is None: 142 | params = {} 143 | node_list = [] 144 | 145 | while True: 146 | r = self.get(url, params=params) 147 | if r.status_code not in OK_CODES: 148 | logger.error("Error getting node list.") 149 | raise RequestError(r.status_code, r.text) 150 | ret = r.json() 151 | node_list.extend(ret['data']) 152 | if 'nextToken' in ret.keys(): 153 | params['startToken'] = ret['nextToken'] 154 | else: 155 | if ret['count'] != len(node_list): 156 | logger.warning( 157 | 'Expected %i items in page, received %i.' % (ret['count'], len(node_list))) 158 | break 159 | 160 | return node_list 161 | -------------------------------------------------------------------------------- /acdcli/api/client.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import logging 3 | import os 4 | import json 5 | import requests 6 | import time 7 | 8 | from acdcli.utils.conf import get_conf 9 | 10 | from . import oauth 11 | from .backoff_req import BackOffRequest 12 | from .common import * 13 | from .account import AccountMixin 14 | from .content import ContentMixin 15 | from .metadata import MetadataMixin 16 | from .trash import TrashMixin 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | _EXP_TIME_KEY = 'exp_time' 21 | _AMZ_ENDPOINT_REQ_URL = 'https://drive.amazonaws.com/drive/v1/account/endpoint' 22 | 23 | _SETTINGS_FILENAME = 'acd_client.ini' 24 | 25 | _def_conf = configparser.ConfigParser() 26 | _def_conf['endpoints'] = dict(filename='endpoint_data', validity_duration=259200) 27 | _def_conf['transfer'] = dict(fs_chunk_size=128 * 1024, dl_chunk_size=500 * 1024 ** 2, 28 | chunk_retries=1, connection_timeout=30, idle_timeout=60) 29 | _def_conf['proxies'] = dict() 30 | 31 | 32 | class ACDClient(AccountMixin, ContentMixin, MetadataMixin, TrashMixin): 33 | """Provides a client to the Amazon Cloud Drive RESTful interface.""" 34 | 35 | def __init__(self, cache_path='', settings_path=''): 36 | """Initializes OAuth and endpoints.""" 37 | 38 | self._conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf) 39 | 40 | self.cache_path = cache_path 41 | logger.info('Initializing ACD with path "%s".' % cache_path) 42 | 43 | self.handler = oauth.create_handler(cache_path) 44 | 45 | self._endpoint_data = {} 46 | self._load_endpoints() 47 | 48 | requests_timeout = (self._conf.getint('transfer', 'connection_timeout'), 49 | self._conf.getint('transfer', 'idle_timeout')) 50 | proxies = dict(self._conf['proxies']) 51 | 52 | self.BOReq = BackOffRequest(self.handler, requests_timeout, proxies) 53 | 54 | @property 55 | def _endpoint_data_path(self): 56 | return os.path.join(self.cache_path, self._conf['endpoints']['filename']) 57 | 58 | def _load_endpoints(self): 59 | """Tries to load endpoints from file and calls 60 | :meth:`_get_endpoints` on failure or if they are outdated.""" 61 | 62 | if not os.path.isfile(self._endpoint_data_path): 63 | self._endpoint_data = self._get_endpoints() 64 | else: 65 | with open(self._endpoint_data_path) as ep: 66 | self._endpoint_data = json.load(ep) 67 | if time.time() > self._endpoint_data[_EXP_TIME_KEY]: 68 | logger.info('Endpoint data expired.') 69 | self._endpoint_data = self._get_endpoints() 70 | 71 | def _get_endpoints(self) -> dict: 72 | """Retrieves Amazon endpoints and saves them on success. 73 | 74 | :raises: ValueError if requests returned invalid JSON 75 | :raises: KeyError if endpoint data does not include expected keys""" 76 | 77 | r = requests.get(_AMZ_ENDPOINT_REQ_URL, auth=self.handler) 78 | if r.status_code not in OK_CODES: 79 | logger.critical('Error getting endpoint data. Response: %s' % r.text) 80 | raise Exception 81 | 82 | try: 83 | e = r.json() 84 | except ValueError as e: 85 | logger.critical('Invalid JSON: "%s"' % r.text) 86 | raise e 87 | 88 | e[_EXP_TIME_KEY] = time.time() + self._conf.getint('endpoints', 'validity_duration') 89 | self._endpoint_data = e 90 | 91 | try: 92 | self.metadata_url 93 | self.content_url 94 | except KeyError as e: 95 | logger.critical('Received invalid endpoint data.') 96 | raise e 97 | 98 | self._save_endpoint_data() 99 | 100 | return e 101 | 102 | def _save_endpoint_data(self): 103 | f = open(self._endpoint_data_path, 'w') 104 | json.dump(self._endpoint_data, f, indent=4, sort_keys=True) 105 | f.flush() 106 | os.fsync(f.fileno()) 107 | f.close() 108 | 109 | @property 110 | def metadata_url(self): 111 | return self._endpoint_data['metadataUrl'] 112 | 113 | @property 114 | def content_url(self): 115 | return self._endpoint_data['contentUrl'] 116 | -------------------------------------------------------------------------------- /acdcli/api/common.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import re 3 | 4 | from requests.exceptions import ConnectionError 5 | 6 | try: 7 | from requests.exceptions import ReadTimeout as ReadTimeoutError 8 | except ImportError: 9 | try: 10 | from requests.packages.urllib3.exceptions import ReadTimeoutError 11 | except ImportError: 12 | class ReadTimeoutError(Exception): 13 | pass 14 | 15 | # status codes that indicate request success 16 | OK_CODES = [requests.codes.OK] 17 | 18 | 19 | class RequestError(Exception): 20 | """Catch-all exception class for various connection and ACD server errors.""" 21 | 22 | class CODE(object): 23 | CONN_EXCEPTION = 1000 24 | FAILED_SUBREQUEST = 1002 25 | INCOMPLETE_RESULT = 1003 26 | REFRESH_FAILED = 1004 27 | INVALID_TOKEN = 1005 28 | 29 | codes = requests.codes 30 | 31 | def __init__(self, status_code: int, msg: str): 32 | self.status_code = status_code 33 | if msg: 34 | self.msg = msg 35 | else: 36 | self.msg = '[acd_api] no body received.' 37 | 38 | def __str__(self): 39 | return 'RequestError: ' + str(self.status_code) + ', ' + self.msg 40 | 41 | 42 | def catch_conn_exception(func): 43 | """Request connection exception decorator 44 | :raises RequestError""" 45 | 46 | def decorated(*args, **kwargs): 47 | try: 48 | return func(*args, **kwargs) 49 | except (ConnectionError, ReadTimeoutError) as e: 50 | raise RequestError(RequestError.CODE.CONN_EXCEPTION, e.__str__()) 51 | 52 | return decorated 53 | 54 | 55 | def is_valid_id(id: str) -> bool: 56 | return bool(id) and len(id) == 22 and re.match('^[a-zA-Z0-9_-]*$', id) 57 | -------------------------------------------------------------------------------- /acdcli/api/content.py: -------------------------------------------------------------------------------- 1 | import http.client as http 2 | import os 3 | import json 4 | import io 5 | import mimetypes 6 | from collections import OrderedDict 7 | import logging 8 | from urllib.parse import quote_plus 9 | from requests import Response 10 | from requests_toolbelt import MultipartEncoder 11 | 12 | from .common import * 13 | 14 | PARTIAL_SUFFIX = '.__incomplete' 15 | """suffix (file ending) for incomplete files""" 16 | 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | class _TeeBufferedReader(object): 21 | """Proxy buffered reader object that allows callbacks on read operations.""" 22 | 23 | def __init__(self, file: io.BufferedReader, callbacks: list = None): 24 | self._file = file 25 | self._callbacks = callbacks 26 | 27 | def __getattr__(self, item): 28 | try: 29 | return object.__getattr__(item) 30 | except AttributeError: 31 | return getattr(self._file, item) 32 | 33 | def read(self, ln=-1): 34 | #ln = ln if ln in (0, -1) else FS_RW_CHUNK_SZ 35 | chunk = self._file.read(ln) 36 | for callback in self._callbacks or []: 37 | callback(chunk) 38 | return chunk 39 | 40 | 41 | def _tee_open(path: str, **kwargs) -> _TeeBufferedReader: 42 | f = open(path, 'rb') 43 | return _TeeBufferedReader(f, **kwargs) 44 | 45 | 46 | def _get_mimetype(file_name: str = '') -> str: 47 | mt = mimetypes.guess_type(file_name)[0] 48 | return mt if mt else 'application/octet-stream' 49 | 50 | 51 | def _stream_is_empty(stream) -> bool: 52 | try: 53 | return not stream.peek(1) 54 | except AttributeError: 55 | logger.debug('Stream does not support peeking, upload will fail if stream does ' 56 | 'not contain at least one byte.') 57 | return False 58 | 59 | 60 | class ContentMixin(object): 61 | """Implements content portion of the ACD API.""" 62 | 63 | def create_folder(self, name: str, parent=None) -> dict: 64 | body = {'kind': 'FOLDER', 'name': name} 65 | if parent: 66 | body['parents'] = [parent] 67 | body_str = json.dumps(body) 68 | 69 | acc_codes = [http.CREATED] 70 | 71 | r = self.BOReq.post(self.metadata_url + 'nodes', acc_codes=acc_codes, data=body_str) 72 | 73 | if r.status_code not in acc_codes: 74 | raise RequestError(r.status_code, r.text) 75 | 76 | return r.json() 77 | 78 | def create_file(self, file_name: str, parent: str = None) -> dict: 79 | params = {'suppress': 'deduplication'} 80 | 81 | basename = os.path.basename(file_name) 82 | metadata = {'kind': 'FILE', 'name': basename} 83 | if parent: 84 | metadata['parents'] = [parent] 85 | mime_type = _get_mimetype(basename) 86 | f = io.BytesIO() 87 | 88 | # basename is ignored 89 | m = MultipartEncoder(fields=OrderedDict([('metadata', json.dumps(metadata)), 90 | ('content', (quote_plus(basename), f, mime_type))]) 91 | ) 92 | 93 | ok_codes = [http.CREATED] 94 | r = self.BOReq.post(self.content_url + 'nodes', params=params, data=m, 95 | acc_codes=ok_codes, headers={'Content-Type': m.content_type}) 96 | 97 | if r.status_code not in ok_codes: 98 | raise RequestError(r.status_code, r.text) 99 | return r.json() 100 | 101 | def clear_file(self, node_id: str) -> dict: 102 | """Clears a file's content by overwriting it with an empty BytesIO. 103 | 104 | :param node_id: valid file node ID""" 105 | 106 | m = MultipartEncoder(fields={('content', (' ', io.BytesIO(), _get_mimetype()))}) 107 | 108 | r = self.BOReq.put(self.content_url + 'nodes/' + node_id + '/content', params={}, 109 | data=m, stream=True, headers={'Content-Type': m.content_type}) 110 | 111 | if r.status_code not in OK_CODES: 112 | raise RequestError(r.status_code, r.text) 113 | 114 | return r.json() 115 | 116 | def upload_file(self, file_name: str, parent: str = None, 117 | read_callbacks=None, deduplication=False) -> dict: 118 | params = {'suppress': 'deduplication'} 119 | if deduplication and os.path.getsize(file_name) > 0: 120 | params = {} 121 | 122 | basename = os.path.basename(file_name) 123 | metadata = {'kind': 'FILE', 'name': basename} 124 | if parent: 125 | metadata['parents'] = [parent] 126 | mime_type = _get_mimetype(basename) 127 | f = _tee_open(file_name, callbacks=read_callbacks) 128 | 129 | m = MultipartEncoder(fields=OrderedDict([('metadata', json.dumps(metadata)), 130 | ('content', ('filename', f, mime_type))])) 131 | 132 | ok_codes = [http.CREATED] 133 | r = self.BOReq.post(self.content_url + 'nodes', params=params, data=m, 134 | acc_codes=ok_codes, stream=True, 135 | headers={'Content-Type': m.content_type}) 136 | 137 | if r.status_code not in ok_codes: 138 | raise RequestError(r.status_code, r.text) 139 | return r.json() 140 | 141 | def _multipart_stream(self, metadata: dict, stream, boundary: str, read_callbacks=None): 142 | """Generator for chunked multipart/form-data file upload from stream input. 143 | 144 | :param metadata: file info, leave empty for overwrite 145 | :param stream: readable object""" 146 | 147 | if metadata: 148 | yield str.encode('--%s\r\nContent-Disposition: form-data; ' 149 | 'name="metadata"\r\n\r\n' % boundary + 150 | '%s\r\n' % json.dumps(metadata)) 151 | yield str.encode('--%s\r\n' % boundary) + \ 152 | b'Content-Disposition: form-data; name="content"; filename="foo"\r\n' + \ 153 | b'Content-Type: application/octet-stream\r\n\r\n' 154 | while True: 155 | f = stream.read(self._conf.getint('transfer', 'fs_chunk_size')) 156 | if f: 157 | for cb in read_callbacks or []: 158 | cb(f) 159 | yield f 160 | else: 161 | break 162 | yield str.encode('\r\n--%s--\r\n' % boundary + 163 | 'multipart/form-data; boundary=%s' % boundary) 164 | 165 | def upload_stream(self, stream, file_name: str, parent: str = None, 166 | read_callbacks=None, deduplication=False) -> dict: 167 | """:param stream: readable object 168 | :param parent: parent node id, defaults to root node if None""" 169 | 170 | if _stream_is_empty(stream): 171 | return self.create_file(file_name, parent) 172 | 173 | params = {} if deduplication else {'suppress': 'deduplication'} 174 | 175 | metadata = {'kind': 'FILE', 'name': file_name} 176 | if parent: 177 | metadata['parents'] = [parent] 178 | 179 | import uuid 180 | boundary = uuid.uuid4().hex 181 | 182 | ok_codes = [http.CREATED] 183 | r = self.BOReq.post(self.content_url + 'nodes', params=params, 184 | data=self._multipart_stream(metadata, stream, boundary, read_callbacks), 185 | acc_codes=ok_codes, 186 | headers={'Content-Type': 'multipart/form-data; boundary=%s' 187 | % boundary}) 188 | 189 | if r.status_code not in ok_codes: 190 | raise RequestError(r.status_code, r.text) 191 | return r.json() 192 | 193 | def overwrite_file(self, node_id: str, file_name: str, 194 | read_callbacks: list = None, deduplication=False) -> dict: 195 | params = {} if deduplication else {'suppress': 'deduplication'} 196 | 197 | basename = os.path.basename(file_name) 198 | mime_type = _get_mimetype(basename) 199 | f = _tee_open(file_name, callbacks=read_callbacks) 200 | 201 | # basename is ignored 202 | m = MultipartEncoder(fields={('content', (quote_plus(basename), f, mime_type))}) 203 | 204 | r = self.BOReq.put(self.content_url + 'nodes/' + node_id + '/content', params=params, 205 | data=m, stream=True, headers={'Content-Type': m.content_type}) 206 | 207 | if r.status_code not in OK_CODES: 208 | raise RequestError(r.status_code, r.text) 209 | 210 | return r.json() 211 | 212 | def overwrite_stream(self, stream, node_id: str, read_callbacks: list = None) -> dict: 213 | """Overwrite content of node with ID *node_id* with content of *stream*. 214 | 215 | :param stream: readable object""" 216 | 217 | if _stream_is_empty(stream): 218 | return self.clear_file(node_id) 219 | 220 | metadata = {} 221 | import uuid 222 | boundary = uuid.uuid4().hex 223 | 224 | r = self.BOReq.put(self.content_url + 'nodes/' + node_id + '/content', 225 | data=self._multipart_stream(metadata, stream, boundary, read_callbacks), 226 | headers={'Content-Type': 'multipart/form-data; boundary=%s' 227 | % boundary}) 228 | 229 | if r.status_code not in OK_CODES: 230 | raise RequestError(r.status_code, r.text) 231 | return r.json() 232 | 233 | def download_file(self, node_id: str, basename: str, dirname: str = None, **kwargs): 234 | """Deals with download preparation, download with :func:`chunked_download` and finish. 235 | Calls callbacks while fast forwarding through incomplete file (if existent). 236 | Will not check for existing file prior to download and overwrite existing file on finish. 237 | 238 | :param dirname: a valid local directory name, or cwd if None 239 | :param basename: a valid file name 240 | :param kwargs: \ 241 | - length: the total length of the file 242 | - write_callbacks (list[function]): passed on to :func:`chunked_download` 243 | - resume (bool=True): whether to resume if partial file exists""" 244 | 245 | chunk_sz = self._conf.getint('transfer', 'fs_chunk_size') 246 | 247 | dl_path = basename 248 | if dirname: 249 | dl_path = os.path.join(dirname, basename) 250 | part_path = dl_path + PARTIAL_SUFFIX 251 | offset = 0 252 | 253 | length = kwargs.get('length', 0) 254 | resume = kwargs.get('resume', True) 255 | if resume and os.path.isfile(part_path): 256 | with open(part_path, 'ab') as f: 257 | part_size = os.path.getsize(part_path) 258 | trunc_pos = part_size - 1 - chunk_sz 259 | trunc_pos = trunc_pos if trunc_pos >= 0 else 0 260 | 261 | if part_size != trunc_pos: 262 | f.truncate(trunc_pos) 263 | logger.debug('Truncated "%s" at %i, ' 264 | 'original size %i.' % (part_path, trunc_pos, part_size)) 265 | 266 | write_callbacks = kwargs.get('write_callbacks') 267 | if write_callbacks: 268 | with open(part_path, 'rb') as f: 269 | for chunk in iter(lambda: f.read(chunk_sz), b''): 270 | for rcb in write_callbacks: 271 | rcb(chunk) 272 | 273 | f = open(part_path, 'ab') 274 | else: 275 | f = open(part_path, 'wb') 276 | offset = f.tell() 277 | 278 | self.chunked_download(node_id, f, offset=offset, **kwargs) 279 | pos = f.tell() 280 | f.close() 281 | if length > 0 and pos < length: 282 | raise RequestError(RequestError.CODE.INCOMPLETE_RESULT, '[acd_api] download incomplete. ' 283 | 'Expected %i, got %i.' % (length, pos)) 284 | 285 | if os.path.isfile(dl_path): 286 | logger.info('Deleting existing file "%s".' % dl_path) 287 | os.remove(dl_path) 288 | os.rename(part_path, dl_path) 289 | 290 | @catch_conn_exception 291 | def chunked_download(self, node_id: str, file: io.BufferedWriter, **kwargs): 292 | """:param kwargs: 293 | offset (int): byte offset -- start byte for ranged request 294 | length (int): total file length[!], equal to end + 1 295 | write_callbacks (list[function]) 296 | """ 297 | ok_codes = [http.PARTIAL_CONTENT] 298 | 299 | write_callbacks = kwargs.get('write_callbacks', []) 300 | 301 | chunk_start = kwargs.get('offset', 0) 302 | length = kwargs.get('length', 100 * 1024 ** 4) 303 | 304 | dl_chunk_sz = self._conf.getint('transfer', 'dl_chunk_size') 305 | 306 | seekable = True 307 | try: 308 | file.tell() 309 | except OSError: 310 | seekable = False 311 | 312 | retries = 0 313 | while chunk_start < length: 314 | chunk_end = chunk_start + dl_chunk_sz - 1 315 | if chunk_end >= length: 316 | chunk_end = length - 1 317 | 318 | if retries >= self._conf.getint('transfer', 'chunk_retries'): 319 | raise RequestError(RequestError.CODE.FAILED_SUBREQUEST, 320 | '[acd_api] Downloading chunk failed multiple times.') 321 | r = self.BOReq.get(self.content_url + 'nodes/' + node_id + '/content', stream=True, 322 | acc_codes=ok_codes, 323 | headers={'Range': 'bytes=%d-%d' % (chunk_start, chunk_end)}) 324 | 325 | logger.debug('Node "%s", range %d-%d' % (node_id, chunk_start, chunk_end)) 326 | # this should only happen at the end of unknown-length downloads 327 | if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE: 328 | r.close() 329 | logger.debug('Invalid byte range requested %d-%d' % (chunk_start, chunk_end)) 330 | break 331 | if r.status_code not in ok_codes: 332 | r.close() 333 | retries += 1 334 | logging.debug('Chunk [%d-%d], retry %d.' % (chunk_start, chunk_end, retries)) 335 | continue 336 | 337 | curr_ln = 0 338 | try: 339 | for chunk in r.iter_content(chunk_size=self._conf.getint('transfer', 'fs_chunk_size')): 340 | if chunk: # filter out keep-alive new chunks 341 | file.write(chunk) 342 | file.flush() 343 | for wcb in write_callbacks: 344 | wcb(chunk) 345 | curr_ln += len(chunk) 346 | finally: 347 | r.close() 348 | if seekable: 349 | chunk_start = file.tell() 350 | else: 351 | chunk_start = chunk_start + curr_ln 352 | 353 | retries = 0 354 | 355 | return 356 | 357 | def response_chunk(self, node_id: str, offset: int, length: int, **kwargs) -> Response: 358 | ok_codes = [http.PARTIAL_CONTENT] 359 | end = offset + length - 1 360 | logger.debug('chunk o %d l %d' % (offset, length)) 361 | 362 | r = self.BOReq.get(self.content_url + 'nodes/' + node_id + '/content', 363 | acc_codes=ok_codes, stream=True, 364 | headers={'Range': 'bytes=%d-%d' % (offset, end)}, **kwargs) 365 | # if r.status_code == http.REQUESTED_RANGE_NOT_SATISFIABLE: 366 | # return 367 | if r.status_code not in ok_codes: 368 | raise RequestError(r.status_code, r.text) 369 | 370 | return r 371 | 372 | def download_chunk(self, node_id: str, offset: int, length: int, **kwargs) -> bytearray: 373 | """Load a file chunk into memory. 374 | 375 | :param length: the length of the download chunk""" 376 | 377 | r = self.response_chunk(node_id, offset, length, **kwargs) 378 | if not r: 379 | return 380 | 381 | buffer = bytearray() 382 | try: 383 | for chunk in r.iter_content(chunk_size=self._conf.getint('transfer', 'fs_chunk_size')): 384 | if chunk: 385 | buffer.extend(chunk) 386 | finally: 387 | r.close() 388 | return buffer 389 | 390 | def download_thumbnail(self, node_id: str, file_name: str, max_dim=128): 391 | """Download a movie's or picture's thumbnail into a file. 392 | Officially supports the image formats JPEG, BMP, PNG, TIFF, some RAW formats 393 | and the video formats MP4, QuickTime, AVI, MTS, MPEG, ASF, WMV, FLV, OGG. 394 | See http://www.amazon.com/gp/help/customer/display.html?nodeId=201634590 395 | Additionally supports MKV. 396 | 397 | :param max_dim: maximum width or height of the resized image/video thumbnail 398 | """ 399 | 400 | r = self.BOReq.get(self.content_url + 'nodes/' + node_id + '/content', 401 | params={'viewBox': max_dim}, stream=True) 402 | if r.status_code not in OK_CODES: 403 | raise RequestError(r.status_code, r.text) 404 | try: 405 | with open(file_name, 'wb') as f: 406 | f.write(r.raw.read()) 407 | finally: 408 | r.close() 409 | -------------------------------------------------------------------------------- /acdcli/api/metadata.py: -------------------------------------------------------------------------------- 1 | """Node metadata operations""" 2 | 3 | import json 4 | import logging 5 | import http.client 6 | import tempfile 7 | from collections import namedtuple 8 | 9 | from .common import * 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | ChangeSet = namedtuple('Changes', ['nodes', 'purged_nodes', 'checkpoint', 'reset']) 14 | 15 | 16 | class MetadataMixin(object): 17 | def get_node_list(self, **params) -> list: 18 | """:param params: may include tempLink='True'""" 19 | return self.BOReq.paginated_get(self.metadata_url + 'nodes', params) 20 | 21 | def get_file_list(self) -> list: 22 | return self.get_node_list(filters='kind:FILE') 23 | 24 | def get_folder_list(self) -> list: 25 | return self.get_node_list(filters='kind:FOLDER') 26 | 27 | def get_asset_list(self) -> list: 28 | return self.get_node_list(filters='kind:ASSET') 29 | 30 | def get_trashed_folders(self) -> list: 31 | return self.get_node_list(filters='status:TRASH AND kind:FOLDER') 32 | 33 | def get_trashed_files(self) -> list: 34 | return self.get_node_list(filters='status:TRASH AND kind:FILE') 35 | 36 | def get_changes(self, checkpoint='', include_purged=False, silent=True, file=None): 37 | """Writes changes into a (temporary) file. See 38 | ``_. 39 | """ 40 | 41 | logger.info('Getting changes with checkpoint "%s".' % checkpoint) 42 | 43 | body = {} 44 | if checkpoint: 45 | body['checkpoint'] = checkpoint 46 | if include_purged: 47 | body['includePurged'] = 'true' 48 | r = self.BOReq.post(self.metadata_url + 'changes', data=json.dumps(body), stream=True) 49 | if r.status_code not in OK_CODES: 50 | r.close() 51 | raise RequestError(r.status_code, r.text) 52 | 53 | if file: 54 | tmp = open(file, 'w+b') 55 | else: 56 | tmp = tempfile.TemporaryFile('w+b') 57 | try: 58 | for line in r.iter_lines(chunk_size=10 * 1024 ** 2, decode_unicode=False): 59 | if line: 60 | tmp.write(line + b'\n') 61 | if not silent: 62 | print('.', end='', flush=True) 63 | if not silent: 64 | print() 65 | except (http.client.IncompleteRead, requests.exceptions.ChunkedEncodingError) as e: 66 | logger.info(str(e)) 67 | raise RequestError(RequestError.CODE.INCOMPLETE_RESULT, 68 | '[acd_api] reading changes terminated prematurely.') 69 | except: 70 | raise 71 | finally: 72 | r.close() 73 | tmp.seek(0) 74 | return tmp 75 | 76 | @staticmethod 77 | def _iter_changes_lines(f) -> 'Generator[ChangeSet]': 78 | """Generates a ChangeSet per line in passed file 79 | 80 | the expected return format should be: 81 | {"checkpoint": str, "reset": bool, "nodes": []} 82 | {"checkpoint": str, "reset": false, "nodes": []} 83 | {"end": true} 84 | 85 | :arg f: opened file with current position at the beginning of a changeset 86 | :throws: RequestError 87 | """ 88 | 89 | end = False 90 | pages = -1 91 | 92 | while True: 93 | line = f.readline() 94 | if not line: 95 | break 96 | 97 | reset = False 98 | pages += 1 99 | 100 | nodes = [] 101 | purged_nodes = [] 102 | 103 | try: 104 | o = json.loads(line.decode('utf-8')) 105 | except ValueError: 106 | raise RequestError(RequestError.CODE.INCOMPLETE_RESULT, 107 | '[acd_api] Invalid JSON in change set, page %i.' % pages) 108 | 109 | try: 110 | if o['end']: 111 | end = True 112 | continue 113 | except KeyError: 114 | pass 115 | 116 | if o['reset']: 117 | logger.info('Found "reset" tag in changes.') 118 | reset = True 119 | 120 | # could this actually happen? 121 | if o['statusCode'] not in OK_CODES: 122 | raise RequestError(RequestError.CODE.FAILED_SUBREQUEST, 123 | '[acd_api] Partial failure in change request.') 124 | 125 | for node in o['nodes']: 126 | if node['status'] == 'PURGED': 127 | purged_nodes.append(node['id']) 128 | else: 129 | nodes.append(node) 130 | 131 | checkpoint = o['checkpoint'] 132 | logger.debug('Checkpoint: %s' % checkpoint) 133 | 134 | yield ChangeSet(nodes, purged_nodes, checkpoint, reset) 135 | 136 | logger.info('%i page(s) in changes.' % pages) 137 | if not end: 138 | logger.warning('End of change request not reached.') 139 | 140 | def get_metadata(self, node_id: str, assets=False, temp_link=True) -> dict: 141 | """Gets a node's metadata. 142 | 143 | :arg assets: also include asset info (e.g. thumbnails) if the node is a file 144 | :arg temp_link: include a temporary download link if the node is a file 145 | """ 146 | params = {'tempLink': 'true' if temp_link else 'false', 147 | 'asset': 'ALL' if assets else 'NONE'} 148 | r = self.BOReq.get(self.metadata_url + 'nodes/' + node_id, params=params) 149 | if r.status_code not in OK_CODES: 150 | raise RequestError(r.status_code, r.text) 151 | return r.json() 152 | 153 | # this will increment the node's version attribute 154 | def update_metadata(self, node_id: str, properties: dict) -> dict: 155 | """Update a node's properties like name, description, status, parents, ...""" 156 | body = json.dumps(properties) 157 | r = self.BOReq.patch(self.metadata_url + 'nodes/' + node_id, data=body) 158 | if r.status_code not in OK_CODES: 159 | raise RequestError(r.status_code, r.text) 160 | return r.json() 161 | 162 | def get_root_node(self) -> dict: 163 | """Gets the root node metadata""" 164 | 165 | params = {'filters': 'isRoot:true'} 166 | r = self.BOReq.get(self.metadata_url + 'nodes', params=params) 167 | 168 | if r.status_code not in OK_CODES: 169 | raise RequestError(r.status_code, r.text) 170 | 171 | data = r.json() 172 | 173 | return data['data'][0] 174 | 175 | def get_root_id(self) -> str: 176 | """Gets the ID of the root node 177 | 178 | :returns: the topmost folder id""" 179 | 180 | r = self.get_root_node() 181 | if 'id' in r['data'][0]: 182 | return r['data'][0]['id'] 183 | 184 | def list_children(self, node_id: str) -> list: 185 | l = self.BOReq.paginated_get(self.metadata_url + 'nodes/' + node_id + '/children') 186 | return l 187 | 188 | def list_child_folders(self, node_id: str) -> list: 189 | l = self.BOReq.paginated_get(self.metadata_url + 'nodes/' + node_id + '/children', 190 | params={'filters': 'kind:FOLDER'}) 191 | return l 192 | 193 | def add_child(self, parent_id: str, child_id: str) -> dict: 194 | """Adds node with ID *child_id* to folder with ID *parent_id*. 195 | 196 | :returns: updated child node dict""" 197 | 198 | r = self.BOReq.put(self.metadata_url + 'nodes/' + parent_id + '/children/' + child_id) 199 | if r.status_code not in OK_CODES: 200 | logger.error('Adding child failed.') 201 | raise RequestError(r.status_code, r.text) 202 | return r.json() 203 | 204 | def remove_child(self, parent_id: str, child_id: str) -> dict: 205 | """:returns: updated child node dict""" 206 | r = self.BOReq.delete( 207 | self.metadata_url + 'nodes/' + parent_id + "/children/" + child_id) 208 | # contrary to response code stated in API doc (202 ACCEPTED) 209 | if r.status_code not in OK_CODES: 210 | logger.error('Removing child failed.') 211 | raise RequestError(r.status_code, r.text) 212 | return r.json() 213 | 214 | def move_node_from(self, node_id: str, old_parent_id: str, new_parent_id: str) -> dict: 215 | """Moves node with given ID from old parent to new parent. 216 | Not tested with multi-parent nodes. 217 | 218 | :returns: changed node dict""" 219 | 220 | data = {'fromParent': old_parent_id, 'childId': node_id} 221 | r = self.BOReq.post(self.metadata_url + 'nodes/' + new_parent_id + '/children', 222 | data=json.dumps(data)) 223 | if r.status_code not in OK_CODES: 224 | raise RequestError(r.status_code, r.text) 225 | return r.json() 226 | 227 | def move_node(self, node_id: str, parent_id: str) -> dict: 228 | return self.update_metadata(node_id, {'parents': [parent_id]}) 229 | 230 | def rename_node(self, node_id: str, new_name: str) -> dict: 231 | properties = {'name': new_name} 232 | return self.update_metadata(node_id, properties) 233 | 234 | def set_available(self, node_id: str) -> dict: 235 | """Sets node status from 'PENDING' to 'AVAILABLE'.""" 236 | properties = {'status': 'AVAILABLE'} 237 | return self.update_metadata(node_id, properties) 238 | 239 | def get_owner_id(self): 240 | """Provisional function for retrieving the security profile's name, a.k.a. owner id.""" 241 | node = self.create_file('acd_cli_get_owner_id') 242 | self.move_to_trash(node['id']) 243 | return node['createdBy'] 244 | 245 | def list_properties(self, node_id: str, owner_id: str) -> dict: 246 | """This will always return an empty dict if the accessor is not the owner. 247 | :param owner_id: owner ID (return status 404 if empty)""" 248 | 249 | r = self.BOReq.get(self.metadata_url + 'nodes/' + node_id + '/properties/' + owner_id) 250 | if r.status_code not in OK_CODES: 251 | raise RequestError(r.status_code, r.text) 252 | return r.json()['data'] 253 | 254 | def add_property(self, node_id: str, owner_id: str, key: str, value: str) -> dict: 255 | """Adds or overwrites *key* property with *content*. Maximum number of keys per owner is 10. 256 | 257 | :param value: string of length <= 500 258 | :raises: RequestError: 404, if owner is empty 259 | RequestError: 400, {...} if maximum of allowed properties is reached 260 | :returns dict: {'key': '', 'location': '/properties/', 261 | 'value': ''}""" 262 | 263 | ok_codes = [requests.codes.CREATED] 264 | r = self.BOReq.put(self.metadata_url + 'nodes/' + node_id + 265 | '/properties/' + owner_id + '/' + key, 266 | data=json.dumps({'value': value}), acc_codes=ok_codes) 267 | if r.status_code not in ok_codes: 268 | raise RequestError(r.status_code, r.text) 269 | return r.json() 270 | 271 | def delete_property(self, node_id: str, owner_id: str, key: str): 272 | """Deletes *key* property from node with ID *node_id*.""" 273 | ok_codes = [requests.codes.NO_CONTENT] 274 | r = self.BOReq.delete(self.metadata_url + 'nodes/' + node_id + 275 | '/properties/' + owner_id + '/' + key, acc_codes=ok_codes) 276 | if r.status_code not in ok_codes: 277 | raise RequestError(r.status_code, r.text) 278 | 279 | def delete_properties(self, node_id: str, owner_id: str): 280 | """Deletes all of the owner's properties. Uses multiple requests.""" 281 | ok_codes = [requests.codes.NO_CONTENT] 282 | prop_dict = self.list_properties(node_id, owner_id) 283 | for key in prop_dict: 284 | r = self.BOReq.delete('%s/nodes/%s/properties/%s/%s' 285 | % (self.metadata_url, node_id, owner_id, key), acc_codes=ok_codes) 286 | if r.status_code not in ok_codes: 287 | raise RequestError(r.status_code, r.text) 288 | 289 | def resolve_folder_path(self, path: str) -> 'List[dict]': 290 | """Resolves a non-trash folder path to a list of folder entries.""" 291 | segments = list(filter(bool, path.split('/'))) 292 | folder_chain = [] 293 | 294 | root = self.get_root_node() 295 | folder_chain.append(root) 296 | 297 | if not segments: 298 | return folder_chain 299 | 300 | for i, segment in enumerate(segments): 301 | dir_entries = self.list_child_folders(folder_chain[-1]['id']) 302 | 303 | for ent in dir_entries: 304 | if ent['status'] == 'AVAILABLE' and ent['name'] == segment: 305 | folder_chain.append(ent) 306 | break 307 | if len(folder_chain) != i + 2: 308 | return [] 309 | 310 | return folder_chain 311 | -------------------------------------------------------------------------------- /acdcli/api/oauth.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import requests 4 | import time 5 | import logging 6 | import webbrowser 7 | import datetime 8 | import random 9 | import string 10 | import uuid 11 | from requests.auth import AuthBase 12 | from urllib.parse import urlparse, parse_qs 13 | from threading import Lock 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | TOKEN_INFO_URL = 'https://api.amazon.com/auth/o2/tokeninfo' 18 | 19 | # Fuck you Amazon you massive twats. 20 | APP_ID = "YW16bjEuYXBwbGljYXRpb24tb2EyLWNsaWVudC40YTY0NzZkODljYTA0NDQ3OGY3MTI3MDZiN2VjYzYwNA" 21 | APP_NAME = "Amazon Drive" 22 | APP_VERSION = "4.0.13.d2a5aec4" 23 | 24 | def create_handler(path: str): 25 | return CheekyOAuthHandler(path) 26 | 27 | class CheekyOAuthHandler(AuthBase): 28 | """An OAuth handler that does some cheeky bullshit to make acd_cli work again.""" 29 | OAUTH_DATA_FILE = 'oauth.json' 30 | AMAZON_OA_TOKEN_URL = 'https://api.amazon.com/auth/token' 31 | 32 | class KEYS(object): 33 | EXP_IN = 'expires_in' 34 | ACC_TOKEN = 'access_token' 35 | REFR_TOKEN = 'refresh_token' 36 | EXP_TIME = 'exp_time' # manually added 37 | 38 | def __init__(self, path): 39 | self.path = path 40 | self.oauth_data = {} 41 | self.oauth_data_path = os.path.join(path, self.OAUTH_DATA_FILE) 42 | self.init_time = time.time() 43 | self.lock = Lock() 44 | 45 | self.load_oauth_data() 46 | logger.info('%s initialized.' % self.__class__.__name__) 47 | 48 | def __call__(self, r: requests.Request): 49 | with self.lock: 50 | r.headers.update( 51 | { 52 | "Accept": "application/json", 53 | "x-amz-access-token": self.get_auth_token(), 54 | "x-amz-clouddrive-appid": APP_ID, 55 | "x-amzn-RequestId": str(uuid.uuid4()), 56 | } 57 | ) 58 | return r 59 | 60 | @property 61 | def exp_time(self): 62 | return self.oauth_data[self.KEYS.EXP_TIME] 63 | 64 | @classmethod 65 | def validate(cls, oauth: str) -> dict: 66 | """Deserialize and validate an OAuth string 67 | 68 | :raises: RequestError""" 69 | 70 | from .common import RequestError 71 | 72 | try: 73 | o = json.loads(oauth) 74 | o[cls.KEYS.ACC_TOKEN] 75 | o[cls.KEYS.EXP_IN] 76 | o[cls.KEYS.REFR_TOKEN] 77 | return o 78 | except (ValueError, KeyError) as e: 79 | logger.critical('Invalid authentication token: Invalid JSON or missing key.' 80 | 'Token:\n%s' % oauth) 81 | raise RequestError(RequestError.CODE.INVALID_TOKEN, e.__str__()) 82 | 83 | def treat_auth_token(self, time_: float): 84 | """Adds expiration time to member OAuth dict using specified begin time.""" 85 | exp_time = time_ + self.oauth_data[self.KEYS.EXP_IN] - 120 86 | self.oauth_data[self.KEYS.EXP_TIME] = exp_time 87 | logger.info('New token expires at %s.' 88 | % datetime.datetime.fromtimestamp(exp_time).isoformat(' ')) 89 | 90 | def load_oauth_data(self): 91 | """Loads oauth data file, validate and add expiration time if necessary""" 92 | self.check_oauth_file_exists() 93 | 94 | with open(self.oauth_data_path) as oa: 95 | o = oa.read() 96 | try: 97 | self.oauth_data = self.validate(o) 98 | except: 99 | logger.critical('Local OAuth data file "%s" is invalid. ' 100 | 'Please fix or delete it.' % self.oauth_data_path) 101 | raise 102 | if self.KEYS.EXP_TIME not in self.oauth_data: 103 | self.treat_auth_token(self.init_time) 104 | self.write_oauth_data() 105 | else: 106 | self.get_auth_token(reload=False) 107 | 108 | def get_auth_token(self, reload=True) -> str: 109 | """Gets current access token, refreshes if necessary. 110 | 111 | :param reload: whether the oauth token file should be reloaded (external update)""" 112 | 113 | if time.time() > self.exp_time: 114 | logger.info('Token expired at %s.' 115 | % datetime.datetime.fromtimestamp(self.exp_time).isoformat(' ')) 116 | 117 | # if multiple instances are running, check for updated file 118 | if reload: 119 | with open(self.oauth_data_path) as oa: 120 | o = oa.read() 121 | self.oauth_data = self.validate(o) 122 | 123 | if time.time() > self.exp_time: 124 | self.refresh_auth_token() 125 | else: 126 | logger.info('Externally updated token found in oauth file.') 127 | return self.oauth_data[self.KEYS.ACC_TOKEN] 128 | 129 | def write_oauth_data(self): 130 | """Dumps (treated) OAuth dict to file as JSON.""" 131 | 132 | new_nm = self.oauth_data_path + ''.join(random.choice(string.hexdigits) for _ in range(8)) 133 | rm_nm = self.oauth_data_path + ''.join(random.choice(string.hexdigits) for _ in range(8)) 134 | 135 | f = open(new_nm, 'w') 136 | json.dump(self.oauth_data, f, indent=4, sort_keys=True) 137 | f.flush() 138 | os.fsync(f.fileno()) 139 | f.close() 140 | 141 | if os.path.isfile(self.oauth_data_path): 142 | os.rename(self.oauth_data_path, rm_nm) 143 | os.rename(new_nm, self.oauth_data_path) 144 | try: 145 | os.remove(rm_nm) 146 | except OSError: 147 | pass 148 | 149 | def refresh_auth_token(self): 150 | """Fetches a new access token using a refresh token.""" 151 | logger.info('Refreshing authentication token.') 152 | 153 | data = { 154 | "app_name": APP_NAME, 155 | "app_version": APP_VERSION, 156 | "requested_token_type": "access_token", 157 | "source_token": self.oauth_data[self.KEYS.REFR_TOKEN], 158 | "source_token_type": "refresh_token", 159 | } 160 | 161 | from .common import RequestError 162 | 163 | t = time.time() 164 | try: 165 | response = requests.post(self.AMAZON_OA_TOKEN_URL, data=data) 166 | except ConnectionError as e: 167 | logger.critical('Error refreshing authentication token.') 168 | raise RequestError(RequestError.CODE.CONN_EXCEPTION, e.__str__()) 169 | 170 | if response.status_code != requests.codes.ok: 171 | raise RequestError(RequestError.CODE.REFRESH_FAILED, 172 | 'Error refreshing authentication token: %s' % response.text) 173 | 174 | response_json = response.json() 175 | response_json[self.KEYS.REFR_TOKEN] = self.oauth_data[self.KEYS.REFR_TOKEN] 176 | self.oauth_data = self.validate(json.dumps(response_json)) 177 | self.treat_auth_token(t) 178 | self.write_oauth_data() 179 | 180 | def check_oauth_file_exists(self): 181 | """Checks for OAuth file existence and one-time initialize if necessary. Throws on error.""" 182 | if not os.path.isfile(self.oauth_data_path): 183 | raise RuntimeError("The OAuth configuration does not exist. You must create it first.") 184 | 185 | def get_access_token_info(self) -> dict: 186 | """ 187 | :returns: 188 | int exp: expiration time in sec, 189 | str aud: client id 190 | user_id, app_id, iat (exp time)""" 191 | 192 | r = requests.get(TOKEN_INFO_URL, 193 | params={'access_token': self.oauth_data['access_token']}) 194 | return r.json() 195 | -------------------------------------------------------------------------------- /acdcli/api/trash.py: -------------------------------------------------------------------------------- 1 | """ 2 | Node trashing and restoration. 3 | https://developer.amazon.com/public/apis/experience/cloud-drive/content/trash 4 | """ 5 | 6 | from .common import * 7 | 8 | 9 | class TrashMixin(object): 10 | def list_trash(self) -> list: 11 | """Retrieves top-level trash list""" 12 | return self.BOReq.paginated_get(self.metadata_url + 'trash') 13 | 14 | def move_to_trash(self, node_id: str) -> dict: 15 | r = self.BOReq.put(self.metadata_url + 'trash/' + node_id) 16 | if r.status_code not in OK_CODES: 17 | raise RequestError(r.status_code, r.text) 18 | return r.json() 19 | 20 | def restore(self, node_id: str) -> dict: 21 | r = self.BOReq.post(self.metadata_url + 'trash/' + node_id + '/restore') 22 | if r.status_code not in OK_CODES: 23 | raise RequestError(r.status_code, r.text) 24 | return r.json() 25 | 26 | # {"message":"Insufficient permissions granted for operation: purgeNode"} 27 | def purge(self, node_id: str) -> dict: 28 | r = self.BOReq.delete(self.metadata_url + 'nodes/' + node_id) 29 | if r.status_code not in OK_CODES: 30 | raise RequestError(r.status_code, r.text) 31 | return r.json() 32 | -------------------------------------------------------------------------------- /acdcli/cache/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chrisgavin/cheeky_acd_cli/bb77ddc4c8ff6075ebb4b0b6ac750c4007770805/acdcli/cache/__init__.py -------------------------------------------------------------------------------- /acdcli/cache/cursors.py: -------------------------------------------------------------------------------- 1 | """Cursor context managers""" 2 | 3 | class cursor(object): 4 | def __init__(self, conn): 5 | self.conn = conn 6 | 7 | def __enter__(self): 8 | self.cursor = self.conn.cursor() 9 | return self.cursor 10 | 11 | def __exit__(self, exc_type, exc_val, exc_tb): 12 | self.cursor.close() 13 | 14 | 15 | class mod_cursor(object): 16 | def __init__(self, conn): 17 | self.conn = conn 18 | 19 | def __enter__(self): 20 | self.cursor = self.conn.cursor() 21 | return self.cursor 22 | 23 | def __exit__(self, exc_type, exc_val, exc_tb): 24 | if exc_type is None: 25 | self.conn.commit() 26 | else: 27 | self.conn.rollback() 28 | self.cursor.close() 29 | -------------------------------------------------------------------------------- /acdcli/cache/db.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import logging 3 | import os 4 | import re 5 | import sqlite3 6 | import sys 7 | from threading import local 8 | 9 | from acdcli.utils.conf import get_conf 10 | 11 | from .cursors import * 12 | from .format import FormatterMixin 13 | from .query import QueryMixin 14 | from .schema import SchemaMixin 15 | from .sync import SyncMixin 16 | 17 | logger = logging.getLogger(__name__) 18 | 19 | _ROOT_ID_SQL = 'SELECT id FROM nodes WHERE name IS NULL AND type == "folder" ORDER BY created' 20 | 21 | 22 | _SETTINGS_FILENAME = 'cache.ini' 23 | 24 | _def_conf = configparser.ConfigParser() 25 | _def_conf['sqlite'] = dict(filename='nodes.db', busy_timeout=30000, journal_mode='wal') 26 | _def_conf['blacklist'] = dict(folders=[]) 27 | 28 | 29 | 30 | class IntegrityError(Exception): 31 | def __init__(self, msg): 32 | self.msg = msg 33 | 34 | def __str__(self): 35 | return repr(self.msg) 36 | 37 | 38 | def _create_conn(path: str) -> sqlite3.Connection: 39 | c = sqlite3.connect(path) 40 | c.row_factory = sqlite3.Row # allow dict-like access on rows with col name 41 | return c 42 | 43 | 44 | def _regex_match(pattern: str, cell: str) -> bool: 45 | if cell is None: 46 | return False 47 | return re.match(pattern, cell, re.IGNORECASE) is not None 48 | 49 | 50 | class NodeCache(SchemaMixin, QueryMixin, SyncMixin, FormatterMixin): 51 | IntegrityCheckType = dict(full=0, quick=1, none=2) 52 | """types of SQLite integrity checks""" 53 | 54 | def __init__(self, cache_path: str='', settings_path='', check=IntegrityCheckType['full']): 55 | self._conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf) 56 | 57 | self.db_path = os.path.join(cache_path, self._conf['sqlite']['filename']) 58 | self.tl = local() 59 | 60 | self.integrity_check(check) 61 | try: 62 | self.init() 63 | except sqlite3.DatabaseError as e: 64 | raise IntegrityError(e) 65 | 66 | self._conn.create_function('REGEXP', _regex_match.__code__.co_argcount, _regex_match) 67 | 68 | with cursor(self._conn) as c: 69 | c.execute(_ROOT_ID_SQL) 70 | row = c.fetchone() 71 | if not row: 72 | self.root_id = '' 73 | return 74 | first_id = row['id'] 75 | 76 | if c.fetchone(): 77 | raise IntegrityError('Could not uniquely identify root node.') 78 | 79 | self.root_id = first_id 80 | 81 | self._execute_pragma('busy_timeout', self._conf['sqlite']['busy_timeout']) 82 | if sys.version_info[:3] != (3, 6, 0): 83 | self._execute_pragma('journal_mode', self._conf['sqlite']['journal_mode']) 84 | 85 | @property 86 | def _conn(self) -> sqlite3.Connection: 87 | if not hasattr(self.tl, '_conn'): 88 | self.tl._conn = _create_conn(self.db_path) 89 | return self.tl._conn 90 | 91 | def _execute_pragma(self, key, value) -> str: 92 | with cursor(self._conn) as c: 93 | c.execute('PRAGMA %s=%s;' % (key, value)) 94 | r = c.fetchone() 95 | if r: 96 | logger.debug('Set %s to %s. Result: %s.' % (key, value, r[0])) 97 | return r[0] 98 | 99 | @classmethod 100 | def remove_db_file(cls, cache_path='', settings_path='') -> bool: 101 | """Removes database file.""" 102 | 103 | import os 104 | import random 105 | import string 106 | import tempfile 107 | 108 | conf = get_conf(settings_path, _SETTINGS_FILENAME, _def_conf) 109 | db_path = os.path.join(cache_path, conf['sqlite']['filename']) 110 | 111 | tmp_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(16)) 112 | tmp_name = os.path.join(tempfile.gettempdir(), tmp_name) 113 | 114 | try: 115 | os.rename(db_path, tmp_name) 116 | except OSError: 117 | logger.critical('Error renaming/removing database file "%s".' % db_path) 118 | return False 119 | else: 120 | try: 121 | os.remove(tmp_name) 122 | except OSError: 123 | logger.info('Database file was moved, but not deleted.') 124 | return True 125 | 126 | def integrity_check(self, type_: IntegrityCheckType): 127 | """Performs a `self-integrity check 128 | `_ on the database.""" 129 | 130 | with cursor(self._conn) as c: 131 | if type_ == NodeCache.IntegrityCheckType['full']: 132 | r = c.execute('PRAGMA integrity_check;') 133 | elif type_ == NodeCache.IntegrityCheckType['quick']: 134 | r = c.execute('PRAGMA quick_check;') 135 | else: 136 | return 137 | r = c.fetchone() 138 | if not r or r[0] != 'ok': 139 | logger.warn('Sqlite database integrity check failed. ' 140 | 'You may need to clear the cache if you encounter any errors.') 141 | -------------------------------------------------------------------------------- /acdcli/cache/format.py: -------------------------------------------------------------------------------- 1 | """ 2 | Formatters for query Bundle iterables. Capable of ANSI-type coloring using colors defined in 3 | :envvar:`LS_COLORS`. 4 | """ 5 | 6 | import os 7 | import sys 8 | import datetime 9 | 10 | from .cursors import cursor 11 | 12 | try: 13 | colors = filter(None, os.environ.get('LS_COLORS', '').split(':')) 14 | colors = dict(c.split('=') for c in colors) 15 | # colors is now a mapping of 'type': 'color code' or '*.ext' : 'color code' 16 | except: 17 | colors = {} 18 | 19 | seq_tpl = '\x1B[%sm' 20 | res = seq_tpl % colors.get('rs', '') # reset code 21 | dir_fmt = seq_tpl % colors.get('di', '') + '%s' + res # dir text 22 | nor_fmt = seq_tpl % colors.get('no', '') + '%s' + res # 'normal' colored text 23 | 24 | ColorMode = dict(auto=0, always=1, never=2) 25 | 26 | 27 | def init(color=ColorMode['auto']): 28 | """Disables pre-initialized coloring if never mode specified or stdout is a tty. 29 | 30 | :param color: the color mode to use, defaults to auto""" 31 | 32 | # TODO: fix tty detection 33 | if color == ColorMode['never'] \ 34 | or not res \ 35 | or (color == ColorMode['auto'] and not sys.__stdout__.isatty()): 36 | global get_adfixes, color_path, color_status, seq_tpl, nor_fmt 37 | get_adfixes = lambda _: ('', '') 38 | color_path = lambda x: x 39 | color_status = lambda x: x[0] 40 | seq_tpl = '%s' 41 | nor_fmt = '%s' 42 | 43 | 44 | def color_file(name: str) -> str: 45 | """Colorizes a file name according to its file ending.""" 46 | parts = name.split('.') 47 | if len(parts) > 1: 48 | ext = parts.pop() 49 | code = colors.get('*.' + ext) 50 | if code: 51 | return seq_tpl % code + name + res 52 | 53 | return nor_fmt % name 54 | 55 | 56 | def color_path(path: str) -> str: 57 | """Colorizes a path string.""" 58 | segments = path.split('/') 59 | path_segments = [dir_fmt % s for s in segments[:-1]] 60 | last_seg = segments[-1] if segments[-1:] else '' 61 | file_seg = color_file(last_seg) 62 | return '/'.join(path_segments + [file_seg]) 63 | 64 | 65 | def color_status(status): 66 | """Creates a colored one-character status abbreviation.""" 67 | if status == 'AVAILABLE': 68 | return seq_tpl % '32' + status[0] + res # green 69 | elif status == 'TRASH': 70 | return seq_tpl % '31' + status[0] + res # red 71 | return status[0] 72 | 73 | 74 | def date_str(time_: datetime.datetime) -> str: 75 | """Creates colored date string similar to the one in ls -l.""" 76 | if time_.year == datetime.date.year: 77 | last_seg = str(time_.year).rjust(5) 78 | else: 79 | last_seg = '{0.hour:02}:{0.minute:02}'.format(time_) 80 | return nor_fmt % ('{0:%b} %s %s'.format(time_) % (str(time_.day).rjust(2), last_seg)) 81 | 82 | 83 | class FormatterMixin(object): 84 | def size_nlink_str(self, node, size_bytes=False): 85 | """Creates a right-justified size/nlinks string.""" 86 | from acdcli.utils.progress import file_size_str 87 | 88 | if node.is_file: 89 | if not size_bytes: 90 | return nor_fmt % file_size_str(node.size).rjust(7) 91 | return nor_fmt % str(node.size).rjust(11) 92 | elif node.is_folder: 93 | return nor_fmt % str(self.num_children(node.id)).rjust(7 if not size_bytes else 11) 94 | return '' 95 | 96 | def file_entry(self, file, long=False, size_bytes=False) -> str: 97 | return '[{}] [{}] {}{}{}'.format( 98 | nor_fmt % file.id, 99 | color_status(file.status), 100 | (self.size_nlink_str(file, size_bytes=size_bytes) + ' ') if long else '', 101 | (date_str(file.modified) + ' ') if long else '', 102 | color_path(file.name) 103 | ) 104 | 105 | def ls_format(self, folder_id, folder_path=None, recursive=False, 106 | trash_only=False, trashed_children=False, 107 | long=False, size_bytes=False) -> 'Generator[str]': 108 | 109 | if folder_path is None: 110 | folder_path = [] 111 | 112 | if trash_only: 113 | folders, files = self.list_trashed_children(folder_id) 114 | else: 115 | folders, files = self.list_children(folder_id, trashed_children) 116 | 117 | if recursive: 118 | for file in files: 119 | yield self.file_entry(file, long, size_bytes) 120 | 121 | if files and folders: 122 | yield '' 123 | 124 | is_first = True 125 | for folder in folders: 126 | children = self.num_children(folder.id) 127 | if recursive and not is_first and children > 0: 128 | yield '' 129 | yield '[{}] [{}] {}{}{}{}'.format( 130 | nor_fmt % folder.id, 131 | color_status(folder.status), 132 | (self.size_nlink_str(folder, size_bytes=size_bytes) + ' ') if long else '', 133 | (date_str(folder.modified) + ' ') if long else '', 134 | color_path('/'.join(folder_path) + '/') if folder_path else '', 135 | color_path(folder.name + '/') 136 | ) 137 | is_first = False 138 | 139 | if recursive: 140 | for n in self.ls_format(folder.id, 141 | [f for f in folder_path] + [folder.name], 142 | recursive, False, trashed_children, long, size_bytes): 143 | yield n 144 | 145 | if not recursive: 146 | for file in files: 147 | yield self.file_entry(file, long, size_bytes) 148 | 149 | def tree_format(self, node, path, trash=False, dir_only=False, 150 | depth=0, max_depth=None) -> 'Generator[str]': 151 | """A simple tree formatter that indicates parentship by indentation 152 | (i.e. does not display graphical branches like :program:`tree`).""" 153 | 154 | indent = ' ' * 4 * depth 155 | yield indent + color_path(node.simple_name) 156 | if max_depth is not None and depth >= max_depth: 157 | return 158 | 159 | indent += ' ' * 4 160 | folders, files = self.list_children(node.id, trash) 161 | for folder in folders: 162 | for line in self.tree_format(folder, '', trash, dir_only, depth + 1, max_depth): 163 | yield line 164 | 165 | if not dir_only: 166 | for file in files: 167 | yield indent + color_path(file.simple_name) 168 | 169 | @staticmethod 170 | def id_format(nodes) -> 'Generator[str]': 171 | for node in nodes: 172 | yield node.id 173 | 174 | def long_id_format(self, nodes) -> 'Generator[str]': 175 | for node in nodes: 176 | path = self.first_path(node.id) 177 | yield '[{}] [{}] {}{}'.format( 178 | nor_fmt % node.id, 179 | color_status(node.status), 180 | color_path(path), 181 | color_path(node.simple_name) 182 | ) 183 | 184 | def path_format(self, nodes): 185 | for node in nodes: 186 | yield self.first_path(node.id) + node.name 187 | -------------------------------------------------------------------------------- /acdcli/cache/query.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from datetime import datetime 3 | from .cursors import cursor 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | 8 | def datetime_from_string(dt: str) -> datetime: 9 | try: 10 | dt = datetime.strptime(dt, '%Y-%m-%d %H:%M:%S.%f+00:00') 11 | except ValueError: 12 | dt = datetime.strptime(dt, '%Y-%m-%d %H:%M:%S+00:00') 13 | return dt 14 | 15 | 16 | CONFLICTING_NODE_SQL = """SELECT n.*, f.* FROM nodes n 17 | JOIN parentage p ON n.id = p.child 18 | LEFT OUTER JOIN files f ON n.id = f.id 19 | WHERE p.parent = (?) AND LOWER(name) = (?) AND status = 'AVAILABLE' 20 | ORDER BY n.name""" 21 | 22 | CHILDREN_SQL = """SELECT n.*, f.* FROM nodes n 23 | JOIN parentage p ON n.id = p.child 24 | LEFT OUTER JOIN files f ON n.id = f.id 25 | WHERE p.parent = (?) 26 | ORDER BY n.name""" 27 | 28 | CHILDRENS_NAMES_SQL = """SELECT n.name FROM nodes n 29 | JOIN parentage p ON n.id = p.child 30 | WHERE p.parent = (?) AND n.status == 'AVAILABLE' 31 | ORDER BY n.name""" 32 | 33 | NUM_CHILDREN_SQL = """SELECT COUNT(n.id) FROM nodes n 34 | JOIN parentage p ON n.id = p.child 35 | WHERE p.parent = (?) AND n.status == 'AVAILABLE'""" 36 | 37 | NUM_PARENTS_SQL = """SELECT COUNT(n.id) FROM nodes n 38 | JOIN parentage p ON n.id = p.parent 39 | WHERE p.child = (?) AND n.status == 'AVAILABLE'""" 40 | 41 | NUM_NODES_SQL = 'SELECT COUNT(*) FROM nodes' 42 | NUM_FILES_SQL = 'SELECT COUNT(*) FROM files' 43 | NUM_FOLDERS_SQL = 'SELECT COUNT(*) FROM nodes WHERE type == "folder"' 44 | 45 | CHILD_OF_SQL = """SELECT n.*, f.* FROM nodes n 46 | JOIN parentage p ON n.id = p.child 47 | LEFT OUTER JOIN files f ON n.id = f.id 48 | WHERE n.name = (?) AND p.parent = (?) 49 | ORDER BY n.status""" 50 | 51 | NODE_BY_ID_SQL = """SELECT n.*, f.* FROM nodes n LEFT OUTER JOIN files f ON n.id = f.id 52 | WHERE n.id = (?)""" 53 | 54 | USAGE_SQL = 'SELECT SUM(size) FROM files' 55 | 56 | FIND_BY_NAME_SQL = """SELECT n.*, f.* FROM nodes n 57 | LEFT OUTER JOIN files f ON n.id = f.id 58 | WHERE n.name LIKE ? 59 | ORDER BY n.name""" 60 | 61 | FIND_BY_REGEX_SQL = """SELECT n.*, f.* FROM nodes n 62 | LEFT OUTER JOIN files f ON n.id = f.id 63 | WHERE n.name REGEXP ? 64 | ORDER BY n.name""" 65 | 66 | FIND_BY_MD5_SQL = """SELECT n.*, f.* FROM nodes n 67 | LEFT OUTER JOIN files f ON n.id = f.id 68 | WHERE f.md5 == (?) 69 | ORDER BY n.name""" 70 | 71 | FIND_FIRST_PARENT_SQL = """SELECT n.* FROM nodes n 72 | JOIN parentage p ON n.id = p.parent 73 | WHERE p.child = (?) 74 | ORDER BY n.status, n.id""" 75 | 76 | # TODO: exclude files in trashed folders?! 77 | FILE_SIZE_EXISTS_SQL = """SELECT COUNT(*) FROM files f 78 | JOIN nodes n ON n.id = f.id 79 | WHERE f.size == (?) AND n.status == 'AVAILABLE'""" 80 | 81 | 82 | class Node(object): 83 | def __init__(self, row): 84 | self.id = row['id'] 85 | self.type = row['type'] 86 | self.name = row['name'] 87 | self.description = row['description'] 88 | self.cre = row['created'] 89 | self.mod = row['modified'] 90 | self.updated = row['updated'] 91 | self.status = row['status'] 92 | 93 | try: 94 | self.md5 = row['md5'] 95 | except IndexError: 96 | self.md5 = None 97 | try: 98 | self.size = row['size'] 99 | except IndexError: 100 | self.size = 0 101 | 102 | def __lt__(self, other): 103 | return self.name < other.name 104 | 105 | def __hash__(self): 106 | return hash(self.id) 107 | 108 | def __repr__(self): 109 | return 'Node(%r, %r)' % (self.id, self.name) 110 | 111 | @property 112 | def is_folder(self): 113 | return self.type == 'folder' 114 | 115 | @property 116 | def is_file(self): 117 | return self.type == 'file' 118 | 119 | @property 120 | def is_available(self): 121 | return self.status == 'AVAILABLE' 122 | 123 | @property 124 | def is_trashed(self): 125 | return self.status == 'TRASH' 126 | 127 | @property 128 | def created(self): 129 | return datetime_from_string(self.cre) 130 | 131 | @property 132 | def modified(self): 133 | return datetime_from_string(self.mod) 134 | 135 | @property 136 | def simple_name(self): 137 | if self.is_file: 138 | return self.name 139 | return (self.name if self.name else '') + '/' 140 | 141 | 142 | class QueryMixin(object): 143 | def get_node(self, id) -> 'Union[Node|None]': 144 | with cursor(self._conn) as c: 145 | c.execute(NODE_BY_ID_SQL, [id]) 146 | r = c.fetchone() 147 | if r: 148 | return Node(r) 149 | 150 | def get_root_node(self): 151 | return self.get_node(self.root_id) 152 | 153 | def get_conflicting_node(self, name: str, parent_id: str): 154 | """Finds conflicting node in folder specified by *parent_id*, if one exists.""" 155 | with cursor(self._conn) as c: 156 | c.execute(CONFLICTING_NODE_SQL, [parent_id, name.lower()]) 157 | r = c.fetchone() 158 | if r: 159 | return Node(r) 160 | 161 | def resolve(self, path: str, trash=False) -> 'Union[Node|None]': 162 | segments = list(filter(bool, path.split('/'))) 163 | if not segments: 164 | if not self.root_id: 165 | return 166 | with cursor(self._conn) as c: 167 | c.execute(NODE_BY_ID_SQL, [self.root_id]) 168 | r = c.fetchone() 169 | return Node(r) 170 | 171 | parent = self.root_id 172 | for i, segment in enumerate(segments): 173 | with cursor(self._conn) as c: 174 | c.execute(CHILD_OF_SQL, [segment, parent]) 175 | r = c.fetchone() 176 | r2 = c.fetchone() 177 | 178 | if not r: 179 | return 180 | r = Node(r) 181 | 182 | if not r.is_available: 183 | if not trash: 184 | return 185 | if r2: 186 | logger.debug('None-unique trash name "%s" in %s.' % (segment, parent)) 187 | return 188 | if i + 1 == len(segments): 189 | return r 190 | if r.is_folder: 191 | parent = r.id 192 | continue 193 | else: 194 | return 195 | 196 | def childrens_names(self, folder_id) -> 'List[str]': 197 | with cursor(self._conn) as c: 198 | c.execute(CHILDRENS_NAMES_SQL, [folder_id]) 199 | kids = [] 200 | row = c.fetchone() 201 | while row: 202 | kids.append(row['name']) 203 | row = c.fetchone() 204 | return kids 205 | 206 | def get_node_count(self) -> int: 207 | with cursor(self._conn) as c: 208 | c.execute(NUM_NODES_SQL) 209 | r = c.fetchone()[0] 210 | return r 211 | 212 | def get_folder_count(self) -> int: 213 | with cursor(self._conn) as c: 214 | c.execute(NUM_FOLDERS_SQL) 215 | r = c.fetchone()[0] 216 | return r 217 | 218 | def get_file_count(self) -> int: 219 | with cursor(self._conn) as c: 220 | c.execute(NUM_FILES_SQL) 221 | r = c.fetchone()[0] 222 | return r 223 | 224 | def calculate_usage(self): 225 | with cursor(self._conn) as c: 226 | c.execute(USAGE_SQL) 227 | r = c.fetchone() 228 | return r[0] if r and r[0] else 0 229 | 230 | def num_children(self, folder_id) -> int: 231 | with cursor(self._conn) as c: 232 | c.execute(NUM_CHILDREN_SQL, [folder_id]) 233 | num = c.fetchone()[0] 234 | return num 235 | 236 | def num_parents(self, node_id) -> int: 237 | with cursor(self._conn) as c: 238 | c.execute(NUM_PARENTS_SQL, [node_id]) 239 | num = c.fetchone()[0] 240 | return num 241 | 242 | def get_child(self, folder_id, child_name) -> 'Union[Node|None]': 243 | with cursor(self._conn) as c: 244 | c.execute(CHILD_OF_SQL, [child_name, folder_id]) 245 | r = c.fetchone() 246 | if r: 247 | r = Node(r) 248 | if r.is_available: 249 | return r 250 | 251 | def list_children(self, folder_id, trash=False) -> 'Tuple[List[Node], List[Node]]': 252 | files = [] 253 | folders = [] 254 | 255 | with cursor(self._conn) as c: 256 | c.execute(CHILDREN_SQL, [folder_id]) 257 | node = c.fetchone() 258 | while node: 259 | node = Node(node) 260 | if node.is_available or trash: 261 | if node.is_file: 262 | files.append(node) 263 | elif node.is_folder: 264 | folders.append(node) 265 | node = c.fetchone() 266 | 267 | return folders, files 268 | 269 | def list_trashed_children(self, folder_id) -> 'Tuple[List[Node], List[Node]]': 270 | folders, files = self.list_children(folder_id, True) 271 | folders[:] = [f for f in folders if f.is_trashed] 272 | files[:] = [f for f in files if f.is_trashed] 273 | return folders, files 274 | 275 | def first_path(self, node_id: str) -> str: 276 | if node_id == self.root_id: 277 | return '/' 278 | with cursor(self._conn) as c: 279 | c.execute(FIND_FIRST_PARENT_SQL, (node_id,)) 280 | r = c.fetchone() 281 | node = Node(r) 282 | if node.id == self.root_id: 283 | return node.simple_name 284 | return self.first_path(node.id) + node.name + '/' 285 | 286 | def find_by_name(self, name: str) -> 'List[Node]': 287 | nodes = [] 288 | with cursor(self._conn) as c: 289 | c.execute(FIND_BY_NAME_SQL, ['%' + name + '%']) 290 | r = c.fetchone() 291 | while r: 292 | nodes.append(Node(r)) 293 | r = c.fetchone() 294 | return nodes 295 | 296 | def find_by_md5(self, md5) -> 'List[Node]': 297 | nodes = [] 298 | with cursor(self._conn) as c: 299 | c.execute(FIND_BY_MD5_SQL, (md5,)) 300 | r = c.fetchone() 301 | while r: 302 | nodes.append(Node(r)) 303 | r = c.fetchone() 304 | return nodes 305 | 306 | def find_by_regex(self, regex) -> 'List[Node]': 307 | nodes = [] 308 | with cursor(self._conn) as c: 309 | c.execute(FIND_BY_REGEX_SQL, (regex,)) 310 | r = c.fetchone() 311 | while r: 312 | nodes.append(Node(r)) 313 | r = c.fetchone() 314 | return nodes 315 | 316 | def file_size_exists(self, size) -> bool: 317 | with cursor(self._conn) as c: 318 | c.execute(FILE_SIZE_EXISTS_SQL, [size]) 319 | no = c.fetchone()[0] 320 | 321 | return bool(no) 322 | -------------------------------------------------------------------------------- /acdcli/cache/schema.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from sqlite3 import OperationalError 3 | from .cursors import * 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | # _KeyValueStorage 8 | 9 | 10 | _CREATION_SCRIPT = """ 11 | CREATE TABLE metadata ( 12 | "key" VARCHAR(64) NOT NULL, 13 | value VARCHAR, 14 | PRIMARY KEY ("key") 15 | ); 16 | 17 | CREATE TABLE nodes ( 18 | id VARCHAR(50) NOT NULL, 19 | type VARCHAR(15), 20 | name VARCHAR(256), 21 | description VARCHAR(500), 22 | created DATETIME, 23 | modified DATETIME, 24 | updated DATETIME, 25 | status VARCHAR(9), 26 | PRIMARY KEY (id), 27 | UNIQUE (id), 28 | CHECK (status IN ('AVAILABLE', 'TRASH', 'PURGED', 'PENDING')) 29 | ); 30 | 31 | CREATE TABLE labels ( 32 | id VARCHAR(50) NOT NULL, 33 | name VARCHAR(256) NOT NULL, 34 | PRIMARY KEY (id, name), 35 | FOREIGN KEY(id) REFERENCES nodes (id) 36 | ); 37 | 38 | CREATE TABLE files ( 39 | id VARCHAR(50) NOT NULL, 40 | md5 VARCHAR(32), 41 | size BIGINT, 42 | PRIMARY KEY (id), 43 | UNIQUE (id), 44 | FOREIGN KEY(id) REFERENCES nodes (id) 45 | ); 46 | 47 | CREATE TABLE parentage ( 48 | parent VARCHAR(50) NOT NULL, 49 | child VARCHAR(50) NOT NULL, 50 | PRIMARY KEY (parent, child), 51 | FOREIGN KEY(parent) REFERENCES folders (id), 52 | FOREIGN KEY(child) REFERENCES nodes (id) 53 | ); 54 | 55 | CREATE INDEX ix_parentage_child ON parentage(child); 56 | CREATE INDEX ix_nodes_names ON nodes(name); 57 | PRAGMA user_version = 3; 58 | """ 59 | 60 | _GEN_DROP_TABLES_SQL = \ 61 | 'SELECT "DROP TABLE " || name || ";" FROM sqlite_master WHERE type == "table"' 62 | 63 | _migrations = [] 64 | """list of all schema migrations""" 65 | 66 | 67 | def _migration(func): 68 | """scheme migration annotation; must be used in correct order""" 69 | _migrations.append(func) 70 | return func 71 | 72 | 73 | @_migration 74 | def _0_to_1(conn): 75 | conn.executescript( 76 | 'ALTER TABLE nodes ADD updated DATETIME;' 77 | 'ALTER TABLE nodes ADD description VARCHAR(500);' 78 | 'PRAGMA user_version = 1;' 79 | ) 80 | conn.commit() 81 | 82 | 83 | @_migration 84 | def _1_to_2(conn): 85 | conn.executescript( 86 | 'DROP TABLE IF EXISTS folders;' 87 | 'CREATE INDEX IF NOT EXISTS ix_nodes_names ON nodes(name);' 88 | 'REINDEX;' 89 | 'PRAGMA user_version = 2;' 90 | ) 91 | conn.commit() 92 | 93 | 94 | @_migration 95 | def _2_to_3(conn): 96 | conn.executescript( 97 | 'CREATE INDEX IF NOT EXISTS ix_parentage_child ON parentage(child);' 98 | 'REINDEX;' 99 | 'PRAGMA user_version = 3;' 100 | ) 101 | conn.commit() 102 | 103 | 104 | class SchemaMixin(object): 105 | _DB_SCHEMA_VER = 3 106 | 107 | def init(self): 108 | try: 109 | self.create_tables() 110 | except OperationalError: 111 | pass 112 | with cursor(self._conn) as c: 113 | c.execute('PRAGMA user_version;') 114 | r = c.fetchone() 115 | ver = r[0] 116 | 117 | logger.info('DB schema version is %i.' % ver) 118 | 119 | if self._DB_SCHEMA_VER > ver: 120 | self._migrate(ver) 121 | 122 | self.KeyValueStorage = _KeyValueStorage(self._conn) 123 | 124 | def create_tables(self): 125 | self._conn.executescript(_CREATION_SCRIPT) 126 | self._conn.commit() 127 | 128 | def _migrate(self, version): 129 | for i, migration in enumerate(_migrations[version:]): 130 | v = i + version 131 | logger.info('Migrating from schema version %i to %i' % (v, v + 1)) 132 | migration(self._conn) 133 | 134 | def drop_all(self): 135 | drop_sql = [] 136 | with cursor(self._conn) as c: 137 | c.execute(_GEN_DROP_TABLES_SQL) 138 | dt = c.fetchone() 139 | while dt: 140 | drop_sql.append(dt[0]) 141 | dt = c.fetchone() 142 | 143 | with mod_cursor(self._conn) as c: 144 | for drop in drop_sql: 145 | c.execute(drop) 146 | self._conn.commit() 147 | logger.info('Dropped all tables.') 148 | return True 149 | 150 | 151 | class _KeyValueStorage(object): 152 | def __init__(self, conn): 153 | self.conn = conn 154 | 155 | def __getitem__(self, key: str): 156 | with cursor(self.conn) as c: 157 | c.execute('SELECT value FROM metadata WHERE key = (?)', [key]) 158 | r = c.fetchone() 159 | if r: 160 | return r['value'] 161 | else: 162 | raise KeyError 163 | 164 | def __setitem__(self, key: str, value: str): 165 | with mod_cursor(self.conn) as c: 166 | c.execute('INSERT OR REPLACE INTO metadata VALUES (?, ?)', [key, value]) 167 | 168 | # def __len__(self): 169 | # return self.Session.query(Metadate).count() 170 | 171 | def get(self, key: str, default: str = None): 172 | with cursor(self.conn) as c: 173 | c.execute('SELECT value FROM metadata WHERE key == ?', [key]) 174 | r = c.fetchone() 175 | 176 | return r['value'] if r else default 177 | 178 | def update(self, dict_: dict): 179 | for key in dict_.keys(): 180 | self.__setitem__(key, dict_[key]) 181 | -------------------------------------------------------------------------------- /acdcli/cache/sync.py: -------------------------------------------------------------------------------- 1 | """ 2 | Syncs Amazon Node API objects with SQLite database. 3 | """ 4 | 5 | import logging 6 | from datetime import datetime 7 | from itertools import islice 8 | from .cursors import mod_cursor 9 | import dateutil.parser as iso_date 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | # prevent sqlite3 from throwing too many arguments errors (#145) 15 | def gen_slice(list_, length=100): 16 | it = iter(list_) 17 | while True: 18 | slice_ = [_ for _ in islice(it, length)] 19 | if not slice_: 20 | return 21 | yield slice_ 22 | 23 | 24 | def placeholders(args): 25 | return '(%s)' % ','.join('?' * len(args)) 26 | 27 | 28 | class SyncMixin(object): 29 | """Sync mixin to the :class:`NodeCache `""" 30 | 31 | def remove_purged(self, purged: list): 32 | """Removes purged nodes from database 33 | 34 | :param purged: list of purged node IDs""" 35 | 36 | if not purged: 37 | return 38 | 39 | for slice_ in gen_slice(purged): 40 | with mod_cursor(self._conn) as c: 41 | c.execute('DELETE FROM nodes WHERE id IN %s' % placeholders(slice_), slice_) 42 | c.execute('DELETE FROM files WHERE id IN %s' % placeholders(slice_), slice_) 43 | c.execute('DELETE FROM parentage WHERE parent IN %s' % placeholders(slice_), slice_) 44 | c.execute('DELETE FROM parentage WHERE child IN %s' % placeholders(slice_), slice_) 45 | c.execute('DELETE FROM labels WHERE id IN %s' % placeholders(slice_), slice_) 46 | 47 | logger.info('Purged %i node(s).' % len(purged)) 48 | 49 | def insert_nodes(self, nodes: list, partial=True): 50 | """Inserts mixed list of files and folders into cache.""" 51 | files = [] 52 | folders = [] 53 | for node in nodes: 54 | if node['status'] == 'PENDING': 55 | continue 56 | kind = node['kind'] 57 | if kind == 'FILE': 58 | if not 'name' in node or not node['name']: 59 | logger.warning('Skipping file %s because its name is empty.' % node['id']) 60 | continue 61 | files.append(node) 62 | elif kind == 'FOLDER': 63 | if (not 'name' in node or not node['name']) \ 64 | and (not 'isRoot' in node or not node['isRoot']): 65 | logger.warning('Skipping non-root folder %s because its name is empty.' 66 | % node['id']) 67 | continue 68 | folders.append(node) 69 | elif kind != 'ASSET': 70 | logger.warning('Cannot insert unknown node type "%s".' % kind) 71 | self.insert_folders(folders) 72 | self.insert_files(files) 73 | 74 | self.insert_parentage(files + folders, partial) 75 | 76 | def insert_node(self, node: dict): 77 | """Inserts single file or folder into cache.""" 78 | if not node: 79 | return 80 | self.insert_nodes([node]) 81 | 82 | def insert_folders(self, folders: list): 83 | """ Inserts list of folders into cache. Sets 'update' column to current date. 84 | 85 | :param folders: list of raw dict-type folders""" 86 | 87 | if not folders: 88 | return 89 | 90 | with mod_cursor(self._conn) as c: 91 | for f in folders: 92 | c.execute( 93 | 'INSERT OR REPLACE INTO nodes ' 94 | '(id, type, name, description, created, modified, updated, status) ' 95 | 'VALUES (?, "folder", ?, ?, ?, ?, ?, ?)', 96 | [f['id'], f.get('name'), f.get('description'), 97 | iso_date.parse(f['createdDate']), iso_date.parse(f['modifiedDate']), 98 | datetime.utcnow(), 99 | f['status'] 100 | ] 101 | ) 102 | 103 | logger.info('Inserted/updated %d folder(s).' % len(folders)) 104 | 105 | def insert_files(self, files: list): 106 | if not files: 107 | return 108 | 109 | with mod_cursor(self._conn) as c: 110 | for f in files: 111 | c.execute('INSERT OR REPLACE INTO nodes ' 112 | '(id, type, name, description, created, modified, updated, status)' 113 | 'VALUES (?, "file", ?, ?, ?, ?, ?, ?)', 114 | [f['id'], f.get('name'), f.get('description'), 115 | iso_date.parse(f['createdDate']), iso_date.parse(f['modifiedDate']), 116 | datetime.utcnow(), 117 | f['status'] 118 | ] 119 | ) 120 | c.execute('INSERT OR REPLACE INTO files (id, md5, size) VALUES (?, ?, ?)', 121 | [f['id'], 122 | f.get('contentProperties', {}).get('md5', 123 | 'd41d8cd98f00b204e9800998ecf8427e'), 124 | f.get('contentProperties', {}).get('size', 0) 125 | ] 126 | ) 127 | 128 | logger.info('Inserted/updated %d file(s).' % len(files)) 129 | 130 | def insert_parentage(self, nodes: list, partial=True): 131 | if not nodes: 132 | return 133 | 134 | if partial: 135 | with mod_cursor(self._conn) as c: 136 | for slice_ in gen_slice(nodes): 137 | c.execute('DELETE FROM parentage WHERE child IN %s' % placeholders(slice_), 138 | [n['id'] for n in slice_]) 139 | 140 | with mod_cursor(self._conn) as c: 141 | for n in nodes: 142 | for p in n['parents']: 143 | c.execute('INSERT OR IGNORE INTO parentage VALUES (?, ?)', [p, n['id']]) 144 | 145 | logger.info('Parented %d node(s).' % len(nodes)) 146 | -------------------------------------------------------------------------------- /acdcli/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | 4 | class RegisterLeafClasses(type): 5 | def __init__(cls, name, bases, nmspc): 6 | super(RegisterLeafClasses, cls).__init__(name, bases, nmspc) 7 | if not hasattr(cls, 'registry'): 8 | cls.registry = set() 9 | cls.registry.add(cls) 10 | cls.registry -= set(bases) # Remove base classes 11 | 12 | # metamethods, called on class objects: 13 | def __iter__(cls): 14 | return iter(cls.registry) 15 | 16 | def __str__(cls): 17 | if cls in cls.registry: 18 | return cls.__name__ 19 | return cls.__name__ + " leaf classes: " + ", ".join([sc.__name__ for sc in cls]) 20 | 21 | 22 | class Plugin(object, metaclass=RegisterLeafClasses): 23 | """Plugin base class. May be subject to changes.""" 24 | MIN_VERSION = None 25 | MAX_VERSION = None 26 | 27 | @classmethod 28 | def check_version(cls, version: str) -> bool: 29 | from distutils.version import StrictVersion 30 | if cls.MIN_VERSION: 31 | if StrictVersion(cls.MIN_VERSION) > StrictVersion(version): 32 | return False 33 | if cls.MAX_VERSION: 34 | return StrictVersion(cls.MAX_VERSION) >= StrictVersion(version) 35 | return True 36 | 37 | @classmethod 38 | def __str__(cls): 39 | return cls.__name__ 40 | 41 | @classmethod 42 | def attach(cls, subparsers: argparse.ArgumentParser, log: list, **kwargs): 43 | pass 44 | 45 | @staticmethod 46 | def action(args: argparse.Namespace): 47 | pass 48 | -------------------------------------------------------------------------------- /acdcli/plugins/template.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a template that you can use for adding custom plugins. 3 | """ 4 | 5 | from . import * 6 | 7 | 8 | class TestPlugin(Plugin): 9 | MIN_VERSION = '0.3.1' 10 | 11 | @classmethod 12 | def attach(cls, subparsers: argparse.ArgumentParser, log: list, **kwargs): 13 | """ Attaches this plugin to the top-level argparse subparser group 14 | :param subparsers the action subparser group 15 | :param log a list to put initialization log messages in 16 | """ 17 | p = subparsers.add_parser('test', add_help=False) 18 | p.add_argument('--silent', action='store_true', default=False) 19 | p.set_defaults(func=cls.action) 20 | 21 | log.append(str(cls) + ' attached.') 22 | 23 | @classmethod 24 | def action(cls, args: argparse.Namespace) -> int: 25 | """ This is where the magic happens. 26 | Return a zero for success, a non-zero int for failure. """ 27 | if not args.silent: 28 | print('This plugin works.') 29 | 30 | # args.cache.do_something() 31 | # args.acd_client.do_something() 32 | 33 | return 0 34 | -------------------------------------------------------------------------------- /acdcli/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chrisgavin/cheeky_acd_cli/bb77ddc4c8ff6075ebb4b0b6ac750c4007770805/acdcli/utils/__init__.py -------------------------------------------------------------------------------- /acdcli/utils/conf.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import logging 3 | import os 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | 8 | def get_conf(path, filename, default_conf: configparser.ConfigParser) \ 9 | -> configparser.ConfigParser: 10 | conf = configparser.ConfigParser() 11 | conf.read_dict(default_conf) 12 | 13 | conffn = os.path.join(path, filename) 14 | try: 15 | with open(conffn) as cf: 16 | conf.read_file(cf) 17 | except (OSError, IOError): 18 | pass 19 | 20 | logger.debug('configuration resulting from merging default and %s: %s' % (filename, 21 | {section: dict(conf[section]) for section in conf})) 22 | 23 | return conf 24 | -------------------------------------------------------------------------------- /acdcli/utils/hashing.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import logging 3 | import os 4 | 5 | logger = logging.getLogger(__name__) 6 | 7 | 8 | class IncrementalHasher(object): 9 | __slots__ = ('hasher',) 10 | 11 | def __init__(self): 12 | self.hasher = hashlib.md5() 13 | 14 | def update(self, chunk): 15 | self.hasher.update(chunk) 16 | 17 | def get_result(self) -> str: 18 | return self.hasher.hexdigest() 19 | 20 | 21 | def hash_file_obj(fo) -> str: 22 | hasher = hashlib.md5() 23 | fo.seek(0) 24 | for chunk in iter(lambda: fo.read(1024 ** 2), b''): 25 | hasher.update(chunk) 26 | return hasher.hexdigest() 27 | 28 | 29 | def hash_file(file_name: str) -> str: 30 | with open(file_name, 'rb') as f: 31 | md5 = hash_file_obj(f) 32 | logger.debug('MD5 of "%s" is %s' % (os.path.basename(file_name), md5)) 33 | return md5 -------------------------------------------------------------------------------- /acdcli/utils/progress.py: -------------------------------------------------------------------------------- 1 | import time 2 | import sys 3 | from math import floor, log10 4 | from collections import deque 5 | 6 | 7 | class FileProgress(object): 8 | __slots__ = ('current', 'status', 'total') 9 | 10 | def __init__(self, total_sz: int, current: int=0): 11 | self.total = total_sz 12 | self.current = current 13 | self.status = None 14 | 15 | def update(self, chunk): 16 | self.current += chunk.__sizeof__() 17 | 18 | def reset(self): 19 | self.current = 0 20 | 21 | def done(self): 22 | self.current = self.total 23 | 24 | 25 | class MultiProgress(object): 26 | """Container that accumulates multiple FileProgress objects""" 27 | 28 | def __init__(self): 29 | self._progresses = [] 30 | self._last_inv = None 31 | self._last_prog = 0 32 | self._last_speeds = deque([0] * 10, 10) 33 | 34 | def end(self): 35 | self.print_progress() 36 | print() 37 | failed = sum(1 for s in self._progresses if s.status) 38 | if failed: 39 | print('%d file(s) failed.' % failed) 40 | 41 | def add(self, progress: FileProgress): 42 | self._progresses.append(progress) 43 | 44 | def print_progress(self): 45 | total = 0 46 | current = 0 47 | complete = 0 48 | for p in self._progresses: 49 | total += p.total 50 | current += p.current 51 | if p.total <= p.current: 52 | complete += 1 53 | 54 | if current > total: 55 | total = current 56 | self._print(total, current, len(self._progresses), complete) 57 | 58 | def _print(self, total_sz: int, current_sz: int, total_items: int, done: int): 59 | """Prints a line that includes a progress bar, total and current transfer size, 60 | total and done items, average speed, and ETA. Uses ANSI escape codes.""" 61 | 62 | if not self._last_inv: 63 | self._last_inv = time.time() 64 | 65 | t = time.time() 66 | duration = t - self._last_inv 67 | speed = (current_sz - self._last_prog) / duration if duration else 0 68 | rate = float(current_sz) / total_sz if total_sz else 1 69 | self._last_speeds.append(speed) 70 | 71 | avg_speed = float(sum(self._last_speeds)) / len(self._last_speeds) 72 | eta = float(total_sz - current_sz) / avg_speed if avg_speed else 0 73 | 74 | self._last_inv, self._last_prog = t, current_sz 75 | 76 | percentage = round(rate * 100, ndigits=2) if rate <= 1 else 100 77 | completed = "#" * int(percentage / 4) 78 | spaces = " " * (25 - len(completed)) 79 | item_width = floor(log10(total_items)) 80 | sys.stdout.write('[%s%s] %s%% of %s %s/%d %s %s\x1b[K\r' 81 | % (completed, spaces, ('%3.1f' % percentage).rjust(5), 82 | (file_size_str(total_sz)).rjust(7), 83 | str(done).rjust(item_width + 1), total_items, 84 | (speed_str(avg_speed)).rjust(10), time_str(eta).rjust(7))) 85 | sys.stdout.flush() 86 | 87 | 88 | def speed_str(num: int, suffix='B', time_suffix='/s') -> str: 89 | for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: 90 | if abs(num) < 1000.0: 91 | return "%3.1f%s%s%s" % (num, unit, suffix, time_suffix) 92 | num /= 1000.0 93 | return "%.1f%s%s%s" % (num, 'Y', suffix, time_suffix) 94 | 95 | 96 | def file_size_str(num: int, suffix='B') -> str: 97 | for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: 98 | if abs(num) < 1024.0: 99 | return "%4.0f%s%s" % (num, unit, suffix) 100 | num /= 1024.0 101 | return "%.1f%s%s" % (num, 'Yi', suffix) 102 | 103 | 104 | def time_str(num: float) -> str: 105 | if num <= 0: 106 | return '0s' 107 | if num < 60: 108 | return '%02ds' % num 109 | elif num < 3600: 110 | seconds = num % 60 // 1 111 | minutes = (num - seconds) // 60 112 | return '%02d:%02dm' % (minutes, seconds) 113 | elif num <= 86400: 114 | minutes = num % 3600 // 60 115 | hours = (num - minutes) // 3600 116 | return '%02d:%02dh' % (hours, minutes) 117 | elif num <= 31536000: 118 | hours = num % 86400 // 3600 119 | days = (num - hours) // 86400 120 | if days >= 100: 121 | return '%id' % days 122 | return '%id %02dh' % (days, hours) 123 | else: 124 | return '>1 year' 125 | -------------------------------------------------------------------------------- /acdcli/utils/threading.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | import queue 4 | from threading import Thread, Event, Lock 5 | 6 | from . import progress 7 | 8 | _logger = logging.getLogger(__name__) 9 | 10 | 11 | class QueuedLoader(object): 12 | """Multi-threaded loader intended for file transfer jobs.""" 13 | 14 | MAX_NUM_WORKERS = 8 15 | MAX_RETRIES = 4 16 | REFRESH_PROGRESS_INT = 0.3 17 | 18 | def __init__(self, workers=1, print_progress=True, max_retries=0): 19 | self.workers = min(abs(workers), self.MAX_NUM_WORKERS) 20 | self.q = queue.Queue() 21 | self.halt = False 22 | self.exit_stat = 0 23 | self.stat_lock = Lock() 24 | self.print_progress = print_progress 25 | self.retries = min(abs(max_retries), self.MAX_RETRIES) 26 | 27 | self.mp = progress.MultiProgress() 28 | 29 | def _print_prog(self): 30 | while not self.halt: 31 | self.mp.print_progress() 32 | time.sleep(self.REFRESH_PROGRESS_INT) 33 | self.mp.end() 34 | 35 | def _worker_task(self, num: int): 36 | while True: 37 | try_ = 0 38 | f = self.q.get() 39 | while try_ <= self.retries: 40 | rr = f() 41 | if not rr.retry: 42 | break 43 | try_ += 1 44 | 45 | with self.stat_lock: 46 | self.exit_stat |= rr.ret_val 47 | self.q.task_done() 48 | 49 | def add_jobs(self, jobs: list): 50 | """:param jobs: list of partials that return a RetryRetVal and have a pg_handler kwarg""" 51 | for job in jobs: 52 | h = job.keywords.get('pg_handler') 53 | self.mp.add(h) 54 | self.q.put(job) 55 | 56 | def start(self) -> int: 57 | """Starts worker threads and, if applicable, progress printer thread. 58 | :returns: accumulated return value""" 59 | 60 | _logger.info('%d jobs in queue.' % self.q.qsize()) 61 | 62 | p = None 63 | print_progress = self.print_progress and self.q.qsize() > 0 64 | if print_progress: 65 | p = Thread(target=self._print_prog) 66 | p.daemon = True 67 | p.start() 68 | 69 | for i in range(self.workers): 70 | t = Thread(target=self._worker_task, args=(i,), name='worker-' + str(i)) 71 | t.daemon = True 72 | t.start() 73 | 74 | self.q.join() 75 | self.halt = True 76 | if p: 77 | p.join() 78 | 79 | return self.exit_stat 80 | -------------------------------------------------------------------------------- /acdcli/utils/time.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | 3 | 4 | def datetime_to_timestamp(dt: datetime) -> float: 5 | return (dt - datetime(1970, 1, 1)) / timedelta(seconds=1) 6 | -------------------------------------------------------------------------------- /assets/Makefile: -------------------------------------------------------------------------------- 1 | deb: 2 | fpm -s python -t deb requests-toolbelt 3 | fpm -s python -t deb --python-pip /usr/bin/pip3 --python-bin /usr/bin/python3 ../setup.py 4 | 5 | rpm: 6 | fpm -s python -t rpm requests-toolbelt 7 | fpm -s python -t rpm --python-pip /usr/bin/pip3 --python-bin /usr/bin/python3 ../setup.py 8 | -------------------------------------------------------------------------------- /assets/amazon-cloud-drive.service: -------------------------------------------------------------------------------- 1 | # $HOME/.config/systemd/user/amazon-cloud-drive.service 2 | # 3 | # Usage: 4 | # * Setup acd_cli normally and create $HOME/acd, 5 | # * check that /usr/local/bin/acd_cli is existent and executable. 6 | # * Copy this file to: $HOME/.config/systemd/user/amazon-cloud-drive.service 7 | # * Reload systemd: systemctl --user daemon-reload 8 | # * Start service: systemctl --user start amazon-cloud-drive.service 9 | 10 | [Unit] 11 | Description=user Amazon Cloud Drive FUSE mount 12 | Documentation=https://acd-cli.readthedocs.org/en/latest/ 13 | AssertPathIsDirectory=%h/acd 14 | After=network-online.target 15 | 16 | [Service] 17 | Type=simple 18 | ExecStart=/usr/local/bin/acd_cli mount --foreground %h/acd 19 | ExecStop=/usr/local/bin/acd_cli -v -nl umount %h/acd 20 | Restart=on-abort 21 | 22 | [Install] 23 | WantedBy=default.target 24 | -------------------------------------------------------------------------------- /assets/win_codepage.reg: -------------------------------------------------------------------------------- 1 | Windows Registry Editor Version 5.00 2 | 3 | [HKEY_CURRENT_USER\Console] 4 | "FaceName"="Lucida Console" 5 | "CodePage"=dword:0000fde9 -------------------------------------------------------------------------------- /docs/FAQ.rst: -------------------------------------------------------------------------------- 1 | Frequently Asked Questions 2 | ========================== 3 | 4 | Why Did I Get an UnicodeEncodeError? 5 | ------------------------------------ 6 | 7 | If you encounter Unicode problems, check that your locale is set correctly. 8 | Alternatively, you may use the ``--utf`` argument to force acd\_cli to use UTF-8 output encoding 9 | regardless of your console's current encoding. 10 | 11 | Windows users may import the provided reg file (assets/win_codepage.reg), 12 | tested with Windows 8.1, to set the command line interface encoding to cp65001. 13 | 14 | What Is acd\_cli's Installation Path? 15 | ------------------------------------- 16 | 17 | On unixoid operating systems the acd\_cli script may be located by running ``which acd_cli`` 18 | or, if that does not yield a result, by executing ``pip3 show -f acdcli``. 19 | 20 | Where Does acd\_cli Store its Cache and Settings? 21 | ------------------------------------------------- 22 | 23 | You can see which paths are used in the log output of ``acd_cli -v init``. 24 | 25 | My Sync Fails. What Should I Do? 26 | -------------------------------- 27 | 28 | If you are doing an incremental synchronization (i.e. you have synchronized before) and it fails, 29 | a full sync might work ``acd_cli sync -f``. 30 | 31 | If the sync times out, consider increasing the idle timeout (refer to the 32 | :doc:`config documentation `). 33 | 34 | You may also want to try the deprecated (and undocumented) synchronization method ``acd_cli old-sync`` 35 | if you happen to have only up to a few thousand files and folders in total. 36 | 37 | If you do not need to synchronize your full Drive hierarchy, consider running a partial sync 38 | (``psync``). 39 | 40 | How Do I Pass a Node ID Starting with ``-`` (dash/minus/hyphen)? 41 | ---------------------------------------------------------------- 42 | 43 | Precede the node ID by two minuses and a space to have it be interpreted as parameter 44 | and not as an argument, e.g. ``-- -AbCdEfGhIjKlMnOpQr012``. 45 | 46 | Can I Share or Delete Files/Folders? 47 | ------------------------------------ 48 | 49 | No. It is not possible to share or delete using the Amazon Drive API. Please do it manually 50 | using the `Web interface `_. 51 | 52 | What Do I Do When I get an `sqlite3.OperationalError: database is locked` error? 53 | -------------------------------------------------------------------------------- 54 | 55 | Please limit the number or running acd\_cli processes to one. For example, do not have an 56 | active FUSE mount while simultaneously uploading via command line. 57 | 58 | Why Does Python Crash When executing acd\_cli on Mac OS? 59 | -------------------------------------------------------- 60 | 61 | There is an `issue with the _scproxy module `_. 62 | Please precede your usual commands by ``env no_proxy='*'`` to prevent it from causing crashes. 63 | 64 | How Do I Share Directories from ACDFuse with Samba? 65 | --------------------------------------------------- 66 | 67 | By default, only the user that originally mounted the FUSE filesystem has access permissions. 68 | To lift this restriction, run the ``mount`` command with the ``--allow-other`` option. 69 | You may need to edit your system's setting before being able to use this mount option, 70 | e.g. in /etc/fuse.conf. 71 | 72 | Do Transfer Speeds Vary Depending on Geolocation? 73 | ------------------------------------------------- 74 | 75 | Amazon may be throttling users not located in the U.S. To quote the Terms of Use, 76 | 77 | The Service is offered in the United States. We may restrict access from other locations. 78 | There may be limits on the types of content you can store and share using the Service, 79 | such as file types we don't support, and on the number or type of devices you can use 80 | to access the Service. We may impose other restrictions on use of the Service. 81 | -------------------------------------------------------------------------------- /docs/FUSE.rst: -------------------------------------------------------------------------------- 1 | FUSE module 2 | =========== 3 | 4 | Status 5 | ------ 6 | 7 | The FUSE module will never provide anything as good and reliable as a local filesystem. 8 | See the `bug tracker `_ for issues that 9 | may occur. 10 | 11 | acd\_cli's FUSE module has the following filesystem features implemented: 12 | 13 | ===================== =========== 14 | Feature Working 15 | ===================== =========== 16 | Basic operations 17 | ---------------------------------- 18 | List directory ✓ 19 | Read ✓ 20 | Write ✓ [#]_ 21 | Rename ✓ 22 | Move ✓ 23 | Trashing ✓ 24 | OS-level trashing ✓ [#]_ 25 | View trash ❌ 26 | Misc 27 | ---------------------------------- 28 | Automatic sync ✓ 29 | ctime/mtime update ❌ 30 | Custom permissions ❌ 31 | Hard links partially [#]_ 32 | Symbolic links ❌ [#]_ 33 | ===================== =========== 34 | 35 | .. [#] partial writes are not possible (i.e. writes at random offsets) 36 | .. [#] restoring might not work 37 | .. [#] manually created hard links will be displayed, but it is discouraged to use them 38 | .. [#] soft links are not part of the ACD API 39 | 40 | Usage 41 | ----- 42 | 43 | The command to mount the (root of the) Amazon Drive to the empty directory ``path/to/mountpoint`` is 44 | :: 45 | 46 | acd_cli -nl mount path/to/mountpoint 47 | 48 | A non-root folder may be mounted similarly, by 49 | :: 50 | 51 | acd_cli -nl mount --modules="subdir,subdir=/folder" path/to/mountpoint 52 | 53 | Unmounting is usually achieved by the following command 54 | :: 55 | 56 | fusermount -u path/to/mountpoint 57 | 58 | If the mount is busy, Linux users can use the ``--lazy`` (``-z``) flag. 59 | There exists a convenience action ``acd_cli umount`` that unmounts all ACDFuse mounts on 60 | Linux and Mac OS. 61 | 62 | .. NOTE:: 63 | Changes made to your Amazon Drive not using acd\_cli will no longer be synchronized 64 | automatically. See the ``--interval`` option below to re-enable automatic synchronization. 65 | 66 | .. WARNING:: 67 | Using acd_cli's CLI commands (e.g. upload or sync) while having the drive mounted 68 | may lead to errors or corruption of the node cache. 69 | 70 | Mount Options 71 | ~~~~~~~~~~~~~ 72 | 73 | For further information on the most of the options below, see your :manpage:`mount.fuse(8)` man page. 74 | 75 | To convert the node's standard character set (UTF-8) to the system locale, the modules argument 76 | may be used, e.g. ``--modules="iconv,to_code=CHARSET"``. 77 | 78 | --allow-other, -ao allow all users to access the mountpoint (may need extra configuration) 79 | --allow-root, -ar allow the root user to access the mountpoint (may need extra configuration) 80 | --foreground, -fg do not detach process until filesystem is destroyed (blocks) 81 | --gid GID override the group ID (defaults to the user's gid) 82 | --interval INT, -i INT set the node cache sync (refresh) interval to INT seconds 83 | --nlinks, -n calculate the number of links for folders (slower) 84 | --nonempty, -ne allow mounting to a non-empty mount point 85 | --read-only, -ro disallow write operations (does not affect cache refresh) 86 | --single-threaded, -st disallow multi-threaded FUSE operations 87 | --uid UID override the user ID (defaults to the user's uid) 88 | --umask UMASK override the standard permission bits 89 | --volname VN, -vn VN set the volume name to VN (Mac OS) 90 | 91 | Automatic Remount 92 | ~~~~~~~~~~~~~~~~~ 93 | 94 | It is advisable to wait until your network connection is up before you try to run the mount command. 95 | 96 | Linux users may use the systemd service file from the assets directory 97 | to have the drive automatically remounted on login. 98 | Alternative ways are to add a crontab entry using the ``@reboot`` keyword or to add an 99 | fstab entry like so: 100 | :: 101 | 102 | acdmount /mount/point fuse _netdev 0 0 103 | 104 | 105 | For this to work, an executable shell script /usr/bin/acdmount must be created 106 | :: 107 | 108 | #!/bin/bash 109 | 110 | acd_cli -nl mount $1 111 | 112 | Library Path 113 | ~~~~~~~~~~~~ 114 | 115 | If you want or need to override the standard libfuse path, you may set the environment variable 116 | `LIBFUSE_PATH` to the full path of libfuse, e.g. 117 | :: 118 | 119 | export LIBFUSE_PATH="/lib/x86_64-linux-gnu/libfuse.so.2" 120 | 121 | This is particularly helpful if the libfuse library is properly installed, but not found. 122 | 123 | Deleting Nodes 124 | ~~~~~~~~~~~~~~ 125 | 126 | "Deleting" directories or files from the file system will in reality trash them in Amazon Drive. 127 | Calling ``rmdir`` on a directory will always move it into the trash, even if it is not empty. 128 | 129 | Logging 130 | ~~~~~~~ 131 | 132 | For debugging purposes, the recommended command to run is 133 | :: 134 | 135 | acd_cli -d -nl mount -i0 -fg path/to/mountpoint 136 | 137 | That command will disable the automatic refresh (i.e. sync) of the node cache (`-i0`) and disable 138 | detaching from the console. 139 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/acd_cli.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/acd_cli.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/acd_cli" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/acd_cli" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/TODO.rst: -------------------------------------------------------------------------------- 1 | TODO 2 | ---- 3 | 4 | General / API 5 | ~~~~~~~~~~~~~ 6 | 7 | * switch to multiprocessing (?) 8 | * metalink support (?) 9 | 10 | API 11 | ~~~ 12 | 13 | * support of node labels 14 | * support for assets (?) 15 | * favorite support (feature not yet announced officially) 16 | * rip out the Appspot authentication handler 17 | * fix upload of 0-byte streams 18 | 19 | CLI 20 | ~~~ 21 | 22 | * unify the find action 23 | * check symlink behavior for different Python versions (#95) 24 | 25 | FUSE 26 | ~~~~ 27 | 28 | * invalidate chunks of StreamedResponseCache (implement a time-out) 29 | * respect flags when opening files 30 | * use a filesystem test suite 31 | 32 | File Transfer 33 | ~~~~~~~~~~~~~ 34 | 35 | * more sophisticated progress handler that supports offsets 36 | * copy local mtime on upload (#58) 37 | * add path exclusion by argument for download 38 | 39 | User experience 40 | ~~~~~~~~~~~~~~~ 41 | 42 | * shell completion for remote directories (#127) 43 | * even nicer help formatting 44 | * log coloring 45 | 46 | Tests 47 | ~~~~~ 48 | 49 | * cache methods 50 | * more functional tests 51 | * fuse module 52 | 53 | Documentation 54 | ~~~~~~~~~~~~~ 55 | 56 | * write how-to on packaging plugins (sample setup.py) 57 | -------------------------------------------------------------------------------- /docs/authorization.rst: -------------------------------------------------------------------------------- 1 | Authorization 2 | ------------- 3 | 4 | Before you can use the program, you will have to complete the OAuth procedure with Amazon. 5 | The initially obtained OAuth credentials can subsequently be refreshed automatically when 6 | necessary, which is at most once an hour. 7 | 8 | It is necessary to have a (preferrably graphical) Web browser installed to complete the procedure. 9 | You may use another computer for this than the one acd\_cli will run on eventually. 10 | 11 | If you are a new user, your only option is to use the Appspot authentication method 12 | which relays your OAuth tokens through a small Google Compute Engine app. 13 | If you have a security profile which was whitelisted for Amazon Drive access (prior to fall 2016), 14 | please skip to the Security Profile section. 15 | 16 | Simple (Appspot) 17 | ++++++++++++++++ 18 | 19 | This authorization method was created to remove the initial barrier for most casual users. It will 20 | forward your authentication data through an external computing platform service (Google App 21 | Engine) and may be less secure than using your own security profile. Use it at your own risk. 22 | 23 | You may view the source code of the Appspot app that is used to handle the server part 24 | of the OAuth procedure at https://acd-api-oa.appspot.com/src. 25 | 26 | You will not have to prepare anything to initiate this authorization method, just 27 | run, for example, ``acd_cli init``. 28 | 29 | A browser (tab) will open and you will be asked to log into your Amazon account 30 | or grant access for 'acd-api'. 31 | Signing in or clicking on 'Continue' will download a JSON file named ``oauth_data``, which must be 32 | placed in the cache directory displayed on screen (e.g. ``/home//.cache/acd_cli``). 33 | 34 | Advanced Users (Security Profile) 35 | +++++++++++++++++++++++++++++++++ 36 | 37 | You must have a security profile and have it whitelisted, as described in Amazon's 38 | `ACD getting started guide 39 | `_. 40 | The security profile must be whitelisted for read and write aceess and have a redirect 41 | URL set for ``http://localhost``. 42 | 43 | Put your own security profile data in a file called ``client_data`` in the cache directory 44 | and have it adhere to the following form. 45 | 46 | .. code :: json 47 | 48 | { 49 | "CLIENT_ID": "amzn1.application-oa2-client.0123456789abcdef0123456789abcdef", 50 | "CLIENT_SECRET": "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" 51 | } 52 | 53 | You may now run ``acd_cli -v init``. 54 | The authentication procedure is similar to the one above. A browser (tab) will be 55 | opened and you will be asked to log in. Unless you have a local webserver running on port 80, 56 | you will be redirected to your browser's error page. Just copy the URL 57 | (e.g. ``http://localhost/?code=AbCdEfGhIjKlMnOpQrSt&scope=clouddrive%3Aread_all+clouddrive%3Awrite``) 58 | into the console. 59 | 60 | Changing Authorization Methods 61 | ++++++++++++++++++++++++++++++ 62 | 63 | If you want to change between authorization methods, go to your cache path (it is stated in the 64 | output of ``acd_cli -v init``) and delete the file ``oauth_data`` and, if it exists, ``client_data``. 65 | 66 | Copying Credentials 67 | +++++++++++++++++++ 68 | 69 | The same OAuth credentials may be used on multiple user accounts and multiple machines without a 70 | problem. To copy them, first look up acd\_cli's source and destination cache path like 71 | mentioned in the section above. Find the file/s ``oauth_data`` and possibly ``client_data`` in the 72 | source path and just copy it/them to the destination path. 73 | 74 | Accessing multiple Amazon accounts 75 | ++++++++++++++++++++++++++++++++++ 76 | 77 | It is possible to use the cache path environment variable to set up an additional cache that is 78 | linked to a different Amazon account by OAuth credentials. Please see the 79 | :doc:`setup section ` on environment variables. 80 | 81 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # acd_cli documentation build configuration file, created by 5 | # sphinx-quickstart on Wed Oct 14 15:29:35 2015. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | import sys 17 | import os 18 | import shutil 19 | import subprocess 20 | 21 | # custom imports 22 | dir_ = os.path.dirname(__file__) 23 | sys.path.append(dir_) 24 | sys.path.append(os.path.join(dir_, '..')) 25 | import acdcli 26 | 27 | # copy documents from parent path 28 | import hoist 29 | 30 | # If extensions (or modules to document with autodoc) are in another directory, 31 | # add these directories to sys.path here. If the directory is relative to the 32 | # documentation root, use os.path.abspath to make it absolute, like shown here. 33 | #sys.path.insert(0, os.path.abspath('.')) 34 | 35 | # -- General configuration ------------------------------------------------ 36 | 37 | # If your documentation needs a minimal Sphinx version, state it here. 38 | #needs_sphinx = '1.0' 39 | 40 | # Add any Sphinx extension module names here, as strings. They can be 41 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 42 | # ones. 43 | extensions = [ 44 | 'sphinx.ext.autodoc', 45 | 'sphinx.ext.intersphinx', 46 | 'sphinx.ext.todo', 47 | 'sphinx.ext.mathjax', 48 | 'sphinx.ext.ifconfig', 49 | 'sphinx.ext.viewcode', 50 | 'sphinx_paramlinks' 51 | ] 52 | 53 | # Add any paths that contain templates here, relative to this directory. 54 | templates_path = ['_templates'] 55 | 56 | # The suffix(es) of source filenames. 57 | # You can specify multiple suffix as a list of string: 58 | # source_suffix = ['.rst', '.md'] 59 | source_suffix = '.rst' 60 | 61 | # The encoding of source files. 62 | #source_encoding = 'utf-8-sig' 63 | 64 | # The master toctree document. 65 | master_doc = 'index' 66 | 67 | # General information about the project. 68 | project = 'acd_cli' 69 | copyright = '2015, yadayada' 70 | author = 'yadayada' 71 | 72 | # The version info for the project you're documenting, acts as replacement for 73 | # |version| and |release|, also used in various other places throughout the 74 | # built documents. 75 | # 76 | # The short X.Y version. 77 | version = acdcli.__version__ 78 | # The full version, including alpha/beta/rc tags. 79 | release = acdcli.__version__ 80 | 81 | # The language for content autogenerated by Sphinx. Refer to documentation 82 | # for a list of supported languages. 83 | # 84 | # This is also used if you do content translation via gettext catalogs. 85 | # Usually you set "language" from the command line for these cases. 86 | language = None 87 | 88 | # There are two options for replacing |today|: either, you set today to some 89 | # non-false value, then it is used: 90 | #today = '' 91 | # Else, today_fmt is used as the format for a strftime call. 92 | #today_fmt = '%B %d, %Y' 93 | 94 | # List of patterns, relative to source directory, that match files and 95 | # directories to ignore when looking for source files. 96 | exclude_patterns = ['_build'] 97 | 98 | # The reST default role (used for this markup: `text`) to use for all 99 | # documents. 100 | #default_role = None 101 | 102 | # If true, '()' will be appended to :func: etc. cross-reference text. 103 | #add_function_parentheses = True 104 | 105 | # If true, the current module name will be prepended to all description 106 | # unit titles (such as .. function::). 107 | #add_module_names = True 108 | 109 | # If true, sectionauthor and moduleauthor directives will be shown in the 110 | # output. They are ignored by default. 111 | #show_authors = False 112 | 113 | # The name of the Pygments (syntax highlighting) style to use. 114 | pygments_style = 'sphinx' 115 | 116 | # A list of ignored prefixes for module index sorting. 117 | #modindex_common_prefix = [] 118 | 119 | # If true, keep warnings as "system message" paragraphs in the built documents. 120 | #keep_warnings = False 121 | 122 | # If true, `todo` and `todoList` produce output, else they produce nothing. 123 | todo_include_todos = True 124 | 125 | 126 | # -- Options for HTML output ---------------------------------------------- 127 | 128 | # The theme to use for HTML and HTML Help pages. See the documentation for 129 | # a list of builtin themes. 130 | html_theme = 'sphinx_rtd_theme' 131 | 132 | # Theme options are theme-specific and customize the look and feel of a theme 133 | # further. For a list of options available for each theme, see the 134 | # documentation. 135 | #html_theme_options = {} 136 | 137 | # Add any paths that contain custom themes here, relative to this directory. 138 | #html_theme_path = [] 139 | 140 | # The name for this set of Sphinx documents. If None, it defaults to 141 | # " v documentation". 142 | #html_title = None 143 | 144 | # A shorter title for the navigation bar. Default is the same as html_title. 145 | #html_short_title = None 146 | 147 | # The name of an image file (relative to this directory) to place at the top 148 | # of the sidebar. 149 | #html_logo = None 150 | 151 | # The name of an image file (within the static path) to use as favicon of the 152 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 153 | # pixels large. 154 | #html_favicon = None 155 | 156 | # Add any paths that contain custom static files (such as style sheets) here, 157 | # relative to this directory. They are copied after the builtin static files, 158 | # so a file named "default.css" will overwrite the builtin "default.css". 159 | html_static_path = ['_static'] 160 | 161 | # Add any extra paths that contain custom files (such as robots.txt or 162 | # .htaccess) here, relative to this directory. These files are copied 163 | # directly to the root of the documentation. 164 | #html_extra_path = [] 165 | 166 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 167 | # using the given strftime format. 168 | #html_last_updated_fmt = '%b %d, %Y' 169 | 170 | # If true, SmartyPants will be used to convert quotes and dashes to 171 | # typographically correct entities. 172 | #html_use_smartypants = True 173 | 174 | # Custom sidebar templates, maps document names to template names. 175 | #html_sidebars = {} 176 | 177 | # Additional templates that should be rendered to pages, maps page names to 178 | # template names. 179 | #html_additional_pages = {} 180 | 181 | # If false, no module index is generated. 182 | #html_domain_indices = True 183 | 184 | # If false, no index is generated. 185 | #html_use_index = True 186 | 187 | # If true, the index is split into individual pages for each letter. 188 | #html_split_index = False 189 | 190 | # If true, links to the reST sources are added to the pages. 191 | html_show_sourcelink = False 192 | 193 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 194 | #html_show_sphinx = True 195 | 196 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 197 | #html_show_copyright = True 198 | 199 | # If true, an OpenSearch description file will be output, and all pages will 200 | # contain a tag referring to it. The value of this option must be the 201 | # base URL from which the finished HTML is served. 202 | #html_use_opensearch = '' 203 | 204 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 205 | #html_file_suffix = None 206 | 207 | # Language to be used for generating the HTML full-text search index. 208 | # Sphinx supports the following languages: 209 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' 210 | # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' 211 | #html_search_language = 'en' 212 | 213 | # A dictionary with options for the search language support, empty by default. 214 | # Now only 'ja' uses this config value 215 | #html_search_options = {'type': 'default'} 216 | 217 | # The name of a javascript file (relative to the configuration directory) that 218 | # implements a search results scorer. If empty, the default will be used. 219 | #html_search_scorer = 'scorer.js' 220 | 221 | # Output file base name for HTML help builder. 222 | htmlhelp_basename = 'acd_clidoc' 223 | 224 | # -- Options for LaTeX output --------------------------------------------- 225 | 226 | latex_elements = { 227 | # The paper size ('letterpaper' or 'a4paper'). 228 | #'papersize': 'letterpaper', 229 | 230 | # The font size ('10pt', '11pt' or '12pt'). 231 | #'pointsize': '10pt', 232 | 233 | # Additional stuff for the LaTeX preamble. 234 | #'preamble': '', 235 | 236 | # Latex figure (float) alignment 237 | #'figure_align': 'htbp', 238 | } 239 | 240 | # Grouping the document tree into LaTeX files. List of tuples 241 | # (source start file, target name, title, 242 | # author, documentclass [howto, manual, or own class]). 243 | latex_documents = [ 244 | (master_doc, 'acd_cli.tex', 'acd\\_cli Documentation', 245 | 'yadayada', 'manual'), 246 | ] 247 | 248 | # The name of an image file (relative to this directory) to place at the top of 249 | # the title page. 250 | #latex_logo = None 251 | 252 | # For "manual" documents, if this is true, then toplevel headings are parts, 253 | # not chapters. 254 | #latex_use_parts = False 255 | 256 | # If true, show page references after internal links. 257 | #latex_show_pagerefs = False 258 | 259 | # If true, show URL addresses after external links. 260 | #latex_show_urls = False 261 | 262 | # Documents to append as an appendix to all manuals. 263 | #latex_appendices = [] 264 | 265 | # If false, no module index is generated. 266 | #latex_domain_indices = True 267 | 268 | 269 | # -- Options for manual page output --------------------------------------- 270 | 271 | # One entry per manual page. List of tuples 272 | # (source start file, name, description, authors, manual section). 273 | man_pages = [ 274 | (master_doc, 'acd_cli', 'acd_cli Documentation', 275 | [author], 1) 276 | ] 277 | 278 | # If true, show URL addresses after external links. 279 | #man_show_urls = False 280 | 281 | 282 | # -- Options for Texinfo output ------------------------------------------- 283 | 284 | # Grouping the document tree into Texinfo files. List of tuples 285 | # (source start file, target name, title, author, 286 | # dir menu entry, description, category) 287 | texinfo_documents = [ 288 | (master_doc, 'acd_cli', 'acd_cli Documentation', 289 | author, 'acd_cli', 'One line description of project.', 290 | 'Miscellaneous'), 291 | ] 292 | 293 | # Documents to append as an appendix to all manuals. 294 | #texinfo_appendices = [] 295 | 296 | # If false, no module index is generated. 297 | #texinfo_domain_indices = True 298 | 299 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 300 | #texinfo_show_urls = 'footnote' 301 | 302 | # If true, do not generate a @detailmenu in the "Top" node's menu. 303 | #texinfo_no_detailmenu = False 304 | 305 | 306 | # Example configuration for intersphinx: refer to the Python standard library. 307 | intersphinx_mapping = {'https://docs.python.org/': None} 308 | 309 | # autodoc 310 | AUTODOC_DIR = 'apidoc' 311 | 312 | try: 313 | shutil.rmtree(AUTODOC_DIR) 314 | except FileNotFoundError: 315 | pass 316 | subprocess.call(['sphinx-apidoc', '-P', '-o', AUTODOC_DIR, '../acdcli']) 317 | 318 | 319 | # Ensure that the __init__ method gets documented. 320 | def skip(app, what, name, obj, skip, options): 321 | if name == "__init__": 322 | return False 323 | return skip 324 | 325 | 326 | def setup(app): 327 | app.connect("autodoc-skip-member", skip) 328 | 329 | -------------------------------------------------------------------------------- /docs/configuration.rst: -------------------------------------------------------------------------------- 1 | Configuration 2 | ============= 3 | 4 | Some module constants may be set in INI-style configuration files. If you want to override 5 | the defaults as described below, create a plain text file for the module using the section heading 6 | as the file name in the settings directory. 7 | 8 | acd\_cli.ini 9 | ------------ 10 | 11 | :: 12 | 13 | [download] 14 | ;do not delete corrupt files 15 | keep_corrupt = False 16 | 17 | ;do not delete partially downloaded files 18 | keep_incomplete = True 19 | 20 | [upload] 21 | ;waiting time for timed-out uploads/overwrittes to appear remotely [minutes] 22 | timeout_wait = 10 23 | 24 | acd\_client.ini 25 | --------------- 26 | 27 | :: 28 | 29 | [endpoints] 30 | filename = endpoint_data 31 | 32 | ;sets the validity of the endpoint URLs, 3 days by default [seconds] 33 | validity_duration = 259200 34 | 35 | [transfer] 36 | ;sets the read/write chunk size for the local file system [bytes] 37 | fs_chunk_size = 131072 38 | 39 | ;sets maximal consecutive chunk size for downloads, 500MiB by default [bytes] 40 | ;this limit was introduced because, in the past, files >10GiB could not be downloaded in one piece 41 | dl_chunk_size = 524288000 42 | 43 | ;sets the number of retries for failed chunk requests 44 | chunk_retries = 1 45 | 46 | ;sets the connect and idle timeout [seconds] 47 | ;the idle timeout will be used in both timeout scenarios for some old requests versions 48 | ;refer to the requests docs http://docs.python-requests.org/en/master/user/advanced/ 49 | connection_timeout = 30 50 | idle_timeout = 60 51 | 52 | [proxies] 53 | ;none by default 54 | 55 | A proxy may be set by adding a protocol to proxy mapping like 56 | ``https = https://user:pass@1.1.1.1:1234`` to the proxies section. 57 | 58 | cache.ini 59 | --------- 60 | 61 | :: 62 | 63 | [sqlite] 64 | filename = nodes.db 65 | 66 | ;sets the time to sleep if a table is locked [milliseconds] 67 | busy_timeout = 30000 68 | 69 | ;https://www.sqlite.org/pragma.html#pragma_journal_mode 70 | journal_mode = wal 71 | 72 | [blacklist] 73 | 74 | ;files contained in folders in this list will be excluded from being saved 75 | ;into the cache (not currently implemented) 76 | folders = [] 77 | 78 | fuse.ini 79 | -------- 80 | 81 | :: 82 | [fs] 83 | ;block size used for size info 84 | block_size = 512 85 | 86 | [read] 87 | ;maximal number of simultaneously opened chunks per file 88 | open_chunk_limit = 10 89 | 90 | ;sets the connection/idle timeout when creating or reading a chunk [seconds] 91 | timeout = 5 92 | 93 | [write] 94 | ;number of buffered chunks in the write queue 95 | ;the size of the chunks may vary (e.g. 512B, 4KB, or 128KB) 96 | buffer_size = 32 97 | 98 | ;sets the timeout for putting a chunk into the queue [seconds] 99 | timeout = 30 100 | -------------------------------------------------------------------------------- /docs/contributors.rst: -------------------------------------------------------------------------------- 1 | Contributors 2 | ============ 3 | 4 | Thanks to 5 | 6 | - `chrisidefix `_ for adding the find-md5 action and 7 | forcing me to create a proper package and use PyPI 8 | 9 | - `msh100 `_ for adding proxy documentation and updating the oauth scope 10 | 11 | - `hansendc `_ for revamping the usage report 12 | 13 | - `legnaleurc `_ for adding the find-regex action 14 | 15 | - `Timdawson264 `_ for fixing st_nlinks in the FUSE node stat 16 | 17 | - `Lorentz83 `_ for creating 18 | `a bash completion script `_ 19 | 20 | - `kylemanna `_ for adding a systemd service file 21 | 22 | - `calisro `_ for adding uid, gid, umask overrides for fuse layer 23 | 24 | - `memoz `_ for amending proxy documentation 25 | 26 | - `gerph `_ for making file searches faster, particularly on large repositories 27 | 28 | Also thanks to 29 | 30 | - `fibersnet `_ for pointing out a possible deadlock in ACDFuse. 31 | - and everyone else who I forgot to mention 32 | -------------------------------------------------------------------------------- /docs/dev.rst: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | Contents: 5 | 6 | .. toctree:: 7 | :glob: 8 | :maxdepth: 2 9 | 10 | apidoc/* 11 | TODO 12 | -------------------------------------------------------------------------------- /docs/find.rst: -------------------------------------------------------------------------------- 1 | Finding nodes 2 | ============= 3 | 4 | The find actions will search for normal (active) and trashed nodes and list them. 5 | 6 | find 7 | ---- 8 | 9 | The find action will perform a case-insensitive search for files and folders that include the 10 | name or name segment given as argument, so e.g. ``acdcli find foo`` will find "foo", "Foobar", etc. 11 | 12 | find-md5 13 | -------- 14 | 15 | find-md5 will search for files that match the MD5 hash given. The location of a local file may be 16 | determined like so: 17 | :: 18 | 19 | acdcli find-md5 `md5sum local/file | cut -d" " -f1` 20 | 21 | find-regex 22 | ---------- 23 | 24 | find-regex searches for the specified `regex `_ 25 | in nodes' names. 26 | -------------------------------------------------------------------------------- /docs/history.rst: -------------------------------------------------------------------------------- 1 | Ancient History 2 | =============== 3 | 4 | 0.1.3 5 | ----- 6 | * plugin mechanism added 7 | * OAuth now via Appspot; security profile no longer necessary 8 | * back-off algorithm for API requests implemented 9 | 10 | 0.1.2 11 | ----- 12 | new: 13 | * overwriting of files 14 | * recursive upload/download 15 | * hashing of downloaded files 16 | * clear-cache action 17 | 18 | fixes: 19 | * remove-child accepted status code 20 | * fix for upload of files with Unicode characters 21 | 22 | other: 23 | * changed database schema 24 | -------------------------------------------------------------------------------- /docs/hoist.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | files = ('README.rst', 'CONTRIBUTING.rst') 5 | 6 | # replace GitHub external links by :doc: links 7 | replacements = (('`([^`]*?) <(docs/)?(.*?)\.rst>`_', ':doc:`\g<1> <\g<3>>`'),) 8 | 9 | 10 | def read(fname: str) -> str: 11 | return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read() 12 | 13 | for file in files: 14 | c = read('../' + file) 15 | for r in replacements: 16 | c = re.sub(r[0], r[1], c) 17 | with open(file, 'w') as f: 18 | f.write(c) 19 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. acd_cli documentation master file 2 | 3 | acd\_cli documentation 4 | ====================== 5 | 6 | Version |version| 7 | 8 | Contents: 9 | 10 | .. toctree:: 11 | :maxdepth: 1 12 | 13 | setup 14 | authorization 15 | usage 16 | configuration 17 | sync 18 | transfer 19 | find 20 | FUSE 21 | CONTRIBUTING 22 | contributors 23 | FAQ 24 | history 25 | dev 26 | 27 | .. 28 | Indices and tables 29 | ================== 30 | 31 | * :ref:`genindex` 32 | * :ref:`modindex` 33 | * :ref:`search` 34 | 35 | Overview 36 | -------- 37 | 38 | .. include:: README.rst 39 | :start-line: 3 40 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\acd_cli.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\acd_cli.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/setup.rst: -------------------------------------------------------------------------------- 1 | Setting up acd\_cli 2 | =================== 3 | 4 | Check which Python 3 version is installed on your system, e.g. by running 5 | :: 6 | 7 | python3 -V 8 | 9 | If it is Python 3.2.3, 3.3.0 or 3.3.1, you need to upgrade to a higher minor version. 10 | 11 | You may now proceed to install using PIP, your package manager if you are using 12 | Arch Linux/Devuan/Fedora or build Debian/RedHat packages using fpm. 13 | 14 | Installation with PIP 15 | --------------------- 16 | 17 | If you are new to Python, worried about dependencies or about 18 | possibly messing up your system, create and activate virtualenv like so: 19 | :: 20 | 21 | cd /parent/path/to/your/new/virtualenv 22 | virtualenv acdcli 23 | source acdcli/bin/activate 24 | 25 | You are now safe to install and test acd\_cli. When you are finished, the environment can be 26 | disabled by simply closing your shell or running ``deactivate``. 27 | 28 | Please check which pip command is appropriate for Python 3 packages in your environment. 29 | I will be using 'pip3' as superuser in the examples. 30 | 31 | The recommended and most up-to-date way is to directly install the master branch from GitHub. 32 | :: 33 | 34 | pip3 install --upgrade git+https://github.com/yadayada/acd_cli.git 35 | 36 | Or use the usual installation method by specifying the PyPI package name. This may not work 37 | flawlessly on Windows systems. 38 | :: 39 | 40 | pip3 install --upgrade --pre acdcli 41 | 42 | 43 | PIP Errors on Debian 44 | ~~~~~~~~~~~~~~~~~~~~ 45 | 46 | A version incompatibility may arise with PIP when upgrading the requests package. 47 | PIP will throw the following error: 48 | :: 49 | 50 | ImportError: cannot import name 'IncompleteRead' 51 | 52 | Run these commands to fix it: 53 | :: 54 | 55 | apt-get remove python3-pip 56 | easy_install3 pip 57 | 58 | This will remove the distribution's pip3 package and replace it with a version that is compatible 59 | with the newer requests package. 60 | 61 | Installation on Arch/Devuan/Fedora 62 | ---------------------------------- 63 | 64 | Arch Linux 65 | ~~~~~~~~~~ 66 | 67 | There are two packages for Arch Linux in the AUR, 68 | `acd_cli-git `_, which is linked to the 69 | master branch of the GitHub repository, and 70 | `acd_cli `_, which is linked to the PyPI release. 71 | 72 | Devuan 73 | ~~~~~~ 74 | 75 | The Devuan package is called "python3-acdcli" and may be installed as usual 76 | (by running `apt-get install python3-acdcli` as superuser). 77 | 78 | Fedora 79 | ~~~~~~ 80 | 81 | An official `rpm package `_ exists 82 | that may be installed. 83 | 84 | Building deb/rpm packages 85 | ------------------------- 86 | 87 | You will need to have `fpm `_ installed to build packages. 88 | 89 | There is a Makefile in the assets directory that includes commands to build Debian packages 90 | (``make deb``) or RedHat packages (``make rpm``). It will also build the required 91 | requests-toolbelt package. 92 | fpm may also be able to build packages for other distributions or operating systems. 93 | 94 | Environment Variables 95 | --------------------- 96 | 97 | Cache Path and Settings Path 98 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 99 | 100 | You will find the current path settings in the output of ``acd_cli -v init``. 101 | 102 | The cache path is where acd\_cli stores OAuth data, the node cache, logs etc. You 103 | may override the cache path by setting the ``ACD_CLI_CACHE_PATH`` environment variable. 104 | 105 | The settings path is where various configuration files are stored (refer to the 106 | :doc:`configuration section `). 107 | The default path may be overriden by setting the ``ACD_CLI_SETTINGS_PATH`` environment variable. 108 | 109 | Proxy support 110 | ~~~~~~~~~~~~~ 111 | 112 | `Requests `_ supports HTTP(S) proxies via environment 113 | variables. Since all connections to Amazon Drive are using HTTPS, you need to 114 | set the variable ``HTTPS_PROXY``. The following example shows how to do that in a bash-compatible 115 | environment. 116 | :: 117 | 118 | export HTTPS_PROXY="https://user:pass@1.2.3.4:8080/" 119 | 120 | You can also use HTTP proxies supporting CONNECT method: 121 | :: 122 | 123 | export HTTPS_PROXY="http://1.2.3.4:8888/" 124 | 125 | Another way to permanently set the proxy is via configuration file. 126 | 127 | Locale 128 | ~~~~~~ 129 | 130 | If you need non-ASCII file/directory names, please check that your system's locale is set correctly. 131 | 132 | Dependencies 133 | ------------ 134 | 135 | FUSE 136 | ~~~~ 137 | 138 | For the mounting feature, fuse >= 2.6 is needed according to 139 | `fusepy `_. 140 | On a Debian-based distribution, the package should be named simply 'fuse'. 141 | 142 | Python Packages 143 | ~~~~~~~~~~~~~~~ 144 | 145 | Under normal circumstances, it should not be necessary to install the dependencies manually. 146 | 147 | - `appdirs `_ 148 | - `colorama `_ 149 | - `dateutils `_ 150 | - `requests `_ >= 2.1.0 151 | - `requests-toolbelt `_ 152 | - `sqlalchemy `_ 153 | 154 | If you want to the dependencies using your distribution's packaging system and 155 | are using a distro based on Debian 'jessie', the necessary packages are 156 | ``python3-appdirs python3-colorama python3-dateutil python3-requests python3-sqlalchemy``. 157 | 158 | Uninstalling 159 | ------------ 160 | 161 | Please run ``acd_cli delete-everything`` first to delete your authentication 162 | and node data in the cache path. Then, use pip to uninstall 163 | :: 164 | 165 | pip3 uninstall acdcli 166 | 167 | Then, revoke the permission for ``acd_cli_oa`` to access your drive in your Amazon profile, 168 | more precisely at https://www.amazon.com/ap/adam. 169 | -------------------------------------------------------------------------------- /docs/sync.rst: -------------------------------------------------------------------------------- 1 | Syncing 2 | ======= 3 | 4 | **acd\_cli** keeps a local cache of node metadata to reduce latency. Syncing simply 5 | means updating the local cache with current data from Amazon Drive. 6 | [An Amazon Drive `node` may be file or folder.] 7 | 8 | Regular syncing 9 | --------------- 10 | 11 | Regular syncing ``acd_cli sync`` should be the preferred method to update the metadata for 12 | your whole Drive account. When invoked for the first time, it will get a complete list of 13 | the file and folder metadata. For later uses, it will utilize the saved checkpoint from the 14 | last sync to only fetch the metadata that has changed since then. 15 | 16 | The ``--full`` (``-f``) flag forces the cache to be cleared before syncing, resulting in 17 | a non-incremental, full sync. 18 | 19 | Sync changesets may also be written to or inserted from a file. 20 | 21 | Incomplete sync 22 | +++++++++++++++ 23 | 24 | For large syncsets, for instance when doing a full sync, you may get the error message 25 | "Root node not found. Sync may have been incomplete." Please try to resume the sync process 26 | later, omitting the ``--full`` flag if you had specified it prior. 27 | 28 | Partial syncing 29 | --------------- 30 | 31 | Partial syncing may be a quick-and-dirty way to synchronize the metadata of a single directory 32 | with a smallish number of files and folders. E.g. ``acd_cli psync /`` will non-recursively fetch 33 | the metadata for the root folder. 34 | 35 | The ``--recursive`` (``-r``) flag will also descend into the specified folder's subfolders. 36 | It is not advisible to use this flag for folders with many subfolders 37 | 38 | The partial sync action will need to fetch node metadata in batches of 200. T 39 | Please be aware that when using regular and partial syncing alternatingly, your metadata 40 | may be in an inconsistent state. 41 | -------------------------------------------------------------------------------- /docs/transfer.rst: -------------------------------------------------------------------------------- 1 | File transfer 2 | ============= 3 | 4 | acd\_cli offers multi-file transfer actions - upload and download - 5 | and single-file transfer actions - overwrite, stream and cat. 6 | 7 | Multi-file transfers can be done with concurrent connections by specifying the argument ``-x NUM``. 8 | If remote folder hierarchies or local directory hierarchies need to be created, this will be done 9 | prior to the file transfers. 10 | 11 | Actions 12 | ------- 13 | 14 | ``upload`` 15 | ~~~~~~~~~~ 16 | 17 | The upload action will upload files or recursively upload directories. 18 | Existing files will not be changed, normally. 19 | 20 | Syntax: 21 | :: 22 | 23 | acdcli upload /local/path [/local/next_path [...]] /remote/path 24 | 25 | If the ``--overwrite`` (``-o``) argument is specified, a remote file will be updated if 26 | a) the local file's modification time is higher or 27 | b) the local file's creation time is higher and the file size is different. 28 | The ``--force`` (``-f``) argument can be used to force overwrite. 29 | 30 | .. hint:: 31 | When uploading large files (>10GiB), a warning about a timeout may be displayed. You then need to 32 | wait a few minutes, sync and manually check if the file was uploaded correctly. 33 | 34 | ``overwrite`` 35 | ~~~~~~~~~~~~~ 36 | 37 | The upload action overwrites the content of a remote file with a local file. 38 | 39 | Syntax: 40 | :: 41 | 42 | acdcli overwrite /local/path /remote/path 43 | 44 | ``download`` 45 | ~~~~~~~~~~~~ 46 | 47 | The download action can download a single file or recursively download a directory. 48 | If a file already exists locally, it will not be overwritten. 49 | 50 | Syntax: 51 | :: 52 | 53 | acdcli download /remote/path [/local/path] 54 | 55 | If the local path is omitted, the destination path will be the current working directory. 56 | 57 | ``stream`` 58 | ~~~~~~~~~~ 59 | 60 | This action will upload the standard input stream to a file. 61 | 62 | Syntax: 63 | :: 64 | 65 | some_process | acdcli stream file_name /remote/path 66 | 67 | If the ``--overwrite`` (``-o``) argument is specified, the remote file will be overwritten if 68 | it exists. 69 | 70 | ``cat`` 71 | ~~~~~~~ 72 | 73 | This action outputs the content of a file to standard output. 74 | 75 | Hints 76 | ----- 77 | 78 | Abort/Resume 79 | 80 | Incomplete file downloads will be resumed automatically. Aborted file uploads are not resumable 81 | at the moment. 82 | 83 | Folder or directory hierarchies that were created for a transfer do not need to be recreated 84 | when resuming a transfer. 85 | 86 | Retry 87 | 88 | Failed upload, download and overwrite actions allow retries on error 89 | by specifying the ``--max-retries|-r`` argument, e.g. ``acd_cli -r MAX_RETRIES``. 90 | 91 | Exclusion 92 | 93 | Files may be excluded from upload or download by regex on their name or by file ending. 94 | Additionally, paths can be excluded from upload. Regexes and file endings are case-insensitive. 95 | 96 | It is possible to specify multiple exclusion arguments of the same kind. 97 | 98 | Remove Source Files 99 | 100 | The ``--remove-source-files|-rsf`` flag is used, local files will be deleted from the filesystem 101 | 102 | #. if the upload succeeds 103 | #. if deduplication is enabled and at least one duplicate is found 104 | #. if a file of the same name is present in the remote upload path but the file is not to be 105 | overwritten (deletion then only occurs if the file sizes match) 106 | 107 | Deduplication 108 | 109 | Server-side deduplication prevents completely uploaded files from being saved as a node if 110 | another file with the same MD5 checksum already exists. 111 | acd\_cli can prevent uploading duplicates by checking local files' sizes and MD5s. 112 | Empty files are never regarded duplicates. 113 | 114 | Progress indicator 115 | 116 | To suppress the progress indicator from being displayed on standard output, use the ``--quiet`` 117 | flag. 118 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ----- 3 | 4 | acd_cli may be invoked as ``acd_cli`` or ``acdcli``. 5 | 6 | Most actions need the node cache to be initialized and up-to-date, so please run a sync. An ordinary 7 | sync will fetch the changes since the last sync or the full node list if the cache is empty. 8 | Partially syncing will only fetch the active contents of one folder, optionally recursively. 9 | 10 | The following actions are built in 11 | 12 | .. code-block:: none 13 | 14 | sync (s) refresh node cache; prerequisite for many actions 15 | psync only refresh the contents of the specified folder 16 | clear-cache (cc) clear node cache [offline operation] 17 | 18 | tree (t) print directory tree [offline operation] 19 | children (ls) list a folder's children [offline operation] 20 | 21 | find (f) find nodes by name [offline operation] [case insensitive] 22 | find-md5 (fm) find files by MD5 hash [offline operation] 23 | find-regex (fr) find nodes by regular expression [offline operation] [case insensitive] 24 | 25 | upload (ul) file and directory upload to a remote destination 26 | overwrite (ov) overwrite file A [remote] with content of file B [local] 27 | stream (st) upload the standard input stream to a file 28 | download (dl) download a remote folder or file; will skip existing local files 29 | cat output a file to the standard output stream 30 | 31 | create (c, mkdir) create folder using an absolute path 32 | 33 | list-trash (lt) list trashed nodes [offline operation] 34 | trash (rm) move node to trash 35 | restore (re) restore node from trash 36 | 37 | move (mv) move node A into folder B 38 | rename (rn) rename a node 39 | 40 | resolve (rs) resolve a path to a node ID [offline operation] 41 | 42 | usage (u) show drive usage data 43 | quota (q) show drive quota [raw JSON] 44 | metadata (m) print a node's metadata [raw JSON] 45 | 46 | mount mount the drive at a local directory 47 | umount unmount drive(s) 48 | 49 | Please run ``acd_cli --help`` to get a current list of the available actions. A list of further 50 | arguments of an action and their order can be printed by calling ``acd_cli [action] --help``. 51 | 52 | Most node arguments may be specified as a 22 character ID or a UNIX-style path. 53 | Trashed nodes' paths might not be able to be resolved correctly; use their ID instead. 54 | 55 | There are more detailed instructions for :doc:`sycing `, 56 | :doc:`file transfer actions `, 57 | :doc:`find actions ` and the :doc:`FUSE module `. 58 | 59 | Logs will automatically be saved into the cache directory. 60 | 61 | Global Flags/Parameters 62 | ~~~~~~~~~~~~~~~~~~~~~~~ 63 | 64 | .. 65 | not using reST's option list here because it does not support (?) --foo={bar1,bar2} type args 66 | 67 | ``--verbose`` (``-v``) and ``--debug`` (``-d``) will print additional messages to standard error. 68 | 69 | ``--no-log`` (``-nl``) will disable the automatic logging feature that saves log files to the 70 | cache directory. 71 | 72 | ``--color`` will set the coloring mode according to the specified argument (``auto``, ``never`` 73 | or ``always``). Coloring is turned off by default; it is used for file/folder listings. 74 | 75 | ``--check`` (``-c``) sets the start-up database integrity check mode. The default is to perform a 76 | ``full`` check. Setting the check to ``quick`` or ``none`` may speed up the initialization for 77 | large databases. 78 | 79 | ``--utf`` (``-u``) will force the output to be encoded in UTF-8, regardless 80 | of the system's settings. 81 | 82 | 83 | Exit Status 84 | ~~~~~~~~~~~ 85 | 86 | When the script is done running, its exit status can be checked for flags. If no error occurs, 87 | the exit status will be 0. Possible flag values are: 88 | 89 | =========================== ======= 90 | flag value 91 | =========================== ======= 92 | general error 1 93 | argument error 2 94 | failed file transfer 8 95 | upload timeout 16 96 | hash mismatch 32 97 | error creating folder 64 98 | file size mismatch 128 99 | cache outdated 256 100 | remote duplicate 512 101 | duplicate inode 1024 102 | name collision 2048 103 | error deleting source file 4096 104 | =========================== ======= 105 | 106 | If multiple errors occur, their respective flag values will be compounded into the exit 107 | status value by a binary OR operation. Because exit status values may not be larger than 255, 108 | flags 256 and above cannot be returned via exit status. 109 | A warning message will be displayed at the end of execution if those errors occurred. 110 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import sys 4 | from setuptools import setup, find_packages 5 | from distutils.version import StrictVersion 6 | import acdcli 7 | 8 | 9 | def read(fname: str) -> str: 10 | return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read() 11 | 12 | # replace GitHub external links 13 | repl = ('`([^`]*?) <(docs/)?(.*?)\.rst>`_', 14 | '`\g<1> .html>`_') 15 | 16 | version = acdcli.__version__ 17 | StrictVersion(version) 18 | 19 | requests_py32 = ',<2.11.0' if sys.version_info[0:2] == (3, 2) else '' 20 | 21 | dependencies = ['appdirs', 'colorama', 'fusepy', 'python_dateutil', 22 | 'requests>=2.1.0,!=2.9.0,!=2.12.0%s' % requests_py32, 'requests_toolbelt!=0.5.0'] 23 | doc_dependencies = ['sphinx_paramlinks'] 24 | test_dependencies = ['httpretty<0.8.11', 'mock'] 25 | 26 | if os.environ.get('READTHEDOCS') == 'True': 27 | dependencies = doc_dependencies 28 | 29 | setup( 30 | name='acdcli', 31 | version=version, 32 | description='a command line interface and FUSE filesystem for Amazon Cloud Drive', 33 | long_description='a command line interface and FUSE filesystem for Amazon Cloud Drive', 34 | license='GPLv2+', 35 | author='yadayada', 36 | author_email='acd_cli@mail.com', 37 | keywords=['amazon cloud drive', 'clouddrive', 'FUSE'], 38 | url='https://github.com/yadayada/acd_cli', 39 | download_url='https://github.com/yadayada/acd_cli/tarball/' + version, 40 | zip_safe=False, 41 | packages=find_packages(exclude=['tests']), 42 | test_suite='tests.get_suite', 43 | scripts=['acd_cli.py'], 44 | entry_points={'console_scripts': ['acd_cli = acd_cli:main', 'acdcli = acd_cli:main'], 45 | # 'acd_cli.plugins': ['stream = plugins.stream', 46 | # 'template = plugins.template'] 47 | }, 48 | install_requires=dependencies, 49 | tests_require=test_dependencies, 50 | extras_require={'docs': doc_dependencies}, 51 | classifiers=[ 52 | 'Environment :: Console', 53 | 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 54 | 'Programming Language :: Python :: 3', 55 | 'Programming Language :: Python :: 3.2', 56 | 'Programming Language :: Python :: 3.3', 57 | 'Programming Language :: Python :: 3.4', 58 | 'Programming Language :: Python :: 3.5', 59 | 'Programming Language :: Python :: 3.6', 60 | 'Programming Language :: Python :: 3 :: Only', 61 | 'Development Status :: 4 - Beta', 62 | 'Topic :: System :: Archiving :: Backup', 63 | 'Topic :: System :: Filesystems' 64 | ] 65 | ) 66 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | from unittest import TestLoader, TestSuite 2 | 3 | from .test_actions import ActionTestCase 4 | from .test_api import APITestCase 5 | from .test_cache import CacheTestCase 6 | from .test_helper import HelperTestCase 7 | 8 | 9 | def get_suite() -> TestSuite: 10 | """"Returns a suite of all automated tests.""" 11 | all_tests = TestSuite() 12 | 13 | all_tests.addTest(TestLoader().loadTestsFromTestCase(ActionTestCase)) 14 | all_tests.addTest(TestLoader().loadTestsFromTestCase(APITestCase)) 15 | all_tests.addTest(TestLoader().loadTestsFromTestCase(CacheTestCase)) 16 | all_tests.addTest(TestLoader().loadTestsFromTestCase(HelperTestCase)) 17 | 18 | return all_tests 19 | -------------------------------------------------------------------------------- /tests/cache_files/README: -------------------------------------------------------------------------------- 1 | Put your 'oauth_data' file in here. -------------------------------------------------------------------------------- /tests/dummy_files/endpoint_data: -------------------------------------------------------------------------------- 1 | { 2 | "contentUrl": "https://content-na.drive.amazonaws.com/cdproxy/", 3 | "exp_time": 5000000000, 4 | "metadataUrl": "https://cdws.us-east-1.amazonaws.com/drive/v1/" 5 | } -------------------------------------------------------------------------------- /tests/dummy_files/oauth.json: -------------------------------------------------------------------------------- 1 | { 2 | "access_token": "Bar", 3 | "exp_time": 5000000000, 4 | "expires_in": 5000000000, 5 | "refresh_token": "Foo", 6 | "token_type": "bearer" 7 | } -------------------------------------------------------------------------------- /tests/test_actions.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock import patch, mock_open, MagicMock, sentinel 3 | import os 4 | import sys 5 | import json 6 | import httpretty 7 | 8 | import acd_cli 9 | 10 | from acdcli.cache import db 11 | 12 | from .test_helper import gen_file, gen_folder, gen_bunch_of_nodes 13 | 14 | cache_path = os.path.join(os.path.dirname(__file__), 'dummy_files') 15 | os.environ['ACD_CLI_CACHE_PATH'] = cache_path 16 | 17 | try: 18 | from importlib import reload 19 | except ImportError: 20 | from imp import reload 21 | 22 | 23 | def run_main() -> int: 24 | try: 25 | acd_cli.main() 26 | except SystemExit as e: 27 | return e.code 28 | 29 | 30 | class ActionTestCase(unittest.TestCase): 31 | stdout = sys.stdout 32 | 33 | def setUp(self): 34 | reload(acd_cli) 35 | sys.argv = [acd_cli._app_name, '-nw'] 36 | self.cache = db.NodeCache(cache_path) 37 | 38 | def tearDown(self): 39 | db.NodeCache.remove_db_file(cache_path) 40 | 41 | # tests 42 | 43 | @patch('sys.stdout.write') 44 | def testHelp(self, print_): 45 | sys.argv.append('-h') 46 | self.assertEqual(run_main(), 0) 47 | 48 | def testClearCache(self): 49 | sys.argv.append('cc') 50 | self.assertEqual(run_main(), None) 51 | 52 | def testClearCacheNonExist(self): 53 | self.cache.remove_db_file() 54 | sys.argv.append('cc') 55 | self.assertEqual(run_main(), None) 56 | 57 | # listing 58 | 59 | @patch('sys.stdout.write') 60 | def testTree(self, print_): 61 | files, folders = gen_bunch_of_nodes(50) 62 | 63 | self.cache.insert_nodes(files + folders) 64 | sys.argv.extend(['tree', '-t']) 65 | self.assertEqual(run_main(), None) 66 | self.assertEqual(len(print_.mock_calls), 100) 67 | 68 | @patch('sys.stdout.write') 69 | def testList(self, print_): 70 | db.NodeCache(cache_path) 71 | folder = gen_folder([]) 72 | files = [gen_file([folder]) for _ in range(50)] 73 | 74 | self.cache.insert_nodes(files + [folder]) 75 | sys.argv.extend(['ls', '-t', '/']) 76 | self.assertEqual(run_main(), None) 77 | self.assertEqual(len(print_.mock_calls), 100) 78 | 79 | # find actions 80 | 81 | # transfer actions 82 | 83 | # create 84 | 85 | # trashing 86 | 87 | # move/rename, resolve 88 | 89 | # child ops 90 | 91 | # stats 92 | 93 | # FUSE 94 | 95 | # @httpretty.activate 96 | # def testMount(self): 97 | # httpretty. \ 98 | # register_uri(httpretty.GET, acd_cli.acd_client.metadata_url + 'account/quota', 99 | # body=json.dumps({'available:': 100, 'quota': 100})) 100 | # 101 | # sys.argv.extend(['-d', 'mount', '-i', '0', 102 | # os.path.join(os.path.dirname(__file__), 'dummy_files/mountpoint')]) 103 | # self.cache.insert_nodes([gen_folder()]) 104 | # self.assertEqual(run_main(), None) 105 | 106 | def testUnmount(self): 107 | sys.argv.append('umount') 108 | self.assertEqual(run_main(), 0) 109 | 110 | # undocumented actions 111 | 112 | def testInit(self): 113 | sys.argv.append('init') 114 | self.cache.insert_nodes([gen_folder()]) 115 | self.assertEqual(run_main(), None) 116 | 117 | # misc 118 | 119 | def testCheckCacheEmpty(self): 120 | sys.argv.extend(['ls', '/']) 121 | self.assertEqual(run_main(), acd_cli.INIT_FAILED_RETVAL) 122 | 123 | def testCheckCacheNonEmpty(self): 124 | folder = gen_folder() 125 | self.cache.insert_nodes([folder]) 126 | sys.argv.extend(['ls', '/']) 127 | self.assertEqual(run_main(), None) 128 | 129 | # helper functions 130 | -------------------------------------------------------------------------------- /tests/test_api.py: -------------------------------------------------------------------------------- 1 | """Isolated API unit tests.""" 2 | 3 | import unittest 4 | import httpretty 5 | from mock import patch, mock_open, MagicMock 6 | import logging 7 | import os 8 | import json 9 | import time 10 | 11 | import acdcli.api.oauth as oauth 12 | 13 | from acdcli.api.account import _Usage 14 | from acdcli.api.common import * 15 | from acdcli.api.client import ACDClient 16 | 17 | from .test_helper import gen_rand_id 18 | 19 | logging.basicConfig(level=logging.INFO) 20 | path = os.path.join(os.path.dirname(__file__), 'dummy_files') 21 | 22 | 23 | class APITestCase(unittest.TestCase): 24 | def setUp(self): 25 | self.acd = ACDClient(path) 26 | self.acd.BOReq._wait = lambda: None 27 | 28 | def testMetadataUrl(self): 29 | self.assertEqual(self.acd.metadata_url, 'https://cdws.us-east-1.amazonaws.com/drive/v1/') 30 | 31 | def testContentUrl(self): 32 | self.assertEqual(self.acd.content_url, 'https://content-na.drive.amazonaws.com/cdproxy/') 33 | 34 | def testValidID0(self): 35 | self.assertTrue(is_valid_id('abcdefghijklmnopqrstuv')) 36 | 37 | def testValidID1(self): 38 | self.assertTrue(is_valid_id('0123456789012345678901')) 39 | 40 | def testValidID2(self): 41 | self.assertTrue(is_valid_id('a0b1c2d3e4f5g6h7i8j9k0')) 42 | 43 | def testValidID3(self): 44 | self.assertTrue(is_valid_id('a0b1c2d3e4f--6h7i8j9k0')) 45 | 46 | def testValidIDs(self): 47 | for _ in range(1000): 48 | self.assertTrue(is_valid_id(gen_rand_id())) 49 | 50 | def testInvalidID0(self): 51 | self.assertFalse(is_valid_id('')) 52 | 53 | def testInvalidID1(self): 54 | self.assertFalse(is_valid_id('äbcdéfghíjklmnöpqrstüv')) 55 | 56 | def testInvalidID2(self): 57 | self.assertFalse(is_valid_id('abcdefghijklmnopqrstu')) 58 | 59 | # 60 | # account 61 | # 62 | 63 | @httpretty.activate 64 | def testUsage(self): 65 | httpretty. \ 66 | register_uri(httpretty.GET, self.acd.metadata_url + 'account/usage', 67 | body=json.dumps({"lastCalculated": "2014-08-13T23:17:41.365Z", 68 | "video": {"billable": {"bytes": 23524252, "count": 22}, 69 | "total": {"bytes": 23524252, "count": 22}}, 70 | "other": {"billable": {"bytes": 29999771, "count": 871}, 71 | "total": {"bytes": 29999771, "count": 871}}, 72 | "doc": {"billable": {"bytes": 807170, "count": 10}, 73 | "total": {"bytes": 807170, "count": 10}}, 74 | "photo": {"billable": {"bytes": 9477988, "count": 25}, 75 | "total": {"bytes": 9477988, "count": 25}}}) 76 | ) 77 | self.assertIsInstance(self.acd.get_account_usage(), _Usage) 78 | 79 | @httpretty.activate 80 | def testUsageEmpty(self): 81 | httpretty.register_uri(httpretty.GET, self.acd.metadata_url + 'account/usage', body='{}') 82 | self.assertEqual(str(self.acd.get_account_usage()), '') 83 | 84 | # 85 | # metadata 86 | # 87 | 88 | @httpretty.activate 89 | def testChanges(self): 90 | httpretty.register_uri(httpretty.POST, self.acd.metadata_url + 'changes', 91 | body='{"checkpoint": "foo", "reset": true, ' 92 | '"nodes": [ {"kind": "FILE", "status": "TRASH"} ], ' 93 | '"statusCode": 200}\n' 94 | '{"end": true}') 95 | tmp = self.acd.get_changes() 96 | changesets = [c for c in self.acd._iter_changes_lines(tmp)] 97 | self.assertEqual(len(changesets), 1) 98 | changeset = changesets[0] 99 | self.assertEqual(len(changeset.nodes), 1) 100 | self.assertEqual(len(changeset.purged_nodes), 0) 101 | self.assertEqual(changeset.checkpoint, 'foo') 102 | self.assertTrue(changeset.reset) 103 | 104 | @httpretty.activate 105 | def testChangesMissingEnd(self): 106 | httpretty.register_uri(httpretty.POST, self.acd.metadata_url + 'changes', 107 | body='{"checkpoint": "foo", "reset": true, "nodes": [], ' 108 | '"statusCode": 200}\n') 109 | tmp = self.acd.get_changes() 110 | changesets = [c for c in self.acd._iter_changes_lines(tmp)] 111 | self.assertEqual(len(changesets), 1) 112 | changeset = changesets[0] 113 | self.assertEqual(len(changeset.nodes), 0) 114 | self.assertEqual(len(changeset.purged_nodes), 0) 115 | self.assertEqual(changeset.checkpoint, 'foo') 116 | self.assertTrue(changeset.reset) 117 | 118 | @httpretty.activate 119 | def testChangesCorruptJSON(self): 120 | httpretty.register_uri(httpretty.POST, self.acd.metadata_url + 'changes', 121 | body='{"checkpoint": }') 122 | with self.assertRaises(RequestError): 123 | tmp = self.acd.get_changes() 124 | [cs for cs in self.acd._iter_changes_lines(tmp)] 125 | 126 | -------------------------------------------------------------------------------- /tests/test_api_live.py: -------------------------------------------------------------------------------- 1 | """Real, live Amazon Cloud Drive API tests""" 2 | 3 | import unittest 4 | import logging 5 | import os 6 | import io 7 | import sys 8 | import random 9 | import string 10 | import mmap 11 | import tempfile 12 | 13 | from acdcli.api import client, content, common 14 | from acdcli.api.common import RequestError 15 | from acdcli.utils import hashing 16 | 17 | logging.basicConfig(level=logging.INFO) 18 | path = os.path.join(os.path.dirname(__file__), 'cache_files') 19 | 20 | 21 | def gen_rand_sz(): 22 | return random.randint(1, 32 * 1024) 23 | 24 | 25 | def gen_rand_nm(): 26 | return str.join('', (random.choice(string.ascii_letters + string.digits) for _ in range(32))) 27 | 28 | 29 | def gen_temp_file(size=gen_rand_sz()) -> tuple: 30 | f = tempfile.NamedTemporaryFile(mode='w+b') 31 | f.write(os.urandom(size)) 32 | f.seek(0) 33 | return f, os.path.getsize(f.name) 34 | 35 | 36 | def gen_rand_anon_mmap(size=gen_rand_sz()) -> tuple: 37 | mmo = mmap.mmap(-1, size) 38 | mmo.write(os.urandom(size)) 39 | mmo.seek(0) 40 | return mmo, size 41 | 42 | 43 | def do_not_run(func): 44 | return lambda x: None 45 | 46 | print(sys.argv) 47 | 48 | 49 | class APILiveTestCase(unittest.TestCase): 50 | def setUp(self): 51 | self.acd_client = client.ACDClient(path) 52 | self.acd_client.BOReq._wait = lambda: None 53 | self.assertTrue(os.path.isfile(os.path.join(path, 'oauth_data'))) 54 | self.assertTrue(os.path.isfile(os.path.join(path, 'endpoint_data'))) 55 | 56 | def tearDown(self): 57 | pass 58 | 59 | # 60 | # common.py 61 | # 62 | 63 | def test_back_off_error(self): 64 | self.acd_client.BOReq.get(self.acd_client.content_url) 65 | self.assertEqual(self.acd_client.BOReq._BackOffRequest__retries, 1) 66 | 67 | # 68 | # account.py 69 | # 70 | 71 | def test_get_quota(self): 72 | q = self.acd_client.get_quota() 73 | self.assertIn('quota', q) 74 | self.assertIn('available', q) 75 | 76 | def test_get_usage(self): 77 | self.acd_client.get_account_usage() 78 | 79 | # 80 | # content.py 81 | # 82 | 83 | def test_upload(self): 84 | f, sz = gen_temp_file() 85 | md5 = hashing.hash_file_obj(f) 86 | n = self.acd_client.upload_file(f.name) 87 | self.assertIn('id', n) 88 | self.assertEqual(n['contentProperties']['size'], sz) 89 | self.assertEqual(n['contentProperties']['md5'], md5) 90 | n = self.acd_client.move_to_trash(n['id']) 91 | 92 | def test_upload_stream(self): 93 | s, sz = gen_rand_anon_mmap() 94 | fn = gen_rand_nm() 95 | h = hashing.IncrementalHasher() 96 | 97 | n = self.acd_client.upload_stream(s, fn, parent=None, read_callbacks=[h.update]) 98 | self.assertEqual(n['contentProperties']['md5'], h.get_result()) 99 | self.assertEqual(n['contentProperties']['size'], sz) 100 | 101 | self.acd_client.move_to_trash(n['id']) 102 | 103 | def test_upload_stream_empty(self): 104 | empty_stream = io.BufferedReader(io.BytesIO()) 105 | fn = gen_rand_nm() 106 | 107 | n = self.acd_client.upload_stream(empty_stream, fn, parent=None) 108 | self.assertEqual(n['contentProperties']['md5'], 'd41d8cd98f00b204e9800998ecf8427e') 109 | self.assertEqual(n['contentProperties']['size'], 0) 110 | 111 | self.acd_client.move_to_trash(n['id']) 112 | 113 | def test_overwrite(self): 114 | f, sz = gen_temp_file() 115 | h = hashing.IncrementalHasher() 116 | 117 | n = self.acd_client.create_file(os.path.basename(f.name)) 118 | self.assertIn('id', n) 119 | 120 | n = self.acd_client.overwrite_file(n['id'], f.name, [h.update]) 121 | self.assertEqual(n['contentProperties']['version'], 2) 122 | self.assertEqual(n['contentProperties']['md5'], h.get_result()) 123 | 124 | self.acd_client.move_to_trash(n['id']) 125 | 126 | def test_overwrite_stream(self): 127 | s, sz = gen_rand_anon_mmap() 128 | fn = gen_rand_nm() 129 | h = hashing.IncrementalHasher() 130 | 131 | n = self.acd_client.create_file(fn) 132 | self.assertIn('id', n) 133 | 134 | n = self.acd_client.overwrite_stream(s, n['id'], [h.update]) 135 | self.assertEqual(n['contentProperties']['md5'], h.get_result()) 136 | self.assertEqual(n['contentProperties']['size'], sz) 137 | 138 | empty_stream = io.BufferedReader(io.BytesIO()) 139 | n = self.acd_client.overwrite_stream(empty_stream, n['id']) 140 | self.assertEqual(n['contentProperties']['md5'], 'd41d8cd98f00b204e9800998ecf8427e') 141 | self.assertEqual(n['contentProperties']['size'], 0) 142 | 143 | self.acd_client.move_to_trash(n['id']) 144 | 145 | def test_download(self): 146 | f, sz = gen_temp_file() 147 | self.assertTrue(sz < self.acd_client._conf.getint('transfer', 'dl_chunk_size')) 148 | md5 = hashing.hash_file_obj(f) 149 | n = self.acd_client.upload_file(f.name) 150 | self.assertIn('id', n) 151 | 152 | f.close() 153 | self.assertFalse(os.path.exists(f.name)) 154 | 155 | self.acd_client.download_file(n['id'], f.name) 156 | md5_dl = hashing.hash_file(f.name) 157 | self.assertEqual(md5, md5_dl) 158 | self.acd_client.move_to_trash(n['id']) 159 | 160 | def test_download_chunked(self): 161 | ch_sz = gen_rand_sz() 162 | self.acd_client._conf['transfer']['dl_chunk_size'] = str(ch_sz) 163 | f, sz = gen_temp_file(size=5 * ch_sz) 164 | md5 = hashing.hash_file_obj(f) 165 | 166 | n = self.acd_client.upload_file(f.name) 167 | self.assertEqual(n['contentProperties']['md5'], md5) 168 | f.close() 169 | self.assertFalse(os.path.exists(f.name)) 170 | 171 | f = io.BytesIO() 172 | self.acd_client.chunked_download(n['id'], f, length=sz) 173 | self.acd_client.move_to_trash(n['id']) 174 | dl_md5 = hashing.hash_file_obj(f) 175 | self.assertEqual(sz, f.tell()) 176 | self.assertEqual(md5, dl_md5) 177 | 178 | def test_incomplete_download(self): 179 | ch_sz = gen_rand_sz() 180 | self.acd_client._conf['transfer']['dl_chunk_size'] = str(ch_sz) 181 | f, sz = gen_temp_file(size=5 * ch_sz) 182 | md5 = hashing.hash_file_obj(f) 183 | 184 | n = self.acd_client.upload_file(f.name) 185 | self.assertEqual(n['contentProperties']['md5'], md5) 186 | f.close() 187 | 188 | with self.assertRaises(RequestError) as cm: 189 | self.acd_client.download_file(n['id'], f.name, length=sz + 1) 190 | 191 | self.assertEqual(cm.exception.status_code, RequestError.CODE.INCOMPLETE_RESULT) 192 | self.acd_client.download_file(n['id'], f.name, length=sz) 193 | self.acd_client.move_to_trash(n['id']) 194 | os.remove(f.name) 195 | 196 | def test_download_resume(self): 197 | ch_sz = gen_rand_sz() 198 | self.acd_client._conf['transfer']['dl_chunk_size'] = str(ch_sz) 199 | f, sz = gen_temp_file(size=5 * ch_sz) 200 | md5 = hashing.hash_file(f.name) 201 | n = self.acd_client.upload_file(f.name) 202 | self.assertEqual(n['contentProperties']['md5'], md5) 203 | f.close() 204 | 205 | basename = os.path.basename(f.name) 206 | self.assertFalse(os.path.exists(f.name)) 207 | p_fn = basename + content.PARTIAL_SUFFIX 208 | with open(p_fn, 'wb') as f: 209 | self.acd_client.chunked_download(n['id'], f, length=int(sz * random.random())) 210 | self.assertLess(os.path.getsize(p_fn), sz) 211 | self.acd_client.download_file(n['id'], basename) 212 | self.acd_client.move_to_trash(n['id']) 213 | dl_md5 = hashing.hash_file(basename) 214 | self.assertEqual(md5, dl_md5) 215 | os.remove(basename) 216 | 217 | def test_create_file(self): 218 | name = gen_rand_nm() 219 | node = self.acd_client.create_file(name) 220 | self.acd_client.move_to_trash(node['id']) 221 | self.assertEqual(node['name'], name) 222 | self.assertEqual(node['parents'][0], self.acd_client.get_root_id()) 223 | 224 | def test_get_root_id(self): 225 | id = self.acd_client.get_root_id() 226 | self.assertTrue(common.is_valid_id(id)) 227 | 228 | # helper 229 | def create_random_dir(self): 230 | nm = gen_rand_nm() 231 | n = self.acd_client.create_folder(nm) 232 | self.assertIn('id', n) 233 | return n['id'] 234 | 235 | def test_mkdir(self): 236 | f_id = self.create_random_dir() 237 | self.acd_client.move_to_trash(f_id) 238 | 239 | # 240 | # metadata.py 241 | # 242 | 243 | @do_not_run 244 | def test_get_changes(self): 245 | nodes, purged_nodes, checkpoint, reset = self.acd_client.get_changes(include_purged=False) 246 | self.assertGreaterEqual(len(nodes), 1) 247 | self.assertEqual(len(purged_nodes), 0) 248 | self.assertTrue(reset) 249 | nodes, purged_nodes, checkpoint, reset = self.acd_client.get_changes(checkpoint=checkpoint) 250 | self.assertEqual(len(nodes), 0) 251 | self.assertEqual(len(purged_nodes), 0) 252 | self.assertFalse(reset) 253 | 254 | def test_move_node(self): 255 | f_id = self.create_random_dir() 256 | node = self.acd_client.create_file(gen_rand_nm()) 257 | old_parent = node['parents'][0] 258 | node = self.acd_client.move_node(node['id'], f_id) 259 | self.assertEqual(node['parents'][0], f_id) 260 | self.acd_client.move_to_trash(f_id) 261 | self.acd_client.move_to_trash(node['id']) 262 | 263 | def test_rename_node(self): 264 | nm = gen_rand_nm() 265 | nm2 = gen_rand_nm() 266 | node = self.acd_client.create_file(nm) 267 | self.assertEqual(node['name'], nm) 268 | node = self.acd_client.rename_node(node['id'], nm2) 269 | self.assertEqual(node['name'], nm2) 270 | self.acd_client.move_to_trash(node['id']) 271 | 272 | # 273 | # trash.py 274 | # 275 | 276 | def test_trash(self): 277 | # unnecessary 278 | pass 279 | 280 | def test_restore(self): 281 | f_id = self.create_random_dir() 282 | n = self.acd_client.move_to_trash(f_id) 283 | self.assertEqual(n['status'], 'TRASH') 284 | n = self.acd_client.restore(n['id']) 285 | self.assertEqual(n['status'], 'AVAILABLE') 286 | n = self.acd_client.move_to_trash(n['id']) 287 | self.assertEqual(n['status'], 'TRASH') 288 | 289 | def test_purge(self): 290 | f_id = self.create_random_dir() 291 | n = self.acd_client.move_to_trash(f_id) 292 | self.assertEqual(n['status'], 'TRASH') 293 | with self.assertRaises(RequestError): 294 | self.acd_client.purge(n['id']) 295 | -------------------------------------------------------------------------------- /tests/test_cache.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | 4 | from acdcli.cache import db, schema 5 | from .test_helper import gen_file, gen_folder, gen_bunch_of_nodes 6 | 7 | 8 | class CacheTestCase(unittest.TestCase): 9 | path = os.path.join(os.path.dirname(__file__), 'dummy_files') 10 | 11 | def setUp(self): 12 | self.cache = db.NodeCache(self.path) 13 | 14 | def tearDown(self): 15 | db.NodeCache.remove_db_file(self.path) 16 | 17 | def testEmpty(self): 18 | self.assertEqual(self.cache.get_node_count(), 0) 19 | 20 | def testInsertFolder(self): 21 | folder = gen_folder() 22 | self.cache.insert_node(folder) 23 | n = self.cache.get_node(folder['id']) 24 | self.assertEqual(n.id, folder['id']) 25 | self.assertEqual(self.cache.get_node_count(), 1) 26 | 27 | def testInsertFile(self): 28 | root = gen_folder() 29 | self.cache.insert_node(root) 30 | file = gen_file([root]) 31 | self.cache.insert_node(file) 32 | n = self.cache.get_node(file['id']) 33 | self.assertEqual(self.cache.get_node_count(), 2) 34 | 35 | def testFileMovement(self): 36 | root = gen_folder() 37 | folder = gen_folder([root]) 38 | self.assertNotEqual(root['id'], folder['id']) 39 | 40 | file = gen_file([root]) 41 | self.cache.insert_nodes([root, file]) 42 | 43 | _, rc = self.cache.list_children(root['id'], True) 44 | self.assertIn(file['id'], [n.id for n in rc]) 45 | 46 | file['parents'] = [folder['id']] 47 | self.cache.insert_nodes([folder, file]) 48 | 49 | _, rc = self.cache.list_children(root['id'], True) 50 | _, fc = self.cache.list_children(folder['id'], True) 51 | 52 | self.assertIn(file['id'], [n.id for n in fc]) 53 | self.assertNotIn(file['id'], [n.id for n in rc]) 54 | 55 | def testPurge(self): 56 | root = gen_folder() 57 | file = gen_file([root]) 58 | 59 | self.cache.insert_nodes([root, file]) 60 | self.assertEqual(self.cache.get_node_count(), 2) 61 | self.assertTrue(self.cache.get_node(file['id']).is_file) 62 | 63 | self.cache.remove_purged([file['id']]) 64 | self.assertIsNone(self.cache.get_node(file['id'])) 65 | self.assertEqual(self.cache.get_node_count(), 1) 66 | 67 | def testMultiParentNode(self): 68 | root = gen_folder() 69 | folder = gen_folder([root]) 70 | folder['status'] = 'AVAILABLE' 71 | 72 | file = gen_file([root]) 73 | file['parents'].append(folder['id']) 74 | self.assertEqual(len(file['parents']), 2) 75 | 76 | self.cache.insert_nodes([root, folder, file]) 77 | self.assertEqual(self.cache.get_node_count(), 3) 78 | self.assertEqual(self.cache.num_parents(file['id']), 2) 79 | 80 | def testListChildren(self): 81 | root = gen_folder() 82 | folders = [gen_folder([root]) for _ in range(25)] 83 | files = [gen_file([root]) for _ in range(25)] 84 | self.cache.insert_nodes(files + folders) 85 | fo, fi = self.cache.list_children(root['id'], trash=True) 86 | self.assertEqual(len(fo) + len(fi), len(files + folders)) 87 | 88 | def testCalculateUsageEmpty(self): 89 | self.assertEqual(self.cache.calculate_usage(), 0) 90 | 91 | def testCalculateUsageEmpty2(self): 92 | self.cache.insert_node(gen_folder()) 93 | self.assertEqual(self.cache.calculate_usage(), 0) 94 | 95 | def testCalculateUsage(self): 96 | folders, files = gen_bunch_of_nodes(50) 97 | self.cache.insert_nodes(folders + files) 98 | ttlsz = sum(f['contentProperties']['size'] for f in files) 99 | self.assertEqual(self.cache.calculate_usage(), ttlsz) 100 | -------------------------------------------------------------------------------- /tests/test_helper.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | import unittest 4 | 5 | 6 | def gen_rand_name(): 7 | return str.join('', (random.choice(string.ascii_letters + string.digits) for _ in range(64))) 8 | 9 | 10 | def gen_rand_id(): 11 | return str.join('', (random.choice(string.ascii_letters + string.digits + '-_') 12 | for _ in range(22))) 13 | 14 | 15 | def gen_rand_md5(): 16 | return str.join('', (random.choice(string.ascii_lowercase + string.digits) for _ in range(32))) 17 | 18 | 19 | def gen_folder(folders: list=None): 20 | folder = { 21 | 'createdBy': 'acd_cli_oa-', 22 | 'createdDate': '2015-01-01T00:00:00.00Z', 23 | 'eTagResponse': 'AbCdEfGhI01', 24 | 'id': gen_rand_id(), 25 | 'isShared': False, 26 | 'kind': 'FOLDER', 27 | 'labels': [], 28 | 'modifiedDate': '2015-01-01T00:00:00.000Z', 29 | 'name': gen_rand_name(), 30 | 'parents': [], 31 | 'restricted': False, 32 | 'status': 'AVAILABLE' if not folders else random.choice(['TRASH', 'AVAILABLE']), 33 | 'version': random.randint(1, 20) 34 | } 35 | if not folders: 36 | folder['name'] = None 37 | folder['isRoot'] = True 38 | elif len(folders) == 1: 39 | folder['parents'] = [folders[0]['id']] 40 | else: 41 | folder['parents'] = [folders[random.randint(0, len(folders) - 1)]['id']] 42 | return folder 43 | 44 | 45 | def gen_file(folders: list): 46 | file = { 47 | 'contentProperties': {'contentType': 'text/plain', 48 | 'extension': 'txt', 49 | 'md5': gen_rand_md5(), 50 | 'size': random.randint(0, 32 * 1024 ** 3), 51 | 'version': random.randint(1, 20)}, 52 | 'createdBy': 'acd_cli_oa-', 53 | 'createdDate': '2015-01-01T00:00:00.00Z', 54 | 'eTagResponse': 'AbCdEfGhI01', 55 | 'id': gen_rand_id(), 56 | 'isShared': False, 57 | 'kind': 'FILE', 58 | 'labels': [], 59 | 'modifiedDate': '2015-01-01T00:00:00.000Z', 60 | 'name': gen_rand_name(), 61 | 'parents': [folders[random.randint(0, len(folders) - 1)]['id']], 62 | 'restricted': False, 63 | 'status': random.choice(['AVAILABLE', 'TRASH']), 64 | 'version': random.randint(1, 20) 65 | } 66 | return file 67 | 68 | 69 | def gen_bunch_of_nodes(count: int): 70 | folders = [] 71 | files = [] 72 | for _ in range(int(count / 2)): 73 | folders.append(gen_folder(folders)) 74 | for _ in range(int(count / 2)): 75 | files.append(gen_file(folders)) 76 | 77 | return folders, files 78 | 79 | 80 | class HelperTestCase(unittest.TestCase): 81 | def setUp(self): 82 | pass 83 | 84 | def tearDown(self): 85 | pass 86 | 87 | def testCreateRootFolder(self): 88 | folder = gen_folder() 89 | self.assertIn('isRoot', folder) 90 | self.assertListEqual(folder['parents'], []) 91 | 92 | def testCreateNonRootFolder(self): 93 | root = gen_folder() 94 | folder = gen_folder([root]) 95 | self.assertNotIn('isRoot', folder) 96 | self.assertListEqual(folder['parents'], [root['id']]) 97 | 98 | def testMultiFolders(self): 99 | folders = [] 100 | for _ in range(100): 101 | folders.append(gen_folder(folders)) 102 | self.assertEqual(1, sum(f.get('isRoot', 0) for f in folders)) 103 | self.assertEqual(99, sum(len(f['parents']) for f in folders)) 104 | --------------------------------------------------------------------------------