├── asyncoss
│   ├── __init__.py
│   ├── http.py
│   ├── exceptions.py
│   ├── iterators.py
│   ├── models.py
│   └── api.py
├── setup.py
├── README.rst
├── LICENSE
└── .gitignore
/asyncoss/__init__.py:
--------------------------------------------------------------------------------
1 | from oss2.auth import Auth
2 | 
3 | from asyncoss.api import Service, Bucket
4 | from asyncoss.iterators import (
5 |     BucketIterator,
6 |     ObjectIterator,
7 |     MultipartUploadIterator,
8 |     ObjectUploadIterator,
9 |     PartIterator, LiveChannelIterator)
10 | 
11 | __all__ = [
12 |     'Auth', 'Service', 'Bucket', 'BucketIterator',
13 |     'ObjectIterator',
14 |     'MultipartUploadIterator',
15 |     'ObjectUploadIterator',
16 |     'PartIterator',
17 |     'LiveChannelIterator'
18 | ]
19 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup, find_packages
3 | 
4 | with open('README.rst') as f:
5 |     readme = f.read()
6 | 
7 | setup(
8 |     name='asyncoss',
9 |     version='0.0.4',
10 |     description='An async Aliyun OSS library.',
11 |     long_description=readme,
12 |     author='jerevia',
13 |     author_email='trilliondawn@gmail.com',
14 |     license='MIT',
15 |     install_requires=['aiohttp', 'oss2'],
16 |     packages=find_packages(),
17 |     classifiers=[
18 |         'Development Status :: 4 - Beta',
19 |         'Intended Audience :: Developers',
20 |         'License :: OSI Approved :: MIT License',
21 |         'Programming Language :: Python :: 3.6',
22 |         'Programming Language :: Python :: 3.7'
23 |     ]
24 | )
25 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Async Alibaba Cloud OSS SDK
2 | ===========================
3 | 
4 | Based on aiohttp and oss2. Requires Python 3.6+.
5 | 
6 | Installing
7 | ----------
8 | 
9 | .. code-block:: shell
10 | 
11 |     pip install asyncoss
12 | 
13 | 
14 | Getting started
15 | ----------------
16 | 
17 | .. code-block:: python
18 | 
19 |     import asyncoss
20 |     import asyncio
21 | 
22 |     endpoint = 'http://oss-cn-beijing.aliyuncs.com'
23 | 
24 |     auth = asyncoss.Auth('', '')
25 | 
26 |     async def main():
27 |         # The object key in the bucket is story.txt
28 |         async with asyncoss.Bucket(auth, endpoint, '') as bucket:
29 |             key = 'story.txt'
30 | 
31 |             # Upload
32 |             await bucket.put_object(key, 'Ali Baba is a happy youth.')
33 | 
34 |             # Download
35 |             result = await bucket.get_object(key)
36 |             await result.resp.read()
37 | 
38 |             # Delete
39 |             await bucket.delete_object(key)
40 | 
41 |     loop = asyncio.get_event_loop()
42 |     loop.run_until_complete(main())
43 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2018 Jerevia
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | .static_storage/ 56 | .media/ 57 | local_settings.py 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | -------------------------------------------------------------------------------- /asyncoss/http.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import asyncio 3 | 4 | from oss2 import defaults, http, __version__ 5 | from requests.structures import CaseInsensitiveDict 6 | 7 | import aiohttp 8 | import platform 9 | 10 | 11 | _USER_AGENT = 'aliyun-sdk-python/{0}({1}/{2}/{3};{4})'.format( 12 | __version__, platform.system(), platform.release(), platform.machine(), platform.python_version()) 13 | 14 | 15 | class Session(object): 16 | """属于同一个Session的请求共享一组连接池,如有可能也会重用HTTP连接。""" 17 | 18 | def __init__(self, loop=None): 19 | self._loop = loop or asyncio.get_event_loop() 20 | 21 | psize = defaults.connection_pool_size 22 | connector = aiohttp.TCPConnector(limit=psize, loop=self._loop) 23 | 24 | self._aio_session = aiohttp.ClientSession( 25 | connector=connector, 26 | skip_auto_headers=['Content-Type', 'User-Agent'], 27 | loop=self._loop) 28 | 29 | async def do_request(self, req, timeout=300): 30 | resp = await self._aio_session.request(req.method, url=req.url, 31 | data=req.data, 32 | 
params=req.params, 33 | headers=req.headers, 34 | timeout=timeout) 35 | return Response(resp) 36 | 37 | async def __aenter__(self): 38 | await self._aio_session.__aenter__() 39 | return self 40 | 41 | async def __aexit__(self, exc_type, exc_val, exc_tb): 42 | await self._aio_session.__aexit__(exc_type, exc_val, exc_tb) 43 | 44 | async def close(self): 45 | await self._aio_session.close() 46 | 47 | 48 | class Request(object): 49 | def __init__(self, method, url, 50 | data=None, 51 | params=None, 52 | headers=None, 53 | app_name=''): 54 | self.method = method 55 | self.url = url 56 | self.data = http._convert_request_body(data) 57 | 58 | self.params = params or {} 59 | 60 | if not isinstance(headers, CaseInsensitiveDict): 61 | self.headers = CaseInsensitiveDict(headers) 62 | else: 63 | self.headers = headers 64 | 65 | if 'Accept-Encoding' not in self.headers: 66 | self.headers['Accept-Encoding'] = '' 67 | 68 | if 'User-Agent' not in self.headers: 69 | if app_name: 70 | self.headers['User-Agent'] = _USER_AGENT + '/' + app_name 71 | else: 72 | self.headers['User-Agent'] = _USER_AGENT 73 | 74 | 75 | _CHUNK_SIZE = 8 * 1024 76 | 77 | 78 | class Response(object): 79 | def __init__(self, response): 80 | self.response = response 81 | self.status = response.status 82 | self.headers = response.headers 83 | self.request_id = response.headers.get('x-oss-request-id', '') 84 | 85 | # When a response contains no body, iter_content() cannot 86 | # be run twice (requests.exceptions.StreamConsumedError will be raised). 87 | # For details of the issue, please see issue #82 88 | # 89 | # To work around this issue, we simply return b'' when everything has been read. 90 | # 91 | # Note you cannot use self.response.raw.read() to implement self.read(), because 92 | # raw.read() does not uncompress response body when the encoding is gzip etc., and 93 | # we try to avoid depends on details of self.response.raw. 
94 | self.__all_read = False 95 | 96 | async def read(self, amt=None): 97 | if self.__all_read: 98 | return b'' 99 | 100 | if amt is None: 101 | content_list = [] 102 | async for chunk in self.response.content.iter_chunked(_CHUNK_SIZE): 103 | content_list.append(chunk) 104 | content = b''.join(content_list) 105 | 106 | self.__all_read = True 107 | # logger.debug("Get response body, req-id: {0}, content: {1}", self.request_id, content) 108 | return content 109 | else: 110 | return await self.response.content.read(amt) 111 | 112 | def __aiter__(self): 113 | return self.response.content 114 | -------------------------------------------------------------------------------- /asyncoss/exceptions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | oss2.exceptions 5 | ~~~~~~~~~~~~~~ 6 | 7 | 异常类。 8 | """ 9 | 10 | import re 11 | 12 | import xml.etree.ElementTree as ElementTree 13 | from xml.parsers import expat 14 | 15 | 16 | from oss2.compat import to_string 17 | from oss2.headers import * 18 | 19 | 20 | _OSS_ERROR_TO_EXCEPTION = {} # populated at end of module 21 | 22 | 23 | OSS_CLIENT_ERROR_STATUS = -1 24 | OSS_REQUEST_ERROR_STATUS = -2 25 | OSS_INCONSISTENT_ERROR_STATUS = -3 26 | OSS_FORMAT_ERROR_STATUS = -4 27 | OSS_SELECT_CLIENT_ERROR_STATUS = -5 28 | 29 | 30 | class OssError(Exception): 31 | def __init__(self, status, headers, body, details): 32 | #: HTTP 状态码 33 | self.status = status 34 | 35 | #: 请求ID,用于跟踪一个OSS请求。提交工单时,最好能够提供请求ID 36 | self.request_id = headers.get(OSS_REQUEST_ID, '') 37 | 38 | #: HTTP响应体(部分) 39 | self.body = body 40 | 41 | #: 详细错误信息,是一个string到string的dict 42 | self.details = details 43 | 44 | #: OSS错误码 45 | self.code = self.details.get('Code', '') 46 | 47 | #: OSS错误信息 48 | self.message = self.details.get('Message', '') 49 | 50 | def __str__(self): 51 | error = {'status': self.status, 52 | OSS_REQUEST_ID : self.request_id, 53 | 'details': self.details} 54 | return str(error) 55 | 56 | def _str_with_body(self): 57 | error = {'status': self.status, 58 | OSS_REQUEST_ID : self.request_id, 59 | 'details': self.body} 60 | return str(error) 61 | 62 | 63 | class ClientError(OssError): 64 | def __init__(self, message): 65 | OssError.__init__(self, OSS_CLIENT_ERROR_STATUS, {}, 'ClientError: ' + message, {}) 66 | 67 | def __str__(self): 68 | return self._str_with_body() 69 | 70 | 71 | class RequestError(OssError): 72 | def __init__(self, e): 73 | OssError.__init__(self, OSS_REQUEST_ERROR_STATUS, {}, 'RequestError: ' + str(e), {}) 74 | self.exception = e 75 | 76 | def __str__(self): 77 | return self._str_with_body() 78 | 79 | 80 | class InconsistentError(OssError): 81 | def __init__(self, message, request_id=''): 82 | OssError.__init__(self, OSS_INCONSISTENT_ERROR_STATUS, {OSS_REQUEST_ID : request_id}, 'InconsistentError: ' + message, {}) 83 | 84 | def __str__(self): 85 | return self._str_with_body() 86 | 87 | 88 | class OpenApiFormatError(OssError): 89 | def __init__(self, message): 90 | OssError.__init__(self, OSS_FORMAT_ERROR_STATUS, {}, message, {}) 91 | 92 | def __str__(self): 93 | return self._str_with_body() 94 | 95 | 96 | class OpenApiServerError(OssError): 97 | def __init__(self, status, request_id, message, error_code): 98 | OssError.__init__(self, status, {OSS_REQUEST_ID : request_id}, '', {'Code': error_code, 'Message': message}) 99 | 100 | 101 | class ServerError(OssError): 102 | pass 103 | 104 | 105 | class NotFound(ServerError): 106 | status = 404 107 | code = '' 108 | 109 | 110 | class 
MalformedXml(ServerError): 111 | status = 400 112 | code = 'MalformedXML' 113 | 114 | 115 | class InvalidRequest(ServerError): 116 | status = 400 117 | code = 'InvalidRequest' 118 | 119 | 120 | class OperationNotSupported(ServerError): 121 | status = 400 122 | code = 'OperationNotSupported' 123 | 124 | 125 | class RestoreAlreadyInProgress(ServerError): 126 | status = 409 127 | code = 'RestoreAlreadyInProgress' 128 | 129 | 130 | class InvalidArgument(ServerError): 131 | status = 400 132 | code = 'InvalidArgument' 133 | 134 | def __init__(self, status, headers, body, details): 135 | super(InvalidArgument, self).__init__(status, headers, body, details) 136 | self.name = details.get('ArgumentName') 137 | self.value = details.get('ArgumentValue') 138 | 139 | 140 | class InvalidDigest(ServerError): 141 | status = 400 142 | code = 'InvalidDigest' 143 | 144 | 145 | class InvalidObjectName(ServerError): 146 | status = 400 147 | code = 'InvalidObjectName' 148 | 149 | 150 | class NoSuchBucket(NotFound): 151 | status = 404 152 | code = 'NoSuchBucket' 153 | 154 | 155 | class NoSuchKey(NotFound): 156 | status = 404 157 | code = 'NoSuchKey' 158 | 159 | 160 | class NoSuchUpload(NotFound): 161 | status = 404 162 | code = 'NoSuchUpload' 163 | 164 | 165 | class NoSuchWebsite(NotFound): 166 | status = 404 167 | code = 'NoSuchWebsiteConfiguration' 168 | 169 | 170 | class NoSuchLifecycle(NotFound): 171 | status = 404 172 | code = 'NoSuchLifecycle' 173 | 174 | 175 | class NoSuchCors(NotFound): 176 | status = 404 177 | code = 'NoSuchCORSConfiguration' 178 | 179 | 180 | class NoSuchLiveChannel(NotFound): 181 | status = 404 182 | code = 'NoSuchLiveChannel' 183 | 184 | 185 | class Conflict(ServerError): 186 | status = 409 187 | code = '' 188 | 189 | 190 | class BucketNotEmpty(Conflict): 191 | status = 409 192 | code = 'BucketNotEmpty' 193 | 194 | 195 | class PositionNotEqualToLength(Conflict): 196 | status = 409 197 | code = 'PositionNotEqualToLength' 198 | 199 | def __init__(self, status, headers, body, details): 200 | super(PositionNotEqualToLength, self).__init__(status, headers, body, details) 201 | self.next_position = int(headers[OSS_NEXT_APPEND_POSITION]) 202 | 203 | 204 | class ObjectNotAppendable(Conflict): 205 | status = 409 206 | code = 'ObjectNotAppendable' 207 | 208 | 209 | class ChannelStillLive(Conflict): 210 | status = 409 211 | code = 'ChannelStillLive' 212 | 213 | 214 | class LiveChannelDisabled(Conflict): 215 | status = 409 216 | code = 'LiveChannelDisabled' 217 | 218 | 219 | class PreconditionFailed(ServerError): 220 | status = 412 221 | code = 'PreconditionFailed' 222 | 223 | 224 | class NotModified(ServerError): 225 | status = 304 226 | code = '' 227 | 228 | 229 | class AccessDenied(ServerError): 230 | status = 403 231 | code = 'AccessDenied' 232 | 233 | class SelectOperationFailed(ServerError): 234 | code = 'SelectOperationFailed' 235 | def __init__(self, status, message): 236 | self.status = status 237 | self.message = message 238 | 239 | def __str__(self): 240 | error = {'status': self.status, 241 | 'details': self.message} 242 | return str(error) 243 | 244 | class SelectOperationClientError(OssError): 245 | def __init__(self, message, request_id): 246 | OssError.__init__(self, OSS_SELECT_CLIENT_ERROR_STATUS, {'x-oss-request-id': request_id}, 'SelectOperationClientError: ' + message, {}) 247 | 248 | def __str__(self): 249 | error = {'x-oss-request-id':self.request_id, 250 | 'message': self.message} 251 | return str(error) 252 | 253 | class SignatureDoesNotMatch(ServerError): 254 | status = 
403
255 |     code = 'SignatureDoesNotMatch'
256 | 
257 | 
258 | async def make_exception(resp):
259 |     status = resp.status
260 |     headers = resp.headers
261 |     body = await resp.read(4096)
262 |     details = _parse_error_body(body)
263 |     code = details.get('Code', '')
264 | 
265 |     try:
266 |         klass = _OSS_ERROR_TO_EXCEPTION[(status, code)]
267 |         return klass(status, headers, body, details)
268 |     except KeyError:
269 |         return ServerError(status, headers, body, details)
270 | 
271 | 
272 | def _walk_subclasses(klass):
273 |     for sub in klass.__subclasses__():
274 |         yield sub
275 |         for subsub in _walk_subclasses(sub):
276 |             yield subsub
277 | 
278 | 
279 | for klass in _walk_subclasses(ServerError):
280 |     status = getattr(klass, 'status', None)
281 |     code = getattr(klass, 'code', None)
282 | 
283 |     if status is not None and code is not None:
284 |         _OSS_ERROR_TO_EXCEPTION[(status, code)] = klass
285 | 
286 | 
287 | # XML parsing exceptions have changed in Python2.7 and ElementTree 1.3
288 | if hasattr(ElementTree, 'ParseError'):
289 |     ElementTreeParseError = (ElementTree.ParseError, expat.ExpatError)
290 | else:
291 |     ElementTreeParseError = (expat.ExpatError)
292 | 
293 | 
294 | def _parse_error_body(body):
295 |     try:
296 |         root = ElementTree.fromstring(body)
297 |         if root.tag != 'Error':
298 |             return {}
299 | 
300 |         details = {}
301 |         for child in root:
302 |             details[child.tag] = child.text
303 |         return details
304 |     except ElementTreeParseError:
305 |         return _guess_error_details(body)
306 | 
307 | 
308 | def _guess_error_details(body):
309 |     details = {}
310 |     body = to_string(body)
311 | 
312 |     if '<Error>' not in body or '</Error>' not in body:
313 |         return details
314 | 
315 |     m = re.search('<Code>(.*)</Code>', body)
316 |     if m:
317 |         details['Code'] = m.group(1)
318 | 
319 |     m = re.search('<Message>(.*)</Message>', body)
320 |     if m:
321 |         details['Message'] = m.group(1)
322 | 
323 |     return details
--------------------------------------------------------------------------------
/asyncoss/iterators.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | """
4 | oss2.iterators
5 | ~~~~~~~~~~~~~~
6 | 
7 | 该模块包含了一些易于使用的迭代器,可以用来遍历Bucket、文件、分片上传等。
8 | """
9 | 
10 | from oss2 import defaults
11 | from oss2.exceptions import ServerError
12 | from oss2.models import MultipartUploadInfo, SimplifiedObjectInfo
13 | 
14 | 
15 | class _BaseIterator(object):
16 |     def __init__(self, marker, max_retries):
17 |         self.is_truncated = True
18 |         self.next_marker = marker
19 | 
20 |         max_retries = defaults.get(max_retries, defaults.request_retries)
21 |         self.max_retries = max_retries if max_retries > 0 else 1
22 | 
23 |         self.entries = []
24 | 
25 |     async def _fetch(self):
26 |         raise NotImplementedError  # pragma: no cover
27 | 
28 |     def __aiter__(self):
29 |         return self
30 | 
31 |     async def __anext__(self):
32 |         while True:
33 |             if self.entries:
34 |                 return self.entries.pop(0)
35 | 
36 |             if not self.is_truncated:
37 |                 raise StopAsyncIteration
38 | 
39 |             await self.fetch_with_retry()
40 | 
41 |     async def fetch_with_retry(self):
42 |         for i in range(self.max_retries):
43 |             try:
44 |                 self.is_truncated, self.next_marker = await self._fetch()
45 |             except ServerError as e:
46 |                 if e.status // 100 != 5:
47 |                     raise
48 | 
49 |                 if i == self.max_retries - 1:
50 |                     raise
51 |             else:
52 |                 return
53 | 
54 | 
55 | class BucketIterator(_BaseIterator):
56 |     """遍历用户Bucket的迭代器。
57 | 
58 |     每次迭代返回的是 :class:`SimplifiedBucketInfo ` 对象。
59 | 
60 |     :param service: :class:`Service ` 对象
61 |     :param prefix: 只列举匹配该前缀的Bucket
62 |     :param marker: 
分页符。只列举Bucket名字典序在此之后的Bucket 63 | :param max_keys: 每次调用 `list_buckets` 时的max_keys参数。注意迭代器返回的数目可能会大于该值。 64 | """ 65 | 66 | def __init__(self, service, prefix='', marker='', max_keys=100, max_retries=None): 67 | super(BucketIterator, self).__init__(marker, max_retries) 68 | self.service = service 69 | self.prefix = prefix 70 | self.max_keys = max_keys 71 | 72 | async def _fetch(self): 73 | result = await self.service.list_buckets(prefix=self.prefix, 74 | marker=self.next_marker, 75 | max_keys=self.max_keys) 76 | self.entries = result.buckets 77 | 78 | return result.is_truncated, result.next_marker 79 | 80 | 81 | class ObjectIterator(_BaseIterator): 82 | """遍历Bucket里文件的迭代器。 83 | 84 | 每次迭代返回的是 :class:`SimplifiedObjectInfo ` 对象。 85 | 当 `SimplifiedObjectInfo.is_prefix()` 返回True时,表明是公共前缀(目录)。 86 | 87 | :param bucket: :class:`Bucket ` 对象 88 | :param prefix: 只列举匹配该前缀的文件 89 | :param delimiter: 目录分隔符 90 | :param marker: 分页符 91 | :param max_keys: 每次调用 `list_objects` 时的max_keys参数。注意迭代器返回的数目可能会大于该值。 92 | """ 93 | 94 | def __init__(self, bucket, prefix='', delimiter='', marker='', max_keys=100, max_retries=None): 95 | super(ObjectIterator, self).__init__(marker, max_retries) 96 | 97 | self.bucket = bucket 98 | self.prefix = prefix 99 | self.delimiter = delimiter 100 | self.max_keys = max_keys 101 | 102 | async def _fetch(self): 103 | result = await self.bucket.list_objects(prefix=self.prefix, 104 | delimiter=self.delimiter, 105 | marker=self.next_marker, 106 | max_keys=self.max_keys) 107 | self.entries = result.object_list + [SimplifiedObjectInfo(prefix, None, None, None, None, None) 108 | for prefix in result.prefix_list] 109 | self.entries.sort(key=lambda obj: obj.key) 110 | 111 | return result.is_truncated, result.next_marker 112 | 113 | 114 | class MultipartUploadIterator(_BaseIterator): 115 | """遍历Bucket里未完成的分片上传。 116 | 117 | 每次返回 :class:`MultipartUploadInfo ` 对象。 118 | 当 `MultipartUploadInfo.is_prefix()` 返回True时,表明是公共前缀(目录)。 119 | 120 | :param bucket: :class:`Bucket ` 对象 121 | :param prefix: 仅列举匹配该前缀的文件的分片上传 122 | :param delimiter: 目录分隔符 123 | :param key_marker: 文件名分页符 124 | :param upload_id_marker: 分片上传ID分页符 125 | :param max_uploads: 每次调用 `list_multipart_uploads` 时的max_uploads参数。注意迭代器返回的数目可能会大于该值。 126 | """ 127 | 128 | def __init__(self, bucket, 129 | prefix='', delimiter='', key_marker='', upload_id_marker='', 130 | max_uploads=1000, max_retries=None): 131 | super(MultipartUploadIterator, self).__init__(key_marker, max_retries) 132 | 133 | self.bucket = bucket 134 | self.prefix = prefix 135 | self.delimiter = delimiter 136 | self.next_upload_id_marker = upload_id_marker 137 | self.max_uploads = max_uploads 138 | 139 | async def _fetch(self): 140 | result = await self.bucket.list_multipart_uploads(prefix=self.prefix, 141 | delimiter=self.delimiter, 142 | key_marker=self.next_marker, 143 | upload_id_marker=self.next_upload_id_marker, 144 | max_uploads=self.max_uploads) 145 | self.entries = result.upload_list + [MultipartUploadInfo(prefix, None, None) for prefix in result.prefix_list] 146 | self.entries.sort(key=lambda u: u.key) 147 | 148 | self.next_upload_id_marker = result.next_upload_id_marker 149 | return result.is_truncated, result.next_key_marker 150 | 151 | 152 | class ObjectUploadIterator(_BaseIterator): 153 | """遍历一个Object所有未完成的分片上传。 154 | 155 | 每次返回 :class:`MultipartUploadInfo ` 对象。 156 | 当 `MultipartUploadInfo.is_prefix()` 返回True时,表明是公共前缀(目录)。 157 | 158 | :param bucket: :class:`Bucket ` 对象 159 | :param key: 文件名 160 | :param max_uploads: 每次调用 `list_multipart_uploads` 
时的max_uploads参数。注意迭代器返回的数目可能会大于该值。 161 | """ 162 | 163 | def __init__(self, bucket, key, max_uploads=1000, max_retries=None): 164 | super(ObjectUploadIterator, self).__init__('', max_retries) 165 | self.bucket = bucket 166 | self.key = key 167 | self.next_upload_id_marker = '' 168 | self.max_uploads = max_uploads 169 | 170 | async def _fetch(self): 171 | result = await self.bucket.list_multipart_uploads(prefix=self.key, 172 | key_marker=self.next_marker, 173 | upload_id_marker=self.next_upload_id_marker, 174 | max_uploads=self.max_uploads) 175 | 176 | self.entries = [u for u in result.upload_list if u.key == self.key] 177 | self.next_upload_id_marker = result.next_upload_id_marker 178 | 179 | if not result.is_truncated or not self.entries: 180 | return False, result.next_key_marker 181 | 182 | if result.next_key_marker > self.key: 183 | return False, result.next_key_marker 184 | 185 | return result.is_truncated, result.next_key_marker 186 | 187 | 188 | class PartIterator(_BaseIterator): 189 | """遍历一个分片上传会话中已经上传的分片。 190 | 191 | 每次返回 :class:`PartInfo ` 对象。 192 | 193 | :param bucket: :class:`Bucket ` 对象 194 | :param key: 文件名 195 | :param upload_id: 分片上传ID 196 | :param marker: 分页符 197 | :param max_parts: 每次调用 `list_parts` 时的max_parts参数。注意迭代器返回的数目可能会大于该值。 198 | """ 199 | 200 | def __init__(self, bucket, key, upload_id, 201 | marker='0', max_parts=1000, max_retries=None): 202 | super(PartIterator, self).__init__(marker, max_retries) 203 | 204 | self.bucket = bucket 205 | self.key = key 206 | self.upload_id = upload_id 207 | self.max_parts = max_parts 208 | 209 | async def _fetch(self): 210 | result = await self.bucket.list_parts(self.key, self.upload_id, 211 | marker=self.next_marker, 212 | max_parts=self.max_parts) 213 | self.entries = result.parts 214 | 215 | return result.is_truncated, result.next_marker 216 | 217 | 218 | class LiveChannelIterator(_BaseIterator): 219 | """遍历Bucket里文件的迭代器。 220 | 221 | 每次迭代返回的是 :class:`LiveChannelInfo ` 对象。 222 | 223 | :param bucket: :class:`Bucket ` 对象 224 | :param prefix: 只列举匹配该前缀的文件 225 | :param marker: 分页符 226 | :param max_keys: 每次调用 `list_live_channel` 时的max_keys参数。注意迭代器返回的数目可能会大于该值。 227 | """ 228 | 229 | def __init__(self, bucket, prefix='', marker='', max_keys=100, max_retries=None): 230 | super(LiveChannelIterator, self).__init__(marker, max_retries) 231 | 232 | self.bucket = bucket 233 | self.prefix = prefix 234 | self.max_keys = max_keys 235 | 236 | async def _fetch(self): 237 | result = await self.bucket.list_live_channel(prefix=self.prefix, 238 | marker=self.next_marker, 239 | max_keys=self.max_keys) 240 | self.entries = result.channels 241 | 242 | return result.is_truncated, result.next_marker 243 | -------------------------------------------------------------------------------- /asyncoss/models.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | oss2.models 5 | ~~~~~~~~~~ 6 | 7 | 该模块包含Python SDK API接口所需要的输入参数以及返回值类型。 8 | """ 9 | 10 | from oss2.utils import http_to_unixtime, make_progress_adapter, make_crc_adapter 11 | from oss2.exceptions import ClientError, InconsistentError 12 | from oss2.compat import urlunquote, to_string 13 | from oss2.select_response import SelectResponseAdapter 14 | from oss2.headers import * 15 | import json 16 | 17 | class PartInfo(object): 18 | """表示分片信息的文件。 19 | 20 | 该文件既用于 :func:`list_parts ` 的输出,也用于 :func:`complete_multipart_upload 21 | ` 的输入。 22 | 23 | :param int part_number: 分片号 24 | :param str etag: 分片的ETag 25 | :param int size: 分片的大小。用在 
`list_parts` 的结果里,也用与分片对象做crc combine得到整个对象crc64值 26 | :param int last_modified: 该分片最后修改的时间戳,类型为int。参考 :ref:`unix_time` 27 | :param int part_crc: 该分片的crc64值 28 | """ 29 | def __init__(self, part_number, etag, size=None, last_modified=None, part_crc=None): 30 | self.part_number = part_number 31 | self.etag = etag 32 | self.size = size 33 | self.last_modified = last_modified 34 | self.part_crc = part_crc 35 | 36 | 37 | def _hget(headers, key, converter=lambda x: x): 38 | if key in headers: 39 | return converter(headers[key]) 40 | else: 41 | return None 42 | 43 | 44 | def _get_etag(headers): 45 | return _hget(headers, 'etag', lambda x: x.strip('"')) 46 | 47 | 48 | class RequestResult(object): 49 | def __init__(self, resp): 50 | #: HTTP响应 51 | self.resp = resp 52 | 53 | #: HTTP状态码 54 | self.status = resp.status 55 | 56 | #: HTTP头 57 | self.headers = resp.headers 58 | 59 | #: 请求ID,用于跟踪一个OSS请求。提交工单时,最后能够提供请求ID 60 | self.request_id = resp.request_id 61 | 62 | class HeadObjectResult(RequestResult): 63 | def __init__(self, resp): 64 | super(HeadObjectResult, self).__init__(resp) 65 | 66 | #: 文件类型,可以是'Normal'、'Multipart'、'Appendable'等 67 | self.object_type = _hget(self.headers, OSS_OBJECT_TYPE) 68 | 69 | #: 文件最后修改时间,类型为int。参考 :ref:`unix_time` 。 70 | 71 | self.last_modified = _hget(self.headers, 'last-modified', http_to_unixtime) 72 | 73 | #: 文件的MIME类型 74 | self.content_type = _hget(self.headers, 'content-type') 75 | 76 | #: Content-Length,可能是None。 77 | self.content_length = _hget(self.headers, 'content-length', int) 78 | 79 | #: HTTP ETag 80 | self.etag = _get_etag(self.headers) 81 | 82 | #: 文件 server_crc 83 | self._server_crc = _hget(self.headers, 'x-oss-hash-crc64ecma', int) 84 | 85 | @property 86 | def server_crc(self): 87 | return self._server_crc 88 | 89 | 90 | class GetSelectObjectMetaResult(HeadObjectResult): 91 | def __init__(self, resp): 92 | super(GetSelectObjectMetaResult, self).__init__(resp) 93 | self.select_resp = SelectResponseAdapter(resp, None, None, False) 94 | 95 | for data in self.select_resp: # waiting the response body to finish 96 | pass 97 | 98 | self.csv_rows = self.select_resp.rows 99 | self.csv_splits = self.select_resp.splits 100 | 101 | 102 | class GetObjectMetaResult(RequestResult): 103 | def __init__(self, resp): 104 | super(GetObjectMetaResult, self).__init__(resp) 105 | 106 | #: 文件最后修改时间,类型为int。参考 :ref:`unix_time` 。 107 | self.last_modified = _hget(self.headers, 'last-modified', http_to_unixtime) 108 | 109 | #: Content-Length,文件大小,类型为int。 110 | self.content_length = _hget(self.headers, 'content-length', int) 111 | 112 | #: HTTP ETag 113 | self.etag = _get_etag(self.headers) 114 | 115 | 116 | class GetSymlinkResult(RequestResult): 117 | def __init__(self, resp): 118 | super(GetSymlinkResult, self).__init__(resp) 119 | 120 | #: 符号连接的目标文件 121 | self.target_key = urlunquote(_hget(self.headers, OSS_SYMLINK_TARGET)) 122 | 123 | 124 | class GetObjectResult(HeadObjectResult): 125 | def __init__(self, resp, progress_callback=None, crc_enabled=False, crypto_provider=None): 126 | super(GetObjectResult, self).__init__(resp) 127 | self.__crc_enabled = crc_enabled 128 | self.__crypto_provider = crypto_provider 129 | 130 | if _hget(resp.headers, 'x-oss-meta-oss-crypto-key') and _hget(resp.headers, 'Content-Range'): 131 | raise ClientError('Could not get an encrypted object using byte-range parameter') 132 | 133 | if progress_callback: 134 | self.stream = make_progress_adapter(self.resp, progress_callback, self.content_length) 135 | else: 136 | self.stream = self.resp 137 | 138 | if 
self.__crc_enabled: 139 | self.stream = make_crc_adapter(self.stream) 140 | 141 | if self.__crypto_provider: 142 | key = self.__crypto_provider.decrypt_oss_meta_data(resp.headers, 'x-oss-meta-oss-crypto-key') 143 | start = self.__crypto_provider.decrypt_oss_meta_data(resp.headers, 'x-oss-meta-oss-crypto-start') 144 | cek_alg = _hget(resp.headers, 'x-oss-meta-oss-cek-alg') 145 | if key and start and cek_alg: 146 | self.stream = self.__crypto_provider.make_decrypt_adapter(self.stream, key, start) 147 | else: 148 | raise InconsistentError('all metadata keys are required for decryption (x-oss-meta-oss-crypto-key, \ 149 | x-oss-meta-oss-crypto-start, x-oss-meta-oss-cek-alg)', self.request_id) 150 | 151 | async def read(self, amt=None): 152 | return await self.stream.read(amt) 153 | 154 | def __aiter__(self): 155 | return self.stream.response.content.iter_chunks() 156 | 157 | @property 158 | def client_crc(self): 159 | if self.__crc_enabled: 160 | return self.stream.crc 161 | else: 162 | return None 163 | 164 | class SelectObjectResult(HeadObjectResult): 165 | def __init__(self, resp, progress_callback=None, crc_enabled=False): 166 | super(SelectObjectResult, self).__init__(resp) 167 | self.__crc_enabled = crc_enabled 168 | self.select_resp = SelectResponseAdapter(resp, progress_callback, None, enable_crc = self.__crc_enabled) 169 | 170 | def read(self): 171 | return self.select_resp.read() 172 | 173 | def __iter__(self): 174 | return iter(self.select_resp) 175 | 176 | def __next__(self): 177 | return self.select_resp.next() 178 | 179 | class PutObjectResult(RequestResult): 180 | def __init__(self, resp): 181 | super(PutObjectResult, self).__init__(resp) 182 | 183 | #: HTTP ETag 184 | self.etag = _get_etag(self.headers) 185 | 186 | #: 文件上传后,OSS上文件的CRC64值 187 | self.crc = _hget(resp.headers, OSS_HASH_CRC64_ECMA, int) 188 | 189 | 190 | class AppendObjectResult(RequestResult): 191 | def __init__(self, resp): 192 | super(AppendObjectResult, self).__init__(resp) 193 | 194 | #: HTTP ETag 195 | self.etag = _get_etag(self.headers) 196 | 197 | #: 本次追加写完成后,OSS上文件的CRC64值 198 | self.crc = _hget(resp.headers, OSS_HASH_CRC64_ECMA, int) 199 | 200 | #: 下次追加写的偏移 201 | self.next_position = _hget(resp.headers, OSS_NEXT_APPEND_POSITION, int) 202 | 203 | 204 | class BatchDeleteObjectsResult(RequestResult): 205 | def __init__(self, resp): 206 | super(BatchDeleteObjectsResult, self).__init__(resp) 207 | 208 | #: 已经删除的文件名列表 209 | self.deleted_keys = [] 210 | 211 | 212 | class InitMultipartUploadResult(RequestResult): 213 | def __init__(self, resp): 214 | super(InitMultipartUploadResult, self).__init__(resp) 215 | 216 | #: 新生成的Upload ID 217 | self.upload_id = None 218 | 219 | 220 | class ListObjectsResult(RequestResult): 221 | def __init__(self, resp): 222 | super(ListObjectsResult, self).__init__(resp) 223 | 224 | #: True表示还有更多的文件可以罗列;False表示已经列举完毕。 225 | self.is_truncated = False 226 | 227 | #: 下一次罗列的分页标记符,即,可以作为 :func:`list_objects ` 的 `marker` 参数。 228 | self.next_marker = '' 229 | 230 | #: 本次罗列得到的文件列表。其中元素的类型为 :class:`SimplifiedObjectInfo` 。 231 | self.object_list = [] 232 | 233 | #: 本次罗列得到的公共前缀列表,类型为str列表。 234 | self.prefix_list = [] 235 | 236 | 237 | class SimplifiedObjectInfo(object): 238 | def __init__(self, key, last_modified, etag, type, size, storage_class): 239 | #: 文件名,或公共前缀名。 240 | self.key = key 241 | 242 | #: 文件的最后修改时间 243 | self.last_modified = last_modified 244 | 245 | #: HTTP ETag 246 | self.etag = etag 247 | 248 | #: 文件类型 249 | self.type = type 250 | 251 | #: 文件大小 252 | self.size = size 253 | 254 | 
#: 文件的存储类别,是一个字符串。 255 | self.storage_class = storage_class 256 | 257 | def is_prefix(self): 258 | """如果是公共前缀,返回True;是文件,则返回False""" 259 | return self.last_modified is None 260 | 261 | 262 | OBJECT_ACL_DEFAULT = 'default' 263 | OBJECT_ACL_PRIVATE = 'private' 264 | OBJECT_ACL_PUBLIC_READ = 'public-read' 265 | OBJECT_ACL_PUBLIC_READ_WRITE = 'public-read-write' 266 | 267 | 268 | class GetObjectAclResult(RequestResult): 269 | def __init__(self, resp): 270 | super(GetObjectAclResult, self).__init__(resp) 271 | 272 | #: 文件的ACL,其值可以是 `OBJECT_ACL_DEFAULT`、`OBJECT_ACL_PRIVATE`、`OBJECT_ACL_PUBLIC_READ`或 273 | #: `OBJECT_ACL_PUBLIC_READ_WRITE` 274 | self.acl = '' 275 | 276 | 277 | class SimplifiedBucketInfo(object): 278 | """:func:`list_buckets ` 结果中的单个元素类型。""" 279 | def __init__(self, name, location, creation_date, extranet_endpoint, intranet_endpoint, storage_class): 280 | #: Bucket名 281 | self.name = name 282 | 283 | #: Bucket的区域 284 | self.location = location 285 | 286 | #: Bucket的创建时间,类型为int。参考 :ref:`unix_time`。 287 | self.creation_date = creation_date 288 | 289 | #: Bucket访问的外网域名 290 | self.extranet_endpoint = extranet_endpoint 291 | 292 | #: 同区域ECS访问Bucket的内网域名 293 | self.intranet_endpoint = intranet_endpoint 294 | 295 | #: Bucket存储类型,支持“Standard”、“IA”、“Archive” 296 | self.storage_class = storage_class 297 | 298 | 299 | class ListBucketsResult(RequestResult): 300 | def __init__(self, resp): 301 | super(ListBucketsResult, self).__init__(resp) 302 | 303 | #: True表示还有更多的Bucket可以罗列;False表示已经列举完毕。 304 | self.is_truncated = False 305 | 306 | #: 下一次罗列的分页标记符,即,可以作为 :func:`list_buckets ` 的 `marker` 参数。 307 | self.next_marker = '' 308 | 309 | #: 得到的Bucket列表,类型为 :class:`SimplifiedBucketInfo` 。 310 | self.buckets = [] 311 | 312 | 313 | class MultipartUploadInfo(object): 314 | def __init__(self, key, upload_id, initiation_date): 315 | #: 文件名 316 | self.key = key 317 | 318 | #: 分片上传ID 319 | self.upload_id = upload_id 320 | 321 | #: 分片上传初始化的时间,类型为int。参考 :ref:`unix_time` 322 | self.initiation_date = initiation_date 323 | 324 | def is_prefix(self): 325 | """如果是公共前缀则返回True""" 326 | return self.upload_id is None 327 | 328 | 329 | class ListMultipartUploadsResult(RequestResult): 330 | def __init__(self, resp): 331 | super(ListMultipartUploadsResult, self).__init__(resp) 332 | 333 | #: True表示还有更多的为完成分片上传可以罗列;False表示已经列举完毕。 334 | self.is_truncated = False 335 | 336 | #: 文件名分页符 337 | self.next_key_marker = '' 338 | 339 | #: 分片上传ID分页符 340 | self.next_upload_id_marker = '' 341 | 342 | #: 分片上传列表。类型为`MultipartUploadInfo`列表。 343 | self.upload_list = [] 344 | 345 | #: 公共前缀列表。类型为str列表。 346 | self.prefix_list = [] 347 | 348 | 349 | class ListPartsResult(RequestResult): 350 | def __init__(self, resp): 351 | super(ListPartsResult, self).__init__(resp) 352 | 353 | # True表示还有更多的Part可以罗列;False表示已经列举完毕。 354 | self.is_truncated = False 355 | 356 | # 下一个分页符 357 | self.next_marker = '' 358 | 359 | # 罗列出的Part信息,类型为 `PartInfo` 列表。 360 | self.parts = [] 361 | 362 | 363 | BUCKET_ACL_PRIVATE = 'private' 364 | BUCKET_ACL_PUBLIC_READ = 'public-read' 365 | BUCKET_ACL_PUBLIC_READ_WRITE = 'public-read-write' 366 | 367 | BUCKET_STORAGE_CLASS_STANDARD = 'Standard' 368 | BUCKET_STORAGE_CLASS_IA = 'IA' 369 | BUCKET_STORAGE_CLASS_ARCHIVE = 'Archive' 370 | 371 | 372 | class GetBucketAclResult(RequestResult): 373 | def __init__(self, resp): 374 | super(GetBucketAclResult, self).__init__(resp) 375 | 376 | #: Bucket的ACL,其值可以是 `BUCKET_ACL_PRIVATE`、`BUCKET_ACL_PUBLIC_READ`或`BUCKET_ACL_PUBLIC_READ_WRITE`。 377 | self.acl = '' 378 | 379 | 380 | class 
GetBucketLocationResult(RequestResult): 381 | def __init__(self, resp): 382 | super(GetBucketLocationResult, self).__init__(resp) 383 | 384 | #: Bucket所在的数据中心 385 | self.location = '' 386 | 387 | 388 | class BucketLogging(object): 389 | """Bucket日志配置信息。 390 | 391 | :param str target_bucket: 存储日志到这个Bucket。 392 | :param str target_prefix: 生成的日志文件名加上该前缀。 393 | """ 394 | def __init__(self, target_bucket, target_prefix): 395 | self.target_bucket = target_bucket 396 | self.target_prefix = target_prefix 397 | 398 | 399 | class GetBucketLoggingResult(RequestResult, BucketLogging): 400 | def __init__(self, resp): 401 | RequestResult.__init__(self, resp) 402 | BucketLogging.__init__(self, '', '') 403 | 404 | 405 | class BucketCreateConfig(object): 406 | def __init__(self, storage_class): 407 | self.storage_class = storage_class 408 | 409 | 410 | class BucketStat(object): 411 | def __init__(self, storage_size_in_bytes, object_count, multi_part_upload_count): 412 | self.storage_size_in_bytes = storage_size_in_bytes 413 | self.object_count = object_count 414 | self.multi_part_upload_count = multi_part_upload_count 415 | 416 | 417 | class AccessControlList(object): 418 | def __init__(self, grant): 419 | self.grant = grant 420 | 421 | 422 | class Owner(object): 423 | def __init__(self, display_name, owner_id): 424 | self.display_name = display_name 425 | self.id = owner_id 426 | 427 | 428 | class BucketInfo(object): 429 | def __init__(self, name=None, owner=None, location=None, storage_class=None, intranet_endpoint=None, 430 | extranet_endpoint=None, creation_date=None, acl=None): 431 | self.name = name 432 | self.owner = owner 433 | self.location = location 434 | self.storage_class = storage_class 435 | self.intranet_endpoint = intranet_endpoint 436 | self.extranet_endpoint = extranet_endpoint 437 | self.creation_date = creation_date 438 | self.acl = acl 439 | 440 | 441 | class GetBucketStatResult(RequestResult, BucketStat): 442 | def __init__(self, resp): 443 | RequestResult.__init__(self, resp) 444 | BucketStat.__init__(self, 0, 0, 0) 445 | 446 | 447 | class GetBucketInfoResult(RequestResult, BucketInfo): 448 | def __init__(self, resp): 449 | RequestResult.__init__(self, resp) 450 | BucketInfo.__init__(self) 451 | 452 | 453 | class BucketReferer(object): 454 | """Bucket防盗链设置。 455 | 456 | :param bool allow_empty_referer: 是否允许空的Referer。 457 | :param referers: Referer列表,每个元素是一个str。 458 | """ 459 | def __init__(self, allow_empty_referer, referers): 460 | self.allow_empty_referer = allow_empty_referer 461 | self.referers = referers 462 | 463 | 464 | class GetBucketRefererResult(RequestResult, BucketReferer): 465 | def __init__(self, resp): 466 | RequestResult.__init__(self, resp) 467 | BucketReferer.__init__(self, False, []) 468 | 469 | 470 | class BucketWebsite(object): 471 | """静态网站托管配置。 472 | 473 | :param str index_file: 索引页面文件 474 | :param str error_file: 404页面文件 475 | """ 476 | def __init__(self, index_file, error_file): 477 | self.index_file = index_file 478 | self.error_file = error_file 479 | 480 | 481 | class GetBucketWebsiteResult(RequestResult, BucketWebsite): 482 | def __init__(self, resp): 483 | RequestResult.__init__(self, resp) 484 | BucketWebsite.__init__(self, '', '') 485 | 486 | 487 | class LifecycleExpiration(object): 488 | """过期删除操作。 489 | 490 | :param days: 表示在文件修改后过了这么多天,就会匹配规则,从而被删除 491 | :param date: 表示在该日期之后,规则就一直生效。即每天都会对符合前缀的文件执行删除操作(如,删除),而不管文件是什么时候生成的。 492 | *不建议使用* 493 | :param created_before_date: delete files if their last modified time earlier than created_before_date 494 | 
495 | :type date: `datetime.date` 496 | """ 497 | def __init__(self, days=None, date=None, created_before_date=None): 498 | not_none_fields = 0 499 | if days is not None: 500 | not_none_fields += 1 501 | if date is not None: 502 | not_none_fields += 1 503 | if created_before_date is not None: 504 | not_none_fields += 1 505 | 506 | if not_none_fields > 1: 507 | raise ClientError('More than one field(days, date and created_before_date) has been specified') 508 | 509 | self.days = days 510 | self.date = date 511 | self.created_before_date = created_before_date 512 | 513 | 514 | class AbortMultipartUpload(object): 515 | """删除parts 516 | 517 | :param days: 删除相对最后修改时间days天之后的parts 518 | :param created_before_date: 删除最后修改时间早于created_before_date的parts 519 | 520 | """ 521 | def __init__(self, days=None, created_before_date=None): 522 | if days is not None and created_before_date is not None: 523 | raise ClientError('days and created_before_date should not be both specified') 524 | 525 | self.days = days 526 | self.created_before_date = created_before_date 527 | 528 | 529 | class StorageTransition(object): 530 | """transit objects 531 | 532 | :param days: 将相对最后修改时间days天之后的Object转储 533 | :param created_before_date: 将最后修改时间早于created_before_date的对象转储 534 | :param storage_class: 对象转储到OSS的目标存储类型 535 | """ 536 | def __init__(self, days=None, created_before_date=None, storage_class=None): 537 | if days is not None and created_before_date is not None: 538 | raise ClientError('days and created_before_date should not be both specified') 539 | 540 | self.days = days 541 | self.created_before_date = created_before_date 542 | self.storage_class = storage_class 543 | 544 | 545 | class LifecycleRule(object): 546 | """生命周期规则。 547 | 548 | :param id: 规则名 549 | :param prefix: 只有文件名匹配该前缀的文件才适用本规则 550 | :param expiration: 过期删除操作。 551 | :type expiration: :class:`LifecycleExpiration` 552 | :param status: 启用还是禁止该规则。可选值为 `LifecycleRule.ENABLED` 或 `LifecycleRule.DISABLED` 553 | """ 554 | 555 | ENABLED = 'Enabled' 556 | DISABLED = 'Disabled' 557 | 558 | def __init__(self, id, prefix, 559 | status=ENABLED, expiration=None, 560 | abort_multipart_upload=None, 561 | storage_transitions=None): 562 | self.id = id 563 | self.prefix = prefix 564 | self.status = status 565 | self.expiration = expiration 566 | self.abort_multipart_upload = abort_multipart_upload 567 | self.storage_transitions = storage_transitions 568 | 569 | 570 | class BucketLifecycle(object): 571 | """Bucket的生命周期配置。 572 | 573 | :param rules: 规则列表, 574 | :type rules: list of :class:`LifecycleRule` 575 | """ 576 | def __init__(self, rules=None): 577 | self.rules = rules or [] 578 | 579 | 580 | class GetBucketLifecycleResult(RequestResult, BucketLifecycle): 581 | def __init__(self, resp): 582 | RequestResult.__init__(self, resp) 583 | BucketLifecycle.__init__(self) 584 | 585 | 586 | class CorsRule(object): 587 | """CORS(跨域资源共享)规则。 588 | 589 | :param allowed_origins: 允许跨域访问的域。 590 | :type allowed_origins: list of str 591 | 592 | :param allowed_methods: 允许跨域访问的HTTP方法,如'GET'等。 593 | :type allowed_methods: list of str 594 | 595 | :param allowed_headers: 允许跨域访问的HTTP头部。 596 | :type allowed_headers: list of str 597 | 598 | 599 | """ 600 | def __init__(self, 601 | allowed_origins=None, 602 | allowed_methods=None, 603 | allowed_headers=None, 604 | expose_headers=None, 605 | max_age_seconds=None): 606 | self.allowed_origins = allowed_origins or [] 607 | self.allowed_methods = allowed_methods or [] 608 | self.allowed_headers = allowed_headers or [] 609 | self.expose_headers = 
expose_headers or [] 610 | self.max_age_seconds = max_age_seconds 611 | 612 | 613 | class BucketCors(object): 614 | def __init__(self, rules=None): 615 | self.rules = rules or [] 616 | 617 | 618 | class GetBucketCorsResult(RequestResult, BucketCors): 619 | def __init__(self, resp): 620 | RequestResult.__init__(self, resp) 621 | BucketCors.__init__(self) 622 | 623 | 624 | class LiveChannelInfoTarget(object): 625 | """Live channel中的Target节点,包含目标协议的一些参数。 626 | 627 | :param type: 协议,目前仅支持HLS。 628 | :type type: str 629 | 630 | :param frag_duration: HLS协议下生成的ts文件的期望时长,单位为秒。 631 | :type frag_duration: int 632 | 633 | :param frag_count: HLS协议下m3u8文件里ts文件的数量。 634 | :type frag_count: int""" 635 | 636 | def __init__(self, 637 | type = 'HLS', 638 | frag_duration = 5, 639 | frag_count = 3, 640 | playlist_name = ''): 641 | self.type = type 642 | self.frag_duration = frag_duration 643 | self.frag_count = frag_count 644 | self.playlist_name = playlist_name 645 | 646 | 647 | class LiveChannelInfo(object): 648 | """Live channel(直播频道)配置。 649 | 650 | :param status: 直播频道的状态,合法的值为"enabled"和"disabled"。 651 | :type status: str 652 | 653 | :param description: 直播频道的描述信息,最长为128字节。 654 | :type description: str 655 | 656 | :param target: 直播频道的推流目标节点,包含目标协议相关的参数。 657 | :type class:`LiveChannelInfoTarget ` 658 | 659 | :param last_modified: 直播频道的最后修改时间,这个字段仅在`ListLiveChannel`时使用。 660 | :type last_modified: int, 参考 :ref:`unix_time`。 661 | 662 | :param name: 直播频道的名称。 663 | :type name: str 664 | 665 | :param play_url: 播放地址。 666 | :type play_url: str 667 | 668 | :param publish_url: 推流地址。 669 | :type publish_url: str""" 670 | 671 | def __init__(self, 672 | status = 'enabled', 673 | description = '', 674 | target = None, 675 | last_modified = None, 676 | name = None, 677 | play_url = None, 678 | publish_url = None): 679 | self.status = status 680 | self.description = description 681 | self.target = target 682 | self.last_modified = last_modified 683 | self.name = name 684 | self.play_url = play_url 685 | self.publish_url = publish_url 686 | 687 | 688 | class LiveChannelList(object): 689 | """List直播频道的结果。 690 | 691 | :param prefix: List直播频道使用的前缀。 692 | :type prefix: str 693 | 694 | :param marker: List直播频道使用的marker。 695 | :type marker: str 696 | 697 | :param max_keys: List时返回的最多的直播频道的条数。 698 | :type max_keys: int 699 | 700 | :param is_truncated: 本次List是否列举完所有的直播频道 701 | :type is_truncated: bool 702 | 703 | :param next_marker: 下一次List直播频道使用的marker。 704 | :type marker: str 705 | 706 | :param channels: List返回的直播频道列表 707 | :type channels: list,类型为 :class:`LiveChannelInfo`""" 708 | 709 | def __init__(self, 710 | prefix = '', 711 | marker = '', 712 | max_keys = 100, 713 | is_truncated = False, 714 | next_marker = ''): 715 | self.prefix = prefix 716 | self.marker = marker 717 | self.max_keys = max_keys 718 | self.is_truncated = is_truncated 719 | self.next_marker = next_marker 720 | self.channels = [] 721 | 722 | 723 | class LiveChannelVideoStat(object): 724 | """LiveStat中的Video节点。 725 | 726 | :param width: 视频的宽度。 727 | :type width: int 728 | 729 | :param height: 视频的高度。 730 | :type height: int 731 | 732 | :param frame_rate: 帧率。 733 | :type frame_rate: int 734 | 735 | :param codec: 编码方式。 736 | :type codec: str 737 | 738 | :param bandwidth: 码率。 739 | :type bandwidth: int""" 740 | 741 | def __init__(self, 742 | width = 0, 743 | height = 0, 744 | frame_rate = 0, 745 | codec = '', 746 | bandwidth = 0): 747 | self.width = width 748 | self.height = height 749 | self.frame_rate = frame_rate 750 | self.codec = codec 751 | self.bandwidth = 
bandwidth 752 | 753 | 754 | class LiveChannelAudioStat(object): 755 | """LiveStat中的Audio节点。 756 | 757 | :param codec: 编码方式。 758 | :type codec: str 759 | 760 | :param sample_rate: 采样率。 761 | :type sample_rate: int 762 | 763 | :param bandwidth: 码率。 764 | :type bandwidth: int""" 765 | 766 | def __init__(self, 767 | codec = '', 768 | sample_rate = 0, 769 | bandwidth = 0): 770 | self.codec = codec 771 | self.sample_rate = sample_rate 772 | self.bandwidth = bandwidth 773 | 774 | 775 | class LiveChannelStat(object): 776 | """LiveStat结果。 777 | 778 | :param status: 直播状态。 779 | :type codec: str 780 | 781 | :param remote_addr: 客户端的地址。 782 | :type remote_addr: str 783 | 784 | :param connected_time: 本次推流开始时间。 785 | :type connected_time: int, unix time 786 | 787 | :param video: 视频描述信息。 788 | :type video: class:`LiveChannelVideoStat ` 789 | 790 | :param audio: 音频描述信息。 791 | :type audio: class:`LiveChannelAudioStat `""" 792 | 793 | def __init__(self, 794 | status = '', 795 | remote_addr = '', 796 | connected_time = '', 797 | video = None, 798 | audio = None): 799 | self.status = status 800 | self.remote_addr = remote_addr 801 | self.connected_time = connected_time 802 | self.video = video 803 | self.audio = audio 804 | 805 | 806 | class LiveRecord(object): 807 | """直播频道中的推流记录信息 808 | 809 | :param start_time: 本次推流开始时间。 810 | :type start_time: int,参考 :ref:`unix_time`。 811 | 812 | :param end_time: 本次推流结束时间。 813 | :type end_time: int, 参考 :ref:`unix_time`。 814 | 815 | :param remote_addr: 推流时客户端的地址。 816 | :type remote_addr: str""" 817 | 818 | def __init__(self, 819 | start_time = '', 820 | end_time = '', 821 | remote_addr = ''): 822 | self.start_time = start_time 823 | self.end_time = end_time 824 | self.remote_addr = remote_addr 825 | 826 | 827 | class LiveChannelHistory(object): 828 | """直播频道下的推流记录。""" 829 | 830 | def __init__(self): 831 | self.records = [] 832 | 833 | 834 | class CreateLiveChannelResult(RequestResult, LiveChannelInfo): 835 | def __init__(self, resp): 836 | RequestResult.__init__(self, resp) 837 | LiveChannelInfo.__init__(self) 838 | 839 | 840 | class GetLiveChannelResult(RequestResult, LiveChannelInfo): 841 | def __init__(self, resp): 842 | RequestResult.__init__(self, resp) 843 | LiveChannelInfo.__init__(self) 844 | 845 | 846 | class ListLiveChannelResult(RequestResult, LiveChannelList): 847 | def __init__(self, resp): 848 | RequestResult.__init__(self, resp) 849 | LiveChannelList.__init__(self) 850 | 851 | 852 | class GetLiveChannelStatResult(RequestResult, LiveChannelStat): 853 | def __init__(self, resp): 854 | RequestResult.__init__(self, resp) 855 | LiveChannelStat.__init__(self) 856 | 857 | class GetLiveChannelHistoryResult(RequestResult, LiveChannelHistory): 858 | def __init__(self, resp): 859 | RequestResult.__init__(self, resp) 860 | LiveChannelHistory.__init__(self) 861 | 862 | 863 | class ProcessObjectResult(RequestResult): 864 | def __init__(self, resp): 865 | RequestResult.__init__(self, resp) 866 | self.bucket = "" 867 | self.fileSize = 0 868 | self.object = "" 869 | self.process_status = "" 870 | result = json.loads(to_string(resp.read())) 871 | if 'bucket' in result: 872 | self.bucket = result['bucket'] 873 | if 'fileSize' in result: 874 | self.fileSize = result['fileSize'] 875 | if 'object' in result: 876 | self.object = result['object'] 877 | if 'status' in result: 878 | self.process_status = result['status'] -------------------------------------------------------------------------------- /asyncoss/api.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import shutil 3 | 4 | from oss2 import defaults, utils, xml_utils 5 | from oss2.compat import to_string, to_unicode, urlparse, urlquote 6 | from asyncoss import models, exceptions 7 | from asyncoss import http 8 | 9 | 10 | class _Base(object): 11 | def __init__(self, auth, endpoint, is_cname, session, connect_timeout, 12 | app_name='', enable_crc=False, loop=None): 13 | self.auth = auth 14 | self.endpoint = _normalize_endpoint(endpoint.strip()) 15 | self.session = session or http.Session(loop=loop) 16 | self.timeout = defaults.get(connect_timeout, defaults.connect_timeout) 17 | self.app_name = app_name 18 | self.enable_crc = enable_crc 19 | 20 | self._make_url = _UrlMaker(self.endpoint, is_cname) 21 | 22 | async def _do(self, method, bucket_name, key, **kwargs): 23 | key = to_string(key) 24 | req = http.Request(method, self._make_url(bucket_name, key), 25 | app_name=self.app_name, 26 | **kwargs) 27 | self.auth._sign_request(req, bucket_name, key) 28 | resp = await self.session.do_request(req, timeout=self.timeout) 29 | 30 | if resp.status // 100 != 2: 31 | e = await exceptions.make_exception(resp) 32 | raise e 33 | 34 | content_length = models._hget(resp.headers, 'content-length', int) 35 | if content_length is not None and content_length == 0: 36 | await resp.read() 37 | 38 | return resp 39 | 40 | async def _parse_result(self, resp, parse_func, klass): 41 | result = klass(resp) 42 | body = await resp.read() 43 | parse_func(result, body) 44 | return result 45 | 46 | async def __aenter__(self): 47 | await self.session._aio_session.__aenter__() 48 | return self 49 | 50 | async def __aexit__(self, exc_type, exc_val, exc_tb): 51 | await self.session._aio_session.__aexit__(exc_type, exc_val, exc_tb) 52 | 53 | async def close(self): 54 | await self.session._aio_session.close() 55 | 56 | 57 | class Service(_Base): 58 | def __init__(self, auth, endpoint, 59 | session=None, 60 | connect_timeout=None, 61 | app_name='', 62 | loop=None): 63 | super().__init__(auth, endpoint, False, session, connect_timeout, 64 | app_name=app_name, loop=loop) 65 | 66 | async def list_buckets(self, prefix='', marker='', max_keys=100): 67 | """根据前缀罗列用户的Bucket。 68 | 69 | :param str prefix: 只罗列Bucket名为该前缀的Bucket,空串表示罗列所有的Bucket 70 | :param str marker: 分页标志。首次调用传空串,后续使用返回值中的next_marker 71 | :param int max_keys: 每次调用最多返回的Bucket数目 72 | 73 | :return: 罗列的结果 74 | :rtype: oss2.models.ListBucketsResult 75 | """ 76 | resp = await self._do('GET', '', '', 77 | params={'prefix': prefix, 78 | 'marker': marker, 79 | 'max-keys': str(max_keys)}) 80 | return await self._parse_result(resp, xml_utils.parse_list_buckets, models.ListBucketsResult) 81 | 82 | 83 | class Bucket(_Base): 84 | """用于Bucket和Object操作的类,诸如创建、删除Bucket,上传、下载Object等。 85 | 86 | 用法(假设Bucket属于杭州区域) :: 87 | 88 | >>> import oss2 89 | >>> auth = oss2.Auth('your-access-key-id', 'your-access-key-secret') 90 | >>> bucket = oss2.Bucket(auth, 'http://oss-cn-hangzhou.aliyuncs.com', 'your-bucket') 91 | >>> bucket.put_object('readme.txt', 'content of the object') 92 | 93 | 94 | :param auth: 包含了用户认证信息的Auth对象 95 | :type auth: oss2.Auth 96 | 97 | :param str endpoint: 访问域名或者CNAME 98 | :param str bucket_name: Bucket名 99 | :param bool is_cname: 如果endpoint是CNAME则设为True;反之,则为False。 100 | 101 | :param session: 会话。如果是None表示新开会话,非None则复用传入的会话 102 | :type session: oss2.Session 103 | 104 | :param float connect_timeout: 连接超时时间,以秒为单位。 105 | 106 | :param str app_name: 应用名。该参数不为空,则在User 
Agent中加入其值。 107 | 注意到,最终这个字符串是要作为HTTP Header的值传输的,所以必须要遵循HTTP标准。 108 | """ 109 | 110 | ACL = 'acl' 111 | CORS = 'cors' 112 | LIFECYCLE = 'lifecycle' 113 | LOCATION = 'location' 114 | LOGGING = 'logging' 115 | REFERER = 'referer' 116 | WEBSITE = 'website' 117 | LIVE = 'live' 118 | COMP = 'comp' 119 | STATUS = 'status' 120 | VOD = 'vod' 121 | SYMLINK = 'symlink' 122 | STAT = 'stat' 123 | BUCKET_INFO = 'bucketInfo' 124 | PROCESS = 'x-oss-process' 125 | 126 | def __init__(self, auth, endpoint, bucket_name, 127 | is_cname=False, 128 | session=None, 129 | connect_timeout=None, 130 | app_name='', 131 | enable_crc=False, 132 | loop=None): 133 | super().__init__(auth, endpoint, is_cname, session, connect_timeout, 134 | app_name, enable_crc, loop=loop) 135 | 136 | self.bucket_name = bucket_name.strip() 137 | 138 | def sign_url(self, method, key, expires, headers=None, params=None, slash_safe=False): 139 | """生成签名URL。 140 | 141 | 常见的用法是生成加签的URL以供授信用户下载,如为log.jpg生成一个5分钟后过期的下载链接:: 142 | 143 | >>> bucket.sign_url('GET', 'log.jpg', 5 * 60) 144 | 'http://your-bucket.oss-cn-hangzhou.aliyuncs.com/logo.jpg?OSSAccessKeyId=YourAccessKeyId\&Expires=1447178011&Signature=UJfeJgvcypWq6Q%2Bm3IJcSHbvSak%3D' 145 | 146 | :param method: HTTP方法,如'GET'、'PUT'、'DELETE'等 147 | :type method: str 148 | :param key: 文件名 149 | :param expires: 过期时间(单位:秒),链接在当前时间再过expires秒后过期 150 | 151 | :param headers: 需要签名的HTTP头部,如名称以x-oss-meta-开头的头部(作为用户自定义元数据)、 152 | Content-Type头部等。对于下载,不需要填。 153 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 154 | 155 | :param params: 需要签名的HTTP查询参数 156 | 157 | :param slash_safe: 是否开启key名称中的‘/’转义保护,如果不开启'/'将会转义成%2F 158 | :type slash_safe: bool 159 | 160 | :return: 签名URL。 161 | """ 162 | key = to_string(key) 163 | req = http.Request(method, self._make_url(self.bucket_name, key, slash_safe), 164 | headers=headers, 165 | params=params) 166 | return self.auth._sign_url(req, self.bucket_name, key, expires) 167 | 168 | def sign_rtmp_url(self, channel_name, playlist_name, expires): 169 | """生成RTMP推流的签名URL。 170 | 常见的用法是生成加签的URL以供授信用户向OSS推RTMP流。 171 | 172 | :param channel_name: 直播频道的名称 173 | :param expires: 过期时间(单位:秒),链接在当前时间再过expires秒后过期 174 | :param playlist_name: 播放列表名称,注意与创建live channel时一致 175 | :param params: 需要签名的HTTP查询参数 176 | 177 | :return: 签名URL。 178 | """ 179 | url = self._make_url(self.bucket_name, 'live').replace( 180 | 'http://', 'rtmp://').replace('https://', 181 | 'rtmp://') + '/' + channel_name 182 | params = {} 183 | params['playlistName'] = playlist_name 184 | return self.auth._sign_rtmp_url(url, self.bucket_name, channel_name, playlist_name, expires, params) 185 | 186 | async def list_objects(self, prefix='', delimiter='', marker='', max_keys=100): 187 | """根据前缀罗列Bucket里的文件。 188 | 189 | :param str prefix: 只罗列文件名为该前缀的文件 190 | :param str delimiter: 分隔符。可以用来模拟目录 191 | :param str marker: 分页标志。首次调用传空串,后续使用返回值的next_marker 192 | :param int max_keys: 最多返回文件的个数,文件和目录的和不能超过该值 193 | 194 | :return: :class:`ListObjectsResult ` 195 | """ 196 | resp = await self.__do_object('GET', '', 197 | params={'prefix': prefix, 198 | 'delimiter': delimiter, 199 | 'marker': marker, 200 | 'max-keys': str(max_keys), 201 | 'encoding-type': 'url'}) 202 | return await self._parse_result(resp, xml_utils.parse_list_objects, models.ListObjectsResult) 203 | 204 | async def put_object(self, key, data, 205 | headers=None, 206 | progress_callback=None): 207 | """上传一个普通文件。 208 | 209 | 用法 :: 210 | >>> bucket.put_object('readme.txt', 'content of readme.txt') 211 | >>> with open(u'local_file.txt', 'rb') as f: 212 | >>> 
bucket.put_object('remote_file.txt', f) 213 | 214 | :param key: 上传到OSS的文件名 215 | 216 | :param data: 待上传的内容。 217 | :type data: bytes,str或file-like object 218 | 219 | :param headers: 用户指定的HTTP头部。可以指定Content-Type、Content-MD5、x-oss-meta-开头的头部等 220 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 221 | 222 | :param progress_callback: 用户指定的进度回调函数。可以用来实现进度条等功能。参考 :ref:`progress_callback` 。 223 | 224 | :return: :class:`PutObjectResult ` 225 | """ 226 | headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key) 227 | 228 | if progress_callback: 229 | data = utils.make_progress_adapter(data, progress_callback) 230 | 231 | if self.enable_crc: 232 | data = utils.make_crc_adapter(data) 233 | 234 | resp = await self.__do_object('PUT', key, data=data, headers=headers) 235 | result = models.PutObjectResult(resp) 236 | 237 | if self.enable_crc and result.crc is not None: 238 | utils.check_crc('put object', data.crc, result.crc, result.request_id) 239 | return result 240 | 241 | async def put_object_from_file(self, key, filename, 242 | headers=None, 243 | progress_callback=None): 244 | """上传一个本地文件到OSS的普通文件。 245 | 246 | :param str key: 上传到OSS的文件名 247 | :param str filename: 本地文件名,需要有可读权限 248 | 249 | :param headers: 用户指定的HTTP头部。可以指定Content-Type、Content-MD5、x-oss-meta-开头的头部等 250 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 251 | 252 | :param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback` 253 | 254 | :return: :class:`PutObjectResult ` 255 | """ 256 | headers = utils.set_content_type(http.CaseInsensitiveDict(headers), filename) 257 | 258 | with open(to_unicode(filename), 'rb') as f: 259 | return await self.put_object(key, f, headers=headers, progress_callback=progress_callback) 260 | 261 | async def append_object(self, key, position, data, 262 | headers=None, 263 | progress_callback=None, 264 | init_crc=None): 265 | """追加上传一个文件。 266 | 267 | :param str key: 新的文件名,或已经存在的可追加文件名 268 | :param int position: 追加上传一个新的文件, `position` 设为0;追加一个已经存在的可追加文件, `position` 设为文件的当前长度。 269 | `position` 可以从上次追加的结果 `AppendObjectResult.next_position` 中获得。 270 | 271 | :param data: 用户数据 272 | :type data: str、bytes、file-like object或可迭代对象 273 | 274 | :param headers: 用户指定的HTTP头部。可以指定Content-Type、Content-MD5、x-oss-开头的头部等 275 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 276 | 277 | :param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback` 278 | 279 | :return: :class:`AppendObjectResult ` 280 | 281 | :raises: 如果 `position` 和当前文件长度不一致,抛出 :class:`PositionNotEqualToLength ` ; 282 | 如果当前文件不是可追加类型,抛出 :class:`ObjectNotAppendable ` ; 283 | 还会抛出其他一些异常 284 | """ 285 | headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key) 286 | 287 | if progress_callback: 288 | data = utils.make_progress_adapter(data, progress_callback) 289 | 290 | if self.enable_crc and init_crc is not None: 291 | data = utils.make_crc_adapter(data, init_crc) 292 | 293 | resp = await self.__do_object('POST', key, 294 | data=data, 295 | headers=headers, 296 | params={'append': '', 'position': str(position)}) 297 | result = models.AppendObjectResult(resp) 298 | 299 | if self.enable_crc and result.crc is not None and init_crc is not None: 300 | utils.check_crc('append', data.crc, result.crc) 301 | 302 | return result 303 | 304 | async def get_object(self, key, 305 | byte_range=None, 306 | headers=None, 307 | progress_callback=None, 308 | process=None, 309 | params=None): 310 | """下载一个文件。 311 | 312 | 用法 :: 313 | 314 | >>> result = await bucket.get_object('readme.txt') 315 | >>> print(result.read()) 316 | 'hello world' 
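        >>> # A ranged read, as a sketch (not in the upstream example): byte_range is
        >>> # an inclusive (start, last) pair, and the body is read from the wrapped
        >>> # aiohttp response, as in the README.
        >>> result = await bucket.get_object('readme.txt', byte_range=(0, 99))
        >>> await result.resp.read()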
317 | 318 | :param key: 文件名 319 | :param byte_range: 指定下载范围。参见 :ref:`byte_range` 320 | 321 | :param headers: HTTP头部 322 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 323 | 324 | :param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback` 325 | 326 | :param process: oss文件处理,如图像服务等。指定后process,返回的内容为处理后的文件。 327 | 328 | :return: file-like object 329 | 330 | :raises: 如果文件不存在,则抛出 :class:`NoSuchKey ` ;还可能抛出其他异常 331 | """ 332 | headers = http.CaseInsensitiveDict(headers) 333 | 334 | range_string = _make_range_string(byte_range) 335 | if range_string: 336 | headers['range'] = range_string 337 | 338 | params = {} if params is None else params 339 | if process: 340 | params.update({Bucket.PROCESS: process}) 341 | 342 | resp = await self.__do_object('GET', key, headers=headers, params=params) 343 | return models.GetObjectResult(resp, progress_callback, self.enable_crc) 344 | 345 | async def get_object_to_file(self, key, filename, 346 | byte_range=None, 347 | headers=None, 348 | progress_callback=None, 349 | process=None): 350 | """下载一个文件到本地文件。 351 | 352 | :param key: 文件名 353 | :param filename: 本地文件名。要求父目录已经存在,且有写权限。 354 | :param byte_range: 指定下载范围。参见 :ref:`byte_range` 355 | 356 | :param headers: HTTP头部 357 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 358 | 359 | :param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback` 360 | 361 | :param process: oss文件处理,如图像服务等。指定后process,返回的内容为处理后的文件。 362 | 363 | :return: 如果文件不存在,则抛出 :class:`NoSuchKey ` ;还可能抛出其他异常 364 | """ 365 | with open(to_unicode(filename), 'wb') as f: 366 | result = await self.get_object(key, byte_range=byte_range, headers=headers, 367 | progress_callback=progress_callback, 368 | process=process) 369 | 370 | if result.content_length is None: 371 | shutil.copyfileobj(result, f) 372 | else: 373 | utils.copyfileobj_and_verify(result, f, result.content_length, request_id=result.request_id) 374 | 375 | return result 376 | 377 | async def head_object(self, key, headers=None): 378 | """获取文件元信息。 379 | 380 | HTTP响应的头部包含了文件元信息,可以通过 `RequestResult` 的 `headers` 成员获得。 381 | 用法 :: 382 | 383 | >>> result = await bucket.head_object('readme.txt') 384 | >>> print(result.content_type) 385 | text/plain 386 | 387 | :param key: 文件名 388 | 389 | :param headers: HTTP头部 390 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 391 | 392 | :return: :class:`HeadObjectResult ` 393 | 394 | :raises: 如果Bucket不存在或者Object不存在,则抛出 :class:`NotFound ` 395 | """ 396 | resp = await self.__do_object('HEAD', key, headers=headers) 397 | return models.HeadObjectResult(resp) 398 | 399 | async def get_object_meta(self, key): 400 | """获取文件基本元信息,包括该Object的ETag、Size(文件大小)、LastModified,并不返回其内容。 401 | 402 | HTTP响应的头部包含了文件基本元信息,可以通过 `GetObjectMetaResult` 的 `last_modified`,`content_length`,`etag` 成员获得。 403 | 404 | :param key: 文件名 405 | 406 | :return: :class:`GetObjectMetaResult ` 407 | 408 | :raises: 如果文件不存在,则抛出 :class:`NoSuchKey ` ;还可能抛出其他异常 409 | """ 410 | resp = await self.__do_object('GET', key, params={'objectMeta': ''}) 411 | return models.GetObjectMetaResult(resp) 412 | 413 | async def object_exists(self, key): 414 | """如果文件存在就返回True,否则返回False。如果Bucket不存在,或是发生其他错误,则抛出异常。""" 415 | 416 | # 如果我们用head_object来实现的话,由于HTTP HEAD请求没有响应体,只有响应头部,这样当发生404时, 417 | # 我们无法区分是NoSuchBucket还是NoSuchKey错误。 418 | # 419 | # 2.2.0之前的实现是通过get_object的if-modified-since头部,把date设为当前时间24小时后,这样如果文件存在,则会返回 420 | # 304 (NotModified);不存在,则会返回NoSuchKey。get_object会受回源的影响,如果配置会404回源,get_object会判断错误。 421 | # 422 | # 目前的实现是通过get_object_meta判断文件是否存在。 423 | 424 | try: 425 | await 
self.get_object_meta(key) 426 | except exceptions.NoSuchKey: 427 | return False 428 | 429 | return True 430 | 431 | async def copy_object(self, source_bucket_name, source_key, target_key, headers=None): 432 | """拷贝一个文件到当前Bucket。 433 | 434 | :param str source_bucket_name: 源Bucket名 435 | :param str source_key: 源文件名 436 | :param str target_key: 目标文件名 437 | 438 | :param headers: HTTP头部 439 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 440 | 441 | :return: :class:`PutObjectResult ` 442 | """ 443 | headers = http.CaseInsensitiveDict(headers) 444 | headers['x-oss-copy-source'] = '/' + source_bucket_name + '/' + urlquote(source_key, '') 445 | 446 | resp = await self.__do_object('PUT', target_key, headers=headers) 447 | 448 | return models.PutObjectResult(resp) 449 | 450 | async def update_object_meta(self, key, headers): 451 | """更改Object的元数据信息,包括Content-Type这类标准的HTTP头部,以及以x-oss-meta-开头的自定义元数据。 452 | 453 | 用户可以通过 :func:`head_object` 获得元数据信息。 454 | 455 | :param str key: 文件名 456 | 457 | :param headers: HTTP头部,包含了元数据信息 458 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 459 | 460 | :return: :class:`RequestResult ` 461 | """ 462 | return await self.copy_object(self.bucket_name, key, key, headers=headers) 463 | 464 | async def delete_object(self, key): 465 | """删除一个文件。 466 | 467 | :param str key: 文件名 468 | 469 | :return: :class:`RequestResult ` 470 | """ 471 | resp = await self.__do_object('DELETE', key) 472 | return models.RequestResult(resp) 473 | 474 | async def restore_object(self, key): 475 | """restore an object 476 | 如果是第一次针对该object调用接口,返回RequestResult.status = 202; 477 | 如果已经成功调用过restore接口,且服务端仍处于解冻中,抛异常RestoreAlreadyInProgress(status=409) 478 | 如果已经成功调用过restore接口,且服务端解冻已经完成,再次调用时返回RequestResult.status = 200,且会将object的可下载时间延长一天,最多延长7天。 479 | 如果object不存在,则抛异常NoSuchKey(status=404); 480 | 对非Archive类型的Object提交restore,则抛异常OperationNotSupported(status=400) 481 | 482 | 也可以通过调用head_object接口来获取meta信息来判断是否可以restore与restore的状态 483 | 代码示例:: 484 | >>> meta = await bucket.head_object(key) 485 | >>> if meta.resp.headers['x-oss-storage-class'] == oss2.BUCKET_STORAGE_CLASS_ARCHIVE: 486 | >>> bucket.restore_object(key) 487 | >>> while True: 488 | >>> meta = await bucket.head_object(key) 489 | >>> if meta.resp.headers['x-oss-restore'] == 'ongoing-request="true"': 490 | >>> time.sleep(5) 491 | >>> else: 492 | >>> break 493 | :param str key: object name 494 | :return: :class:`RequestResult ` 495 | """ 496 | resp = await self.__do_object('POST', key, params={'restore': ''}) 497 | return models.RequestResult(resp) 498 | 499 | async def put_object_acl(self, key, permission): 500 | """设置文件的ACL。 501 | 502 | :param str key: 文件名 503 | :param str permission: 可以是oss2.OBJECT_ACL_DEFAULT、oss2.OBJECT_ACL_PRIVATE、oss2.OBJECT_ACL_PUBLIC_READ或 504 | oss2.OBJECT_ACL_PUBLIC_READ_WRITE。 505 | 506 | :return: :class:`RequestResult ` 507 | """ 508 | resp = await self.__do_object('PUT', key, params={'acl': ''}, headers={'x-oss-object-acl': permission}) 509 | return models.RequestResult(resp) 510 | 511 | async def get_object_acl(self, key): 512 | """获取文件的ACL。 513 | 514 | :return: :class:`GetObjectAclResult ` 515 | """ 516 | resp = await self.__do_object('GET', key, params={'acl': ''}) 517 | return await self._parse_result(resp, xml_utils.parse_get_object_acl, models.GetObjectAclResult) 518 | 519 | async def batch_delete_objects(self, key_list): 520 | """批量删除文件。待删除文件列表不能为空。 521 | 522 | :param key_list: 文件名列表,不能为空。 523 | :type key_list: list of str 524 | 525 | :return: :class:`BatchDeleteObjectsResult ` 526 | """ 527 | if not 
key_list: 528 | raise models.ClientError('key_list should not be empty') 529 | 530 | data = xml_utils.to_batch_delete_objects_request(key_list, False) 531 | resp = await self.__do_object('POST', '', 532 | data=data, 533 | params={'delete': '', 'encoding-type': 'url'}, 534 | headers={'Content-MD5': utils.content_md5(data)}) 535 | return await self._parse_result(resp, xml_utils.parse_batch_delete_objects, models.BatchDeleteObjectsResult) 536 | 537 | async def init_multipart_upload(self, key, headers=None): 538 | """初始化分片上传。 539 | 540 | 返回值中的 `upload_id` 以及Bucket名和Object名三元组唯一对应了此次分片上传事件。 541 | 542 | :param str key: 待上传的文件名 543 | 544 | :param headers: HTTP头部 545 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 546 | 547 | :return: :class:`InitMultipartUploadResult ` 548 | """ 549 | headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key) 550 | 551 | resp = await self.__do_object('POST', key, params={'uploads': ''}, headers=headers) 552 | return await self._parse_result(resp, xml_utils.parse_init_multipart_upload, models.InitMultipartUploadResult) 553 | 554 | async def upload_part(self, key, upload_id, part_number, data, progress_callback=None, headers=None): 555 | """上传一个分片。 556 | 557 | :param str key: 待上传文件名,这个文件名要和 :func:`init_multipart_upload` 的文件名一致。 558 | :param str upload_id: 分片上传ID 559 | :param int part_number: 分片号,最小值是1. 560 | :param data: 待上传数据。 561 | :param progress_callback: 用户指定进度回调函数。可以用来实现进度条等功能。参考 :ref:`progress_callback` 。 562 | 563 | :param headers: 用户指定的HTTP头部。可以指定Content-MD5头部等 564 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 565 | 566 | :return: :class:`PutObjectResult ` 567 | """ 568 | if progress_callback: 569 | data = utils.make_progress_adapter(data, progress_callback) 570 | 571 | if self.enable_crc: 572 | data = utils.make_crc_adapter(data) 573 | 574 | resp = await self.__do_object('PUT', key, 575 | params={'uploadId': upload_id, 'partNumber': str(part_number)}, 576 | headers=headers, 577 | data=data) 578 | result = models.PutObjectResult(resp) 579 | 580 | if self.enable_crc and result.crc is not None: 581 | utils.check_crc('put', data.crc, result.crc) 582 | 583 | return result 584 | 585 | async def complete_multipart_upload(self, key, upload_id, parts, headers=None): 586 | """完成分片上传,创建文件。 587 | 588 | :param str key: 待上传的文件名,这个文件名要和 :func:`init_multipart_upload` 的文件名一致。 589 | :param str upload_id: 分片上传ID 590 | 591 | :param parts: PartInfo列表。PartInfo中的part_number和etag是必填项。其中的etag可以从 :func:`upload_part` 的返回值中得到。 592 | :type parts: list of `PartInfo ` 593 | 594 | :param headers: HTTP头部 595 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 596 | 597 | :return: :class:`PutObjectResult ` 598 | """ 599 | data = xml_utils.to_complete_upload_request(sorted(parts, key=lambda p: p.part_number)) 600 | resp = await self.__do_object('POST', key, 601 | params={'uploadId': upload_id}, 602 | data=data, 603 | headers=headers) 604 | 605 | return models.PutObjectResult(resp) 606 | 607 | async def abort_multipart_upload(self, key, upload_id): 608 | """取消分片上传。 609 | 610 | :param str key: 待上传的文件名,这个文件名要和 :func:`init_multipart_upload` 的文件名一致。 611 | :param str upload_id: 分片上传ID 612 | 613 | :return: :class:`RequestResult ` 614 | """ 615 | resp = await self.__do_object('DELETE', key, 616 | params={'uploadId': upload_id}) 617 | return models.RequestResult(resp) 618 | 619 | async def list_multipart_uploads(self, 620 | prefix='', 621 | delimiter='', 622 | key_marker='', 623 | upload_id_marker='', 624 | max_uploads=1000): 625 | """罗列正在进行中的分片上传。支持分页。 626 | 627 | :param 
str prefix: 只罗列匹配该前缀的文件的分片上传 628 | :param str delimiter: 目录分割符 629 | :param str key_marker: 文件名分页符。第一次调用可以不传,后续设为返回值中的 `next_key_marker` 630 | :param str upload_id_marker: 分片ID分页符。第一次调用可以不传,后续设为返回值中的 `next_upload_id_marker` 631 | :param int max_uploads: 一次罗列最多能够返回的条目数 632 | 633 | :return: :class:`ListMultipartUploadsResult ` 634 | """ 635 | resp = await self.__do_object('GET', '', 636 | params={'uploads': '', 637 | 'prefix': prefix, 638 | 'delimiter': delimiter, 639 | 'key-marker': key_marker, 640 | 'upload-id-marker': upload_id_marker, 641 | 'max-uploads': str(max_uploads), 642 | 'encoding-type': 'url'}) 643 | return await self._parse_result(resp, xml_utils.parse_list_multipart_uploads, models.ListMultipartUploadsResult) 644 | 645 | async def upload_part_copy(self, source_bucket_name, source_key, byte_range, 646 | target_key, target_upload_id, target_part_number, 647 | headers=None): 648 | """分片拷贝。把一个已有文件的一部分或整体拷贝成目标文件的一个分片。 649 | 650 | :param byte_range: 指定待拷贝内容在源文件里的范围。参见 :ref:`byte_range` 651 | 652 | :param headers: HTTP头部 653 | :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict 654 | 655 | :return: :class:`PutObjectResult ` 656 | """ 657 | headers = http.CaseInsensitiveDict(headers) 658 | headers['x-oss-copy-source'] = '/' + source_bucket_name + '/' + source_key 659 | 660 | range_string = _make_range_string(byte_range) 661 | if range_string: 662 | headers['x-oss-copy-source-range'] = range_string 663 | 664 | resp = await self.__do_object('PUT', target_key, 665 | params={'uploadId': target_upload_id, 666 | 'partNumber': str(target_part_number)}, 667 | headers=headers) 668 | 669 | return models.PutObjectResult(resp) 670 | 671 | async def list_parts(self, key, upload_id, 672 | marker='', max_parts=1000): 673 | """列举已经上传的分片。支持分页。 674 | 675 | :param str key: 文件名 676 | :param str upload_id: 分片上传ID 677 | :param str marker: 分页符 678 | :param int max_parts: 一次最多罗列多少分片 679 | 680 | :return: :class:`ListPartsResult ` 681 | """ 682 | resp = await self.__do_object('GET', key, 683 | params={'uploadId': upload_id, 684 | 'part-number-marker': marker, 685 | 'max-parts': str(max_parts)}) 686 | return await self._parse_result(resp, xml_utils.parse_list_parts, models.ListPartsResult) 687 | 688 | async def put_symlink(self, target_key, symlink_key, headers=None): 689 | """创建Symlink。 690 | 691 | :param str target_key: 目标文件,目标文件不能为符号连接 692 | :param str symlink_key: 符号连接类文件,其实质是一个特殊的文件,数据指向目标文件 693 | 694 | :return: :class:`RequestResult ` 695 | """ 696 | headers = headers or {} 697 | headers['x-oss-symlink-target'] = urlquote(target_key, '') 698 | resp = await self.__do_object('PUT', symlink_key, headers=headers, params={Bucket.SYMLINK: ''}) 699 | return models.RequestResult(resp) 700 | 701 | async def get_symlink(self, symlink_key): 702 | """获取符号连接文件的目标文件。 703 | 704 | :param str symlink_key: 符号连接类文件 705 | 706 | :return: :class:`GetSymlinkResult ` 707 | 708 | :raises: 如果文件的符号链接不存在,则抛出 :class:`NoSuchKey ` ;还可能抛出其他异常 709 | """ 710 | resp = await self.__do_object('GET', symlink_key, params={Bucket.SYMLINK: ''}) 711 | return models.GetSymlinkResult(resp) 712 | 713 | async def create_bucket(self, permission=None, input=None): 714 | """创建新的Bucket。 715 | 716 | :param str permission: 指定Bucket的ACL。可以是oss2.BUCKET_ACL_PRIVATE(推荐、缺省)、oss2.BUCKET_ACL_PUBLIC_READ或是 717 | oss2.BUCKET_ACL_PUBLIC_READ_WRITE。 718 | 719 | :param input: :class:`BucketCreateConfig ` object 720 | """ 721 | if permission: 722 | headers = {'x-oss-acl': permission} 723 | else: 724 | headers = None 725 | 726 | data = 
self.__convert_data(models.BucketCreateConfig, xml_utils.to_put_bucket_config, input) 727 | resp = await self.__do_bucket('PUT', headers=headers, data=data) 728 | return models.RequestResult(resp) 729 | 730 | async def delete_bucket(self): 731 | """删除一个Bucket。只有没有任何文件,也没有任何未完成的分片上传的Bucket才能被删除。 732 | 733 | :return: :class:`RequestResult ` 734 | 735 | ":raises: 如果试图删除一个非空Bucket,则抛出 :class:`BucketNotEmpty ` 736 | """ 737 | resp = await self.__do_bucket('DELETE') 738 | return models.RequestResult(resp) 739 | 740 | async def put_bucket_acl(self, permission): 741 | """设置Bucket的ACL。 742 | 743 | :param str permission: 新的ACL,可以是oss2.BUCKET_ACL_PRIVATE、oss2.BUCKET_ACL_PUBLIC_READ或 744 | oss2.BUCKET_ACL_PUBLIC_READ_WRITE 745 | """ 746 | resp = await self.__do_bucket('PUT', headers={'x-oss-acl': permission}, params={Bucket.ACL: ''}) 747 | return models.RequestResult(resp) 748 | 749 | async def get_bucket_acl(self): 750 | """获取Bucket的ACL。 751 | 752 | :return: :class:`GetBucketAclResult ` 753 | """ 754 | resp = await self.__do_bucket('GET', params={Bucket.ACL: ''}) 755 | return await self._parse_result(resp, xml_utils.parse_get_bucket_acl, models.GetBucketAclResult) 756 | 757 | async def put_bucket_cors(self, input): 758 | """设置Bucket的CORS。 759 | 760 | :param input: :class:`BucketCors ` 对象或其他 761 | """ 762 | data = self.__convert_data(models.BucketCors, xml_utils.to_put_bucket_cors, input) 763 | resp = await self.__do_bucket('PUT', data=data, params={Bucket.CORS: ''}) 764 | return models.RequestResult(resp) 765 | 766 | async def get_bucket_cors(self): 767 | """获取Bucket的CORS配置。 768 | 769 | :return: :class:`GetBucketCorsResult ` 770 | """ 771 | resp = await self.__do_bucket('GET', params={Bucket.CORS: ''}) 772 | return await self._parse_result(resp, xml_utils.parse_get_bucket_cors, models.GetBucketCorsResult) 773 | 774 | async def delete_bucket_cors(self): 775 | """删除Bucket的CORS配置。""" 776 | resp = await self.__do_bucket('DELETE', params={Bucket.CORS: ''}) 777 | return models.RequestResult(resp) 778 | 779 | async def put_bucket_lifecycle(self, input): 780 | """设置生命周期管理的配置。 781 | 782 | :param input: :class:`BucketLifecycle ` 对象或其他 783 | """ 784 | data = self.__convert_data(models.BucketLifecycle, xml_utils.to_put_bucket_lifecycle, input) 785 | resp = await self.__do_bucket('PUT', data=data, params={Bucket.LIFECYCLE: ''}) 786 | return models.RequestResult(resp) 787 | 788 | async def get_bucket_lifecycle(self): 789 | """获取生命周期管理配置。 790 | 791 | :return: :class:`GetBucketLifecycleResult ` 792 | 793 | :raises: 如果没有设置Lifecycle,则抛出 :class:`NoSuchLifecycle ` 794 | """ 795 | resp = await self.__do_bucket('GET', params={Bucket.LIFECYCLE: ''}) 796 | return await self._parse_result(resp, xml_utils.parse_get_bucket_lifecycle, models.GetBucketLifecycleResult) 797 | 798 | async def delete_bucket_lifecycle(self): 799 | """删除生命周期管理配置。如果Lifecycle没有设置,也返回成功。""" 800 | resp = await self.__do_bucket('DELETE', params={Bucket.LIFECYCLE: ''}) 801 | return models.RequestResult(resp) 802 | 803 | async def get_bucket_location(self): 804 | """获取Bucket的数据中心。 805 | 806 | :return: :class:`GetBucketLocationResult ` 807 | """ 808 | resp = await self.__do_bucket('GET', params={Bucket.LOCATION: ''}) 809 | return await self._parse_result(resp, xml_utils.parse_get_bucket_location, models.GetBucketLocationResult) 810 | 811 | async def put_bucket_logging(self, input): 812 | """设置Bucket的访问日志功能。 813 | 814 | :param input: :class:`BucketLogging ` 对象或其他 815 | """ 816 | data = self.__convert_data(models.BucketLogging, xml_utils.to_put_bucket_logging, 
input) 817 | resp = await self.__do_bucket('PUT', data=data, params={Bucket.LOGGING: ''}) 818 | return models.RequestResult(resp) 819 | 820 | async def get_bucket_logging(self): 821 | """获取Bucket的访问日志功能配置。 822 | 823 | :return: :class:`GetBucketLoggingResult ` 824 | """ 825 | resp = await self.__do_bucket('GET', params={Bucket.LOGGING: ''}) 826 | return await self._parse_result(resp, xml_utils.parse_get_bucket_logging, models.GetBucketLoggingResult) 827 | 828 | async def delete_bucket_logging(self): 829 | """关闭Bucket的访问日志功能。""" 830 | resp = await self.__do_bucket('DELETE', params={Bucket.LOGGING: ''}) 831 | return models.RequestResult(resp) 832 | 833 | async def put_bucket_referer(self, input): 834 | """为Bucket设置防盗链。 835 | 836 | :param input: :class:`BucketReferer ` 对象或其他 837 | """ 838 | data = self.__convert_data(models.BucketReferer, xml_utils.to_put_bucket_referer, input) 839 | resp = await self.__do_bucket('PUT', data=data, params={Bucket.REFERER: ''}) 840 | return models.RequestResult(resp) 841 | 842 | async def get_bucket_referer(self): 843 | """获取Bucket的防盗链配置。 844 | 845 | :return: :class:`GetBucketRefererResult ` 846 | """ 847 | resp = await self.__do_bucket('GET', params={Bucket.REFERER: ''}) 848 | return await self._parse_result(resp, xml_utils.parse_get_bucket_referer, models.GetBucketRefererResult) 849 | 850 | async def get_bucket_stat(self): 851 | """查看Bucket的状态,目前包括bucket大小,bucket的object数量,bucket正在上传的Multipart Upload事件个数等。 852 | 853 | :return: :class:`GetBucketStatResult ` 854 | """ 855 | resp = await self.__do_bucket('GET', params={Bucket.STAT: ''}) 856 | return await self._parse_result(resp, xml_utils.parse_get_bucket_stat, models.GetBucketStatResult) 857 | 858 | async def get_bucket_info(self): 859 | """获取bucket相关信息,如创建时间,访问Endpoint,Owner与ACL等。 860 | 861 | :return: :class:`GetBucketInfoResult ` 862 | """ 863 | resp = await self.__do_bucket('GET', params={Bucket.BUCKET_INFO: ''}) 864 | return await self._parse_result(resp, xml_utils.parse_get_bucket_info, models.GetBucketInfoResult) 865 | 866 | async def put_bucket_website(self, input): 867 | """为Bucket配置静态网站托管功能。 868 | 869 | :param input: :class:`BucketWebsite ` 870 | """ 871 | data = self.__convert_data(models.BucketWebsite, xml_utils.to_put_bucket_website, input) 872 | resp = await self.__do_bucket('PUT', data=data, params={Bucket.WEBSITE: ''}) 873 | return models.RequestResult(resp) 874 | 875 | async def get_bucket_website(self): 876 | """获取Bucket的静态网站托管配置。 877 | 878 | :return: :class:`GetBucketWebsiteResult ` 879 | 880 | :raises: 如果没有设置静态网站托管,那么就抛出 :class:`NoSuchWebsite ` 881 | """ 882 | resp = await self.__do_bucket('GET', params={Bucket.WEBSITE: ''}) 883 | return await self._parse_result(resp, xml_utils.parse_get_bucket_websiste, models.GetBucketWebsiteResult) 884 | 885 | async def delete_bucket_website(self): 886 | """关闭Bucket的静态网站托管功能。""" 887 | resp = await self.__do_bucket('DELETE', params={Bucket.WEBSITE: ''}) 888 | return models.RequestResult(resp) 889 | 890 | async def create_live_channel(self, channel_name, input): 891 | """创建推流直播频道 892 | 893 | :param str channel_name: 要创建的live channel的名称 894 | :param input: LiveChannelInfo类型,包含了live channel中的描述信息 895 | 896 | :return: :class:`CreateLiveChannelResult ` 897 | """ 898 | data = self.__convert_data(models.LiveChannelInfo, xml_utils.to_create_live_channel, input) 899 | resp = await self.__do_object('PUT', channel_name, data=data, params={Bucket.LIVE: ''}) 900 | return await self._parse_result(resp, xml_utils.parse_create_live_channel, models.CreateLiveChannelResult) 
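    # Note: the live-channel helpers below are typically used together; a
    # channel is created with create_live_channel(), pushed to over the RTMP
    # URL returned by sign_rtmp_url(), monitored via get_live_channel_stat()
    # and get_live_channel_history(), optionally toggled with
    # put_live_channel_status(), and turned into an on-demand playlist with
    # post_vod_playlist(). This summary is inferred from the docstrings in this
    # file, not from upstream oss2 documentation.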
901 | 902 | async def delete_live_channel(self, channel_name): 903 | """删除推流直播频道 904 | 905 | :param str channel_name: 要删除的live channel的名称 906 | """ 907 | resp = await self.__do_object('DELETE', channel_name, params={Bucket.LIVE: ''}) 908 | return models.RequestResult(resp) 909 | 910 | async def get_live_channel(self, channel_name): 911 | """获取直播频道配置 912 | 913 | :param str channel_name: 要获取的live channel的名称 914 | 915 | :return: :class:`GetLiveChannelResult ` 916 | """ 917 | resp = await self.__do_object('GET', channel_name, params={Bucket.LIVE: ''}) 918 | return await self._parse_result(resp, xml_utils.parse_get_live_channel, models.GetLiveChannelResult) 919 | 920 | async def list_live_channel(self, prefix='', marker='', max_keys=100): 921 | """列举出Bucket下所有符合条件的live channel 922 | 923 | param: str prefix: list时channel_id的公共前缀 924 | param: str marker: list时指定的起始标记 925 | param: int max_keys: 本次list返回live channel的最大个数 926 | 927 | return: :class:`ListLiveChannelResult ` 928 | """ 929 | resp = await self.__do_bucket('GET', params={Bucket.LIVE: '', 930 | 'prefix': prefix, 931 | 'marker': marker, 932 | 'max-keys': str(max_keys)}) 933 | return await self._parse_result(resp, xml_utils.parse_list_live_channel, models.ListLiveChannelResult) 934 | 935 | async def get_live_channel_stat(self, channel_name): 936 | """获取live channel当前推流的状态 937 | 938 | param str channel_name: 要获取推流状态的live channel的名称 939 | 940 | return: :class:`GetLiveChannelStatResult ` 941 | """ 942 | resp = await self.__do_object('GET', channel_name, params={Bucket.LIVE: '', Bucket.COMP: 'stat'}) 943 | return await self._parse_result(resp, xml_utils.parse_live_channel_stat, models.GetLiveChannelStatResult) 944 | 945 | async def put_live_channel_status(self, channel_name, status): 946 | """更改live channel的status,仅能在“enabled”和“disabled”两种状态中更改 947 | 948 | param str channel_name: 要更改status的live channel的名称 949 | param str status: live channel的目标status 950 | """ 951 | resp = await self.__do_object('PUT', channel_name, params={Bucket.LIVE: '', Bucket.STATUS: status}) 952 | return models.RequestResult(resp) 953 | 954 | async def get_live_channel_history(self, channel_name): 955 | """获取live channel中最近的最多十次的推流记录,记录中包含推流的起止时间和远端的地址 956 | 957 | param str channel_name: 要获取最近推流记录的live channel的名称 958 | 959 | return: :class:`GetLiveChannelHistoryResult ` 960 | """ 961 | resp = await self.__do_object('GET', channel_name, params={Bucket.LIVE: '', Bucket.COMP: 'history'}) 962 | return await self._parse_result(resp, xml_utils.parse_live_channel_history, models.GetLiveChannelHistoryResult) 963 | 964 | async def post_vod_playlist(self, channel_name, playlist_name, start_time=0, end_time=0): 965 | """根据指定的playlist name以及startTime和endTime生成一个点播的播放列表 966 | 967 | param str channel_name: 要生成点播列表的live channel的名称 968 | param str playlist_name: 要生成点播列表m3u8文件的名称 969 | param int start_time: 点播的起始时间,Unix Time格式,可以使用int(time.time())获取 970 | param int end_time: 点播的结束时间,Unix Time格式,可以使用int(time.time())获取 971 | """ 972 | key = channel_name + "/" + playlist_name 973 | resp = await self.__do_object('POST', key, params={Bucket.VOD: '', 974 | 'startTime': str(start_time), 975 | 'endTime': str(end_time)}) 976 | return models.RequestResult(resp) 977 | 978 | async def _get_bucket_config(self, config): 979 | """获得Bucket某项配置,具体哪种配置由 `config` 指定。该接口直接返回 `RequestResult` 对象。 980 | 通过read()接口可以获得XML字符串。不建议使用。 981 | 982 | :param str config: 可以是 `Bucket.ACL` 、 `Bucket.LOGGING` 等。 983 | 984 | :return: :class:`RequestResult ` 985 | """ 986 | return await self.__do_bucket('GET', params={config: 
''}) 987 | 988 | async def __do_object(self, method, key, **kwargs): 989 | return await self._do(method, self.bucket_name, key, **kwargs) 990 | 991 | async def __do_bucket(self, method, **kwargs): 992 | return await self._do(method, self.bucket_name, '', **kwargs) 993 | 994 | def __convert_data(self, klass, converter, data): 995 | if isinstance(data, klass): 996 | return converter(data) 997 | else: 998 | return data 999 | 1000 | 1001 | def _normalize_endpoint(endpoint): 1002 | if not endpoint.startswith('http://') and not endpoint.startswith('https://'): 1003 | return 'http://' + endpoint 1004 | else: 1005 | return endpoint 1006 | 1007 | 1008 | _ENDPOINT_TYPE_ALIYUN = 0 1009 | _ENDPOINT_TYPE_CNAME = 1 1010 | _ENDPOINT_TYPE_IP = 2 1011 | 1012 | 1013 | def _make_range_string(range): 1014 | if range is None: 1015 | return '' 1016 | 1017 | start = range[0] 1018 | last = range[1] 1019 | 1020 | if start is None and last is None: 1021 | return '' 1022 | 1023 | return 'bytes=' + _range(start, last) 1024 | 1025 | 1026 | def _range(start, last): 1027 | def to_str(pos): 1028 | if pos is None: 1029 | return '' 1030 | else: 1031 | return str(pos) 1032 | 1033 | return to_str(start) + '-' + to_str(last) 1034 | 1035 | 1036 | def _determine_endpoint_type(netloc, is_cname, bucket_name): 1037 | if utils.is_ip_or_localhost(netloc): 1038 | return _ENDPOINT_TYPE_IP 1039 | 1040 | if is_cname: 1041 | return _ENDPOINT_TYPE_CNAME 1042 | 1043 | if utils.is_valid_bucket_name(bucket_name): 1044 | return _ENDPOINT_TYPE_ALIYUN 1045 | else: 1046 | return _ENDPOINT_TYPE_IP 1047 | 1048 | 1049 | class _UrlMaker(object): 1050 | def __init__(self, endpoint, is_cname): 1051 | p = urlparse(endpoint) 1052 | 1053 | self.scheme = p.scheme 1054 | self.netloc = p.netloc 1055 | self.is_cname = is_cname 1056 | 1057 | def __call__(self, bucket_name, key, slash_safe=False): 1058 | self.type = _determine_endpoint_type(self.netloc, self.is_cname, bucket_name) 1059 | 1060 | safe = '/' if slash_safe is True else '' 1061 | key = urlquote(key, safe=safe) 1062 | 1063 | if self.type == _ENDPOINT_TYPE_CNAME: 1064 | return '{0}://{1}/{2}'.format(self.scheme, self.netloc, key) 1065 | 1066 | if self.type == _ENDPOINT_TYPE_IP: 1067 | if bucket_name: 1068 | return '{0}://{1}/{2}/{3}'.format(self.scheme, self.netloc, bucket_name, key) 1069 | else: 1070 | return '{0}://{1}/{2}'.format(self.scheme, self.netloc, key) 1071 | if not bucket_name: 1072 | assert not key 1073 | return '{0}://{1}'.format(self.scheme, self.netloc) 1074 | 1075 | return '{0}://{1}.{2}/{3}'.format(self.scheme, bucket_name, self.netloc, key) 1076 | --------------------------------------------------------------------------------
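A rough end-to-end sketch of the Bucket API defined above, covering the multipart-upload, pre-signed-URL, exception-handling and batch-delete calls that the README example does not exercise. The endpoint, credentials, bucket name and object key are placeholders, and ``PartInfo`` is taken from the upstream ``oss2.models`` module.

.. code-block:: python

    import asyncio

    import asyncoss
    from asyncoss import exceptions
    from oss2.models import PartInfo

    endpoint = 'http://oss-cn-beijing.aliyuncs.com'
    auth = asyncoss.Auth('<access-key-id>', '<access-key-secret>')

    async def main():
        async with asyncoss.Bucket(auth, endpoint, '<bucket-name>') as bucket:
            # Multipart upload: init -> upload_part -> complete. OSS requires
            # every part except the last one to be at least 100 KB.
            key = 'big-file.bin'
            upload = await bucket.init_multipart_upload(key)
            parts = []
            for part_number, chunk in enumerate((b'a' * 200 * 1024, b'b' * 1024), start=1):
                part = await bucket.upload_part(key, upload.upload_id, part_number, chunk)
                parts.append(PartInfo(part_number, part.etag))
            await bucket.complete_multipart_upload(key, upload.upload_id, parts)

            # sign_url() is synchronous: build a pre-signed GET URL valid for 10 minutes.
            print(bucket.sign_url('GET', key, 10 * 60))

            # Missing objects raise NoSuchKey, which object_exists() relies on.
            try:
                await bucket.get_object_meta('does-not-exist.txt')
            except exceptions.NoSuchKey:
                pass

            # Clean up with a batch delete.
            await bucket.batch_delete_objects([key])

    asyncio.get_event_loop().run_until_complete(main())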